# (extraction artifact removed: a stray markdown table header that was not valid Python)
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from itertools import chain
from google.api_core.protobuf_helpers import get_messages
from google.ads.google_ads import util
# Fail fast on unsupported interpreters: the lazy-import machinery in this
# module is only exercised on Python 3.6+, so refuse to import elsewhere.
if not sys.version_info >= (3, 6):
    raise ImportError("This module requires Python 3.6 or later.")
_lazy_name_to_package_map = {
'ad_asset_pb2':"google.ads.google_ads.v4.proto.common",
'ad_type_infos_pb2':"google.ads.google_ads.v4.proto.common",
'asset_types_pb2':"google.ads.google_ads.v4.proto.common",
'bidding_pb2':"google.ads.google_ads.v4.proto.common",
'click_location_pb2':"google.ads.google_ads.v4.proto.common",
'criteria_pb2':"google.ads.google_ads.v4.proto.common",
'criterion_category_availability_pb2':"google.ads.google_ads.v4.proto.common",
'custom_parameter_pb2':"google.ads.google_ads.v4.proto.common",
'dates_pb2':"google.ads.google_ads.v4.proto.common",
'explorer_auto_optimizer_setting_pb2':"google.ads.google_ads.v4.proto.common",
'extensions_pb2':"google.ads.google_ads.v4.proto.common",
'feed_common_pb2':"google.ads.google_ads.v4.proto.common",
'final_app_url_pb2':"google.ads.google_ads.v4.proto.common",
'frequency_cap_pb2':"google.ads.google_ads.v4.proto.common",
'keyword_plan_common_pb2':"google.ads.google_ads.v4.proto.common",
'matching_function_pb2':"google.ads.google_ads.v4.proto.common",
'metrics_pb2':"google.ads.google_ads.v4.proto.common",
'offline_user_data_pb2':"google.ads.google_ads.v4.proto.common",
'policy_pb2':"google.ads.google_ads.v4.proto.common",
'real_time_bidding_setting_pb2':"google.ads.google_ads.v4.proto.common",
'segments_pb2':"google.ads.google_ads.v4.proto.common",
'simulation_pb2':"google.ads.google_ads.v4.proto.common",
'tag_snippet_pb2':"google.ads.google_ads.v4.proto.common",
'targeting_setting_pb2':"google.ads.google_ads.v4.proto.common",
'text_label_pb2':"google.ads.google_ads.v4.proto.common",
'url_collection_pb2':"google.ads.google_ads.v4.proto.common",
'user_lists_pb2':"google.ads.google_ads.v4.proto.common",
'value_pb2':"google.ads.google_ads.v4.proto.common",
'access_reason_pb2':"google.ads.google_ads.v4.proto.enums",
'access_role_pb2':"google.ads.google_ads.v4.proto.enums",
'account_budget_proposal_status_pb2':"google.ads.google_ads.v4.proto.enums",
'account_budget_proposal_type_pb2':"google.ads.google_ads.v4.proto.enums",
'account_budget_status_pb2':"google.ads.google_ads.v4.proto.enums",
'account_link_status_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_customizer_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_group_ad_rotation_mode_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_group_ad_status_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_group_criterion_approval_status_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_group_criterion_status_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_group_status_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_group_type_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_network_type_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_serving_optimization_status_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_strength_pb2':"google.ads.google_ads.v4.proto.enums",
'ad_type_pb2':"google.ads.google_ads.v4.proto.enums",
'advertising_channel_sub_type_pb2':"google.ads.google_ads.v4.proto.enums",
'advertising_channel_type_pb2':"google.ads.google_ads.v4.proto.enums",
'affiliate_location_feed_relationship_type_pb2':"google.ads.google_ads.v4.proto.enums",
'affiliate_location_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'age_range_type_pb2':"google.ads.google_ads.v4.proto.enums",
'app_campaign_app_store_pb2':"google.ads.google_ads.v4.proto.enums",
'app_campaign_bidding_strategy_goal_type_pb2':"google.ads.google_ads.v4.proto.enums",
'app_payment_model_type_pb2':"google.ads.google_ads.v4.proto.enums",
'app_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'app_store_pb2':"google.ads.google_ads.v4.proto.enums",
'app_url_operating_system_type_pb2':"google.ads.google_ads.v4.proto.enums",
'asset_field_type_pb2':"google.ads.google_ads.v4.proto.enums",
'asset_performance_label_pb2':"google.ads.google_ads.v4.proto.enums",
'asset_type_pb2':"google.ads.google_ads.v4.proto.enums",
'attribution_model_pb2':"google.ads.google_ads.v4.proto.enums",
'batch_job_status_pb2':"google.ads.google_ads.v4.proto.enums",
'bid_modifier_source_pb2':"google.ads.google_ads.v4.proto.enums",
'bidding_source_pb2':"google.ads.google_ads.v4.proto.enums",
'bidding_strategy_status_pb2':"google.ads.google_ads.v4.proto.enums",
'bidding_strategy_type_pb2':"google.ads.google_ads.v4.proto.enums",
'billing_setup_status_pb2':"google.ads.google_ads.v4.proto.enums",
'brand_safety_suitability_pb2':"google.ads.google_ads.v4.proto.enums",
'budget_delivery_method_pb2':"google.ads.google_ads.v4.proto.enums",
'budget_period_pb2':"google.ads.google_ads.v4.proto.enums",
'budget_status_pb2':"google.ads.google_ads.v4.proto.enums",
'budget_type_pb2':"google.ads.google_ads.v4.proto.enums",
'call_conversion_reporting_state_pb2':"google.ads.google_ads.v4.proto.enums",
'call_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'callout_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_criterion_status_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_draft_status_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_experiment_status_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_experiment_traffic_split_type_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_experiment_type_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_serving_status_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_shared_set_status_pb2':"google.ads.google_ads.v4.proto.enums",
'campaign_status_pb2':"google.ads.google_ads.v4.proto.enums",
'change_status_operation_pb2':"google.ads.google_ads.v4.proto.enums",
'change_status_resource_type_pb2':"google.ads.google_ads.v4.proto.enums",
'click_type_pb2':"google.ads.google_ads.v4.proto.enums",
'content_label_type_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_action_category_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_action_counting_type_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_action_status_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_action_type_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_adjustment_type_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_attribution_event_type_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_lag_bucket_pb2':"google.ads.google_ads.v4.proto.enums",
'conversion_or_adjustment_lag_bucket_pb2':"google.ads.google_ads.v4.proto.enums",
'criterion_category_channel_availability_mode_pb2':"google.ads.google_ads.v4.proto.enums",
'criterion_category_locale_availability_mode_pb2':"google.ads.google_ads.v4.proto.enums",
'criterion_system_serving_status_pb2':"google.ads.google_ads.v4.proto.enums",
'criterion_type_pb2':"google.ads.google_ads.v4.proto.enums",
'custom_interest_member_type_pb2':"google.ads.google_ads.v4.proto.enums",
'custom_interest_status_pb2':"google.ads.google_ads.v4.proto.enums",
'custom_interest_type_pb2':"google.ads.google_ads.v4.proto.enums",
'custom_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'customer_match_upload_key_type_pb2':"google.ads.google_ads.v4.proto.enums",
'customer_pay_per_conversion_eligibility_failure_reason_pb2':"google.ads.google_ads.v4.proto.enums",
'data_driven_model_status_pb2':"google.ads.google_ads.v4.proto.enums",
'day_of_week_pb2':"google.ads.google_ads.v4.proto.enums",
'device_pb2':"google.ads.google_ads.v4.proto.enums",
'display_ad_format_setting_pb2':"google.ads.google_ads.v4.proto.enums",
'display_upload_product_type_pb2':"google.ads.google_ads.v4.proto.enums",
'distance_bucket_pb2':"google.ads.google_ads.v4.proto.enums",
'dsa_page_feed_criterion_field_pb2':"google.ads.google_ads.v4.proto.enums",
'education_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'extension_setting_device_pb2':"google.ads.google_ads.v4.proto.enums",
'extension_type_pb2':"google.ads.google_ads.v4.proto.enums",
'external_conversion_source_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_attribute_type_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_quality_approval_status_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_quality_disapproval_reason_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_status_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_target_device_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_target_status_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_target_type_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_item_validation_status_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_link_status_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_mapping_criterion_type_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_mapping_status_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_origin_pb2':"google.ads.google_ads.v4.proto.enums",
'feed_status_pb2':"google.ads.google_ads.v4.proto.enums",
'flight_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'frequency_cap_event_type_pb2':"google.ads.google_ads.v4.proto.enums",
'frequency_cap_level_pb2':"google.ads.google_ads.v4.proto.enums",
'frequency_cap_time_unit_pb2':"google.ads.google_ads.v4.proto.enums",
'gender_type_pb2':"google.ads.google_ads.v4.proto.enums",
'geo_target_constant_status_pb2':"google.ads.google_ads.v4.proto.enums",
'geo_targeting_restriction_pb2':"google.ads.google_ads.v4.proto.enums",
'geo_targeting_type_pb2':"google.ads.google_ads.v4.proto.enums",
'google_ads_field_category_pb2':"google.ads.google_ads.v4.proto.enums",
'google_ads_field_data_type_pb2':"google.ads.google_ads.v4.proto.enums",
'hotel_date_selection_type_pb2':"google.ads.google_ads.v4.proto.enums",
'hotel_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'hotel_price_bucket_pb2':"google.ads.google_ads.v4.proto.enums",
'hotel_rate_type_pb2':"google.ads.google_ads.v4.proto.enums",
'income_range_type_pb2':"google.ads.google_ads.v4.proto.enums",
'interaction_event_type_pb2':"google.ads.google_ads.v4.proto.enums",
'interaction_type_pb2':"google.ads.google_ads.v4.proto.enums",
'invoice_type_pb2':"google.ads.google_ads.v4.proto.enums",
'job_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'keyword_match_type_pb2':"google.ads.google_ads.v4.proto.enums",
'keyword_plan_competition_level_pb2':"google.ads.google_ads.v4.proto.enums",
'keyword_plan_forecast_interval_pb2':"google.ads.google_ads.v4.proto.enums",
'keyword_plan_network_pb2':"google.ads.google_ads.v4.proto.enums",
'label_status_pb2':"google.ads.google_ads.v4.proto.enums",
'legacy_app_install_ad_app_store_pb2':"google.ads.google_ads.v4.proto.enums",
'linked_account_type_pb2':"google.ads.google_ads.v4.proto.enums",
'listing_group_type_pb2':"google.ads.google_ads.v4.proto.enums",
'local_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'location_extension_targeting_criterion_field_pb2':"google.ads.google_ads.v4.proto.enums",
'location_group_radius_units_pb2':"google.ads.google_ads.v4.proto.enums",
'location_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'location_source_type_pb2':"google.ads.google_ads.v4.proto.enums",
'manager_link_status_pb2':"google.ads.google_ads.v4.proto.enums",
'matching_function_context_type_pb2':"google.ads.google_ads.v4.proto.enums",
'matching_function_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'media_type_pb2':"google.ads.google_ads.v4.proto.enums",
'merchant_center_link_status_pb2':"google.ads.google_ads.v4.proto.enums",
'message_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'mime_type_pb2':"google.ads.google_ads.v4.proto.enums",
'minute_of_hour_pb2':"google.ads.google_ads.v4.proto.enums",
'mobile_app_vendor_pb2':"google.ads.google_ads.v4.proto.enums",
'mobile_device_type_pb2':"google.ads.google_ads.v4.proto.enums",
'month_of_year_pb2':"google.ads.google_ads.v4.proto.enums",
'negative_geo_target_type_pb2':"google.ads.google_ads.v4.proto.enums",
'offline_user_data_job_failure_reason_pb2':"google.ads.google_ads.v4.proto.enums",
'offline_user_data_job_status_pb2':"google.ads.google_ads.v4.proto.enums",
'offline_user_data_job_type_pb2':"google.ads.google_ads.v4.proto.enums",
'operating_system_version_operator_type_pb2':"google.ads.google_ads.v4.proto.enums",
'optimization_goal_type_pb2':"google.ads.google_ads.v4.proto.enums",
'page_one_promoted_strategy_goal_pb2':"google.ads.google_ads.v4.proto.enums",
'parental_status_type_pb2':"google.ads.google_ads.v4.proto.enums",
'payment_mode_pb2':"google.ads.google_ads.v4.proto.enums",
'placeholder_type_pb2':"google.ads.google_ads.v4.proto.enums",
'placement_type_pb2':"google.ads.google_ads.v4.proto.enums",
'policy_approval_status_pb2':"google.ads.google_ads.v4.proto.enums",
'policy_review_status_pb2':"google.ads.google_ads.v4.proto.enums",
'policy_topic_entry_type_pb2':"google.ads.google_ads.v4.proto.enums",
'policy_topic_evidence_destination_mismatch_url_type_pb2':"google.ads.google_ads.v4.proto.enums",
'policy_topic_evidence_destination_not_working_device_pb2':"google.ads.google_ads.v4.proto.enums",
'policy_topic_evidence_destination_not_working_dns_error_type_pb2':"google.ads.google_ads.v4.proto.enums",
'positive_geo_target_type_pb2':"google.ads.google_ads.v4.proto.enums",
'preferred_content_type_pb2':"google.ads.google_ads.v4.proto.enums",
'price_extension_price_qualifier_pb2':"google.ads.google_ads.v4.proto.enums",
'price_extension_price_unit_pb2':"google.ads.google_ads.v4.proto.enums",
'price_extension_type_pb2':"google.ads.google_ads.v4.proto.enums",
'price_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'product_bidding_category_level_pb2':"google.ads.google_ads.v4.proto.enums",
'product_bidding_category_status_pb2':"google.ads.google_ads.v4.proto.enums",
'product_channel_exclusivity_pb2':"google.ads.google_ads.v4.proto.enums",
'product_channel_pb2':"google.ads.google_ads.v4.proto.enums",
'product_condition_pb2':"google.ads.google_ads.v4.proto.enums",
'product_custom_attribute_index_pb2':"google.ads.google_ads.v4.proto.enums",
'product_type_level_pb2':"google.ads.google_ads.v4.proto.enums",
'promotion_extension_discount_modifier_pb2':"google.ads.google_ads.v4.proto.enums",
'promotion_extension_occasion_pb2':"google.ads.google_ads.v4.proto.enums",
'promotion_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'proximity_radius_units_pb2':"google.ads.google_ads.v4.proto.enums",
'quality_score_bucket_pb2':"google.ads.google_ads.v4.proto.enums",
'reach_plan_ad_length_pb2':"google.ads.google_ads.v4.proto.enums",
'reach_plan_age_range_pb2':"google.ads.google_ads.v4.proto.enums",
'reach_plan_network_pb2':"google.ads.google_ads.v4.proto.enums",
'real_estate_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'recommendation_type_pb2':"google.ads.google_ads.v4.proto.enums",
'search_engine_results_page_type_pb2':"google.ads.google_ads.v4.proto.enums",
'search_term_match_type_pb2':"google.ads.google_ads.v4.proto.enums",
'search_term_targeting_status_pb2':"google.ads.google_ads.v4.proto.enums",
'served_asset_field_type_pb2':"google.ads.google_ads.v4.proto.enums",
'shared_set_status_pb2':"google.ads.google_ads.v4.proto.enums",
'shared_set_type_pb2':"google.ads.google_ads.v4.proto.enums",
'simulation_modification_method_pb2':"google.ads.google_ads.v4.proto.enums",
'simulation_type_pb2':"google.ads.google_ads.v4.proto.enums",
'sitelink_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'slot_pb2':"google.ads.google_ads.v4.proto.enums",
'spending_limit_type_pb2':"google.ads.google_ads.v4.proto.enums",
'structured_snippet_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'summary_row_setting_pb2':"google.ads.google_ads.v4.proto.enums",
'system_managed_entity_source_pb2':"google.ads.google_ads.v4.proto.enums",
'target_cpa_opt_in_recommendation_goal_pb2':"google.ads.google_ads.v4.proto.enums",
'target_impression_share_location_pb2':"google.ads.google_ads.v4.proto.enums",
'targeting_dimension_pb2':"google.ads.google_ads.v4.proto.enums",
'time_type_pb2':"google.ads.google_ads.v4.proto.enums",
'tracking_code_page_format_pb2':"google.ads.google_ads.v4.proto.enums",
'tracking_code_type_pb2':"google.ads.google_ads.v4.proto.enums",
'travel_placeholder_field_pb2':"google.ads.google_ads.v4.proto.enums",
'user_interest_taxonomy_type_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_access_status_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_closing_reason_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_combined_rule_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_crm_data_source_type_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_date_rule_item_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_logical_rule_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_membership_status_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_number_rule_item_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_prepopulation_status_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_rule_type_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_size_range_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_string_rule_item_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'user_list_type_pb2':"google.ads.google_ads.v4.proto.enums",
'vanity_pharma_display_url_mode_pb2':"google.ads.google_ads.v4.proto.enums",
'vanity_pharma_text_pb2':"google.ads.google_ads.v4.proto.enums",
'webpage_condition_operand_pb2':"google.ads.google_ads.v4.proto.enums",
'webpage_condition_operator_pb2':"google.ads.google_ads.v4.proto.enums",
'access_invitation_error_pb2':"google.ads.google_ads.v4.proto.errors",
'account_budget_proposal_error_pb2':"google.ads.google_ads.v4.proto.errors",
'account_link_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_customizer_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_group_ad_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_group_bid_modifier_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_group_criterion_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_group_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_group_feed_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_parameter_error_pb2':"google.ads.google_ads.v4.proto.errors",
'ad_sharing_error_pb2':"google.ads.google_ads.v4.proto.errors",
'adx_error_pb2':"google.ads.google_ads.v4.proto.errors",
'asset_error_pb2':"google.ads.google_ads.v4.proto.errors",
'asset_link_error_pb2':"google.ads.google_ads.v4.proto.errors",
'authentication_error_pb2':"google.ads.google_ads.v4.proto.errors",
'authorization_error_pb2':"google.ads.google_ads.v4.proto.errors",
'batch_job_error_pb2':"google.ads.google_ads.v4.proto.errors",
'bidding_error_pb2':"google.ads.google_ads.v4.proto.errors",
'bidding_strategy_error_pb2':"google.ads.google_ads.v4.proto.errors",
'billing_setup_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_budget_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_criterion_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_draft_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_experiment_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_feed_error_pb2':"google.ads.google_ads.v4.proto.errors",
'campaign_shared_set_error_pb2':"google.ads.google_ads.v4.proto.errors",
'change_status_error_pb2':"google.ads.google_ads.v4.proto.errors",
'collection_size_error_pb2':"google.ads.google_ads.v4.proto.errors",
'context_error_pb2':"google.ads.google_ads.v4.proto.errors",
'conversion_action_error_pb2':"google.ads.google_ads.v4.proto.errors",
'conversion_adjustment_upload_error_pb2':"google.ads.google_ads.v4.proto.errors",
'conversion_upload_error_pb2':"google.ads.google_ads.v4.proto.errors",
'country_code_error_pb2':"google.ads.google_ads.v4.proto.errors",
'criterion_error_pb2':"google.ads.google_ads.v4.proto.errors",
'currency_code_error_pb2':"google.ads.google_ads.v4.proto.errors",
'custom_interest_error_pb2':"google.ads.google_ads.v4.proto.errors",
'customer_client_link_error_pb2':"google.ads.google_ads.v4.proto.errors",
'customer_error_pb2':"google.ads.google_ads.v4.proto.errors",
'customer_feed_error_pb2':"google.ads.google_ads.v4.proto.errors",
'customer_manager_link_error_pb2':"google.ads.google_ads.v4.proto.errors",
'database_error_pb2':"google.ads.google_ads.v4.proto.errors",
'date_error_pb2':"google.ads.google_ads.v4.proto.errors",
'date_range_error_pb2':"google.ads.google_ads.v4.proto.errors",
'distinct_error_pb2':"google.ads.google_ads.v4.proto.errors",
'enum_error_pb2':"google.ads.google_ads.v4.proto.errors",
'errors_pb2':"google.ads.google_ads.v4.proto.errors",
'extension_feed_item_error_pb2':"google.ads.google_ads.v4.proto.errors",
'extension_setting_error_pb2':"google.ads.google_ads.v4.proto.errors",
'feed_attribute_reference_error_pb2':"google.ads.google_ads.v4.proto.errors",
'feed_error_pb2':"google.ads.google_ads.v4.proto.errors",
'feed_item_error_pb2':"google.ads.google_ads.v4.proto.errors",
'feed_item_target_error_pb2':"google.ads.google_ads.v4.proto.errors",
'feed_item_validation_error_pb2':"google.ads.google_ads.v4.proto.errors",
'feed_mapping_error_pb2':"google.ads.google_ads.v4.proto.errors",
'field_error_pb2':"google.ads.google_ads.v4.proto.errors",
'field_mask_error_pb2':"google.ads.google_ads.v4.proto.errors",
'function_error_pb2':"google.ads.google_ads.v4.proto.errors",
'function_parsing_error_pb2':"google.ads.google_ads.v4.proto.errors",
'geo_target_constant_suggestion_error_pb2':"google.ads.google_ads.v4.proto.errors",
'header_error_pb2':"google.ads.google_ads.v4.proto.errors",
'id_error_pb2':"google.ads.google_ads.v4.proto.errors",
'image_error_pb2':"google.ads.google_ads.v4.proto.errors",
'internal_error_pb2':"google.ads.google_ads.v4.proto.errors",
'invoice_error_pb2':"google.ads.google_ads.v4.proto.errors",
'keyword_plan_ad_group_error_pb2':"google.ads.google_ads.v4.proto.errors",
'keyword_plan_ad_group_keyword_error_pb2':"google.ads.google_ads.v4.proto.errors",
'keyword_plan_campaign_error_pb2':"google.ads.google_ads.v4.proto.errors",
'keyword_plan_campaign_keyword_error_pb2':"google.ads.google_ads.v4.proto.errors",
'keyword_plan_error_pb2':"google.ads.google_ads.v4.proto.errors",
'keyword_plan_idea_error_pb2':"google.ads.google_ads.v4.proto.errors",
'label_error_pb2':"google.ads.google_ads.v4.proto.errors",
'language_code_error_pb2':"google.ads.google_ads.v4.proto.errors",
'list_operation_error_pb2':"google.ads.google_ads.v4.proto.errors",
'manager_link_error_pb2':"google.ads.google_ads.v4.proto.errors",
'media_bundle_error_pb2':"google.ads.google_ads.v4.proto.errors",
'media_file_error_pb2':"google.ads.google_ads.v4.proto.errors",
'media_upload_error_pb2':"google.ads.google_ads.v4.proto.errors",
'multiplier_error_pb2':"google.ads.google_ads.v4.proto.errors",
'mutate_error_pb2':"google.ads.google_ads.v4.proto.errors",
'new_resource_creation_error_pb2':"google.ads.google_ads.v4.proto.errors",
'not_empty_error_pb2':"google.ads.google_ads.v4.proto.errors",
'not_whitelisted_error_pb2':"google.ads.google_ads.v4.proto.errors",
'null_error_pb2':"google.ads.google_ads.v4.proto.errors",
'offline_user_data_job_error_pb2':"google.ads.google_ads.v4.proto.errors",
'operation_access_denied_error_pb2':"google.ads.google_ads.v4.proto.errors",
'operator_error_pb2':"google.ads.google_ads.v4.proto.errors",
'partial_failure_error_pb2':"google.ads.google_ads.v4.proto.errors",
'payments_account_error_pb2':"google.ads.google_ads.v4.proto.errors",
'policy_finding_error_pb2':"google.ads.google_ads.v4.proto.errors",
'policy_validation_parameter_error_pb2':"google.ads.google_ads.v4.proto.errors",
'policy_violation_error_pb2':"google.ads.google_ads.v4.proto.errors",
'query_error_pb2':"google.ads.google_ads.v4.proto.errors",
'quota_error_pb2':"google.ads.google_ads.v4.proto.errors",
'range_error_pb2':"google.ads.google_ads.v4.proto.errors",
'reach_plan_error_pb2':"google.ads.google_ads.v4.proto.errors",
'recommendation_error_pb2':"google.ads.google_ads.v4.proto.errors",
'region_code_error_pb2':"google.ads.google_ads.v4.proto.errors",
'request_error_pb2':"google.ads.google_ads.v4.proto.errors",
'resource_access_denied_error_pb2':"google.ads.google_ads.v4.proto.errors",
'resource_count_limit_exceeded_error_pb2':"google.ads.google_ads.v4.proto.errors",
'setting_error_pb2':"google.ads.google_ads.v4.proto.errors",
'shared_criterion_error_pb2':"google.ads.google_ads.v4.proto.errors",
'shared_set_error_pb2':"google.ads.google_ads.v4.proto.errors",
'size_limit_error_pb2':"google.ads.google_ads.v4.proto.errors",
'string_format_error_pb2':"google.ads.google_ads.v4.proto.errors",
'string_length_error_pb2':"google.ads.google_ads.v4.proto.errors",
'third_party_app_analytics_link_error_pb2':"google.ads.google_ads.v4.proto.errors",
'time_zone_error_pb2':"google.ads.google_ads.v4.proto.errors",
'url_field_error_pb2':"google.ads.google_ads.v4.proto.errors",
'user_data_error_pb2':"google.ads.google_ads.v4.proto.errors",
'user_list_error_pb2':"google.ads.google_ads.v4.proto.errors",
'youtube_video_registration_error_pb2':"google.ads.google_ads.v4.proto.errors",
'account_budget_pb2':"google.ads.google_ads.v4.proto.resources",
'account_budget_proposal_pb2':"google.ads.google_ads.v4.proto.resources",
'account_link_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_ad_asset_view_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_ad_label_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_ad_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_audience_view_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_bid_modifier_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_criterion_label_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_criterion_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_criterion_simulation_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_extension_setting_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_feed_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_label_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_group_simulation_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_parameter_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_pb2':"google.ads.google_ads.v4.proto.resources",
'ad_schedule_view_pb2':"google.ads.google_ads.v4.proto.resources",
'age_range_view_pb2':"google.ads.google_ads.v4.proto.resources",
'asset_pb2':"google.ads.google_ads.v4.proto.resources",
'batch_job_pb2':"google.ads.google_ads.v4.proto.resources",
'bidding_strategy_pb2':"google.ads.google_ads.v4.proto.resources",
'billing_setup_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_audience_view_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_bid_modifier_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_budget_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_criterion_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_criterion_simulation_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_draft_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_experiment_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_extension_setting_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_feed_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_label_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_pb2':"google.ads.google_ads.v4.proto.resources",
'campaign_shared_set_pb2':"google.ads.google_ads.v4.proto.resources",
'carrier_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'change_status_pb2':"google.ads.google_ads.v4.proto.resources",
'click_view_pb2':"google.ads.google_ads.v4.proto.resources",
'conversion_action_pb2':"google.ads.google_ads.v4.proto.resources",
'currency_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'custom_interest_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_client_link_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_client_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_extension_setting_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_feed_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_label_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_manager_link_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_negative_criterion_pb2':"google.ads.google_ads.v4.proto.resources",
'customer_pb2':"google.ads.google_ads.v4.proto.resources",
'detail_placement_view_pb2':"google.ads.google_ads.v4.proto.resources",
'display_keyword_view_pb2':"google.ads.google_ads.v4.proto.resources",
'distance_view_pb2':"google.ads.google_ads.v4.proto.resources",
'domain_category_pb2':"google.ads.google_ads.v4.proto.resources",
'dynamic_search_ads_search_term_view_pb2':"google.ads.google_ads.v4.proto.resources",
'expanded_landing_page_view_pb2':"google.ads.google_ads.v4.proto.resources",
'extension_feed_item_pb2':"google.ads.google_ads.v4.proto.resources",
'feed_item_pb2':"google.ads.google_ads.v4.proto.resources",
'feed_item_target_pb2':"google.ads.google_ads.v4.proto.resources",
'feed_mapping_pb2':"google.ads.google_ads.v4.proto.resources",
'feed_pb2':"google.ads.google_ads.v4.proto.resources",
'feed_placeholder_view_pb2':"google.ads.google_ads.v4.proto.resources",
'gender_view_pb2':"google.ads.google_ads.v4.proto.resources",
'geo_target_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'geographic_view_pb2':"google.ads.google_ads.v4.proto.resources",
'google_ads_field_pb2':"google.ads.google_ads.v4.proto.resources",
'group_placement_view_pb2':"google.ads.google_ads.v4.proto.resources",
'hotel_group_view_pb2':"google.ads.google_ads.v4.proto.resources",
'hotel_performance_view_pb2':"google.ads.google_ads.v4.proto.resources",
'income_range_view_pb2':"google.ads.google_ads.v4.proto.resources",
'invoice_pb2':"google.ads.google_ads.v4.proto.resources",
'keyword_plan_ad_group_keyword_pb2':"google.ads.google_ads.v4.proto.resources",
'keyword_plan_ad_group_pb2':"google.ads.google_ads.v4.proto.resources",
'keyword_plan_campaign_keyword_pb2':"google.ads.google_ads.v4.proto.resources",
'keyword_plan_campaign_pb2':"google.ads.google_ads.v4.proto.resources",
'keyword_plan_pb2':"google.ads.google_ads.v4.proto.resources",
'keyword_view_pb2':"google.ads.google_ads.v4.proto.resources",
'label_pb2':"google.ads.google_ads.v4.proto.resources",
'landing_page_view_pb2':"google.ads.google_ads.v4.proto.resources",
'language_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'location_view_pb2':"google.ads.google_ads.v4.proto.resources",
'managed_placement_view_pb2':"google.ads.google_ads.v4.proto.resources",
'media_file_pb2':"google.ads.google_ads.v4.proto.resources",
'merchant_center_link_pb2':"google.ads.google_ads.v4.proto.resources",
'mobile_app_category_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'mobile_device_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'offline_user_data_job_pb2':"google.ads.google_ads.v4.proto.resources",
'operating_system_version_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'paid_organic_search_term_view_pb2':"google.ads.google_ads.v4.proto.resources",
'parental_status_view_pb2':"google.ads.google_ads.v4.proto.resources",
'payments_account_pb2':"google.ads.google_ads.v4.proto.resources",
'product_bidding_category_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'product_group_view_pb2':"google.ads.google_ads.v4.proto.resources",
'recommendation_pb2':"google.ads.google_ads.v4.proto.resources",
'remarketing_action_pb2':"google.ads.google_ads.v4.proto.resources",
'search_term_view_pb2':"google.ads.google_ads.v4.proto.resources",
'shared_criterion_pb2':"google.ads.google_ads.v4.proto.resources",
'shared_set_pb2':"google.ads.google_ads.v4.proto.resources",
'shopping_performance_view_pb2':"google.ads.google_ads.v4.proto.resources",
'third_party_app_analytics_link_pb2':"google.ads.google_ads.v4.proto.resources",
'topic_constant_pb2':"google.ads.google_ads.v4.proto.resources",
'topic_view_pb2':"google.ads.google_ads.v4.proto.resources",
'user_interest_pb2':"google.ads.google_ads.v4.proto.resources",
'user_list_pb2':"google.ads.google_ads.v4.proto.resources",
'user_location_view_pb2':"google.ads.google_ads.v4.proto.resources",
'video_pb2':"google.ads.google_ads.v4.proto.resources",
'account_budget_proposal_service_pb2':"google.ads.google_ads.v4.proto.services",
'account_budget_service_pb2':"google.ads.google_ads.v4.proto.services",
'account_link_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_ad_asset_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_ad_label_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_ad_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_audience_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_bid_modifier_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_criterion_label_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_criterion_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_criterion_simulation_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_extension_setting_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_feed_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_label_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_group_simulation_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_parameter_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_schedule_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'ad_service_pb2':"google.ads.google_ads.v4.proto.services",
'age_range_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'asset_service_pb2':"google.ads.google_ads.v4.proto.services",
'batch_job_service_pb2':"google.ads.google_ads.v4.proto.services",
'bidding_strategy_service_pb2':"google.ads.google_ads.v4.proto.services",
'billing_setup_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_audience_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_bid_modifier_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_budget_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_criterion_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_criterion_simulation_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_draft_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_experiment_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_extension_setting_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_feed_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_label_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_service_pb2':"google.ads.google_ads.v4.proto.services",
'campaign_shared_set_service_pb2':"google.ads.google_ads.v4.proto.services",
'carrier_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'change_status_service_pb2':"google.ads.google_ads.v4.proto.services",
'click_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'conversion_action_service_pb2':"google.ads.google_ads.v4.proto.services",
'conversion_adjustment_upload_service_pb2':"google.ads.google_ads.v4.proto.services",
'conversion_upload_service_pb2':"google.ads.google_ads.v4.proto.services",
'currency_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'custom_interest_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_client_link_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_client_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_extension_setting_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_feed_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_label_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_manager_link_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_negative_criterion_service_pb2':"google.ads.google_ads.v4.proto.services",
'customer_service_pb2':"google.ads.google_ads.v4.proto.services",
'detail_placement_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'display_keyword_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'distance_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'domain_category_service_pb2':"google.ads.google_ads.v4.proto.services",
'dynamic_search_ads_search_term_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'expanded_landing_page_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'extension_feed_item_service_pb2':"google.ads.google_ads.v4.proto.services",
'feed_item_service_pb2':"google.ads.google_ads.v4.proto.services",
'feed_item_target_service_pb2':"google.ads.google_ads.v4.proto.services",
'feed_mapping_service_pb2':"google.ads.google_ads.v4.proto.services",
'feed_placeholder_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'feed_service_pb2':"google.ads.google_ads.v4.proto.services",
'gender_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'geo_target_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'geographic_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'google_ads_field_service_pb2':"google.ads.google_ads.v4.proto.services",
'google_ads_service_pb2':"google.ads.google_ads.v4.proto.services",
'group_placement_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'hotel_group_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'hotel_performance_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'income_range_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'invoice_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_plan_ad_group_keyword_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_plan_ad_group_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_plan_campaign_keyword_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_plan_campaign_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_plan_idea_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_plan_service_pb2':"google.ads.google_ads.v4.proto.services",
'keyword_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'label_service_pb2':"google.ads.google_ads.v4.proto.services",
'landing_page_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'language_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'location_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'managed_placement_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'media_file_service_pb2':"google.ads.google_ads.v4.proto.services",
'merchant_center_link_service_pb2':"google.ads.google_ads.v4.proto.services",
'mobile_app_category_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'mobile_device_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'offline_user_data_job_service_pb2':"google.ads.google_ads.v4.proto.services",
'operating_system_version_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'paid_organic_search_term_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'parental_status_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'payments_account_service_pb2':"google.ads.google_ads.v4.proto.services",
'product_bidding_category_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'product_group_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'reach_plan_service_pb2':"google.ads.google_ads.v4.proto.services",
'recommendation_service_pb2':"google.ads.google_ads.v4.proto.services",
'remarketing_action_service_pb2':"google.ads.google_ads.v4.proto.services",
'search_term_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'shared_criterion_service_pb2':"google.ads.google_ads.v4.proto.services",
'shared_set_service_pb2':"google.ads.google_ads.v4.proto.services",
'shopping_performance_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'third_party_app_analytics_link_service_pb2':"google.ads.google_ads.v4.proto.services",
'topic_constant_service_pb2':"google.ads.google_ads.v4.proto.services",
'topic_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'user_data_service_pb2':"google.ads.google_ads.v4.proto.services",
'user_interest_service_pb2':"google.ads.google_ads.v4.proto.services",
'user_list_service_pb2':"google.ads.google_ads.v4.proto.services",
'user_location_view_service_pb2':"google.ads.google_ads.v4.proto.services",
'video_service_pb2':"google.ads.google_ads.v4.proto.services",
'operations_pb2':"google.longrunning",
'any_pb2':"google.protobuf",
'empty_pb2':"google.protobuf",
'field_mask_pb2':"google.protobuf",
'wrappers_pb2':"google.protobuf",
'status_pb2':"google.rpc",
}
_lazy_class_to_package_map = {
'AccessInvitationErrorEnum':"google.ads.google_ads.v4.proto.errors.access_invitation_error_pb2",
'AccessReasonEnum':"google.ads.google_ads.v4.proto.enums.access_reason_pb2",
'AccessRoleEnum':"google.ads.google_ads.v4.proto.enums.access_role_pb2",
'AccountBudget':"google.ads.google_ads.v4.proto.resources.account_budget_pb2",
'AccountBudgetProposal':"google.ads.google_ads.v4.proto.resources.account_budget_proposal_pb2",
'AccountBudgetProposalErrorEnum':"google.ads.google_ads.v4.proto.errors.account_budget_proposal_error_pb2",
'AccountBudgetProposalOperation':"google.ads.google_ads.v4.proto.services.account_budget_proposal_service_pb2",
'AccountBudgetProposalStatusEnum':"google.ads.google_ads.v4.proto.enums.account_budget_proposal_status_pb2",
'AccountBudgetProposalTypeEnum':"google.ads.google_ads.v4.proto.enums.account_budget_proposal_type_pb2",
'AccountBudgetStatusEnum':"google.ads.google_ads.v4.proto.enums.account_budget_status_pb2",
'AccountLink':"google.ads.google_ads.v4.proto.resources.account_link_pb2",
'AccountLinkOperation':"google.ads.google_ads.v4.proto.services.account_link_service_pb2",
'AccountLinkStatusEnum':"google.ads.google_ads.v4.proto.enums.account_link_status_pb2",
'Ad':"google.ads.google_ads.v4.proto.resources.ad_pb2",
'AdCustomizerErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_customizer_error_pb2",
'AdCustomizerPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.ad_customizer_placeholder_field_pb2",
'AdErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_error_pb2",
'AdGroup':"google.ads.google_ads.v4.proto.resources.ad_group_pb2",
'AdGroupAd':"google.ads.google_ads.v4.proto.resources.ad_group_ad_pb2",
'AdGroupAdAssetPolicySummary':"google.ads.google_ads.v4.proto.resources.ad_group_ad_asset_view_pb2",
'AdGroupAdAssetView':"google.ads.google_ads.v4.proto.resources.ad_group_ad_asset_view_pb2",
'AdGroupAdErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_group_ad_error_pb2",
'AdGroupAdLabel':"google.ads.google_ads.v4.proto.resources.ad_group_ad_label_pb2",
'AdGroupAdLabelOperation':"google.ads.google_ads.v4.proto.services.ad_group_ad_label_service_pb2",
'AdGroupAdOperation':"google.ads.google_ads.v4.proto.services.ad_group_ad_service_pb2",
'AdGroupAdPolicySummary':"google.ads.google_ads.v4.proto.resources.ad_group_ad_pb2",
'AdGroupAdRotationModeEnum':"google.ads.google_ads.v4.proto.enums.ad_group_ad_rotation_mode_pb2",
'AdGroupAdStatusEnum':"google.ads.google_ads.v4.proto.enums.ad_group_ad_status_pb2",
'AdGroupAudienceView':"google.ads.google_ads.v4.proto.resources.ad_group_audience_view_pb2",
'AdGroupBidModifier':"google.ads.google_ads.v4.proto.resources.ad_group_bid_modifier_pb2",
'AdGroupBidModifierErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_group_bid_modifier_error_pb2",
'AdGroupBidModifierOperation':"google.ads.google_ads.v4.proto.services.ad_group_bid_modifier_service_pb2",
'AdGroupCriterion':"google.ads.google_ads.v4.proto.resources.ad_group_criterion_pb2",
'AdGroupCriterionApprovalStatusEnum':"google.ads.google_ads.v4.proto.enums.ad_group_criterion_approval_status_pb2",
'AdGroupCriterionErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_group_criterion_error_pb2",
'AdGroupCriterionLabel':"google.ads.google_ads.v4.proto.resources.ad_group_criterion_label_pb2",
'AdGroupCriterionLabelOperation':"google.ads.google_ads.v4.proto.services.ad_group_criterion_label_service_pb2",
'AdGroupCriterionOperation':"google.ads.google_ads.v4.proto.services.ad_group_criterion_service_pb2",
'AdGroupCriterionSimulation':"google.ads.google_ads.v4.proto.resources.ad_group_criterion_simulation_pb2",
'AdGroupCriterionStatusEnum':"google.ads.google_ads.v4.proto.enums.ad_group_criterion_status_pb2",
'AdGroupErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_group_error_pb2",
'AdGroupExtensionSetting':"google.ads.google_ads.v4.proto.resources.ad_group_extension_setting_pb2",
'AdGroupExtensionSettingOperation':"google.ads.google_ads.v4.proto.services.ad_group_extension_setting_service_pb2",
'AdGroupFeed':"google.ads.google_ads.v4.proto.resources.ad_group_feed_pb2",
'AdGroupFeedErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_group_feed_error_pb2",
'AdGroupFeedOperation':"google.ads.google_ads.v4.proto.services.ad_group_feed_service_pb2",
'AdGroupLabel':"google.ads.google_ads.v4.proto.resources.ad_group_label_pb2",
'AdGroupLabelOperation':"google.ads.google_ads.v4.proto.services.ad_group_label_service_pb2",
'AdGroupOperation':"google.ads.google_ads.v4.proto.services.ad_group_service_pb2",
'AdGroupSimulation':"google.ads.google_ads.v4.proto.resources.ad_group_simulation_pb2",
'AdGroupStatusEnum':"google.ads.google_ads.v4.proto.enums.ad_group_status_pb2",
'AdGroupTypeEnum':"google.ads.google_ads.v4.proto.enums.ad_group_type_pb2",
'AdImageAsset':"google.ads.google_ads.v4.proto.common.ad_asset_pb2",
'AdMediaBundleAsset':"google.ads.google_ads.v4.proto.common.ad_asset_pb2",
'AdNetworkTypeEnum':"google.ads.google_ads.v4.proto.enums.ad_network_type_pb2",
'AdOperation':"google.ads.google_ads.v4.proto.services.ad_service_pb2",
'AdParameter':"google.ads.google_ads.v4.proto.resources.ad_parameter_pb2",
'AdParameterErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_parameter_error_pb2",
'AdParameterOperation':"google.ads.google_ads.v4.proto.services.ad_parameter_service_pb2",
'AdScheduleInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'AdScheduleView':"google.ads.google_ads.v4.proto.resources.ad_schedule_view_pb2",
'AdServingOptimizationStatusEnum':"google.ads.google_ads.v4.proto.enums.ad_serving_optimization_status_pb2",
'AdSharingErrorEnum':"google.ads.google_ads.v4.proto.errors.ad_sharing_error_pb2",
'AdStrengthEnum':"google.ads.google_ads.v4.proto.enums.ad_strength_pb2",
'AdTextAsset':"google.ads.google_ads.v4.proto.common.ad_asset_pb2",
'AdTypeEnum':"google.ads.google_ads.v4.proto.enums.ad_type_pb2",
'AdVideoAsset':"google.ads.google_ads.v4.proto.common.ad_asset_pb2",
'AddBatchJobOperationsRequest':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'AddBatchJobOperationsResponse':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'AddOfflineUserDataJobOperationsRequest':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'AddOfflineUserDataJobOperationsResponse':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'AddressInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'AdvertisingChannelSubTypeEnum':"google.ads.google_ads.v4.proto.enums.advertising_channel_sub_type_pb2",
'AdvertisingChannelTypeEnum':"google.ads.google_ads.v4.proto.enums.advertising_channel_type_pb2",
'AdxErrorEnum':"google.ads.google_ads.v4.proto.errors.adx_error_pb2",
'AffiliateLocationFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'AffiliateLocationFeedRelationshipTypeEnum':"google.ads.google_ads.v4.proto.enums.affiliate_location_feed_relationship_type_pb2",
'AffiliateLocationPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.affiliate_location_placeholder_field_pb2",
'AgeRangeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'AgeRangeTypeEnum':"google.ads.google_ads.v4.proto.enums.age_range_type_pb2",
'AgeRangeView':"google.ads.google_ads.v4.proto.resources.age_range_view_pb2",
'AppAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'AppCampaignAppStoreEnum':"google.ads.google_ads.v4.proto.enums.app_campaign_app_store_pb2",
'AppCampaignBiddingStrategyGoalTypeEnum':"google.ads.google_ads.v4.proto.enums.app_campaign_bidding_strategy_goal_type_pb2",
'AppEngagementAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'AppFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'AppPaymentModelInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'AppPaymentModelTypeEnum':"google.ads.google_ads.v4.proto.enums.app_payment_model_type_pb2",
'AppPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.app_placeholder_field_pb2",
'AppStoreEnum':"google.ads.google_ads.v4.proto.enums.app_store_pb2",
'AppUrlOperatingSystemTypeEnum':"google.ads.google_ads.v4.proto.enums.app_url_operating_system_type_pb2",
'ApplyRecommendationOperation':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'ApplyRecommendationRequest':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'ApplyRecommendationResponse':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'ApplyRecommendationResult':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'Asset':"google.ads.google_ads.v4.proto.resources.asset_pb2",
'AssetErrorEnum':"google.ads.google_ads.v4.proto.errors.asset_error_pb2",
'AssetFieldTypeEnum':"google.ads.google_ads.v4.proto.enums.asset_field_type_pb2",
'AssetLinkErrorEnum':"google.ads.google_ads.v4.proto.errors.asset_link_error_pb2",
'AssetOperation':"google.ads.google_ads.v4.proto.services.asset_service_pb2",
'AssetPerformanceLabelEnum':"google.ads.google_ads.v4.proto.enums.asset_performance_label_pb2",
'AssetTypeEnum':"google.ads.google_ads.v4.proto.enums.asset_type_pb2",
'AttributeFieldMapping':"google.ads.google_ads.v4.proto.resources.feed_mapping_pb2",
'AttributionModelEnum':"google.ads.google_ads.v4.proto.enums.attribution_model_pb2",
'AuthenticationErrorEnum':"google.ads.google_ads.v4.proto.errors.authentication_error_pb2",
'AuthorizationErrorEnum':"google.ads.google_ads.v4.proto.errors.authorization_error_pb2",
'BasicUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'BatchJob':"google.ads.google_ads.v4.proto.resources.batch_job_pb2",
'BatchJobErrorEnum':"google.ads.google_ads.v4.proto.errors.batch_job_error_pb2",
'BatchJobOperation':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'BatchJobResult':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'BatchJobStatusEnum':"google.ads.google_ads.v4.proto.enums.batch_job_status_pb2",
'BidModifierSimulationPoint':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'BidModifierSimulationPointList':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'BidModifierSourceEnum':"google.ads.google_ads.v4.proto.enums.bid_modifier_source_pb2",
'BiddingErrorEnum':"google.ads.google_ads.v4.proto.errors.bidding_error_pb2",
'BiddingSourceEnum':"google.ads.google_ads.v4.proto.enums.bidding_source_pb2",
'BiddingStrategy':"google.ads.google_ads.v4.proto.resources.bidding_strategy_pb2",
'BiddingStrategyErrorEnum':"google.ads.google_ads.v4.proto.errors.bidding_strategy_error_pb2",
'BiddingStrategyOperation':"google.ads.google_ads.v4.proto.services.bidding_strategy_service_pb2",
'BiddingStrategyStatusEnum':"google.ads.google_ads.v4.proto.enums.bidding_strategy_status_pb2",
'BiddingStrategyTypeEnum':"google.ads.google_ads.v4.proto.enums.bidding_strategy_type_pb2",
'BillingSetup':"google.ads.google_ads.v4.proto.resources.billing_setup_pb2",
'BillingSetupErrorEnum':"google.ads.google_ads.v4.proto.errors.billing_setup_error_pb2",
'BillingSetupOperation':"google.ads.google_ads.v4.proto.services.billing_setup_service_pb2",
'BillingSetupStatusEnum':"google.ads.google_ads.v4.proto.enums.billing_setup_status_pb2",
'BookOnGoogleAsset':"google.ads.google_ads.v4.proto.common.asset_types_pb2",
'BrandSafetySuitabilityEnum':"google.ads.google_ads.v4.proto.enums.brand_safety_suitability_pb2",
'BudgetDeliveryMethodEnum':"google.ads.google_ads.v4.proto.enums.budget_delivery_method_pb2",
'BudgetPeriodEnum':"google.ads.google_ads.v4.proto.enums.budget_period_pb2",
'BudgetStatusEnum':"google.ads.google_ads.v4.proto.enums.budget_status_pb2",
'BudgetTypeEnum':"google.ads.google_ads.v4.proto.enums.budget_type_pb2",
'CallConversion':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'CallConversionReportingStateEnum':"google.ads.google_ads.v4.proto.enums.call_conversion_reporting_state_pb2",
'CallConversionResult':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'CallFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'CallOnlyAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'CallPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.call_placeholder_field_pb2",
'CallReportingSetting':"google.ads.google_ads.v4.proto.resources.customer_pb2",
'CalloutFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'CalloutPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.callout_placeholder_field_pb2",
'Campaign':"google.ads.google_ads.v4.proto.resources.campaign_pb2",
'CampaignAudienceView':"google.ads.google_ads.v4.proto.resources.campaign_audience_view_pb2",
'CampaignBidModifier':"google.ads.google_ads.v4.proto.resources.campaign_bid_modifier_pb2",
'CampaignBidModifierOperation':"google.ads.google_ads.v4.proto.services.campaign_bid_modifier_service_pb2",
'CampaignBudget':"google.ads.google_ads.v4.proto.resources.campaign_budget_pb2",
'CampaignBudgetErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_budget_error_pb2",
'CampaignBudgetOperation':"google.ads.google_ads.v4.proto.services.campaign_budget_service_pb2",
'CampaignCriterion':"google.ads.google_ads.v4.proto.resources.campaign_criterion_pb2",
'CampaignCriterionErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_criterion_error_pb2",
'CampaignCriterionOperation':"google.ads.google_ads.v4.proto.services.campaign_criterion_service_pb2",
'CampaignCriterionSimulation':"google.ads.google_ads.v4.proto.resources.campaign_criterion_simulation_pb2",
'CampaignCriterionStatusEnum':"google.ads.google_ads.v4.proto.enums.campaign_criterion_status_pb2",
'CampaignDraft':"google.ads.google_ads.v4.proto.resources.campaign_draft_pb2",
'CampaignDraftErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_draft_error_pb2",
'CampaignDraftOperation':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'CampaignDraftStatusEnum':"google.ads.google_ads.v4.proto.enums.campaign_draft_status_pb2",
'CampaignDuration':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'CampaignErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_error_pb2",
'CampaignExperiment':"google.ads.google_ads.v4.proto.resources.campaign_experiment_pb2",
'CampaignExperimentErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_experiment_error_pb2",
'CampaignExperimentOperation':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'CampaignExperimentStatusEnum':"google.ads.google_ads.v4.proto.enums.campaign_experiment_status_pb2",
'CampaignExperimentTrafficSplitTypeEnum':"google.ads.google_ads.v4.proto.enums.campaign_experiment_traffic_split_type_pb2",
'CampaignExperimentTypeEnum':"google.ads.google_ads.v4.proto.enums.campaign_experiment_type_pb2",
'CampaignExtensionSetting':"google.ads.google_ads.v4.proto.resources.campaign_extension_setting_pb2",
'CampaignExtensionSettingOperation':"google.ads.google_ads.v4.proto.services.campaign_extension_setting_service_pb2",
'CampaignFeed':"google.ads.google_ads.v4.proto.resources.campaign_feed_pb2",
'CampaignFeedErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_feed_error_pb2",
'CampaignFeedOperation':"google.ads.google_ads.v4.proto.services.campaign_feed_service_pb2",
'CampaignLabel':"google.ads.google_ads.v4.proto.resources.campaign_label_pb2",
'CampaignLabelOperation':"google.ads.google_ads.v4.proto.services.campaign_label_service_pb2",
'CampaignOperation':"google.ads.google_ads.v4.proto.services.campaign_service_pb2",
'CampaignServingStatusEnum':"google.ads.google_ads.v4.proto.enums.campaign_serving_status_pb2",
'CampaignSharedSet':"google.ads.google_ads.v4.proto.resources.campaign_shared_set_pb2",
'CampaignSharedSetErrorEnum':"google.ads.google_ads.v4.proto.errors.campaign_shared_set_error_pb2",
'CampaignSharedSetOperation':"google.ads.google_ads.v4.proto.services.campaign_shared_set_service_pb2",
'CampaignSharedSetStatusEnum':"google.ads.google_ads.v4.proto.enums.campaign_shared_set_status_pb2",
'CampaignStatusEnum':"google.ads.google_ads.v4.proto.enums.campaign_status_pb2",
'CarrierConstant':"google.ads.google_ads.v4.proto.resources.carrier_constant_pb2",
'CarrierInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ChangeStatus':"google.ads.google_ads.v4.proto.resources.change_status_pb2",
'ChangeStatusErrorEnum':"google.ads.google_ads.v4.proto.errors.change_status_error_pb2",
'ChangeStatusOperationEnum':"google.ads.google_ads.v4.proto.enums.change_status_operation_pb2",
'ChangeStatusResourceTypeEnum':"google.ads.google_ads.v4.proto.enums.change_status_resource_type_pb2",
'ClickConversion':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'ClickConversionResult':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'ClickLocation':"google.ads.google_ads.v4.proto.common.click_location_pb2",
'ClickTypeEnum':"google.ads.google_ads.v4.proto.enums.click_type_pb2",
'ClickView':"google.ads.google_ads.v4.proto.resources.click_view_pb2",
'CollectionSizeErrorEnum':"google.ads.google_ads.v4.proto.errors.collection_size_error_pb2",
'CombinedRuleUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'Commission':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'ContentLabelInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ContentLabelTypeEnum':"google.ads.google_ads.v4.proto.enums.content_label_type_pb2",
'ContextErrorEnum':"google.ads.google_ads.v4.proto.errors.context_error_pb2",
'ConversionAction':"google.ads.google_ads.v4.proto.resources.conversion_action_pb2",
'ConversionActionCategoryEnum':"google.ads.google_ads.v4.proto.enums.conversion_action_category_pb2",
'ConversionActionCountingTypeEnum':"google.ads.google_ads.v4.proto.enums.conversion_action_counting_type_pb2",
'ConversionActionErrorEnum':"google.ads.google_ads.v4.proto.errors.conversion_action_error_pb2",
'ConversionActionOperation':"google.ads.google_ads.v4.proto.services.conversion_action_service_pb2",
'ConversionActionStatusEnum':"google.ads.google_ads.v4.proto.enums.conversion_action_status_pb2",
'ConversionActionTypeEnum':"google.ads.google_ads.v4.proto.enums.conversion_action_type_pb2",
'ConversionAdjustment':"google.ads.google_ads.v4.proto.services.conversion_adjustment_upload_service_pb2",
'ConversionAdjustmentResult':"google.ads.google_ads.v4.proto.services.conversion_adjustment_upload_service_pb2",
'ConversionAdjustmentTypeEnum':"google.ads.google_ads.v4.proto.enums.conversion_adjustment_type_pb2",
'ConversionAdjustmentUploadErrorEnum':"google.ads.google_ads.v4.proto.errors.conversion_adjustment_upload_error_pb2",
'ConversionAttributionEventTypeEnum':"google.ads.google_ads.v4.proto.enums.conversion_attribution_event_type_pb2",
'ConversionLagBucketEnum':"google.ads.google_ads.v4.proto.enums.conversion_lag_bucket_pb2",
'ConversionOrAdjustmentLagBucketEnum':"google.ads.google_ads.v4.proto.enums.conversion_or_adjustment_lag_bucket_pb2",
'ConversionTrackingSetting':"google.ads.google_ads.v4.proto.resources.customer_pb2",
'ConversionUploadErrorEnum':"google.ads.google_ads.v4.proto.errors.conversion_upload_error_pb2",
'CountryCodeErrorEnum':"google.ads.google_ads.v4.proto.errors.country_code_error_pb2",
'CpcBidSimulationPoint':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'CpcBidSimulationPointList':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'CpvBidSimulationPoint':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'CpvBidSimulationPointList':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'CreateCampaignExperimentMetadata':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'CreateCampaignExperimentRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'CreateCustomerClientRequest':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'CreateCustomerClientResponse':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'CreateOfflineUserDataJobRequest':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'CreateOfflineUserDataJobResponse':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'CriterionCategoryAvailability':"google.ads.google_ads.v4.proto.common.criterion_category_availability_pb2",
'CriterionCategoryChannelAvailability':"google.ads.google_ads.v4.proto.common.criterion_category_availability_pb2",
'CriterionCategoryChannelAvailabilityModeEnum':"google.ads.google_ads.v4.proto.enums.criterion_category_channel_availability_mode_pb2",
'CriterionCategoryLocaleAvailability':"google.ads.google_ads.v4.proto.common.criterion_category_availability_pb2",
'CriterionCategoryLocaleAvailabilityModeEnum':"google.ads.google_ads.v4.proto.enums.criterion_category_locale_availability_mode_pb2",
'CriterionErrorEnum':"google.ads.google_ads.v4.proto.errors.criterion_error_pb2",
'CriterionSystemServingStatusEnum':"google.ads.google_ads.v4.proto.enums.criterion_system_serving_status_pb2",
'CriterionTypeEnum':"google.ads.google_ads.v4.proto.enums.criterion_type_pb2",
'CrmBasedUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'CurrencyCodeErrorEnum':"google.ads.google_ads.v4.proto.errors.currency_code_error_pb2",
'CurrencyConstant':"google.ads.google_ads.v4.proto.resources.currency_constant_pb2",
'CustomAffinityInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'CustomIntentInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'CustomInterest':"google.ads.google_ads.v4.proto.resources.custom_interest_pb2",
'CustomInterestErrorEnum':"google.ads.google_ads.v4.proto.errors.custom_interest_error_pb2",
'CustomInterestMember':"google.ads.google_ads.v4.proto.resources.custom_interest_pb2",
'CustomInterestMemberTypeEnum':"google.ads.google_ads.v4.proto.enums.custom_interest_member_type_pb2",
'CustomInterestOperation':"google.ads.google_ads.v4.proto.services.custom_interest_service_pb2",
'CustomInterestStatusEnum':"google.ads.google_ads.v4.proto.enums.custom_interest_status_pb2",
'CustomInterestTypeEnum':"google.ads.google_ads.v4.proto.enums.custom_interest_type_pb2",
'CustomParameter':"google.ads.google_ads.v4.proto.common.custom_parameter_pb2",
'CustomPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.custom_placeholder_field_pb2",
'Customer':"google.ads.google_ads.v4.proto.resources.customer_pb2",
'CustomerClient':"google.ads.google_ads.v4.proto.resources.customer_client_pb2",
'CustomerClientLink':"google.ads.google_ads.v4.proto.resources.customer_client_link_pb2",
'CustomerClientLinkErrorEnum':"google.ads.google_ads.v4.proto.errors.customer_client_link_error_pb2",
'CustomerClientLinkOperation':"google.ads.google_ads.v4.proto.services.customer_client_link_service_pb2",
'CustomerErrorEnum':"google.ads.google_ads.v4.proto.errors.customer_error_pb2",
'CustomerExtensionSetting':"google.ads.google_ads.v4.proto.resources.customer_extension_setting_pb2",
'CustomerExtensionSettingOperation':"google.ads.google_ads.v4.proto.services.customer_extension_setting_service_pb2",
'CustomerFeed':"google.ads.google_ads.v4.proto.resources.customer_feed_pb2",
'CustomerFeedErrorEnum':"google.ads.google_ads.v4.proto.errors.customer_feed_error_pb2",
'CustomerFeedOperation':"google.ads.google_ads.v4.proto.services.customer_feed_service_pb2",
'CustomerLabel':"google.ads.google_ads.v4.proto.resources.customer_label_pb2",
'CustomerLabelOperation':"google.ads.google_ads.v4.proto.services.customer_label_service_pb2",
'CustomerManagerLink':"google.ads.google_ads.v4.proto.resources.customer_manager_link_pb2",
'CustomerManagerLinkErrorEnum':"google.ads.google_ads.v4.proto.errors.customer_manager_link_error_pb2",
'CustomerManagerLinkOperation':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'CustomerMatchUploadKeyTypeEnum':"google.ads.google_ads.v4.proto.enums.customer_match_upload_key_type_pb2",
'CustomerMatchUserListMetadata':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'CustomerNegativeCriterion':"google.ads.google_ads.v4.proto.resources.customer_negative_criterion_pb2",
'CustomerNegativeCriterionOperation':"google.ads.google_ads.v4.proto.services.customer_negative_criterion_service_pb2",
'CustomerOperation':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'CustomerPayPerConversionEligibilityFailureReasonEnum':"google.ads.google_ads.v4.proto.enums.customer_pay_per_conversion_eligibility_failure_reason_pb2",
'DataDrivenModelStatusEnum':"google.ads.google_ads.v4.proto.enums.data_driven_model_status_pb2",
'DatabaseErrorEnum':"google.ads.google_ads.v4.proto.errors.database_error_pb2",
'DateErrorEnum':"google.ads.google_ads.v4.proto.errors.date_error_pb2",
'DateRange':"google.ads.google_ads.v4.proto.common.dates_pb2",
'DateRangeErrorEnum':"google.ads.google_ads.v4.proto.errors.date_range_error_pb2",
'DateSpecificRuleUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'DayOfWeekEnum':"google.ads.google_ads.v4.proto.enums.day_of_week_pb2",
'DetailPlacementView':"google.ads.google_ads.v4.proto.resources.detail_placement_view_pb2",
'DeviceEnum':"google.ads.google_ads.v4.proto.enums.device_pb2",
'DeviceInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'DismissRecommendationRequest':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'DismissRecommendationResponse':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'DisplayAdFormatSettingEnum':"google.ads.google_ads.v4.proto.enums.display_ad_format_setting_pb2",
'DisplayCallToAction':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'DisplayKeywordView':"google.ads.google_ads.v4.proto.resources.display_keyword_view_pb2",
'DisplayUploadAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'DisplayUploadProductTypeEnum':"google.ads.google_ads.v4.proto.enums.display_upload_product_type_pb2",
'DistanceBucketEnum':"google.ads.google_ads.v4.proto.enums.distance_bucket_pb2",
'DistanceView':"google.ads.google_ads.v4.proto.resources.distance_view_pb2",
'DistinctErrorEnum':"google.ads.google_ads.v4.proto.errors.distinct_error_pb2",
'DomainCategory':"google.ads.google_ads.v4.proto.resources.domain_category_pb2",
'DsaPageFeedCriterionFieldEnum':"google.ads.google_ads.v4.proto.enums.dsa_page_feed_criterion_field_pb2",
'DynamicSearchAdsSearchTermView':"google.ads.google_ads.v4.proto.resources.dynamic_search_ads_search_term_view_pb2",
'EducationPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.education_placeholder_field_pb2",
'EndCampaignExperimentRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'EnhancedCpc':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'EnumErrorEnum':"google.ads.google_ads.v4.proto.errors.enum_error_pb2",
'ErrorCode':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'ErrorDetails':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'ErrorLocation':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'ExpandedDynamicSearchAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ExpandedLandingPageView':"google.ads.google_ads.v4.proto.resources.expanded_landing_page_view_pb2",
'ExpandedTextAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ExplorerAutoOptimizerSetting':"google.ads.google_ads.v4.proto.common.explorer_auto_optimizer_setting_pb2",
'ExpressionRuleUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'ExtensionFeedItem':"google.ads.google_ads.v4.proto.resources.extension_feed_item_pb2",
'ExtensionFeedItemErrorEnum':"google.ads.google_ads.v4.proto.errors.extension_feed_item_error_pb2",
'ExtensionFeedItemOperation':"google.ads.google_ads.v4.proto.services.extension_feed_item_service_pb2",
'ExtensionSettingDeviceEnum':"google.ads.google_ads.v4.proto.enums.extension_setting_device_pb2",
'ExtensionSettingErrorEnum':"google.ads.google_ads.v4.proto.errors.extension_setting_error_pb2",
'ExtensionTypeEnum':"google.ads.google_ads.v4.proto.enums.extension_type_pb2",
'ExternalAttributionData':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'ExternalConversionSourceEnum':"google.ads.google_ads.v4.proto.enums.external_conversion_source_pb2",
'Feed':"google.ads.google_ads.v4.proto.resources.feed_pb2",
'FeedAttribute':"google.ads.google_ads.v4.proto.resources.feed_pb2",
'FeedAttributeOperation':"google.ads.google_ads.v4.proto.resources.feed_pb2",
'FeedAttributeReferenceErrorEnum':"google.ads.google_ads.v4.proto.errors.feed_attribute_reference_error_pb2",
'FeedAttributeTypeEnum':"google.ads.google_ads.v4.proto.enums.feed_attribute_type_pb2",
'FeedErrorEnum':"google.ads.google_ads.v4.proto.errors.feed_error_pb2",
'FeedItem':"google.ads.google_ads.v4.proto.resources.feed_item_pb2",
'FeedItemAttributeValue':"google.ads.google_ads.v4.proto.resources.feed_item_pb2",
'FeedItemErrorEnum':"google.ads.google_ads.v4.proto.errors.feed_item_error_pb2",
'FeedItemOperation':"google.ads.google_ads.v4.proto.services.feed_item_service_pb2",
'FeedItemPlaceholderPolicyInfo':"google.ads.google_ads.v4.proto.resources.feed_item_pb2",
'FeedItemQualityApprovalStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_item_quality_approval_status_pb2",
'FeedItemQualityDisapprovalReasonEnum':"google.ads.google_ads.v4.proto.enums.feed_item_quality_disapproval_reason_pb2",
'FeedItemStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_item_status_pb2",
'FeedItemTarget':"google.ads.google_ads.v4.proto.resources.feed_item_target_pb2",
'FeedItemTargetDeviceEnum':"google.ads.google_ads.v4.proto.enums.feed_item_target_device_pb2",
'FeedItemTargetErrorEnum':"google.ads.google_ads.v4.proto.errors.feed_item_target_error_pb2",
'FeedItemTargetOperation':"google.ads.google_ads.v4.proto.services.feed_item_target_service_pb2",
'FeedItemTargetStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_item_target_status_pb2",
'FeedItemTargetTypeEnum':"google.ads.google_ads.v4.proto.enums.feed_item_target_type_pb2",
'FeedItemValidationError':"google.ads.google_ads.v4.proto.resources.feed_item_pb2",
'FeedItemValidationErrorEnum':"google.ads.google_ads.v4.proto.errors.feed_item_validation_error_pb2",
'FeedItemValidationStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_item_validation_status_pb2",
'FeedLinkStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_link_status_pb2",
'FeedMapping':"google.ads.google_ads.v4.proto.resources.feed_mapping_pb2",
'FeedMappingCriterionTypeEnum':"google.ads.google_ads.v4.proto.enums.feed_mapping_criterion_type_pb2",
'FeedMappingErrorEnum':"google.ads.google_ads.v4.proto.errors.feed_mapping_error_pb2",
'FeedMappingOperation':"google.ads.google_ads.v4.proto.services.feed_mapping_service_pb2",
'FeedMappingStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_mapping_status_pb2",
'FeedOperation':"google.ads.google_ads.v4.proto.services.feed_service_pb2",
'FeedOriginEnum':"google.ads.google_ads.v4.proto.enums.feed_origin_pb2",
'FeedPlaceholderView':"google.ads.google_ads.v4.proto.resources.feed_placeholder_view_pb2",
'FeedStatusEnum':"google.ads.google_ads.v4.proto.enums.feed_status_pb2",
'FieldErrorEnum':"google.ads.google_ads.v4.proto.errors.field_error_pb2",
'FieldMaskErrorEnum':"google.ads.google_ads.v4.proto.errors.field_mask_error_pb2",
'FinalAppUrl':"google.ads.google_ads.v4.proto.common.final_app_url_pb2",
'FlightPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.flight_placeholder_field_pb2",
'Forecast':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ForecastMetrics':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'FrequencyCap':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'FrequencyCapEntry':"google.ads.google_ads.v4.proto.common.frequency_cap_pb2",
'FrequencyCapEventTypeEnum':"google.ads.google_ads.v4.proto.enums.frequency_cap_event_type_pb2",
'FrequencyCapKey':"google.ads.google_ads.v4.proto.common.frequency_cap_pb2",
'FrequencyCapLevelEnum':"google.ads.google_ads.v4.proto.enums.frequency_cap_level_pb2",
'FrequencyCapTimeUnitEnum':"google.ads.google_ads.v4.proto.enums.frequency_cap_time_unit_pb2",
'FunctionErrorEnum':"google.ads.google_ads.v4.proto.errors.function_error_pb2",
'FunctionParsingErrorEnum':"google.ads.google_ads.v4.proto.errors.function_parsing_error_pb2",
'GclidDateTimePair':"google.ads.google_ads.v4.proto.services.conversion_adjustment_upload_service_pb2",
'GenderInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'GenderTypeEnum':"google.ads.google_ads.v4.proto.enums.gender_type_pb2",
'GenderView':"google.ads.google_ads.v4.proto.resources.gender_view_pb2",
'GenerateForecastCurveRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GenerateForecastCurveResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GenerateForecastMetricsRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GenerateForecastMetricsResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GenerateHistoricalMetricsRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GenerateHistoricalMetricsResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GenerateKeywordIdeaResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'GenerateKeywordIdeaResult':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'GenerateKeywordIdeasRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'GenerateProductMixIdeasRequest':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'GenerateProductMixIdeasResponse':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'GenerateReachForecastRequest':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'GenerateReachForecastResponse':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'GeoPointInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'GeoTargetConstant':"google.ads.google_ads.v4.proto.resources.geo_target_constant_pb2",
'GeoTargetConstantStatusEnum':"google.ads.google_ads.v4.proto.enums.geo_target_constant_status_pb2",
'GeoTargetConstantSuggestion':"google.ads.google_ads.v4.proto.services.geo_target_constant_service_pb2",
'GeoTargetConstantSuggestionErrorEnum':"google.ads.google_ads.v4.proto.errors.geo_target_constant_suggestion_error_pb2",
'GeoTargetingRestrictionEnum':"google.ads.google_ads.v4.proto.enums.geo_targeting_restriction_pb2",
'GeoTargetingTypeEnum':"google.ads.google_ads.v4.proto.enums.geo_targeting_type_pb2",
'GeographicView':"google.ads.google_ads.v4.proto.resources.geographic_view_pb2",
'GetAccountBudgetProposalRequest':"google.ads.google_ads.v4.proto.services.account_budget_proposal_service_pb2",
'GetAccountBudgetRequest':"google.ads.google_ads.v4.proto.services.account_budget_service_pb2",
'GetAccountLinkRequest':"google.ads.google_ads.v4.proto.services.account_link_service_pb2",
'GetAdGroupAdAssetViewRequest':"google.ads.google_ads.v4.proto.services.ad_group_ad_asset_view_service_pb2",
'GetAdGroupAdLabelRequest':"google.ads.google_ads.v4.proto.services.ad_group_ad_label_service_pb2",
'GetAdGroupAdRequest':"google.ads.google_ads.v4.proto.services.ad_group_ad_service_pb2",
'GetAdGroupAudienceViewRequest':"google.ads.google_ads.v4.proto.services.ad_group_audience_view_service_pb2",
'GetAdGroupBidModifierRequest':"google.ads.google_ads.v4.proto.services.ad_group_bid_modifier_service_pb2",
'GetAdGroupCriterionLabelRequest':"google.ads.google_ads.v4.proto.services.ad_group_criterion_label_service_pb2",
'GetAdGroupCriterionRequest':"google.ads.google_ads.v4.proto.services.ad_group_criterion_service_pb2",
'GetAdGroupCriterionSimulationRequest':"google.ads.google_ads.v4.proto.services.ad_group_criterion_simulation_service_pb2",
'GetAdGroupExtensionSettingRequest':"google.ads.google_ads.v4.proto.services.ad_group_extension_setting_service_pb2",
'GetAdGroupFeedRequest':"google.ads.google_ads.v4.proto.services.ad_group_feed_service_pb2",
'GetAdGroupLabelRequest':"google.ads.google_ads.v4.proto.services.ad_group_label_service_pb2",
'GetAdGroupRequest':"google.ads.google_ads.v4.proto.services.ad_group_service_pb2",
'GetAdGroupSimulationRequest':"google.ads.google_ads.v4.proto.services.ad_group_simulation_service_pb2",
'GetAdParameterRequest':"google.ads.google_ads.v4.proto.services.ad_parameter_service_pb2",
'GetAdRequest':"google.ads.google_ads.v4.proto.services.ad_service_pb2",
'GetAdScheduleViewRequest':"google.ads.google_ads.v4.proto.services.ad_schedule_view_service_pb2",
'GetAgeRangeViewRequest':"google.ads.google_ads.v4.proto.services.age_range_view_service_pb2",
'GetAssetRequest':"google.ads.google_ads.v4.proto.services.asset_service_pb2",
'GetBatchJobRequest':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'GetBiddingStrategyRequest':"google.ads.google_ads.v4.proto.services.bidding_strategy_service_pb2",
'GetBillingSetupRequest':"google.ads.google_ads.v4.proto.services.billing_setup_service_pb2",
'GetCampaignAudienceViewRequest':"google.ads.google_ads.v4.proto.services.campaign_audience_view_service_pb2",
'GetCampaignBidModifierRequest':"google.ads.google_ads.v4.proto.services.campaign_bid_modifier_service_pb2",
'GetCampaignBudgetRequest':"google.ads.google_ads.v4.proto.services.campaign_budget_service_pb2",
'GetCampaignCriterionRequest':"google.ads.google_ads.v4.proto.services.campaign_criterion_service_pb2",
'GetCampaignCriterionSimulationRequest':"google.ads.google_ads.v4.proto.services.campaign_criterion_simulation_service_pb2",
'GetCampaignDraftRequest':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'GetCampaignExperimentRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'GetCampaignExtensionSettingRequest':"google.ads.google_ads.v4.proto.services.campaign_extension_setting_service_pb2",
'GetCampaignFeedRequest':"google.ads.google_ads.v4.proto.services.campaign_feed_service_pb2",
'GetCampaignLabelRequest':"google.ads.google_ads.v4.proto.services.campaign_label_service_pb2",
'GetCampaignRequest':"google.ads.google_ads.v4.proto.services.campaign_service_pb2",
'GetCampaignSharedSetRequest':"google.ads.google_ads.v4.proto.services.campaign_shared_set_service_pb2",
'GetCarrierConstantRequest':"google.ads.google_ads.v4.proto.services.carrier_constant_service_pb2",
'GetChangeStatusRequest':"google.ads.google_ads.v4.proto.services.change_status_service_pb2",
'GetClickViewRequest':"google.ads.google_ads.v4.proto.services.click_view_service_pb2",
'GetConversionActionRequest':"google.ads.google_ads.v4.proto.services.conversion_action_service_pb2",
'GetCurrencyConstantRequest':"google.ads.google_ads.v4.proto.services.currency_constant_service_pb2",
'GetCustomInterestRequest':"google.ads.google_ads.v4.proto.services.custom_interest_service_pb2",
'GetCustomerClientLinkRequest':"google.ads.google_ads.v4.proto.services.customer_client_link_service_pb2",
'GetCustomerClientRequest':"google.ads.google_ads.v4.proto.services.customer_client_service_pb2",
'GetCustomerExtensionSettingRequest':"google.ads.google_ads.v4.proto.services.customer_extension_setting_service_pb2",
'GetCustomerFeedRequest':"google.ads.google_ads.v4.proto.services.customer_feed_service_pb2",
'GetCustomerLabelRequest':"google.ads.google_ads.v4.proto.services.customer_label_service_pb2",
'GetCustomerManagerLinkRequest':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'GetCustomerNegativeCriterionRequest':"google.ads.google_ads.v4.proto.services.customer_negative_criterion_service_pb2",
'GetCustomerRequest':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'GetDetailPlacementViewRequest':"google.ads.google_ads.v4.proto.services.detail_placement_view_service_pb2",
'GetDisplayKeywordViewRequest':"google.ads.google_ads.v4.proto.services.display_keyword_view_service_pb2",
'GetDistanceViewRequest':"google.ads.google_ads.v4.proto.services.distance_view_service_pb2",
'GetDomainCategoryRequest':"google.ads.google_ads.v4.proto.services.domain_category_service_pb2",
'GetDynamicSearchAdsSearchTermViewRequest':"google.ads.google_ads.v4.proto.services.dynamic_search_ads_search_term_view_service_pb2",
'GetExpandedLandingPageViewRequest':"google.ads.google_ads.v4.proto.services.expanded_landing_page_view_service_pb2",
'GetExtensionFeedItemRequest':"google.ads.google_ads.v4.proto.services.extension_feed_item_service_pb2",
'GetFeedItemRequest':"google.ads.google_ads.v4.proto.services.feed_item_service_pb2",
'GetFeedItemTargetRequest':"google.ads.google_ads.v4.proto.services.feed_item_target_service_pb2",
'GetFeedMappingRequest':"google.ads.google_ads.v4.proto.services.feed_mapping_service_pb2",
'GetFeedPlaceholderViewRequest':"google.ads.google_ads.v4.proto.services.feed_placeholder_view_service_pb2",
'GetFeedRequest':"google.ads.google_ads.v4.proto.services.feed_service_pb2",
'GetGenderViewRequest':"google.ads.google_ads.v4.proto.services.gender_view_service_pb2",
'GetGeoTargetConstantRequest':"google.ads.google_ads.v4.proto.services.geo_target_constant_service_pb2",
'GetGeographicViewRequest':"google.ads.google_ads.v4.proto.services.geographic_view_service_pb2",
'GetGoogleAdsFieldRequest':"google.ads.google_ads.v4.proto.services.google_ads_field_service_pb2",
'GetGroupPlacementViewRequest':"google.ads.google_ads.v4.proto.services.group_placement_view_service_pb2",
'GetHotelGroupViewRequest':"google.ads.google_ads.v4.proto.services.hotel_group_view_service_pb2",
'GetHotelPerformanceViewRequest':"google.ads.google_ads.v4.proto.services.hotel_performance_view_service_pb2",
'GetIncomeRangeViewRequest':"google.ads.google_ads.v4.proto.services.income_range_view_service_pb2",
'GetKeywordPlanAdGroupKeywordRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_keyword_service_pb2",
'GetKeywordPlanAdGroupRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_service_pb2",
'GetKeywordPlanCampaignKeywordRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_keyword_service_pb2",
'GetKeywordPlanCampaignRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_service_pb2",
'GetKeywordPlanRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'GetKeywordViewRequest':"google.ads.google_ads.v4.proto.services.keyword_view_service_pb2",
'GetLabelRequest':"google.ads.google_ads.v4.proto.services.label_service_pb2",
'GetLandingPageViewRequest':"google.ads.google_ads.v4.proto.services.landing_page_view_service_pb2",
'GetLanguageConstantRequest':"google.ads.google_ads.v4.proto.services.language_constant_service_pb2",
'GetLocationViewRequest':"google.ads.google_ads.v4.proto.services.location_view_service_pb2",
'GetManagedPlacementViewRequest':"google.ads.google_ads.v4.proto.services.managed_placement_view_service_pb2",
'GetMediaFileRequest':"google.ads.google_ads.v4.proto.services.media_file_service_pb2",
'GetMerchantCenterLinkRequest':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'GetMobileAppCategoryConstantRequest':"google.ads.google_ads.v4.proto.services.mobile_app_category_constant_service_pb2",
'GetMobileDeviceConstantRequest':"google.ads.google_ads.v4.proto.services.mobile_device_constant_service_pb2",
'GetOfflineUserDataJobRequest':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'GetOperatingSystemVersionConstantRequest':"google.ads.google_ads.v4.proto.services.operating_system_version_constant_service_pb2",
'GetPaidOrganicSearchTermViewRequest':"google.ads.google_ads.v4.proto.services.paid_organic_search_term_view_service_pb2",
'GetParentalStatusViewRequest':"google.ads.google_ads.v4.proto.services.parental_status_view_service_pb2",
'GetProductBiddingCategoryConstantRequest':"google.ads.google_ads.v4.proto.services.product_bidding_category_constant_service_pb2",
'GetProductGroupViewRequest':"google.ads.google_ads.v4.proto.services.product_group_view_service_pb2",
'GetRecommendationRequest':"google.ads.google_ads.v4.proto.services.recommendation_service_pb2",
'GetRemarketingActionRequest':"google.ads.google_ads.v4.proto.services.remarketing_action_service_pb2",
'GetSearchTermViewRequest':"google.ads.google_ads.v4.proto.services.search_term_view_service_pb2",
'GetSharedCriterionRequest':"google.ads.google_ads.v4.proto.services.shared_criterion_service_pb2",
'GetSharedSetRequest':"google.ads.google_ads.v4.proto.services.shared_set_service_pb2",
'GetShoppingPerformanceViewRequest':"google.ads.google_ads.v4.proto.services.shopping_performance_view_service_pb2",
'GetThirdPartyAppAnalyticsLinkRequest':"google.ads.google_ads.v4.proto.services.third_party_app_analytics_link_service_pb2",
'GetTopicConstantRequest':"google.ads.google_ads.v4.proto.services.topic_constant_service_pb2",
'GetTopicViewRequest':"google.ads.google_ads.v4.proto.services.topic_view_service_pb2",
'GetUserInterestRequest':"google.ads.google_ads.v4.proto.services.user_interest_service_pb2",
'GetUserListRequest':"google.ads.google_ads.v4.proto.services.user_list_service_pb2",
'GetUserLocationViewRequest':"google.ads.google_ads.v4.proto.services.user_location_view_service_pb2",
'GetVideoRequest':"google.ads.google_ads.v4.proto.services.video_service_pb2",
'GmailAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'GmailTeaser':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'GoogleAdsError':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'GoogleAdsFailure':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'GoogleAdsField':"google.ads.google_ads.v4.proto.resources.google_ads_field_pb2",
'GoogleAdsFieldCategoryEnum':"google.ads.google_ads.v4.proto.enums.google_ads_field_category_pb2",
'GoogleAdsFieldDataTypeEnum':"google.ads.google_ads.v4.proto.enums.google_ads_field_data_type_pb2",
'GoogleAdsRow':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'GraduateCampaignExperimentRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'GraduateCampaignExperimentResponse':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'GroupPlacementView':"google.ads.google_ads.v4.proto.resources.group_placement_view_pb2",
'HeaderErrorEnum':"google.ads.google_ads.v4.proto.errors.header_error_pb2",
'HotelAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'HotelAdvanceBookingWindowInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelCalloutFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'HotelCheckInDayInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelCityInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelClassInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelCountryRegionInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelDateSelectionTypeEnum':"google.ads.google_ads.v4.proto.enums.hotel_date_selection_type_pb2",
'HotelDateSelectionTypeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelGroupView':"google.ads.google_ads.v4.proto.resources.hotel_group_view_pb2",
'HotelIdInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelLengthOfStayInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'HotelPerformanceView':"google.ads.google_ads.v4.proto.resources.hotel_performance_view_pb2",
'HotelPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.hotel_placeholder_field_pb2",
'HotelPriceBucketEnum':"google.ads.google_ads.v4.proto.enums.hotel_price_bucket_pb2",
'HotelRateTypeEnum':"google.ads.google_ads.v4.proto.enums.hotel_rate_type_pb2",
'HotelStateInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'IdErrorEnum':"google.ads.google_ads.v4.proto.errors.id_error_pb2",
'ImageAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ImageAsset':"google.ads.google_ads.v4.proto.common.asset_types_pb2",
'ImageDimension':"google.ads.google_ads.v4.proto.common.asset_types_pb2",
'ImageErrorEnum':"google.ads.google_ads.v4.proto.errors.image_error_pb2",
'IncomeRangeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'IncomeRangeTypeEnum':"google.ads.google_ads.v4.proto.enums.income_range_type_pb2",
'IncomeRangeView':"google.ads.google_ads.v4.proto.resources.income_range_view_pb2",
'InteractionEventTypeEnum':"google.ads.google_ads.v4.proto.enums.interaction_event_type_pb2",
'InteractionTypeEnum':"google.ads.google_ads.v4.proto.enums.interaction_type_pb2",
'InteractionTypeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'InternalErrorEnum':"google.ads.google_ads.v4.proto.errors.internal_error_pb2",
'Invoice':"google.ads.google_ads.v4.proto.resources.invoice_pb2",
'InvoiceErrorEnum':"google.ads.google_ads.v4.proto.errors.invoice_error_pb2",
'InvoiceTypeEnum':"google.ads.google_ads.v4.proto.enums.invoice_type_pb2",
'IpBlockInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'JobPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.job_placeholder_field_pb2",
'Keyword':"google.ads.google_ads.v4.proto.common.segments_pb2",
'KeywordAndUrlSeed':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'KeywordInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'KeywordMatchTypeEnum':"google.ads.google_ads.v4.proto.enums.keyword_match_type_pb2",
'KeywordPlan':"google.ads.google_ads.v4.proto.resources.keyword_plan_pb2",
'KeywordPlanAdGroup':"google.ads.google_ads.v4.proto.resources.keyword_plan_ad_group_pb2",
'KeywordPlanAdGroupErrorEnum':"google.ads.google_ads.v4.proto.errors.keyword_plan_ad_group_error_pb2",
'KeywordPlanAdGroupForecast':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanAdGroupKeyword':"google.ads.google_ads.v4.proto.resources.keyword_plan_ad_group_keyword_pb2",
'KeywordPlanAdGroupKeywordErrorEnum':"google.ads.google_ads.v4.proto.errors.keyword_plan_ad_group_keyword_error_pb2",
'KeywordPlanAdGroupKeywordOperation':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_keyword_service_pb2",
'KeywordPlanAdGroupOperation':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_service_pb2",
'KeywordPlanCampaign':"google.ads.google_ads.v4.proto.resources.keyword_plan_campaign_pb2",
'KeywordPlanCampaignErrorEnum':"google.ads.google_ads.v4.proto.errors.keyword_plan_campaign_error_pb2",
'KeywordPlanCampaignForecast':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanCampaignForecastCurve':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanCampaignKeyword':"google.ads.google_ads.v4.proto.resources.keyword_plan_campaign_keyword_pb2",
'KeywordPlanCampaignKeywordErrorEnum':"google.ads.google_ads.v4.proto.errors.keyword_plan_campaign_keyword_error_pb2",
'KeywordPlanCampaignKeywordOperation':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_keyword_service_pb2",
'KeywordPlanCampaignOperation':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_service_pb2",
'KeywordPlanCompetitionLevelEnum':"google.ads.google_ads.v4.proto.enums.keyword_plan_competition_level_pb2",
'KeywordPlanErrorEnum':"google.ads.google_ads.v4.proto.errors.keyword_plan_error_pb2",
'KeywordPlanForecastIntervalEnum':"google.ads.google_ads.v4.proto.enums.keyword_plan_forecast_interval_pb2",
'KeywordPlanForecastPeriod':"google.ads.google_ads.v4.proto.resources.keyword_plan_pb2",
'KeywordPlanGeoTarget':"google.ads.google_ads.v4.proto.resources.keyword_plan_campaign_pb2",
'KeywordPlanHistoricalMetrics':"google.ads.google_ads.v4.proto.common.keyword_plan_common_pb2",
'KeywordPlanIdeaErrorEnum':"google.ads.google_ads.v4.proto.errors.keyword_plan_idea_error_pb2",
'KeywordPlanKeywordForecast':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanKeywordHistoricalMetrics':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanMaxCpcBidForecast':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanMaxCpcBidForecastCurve':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordPlanNetworkEnum':"google.ads.google_ads.v4.proto.enums.keyword_plan_network_pb2",
'KeywordPlanOperation':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'KeywordSeed':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'KeywordView':"google.ads.google_ads.v4.proto.resources.keyword_view_pb2",
'Label':"google.ads.google_ads.v4.proto.resources.label_pb2",
'LabelErrorEnum':"google.ads.google_ads.v4.proto.errors.label_error_pb2",
'LabelOperation':"google.ads.google_ads.v4.proto.services.label_service_pb2",
'LabelStatusEnum':"google.ads.google_ads.v4.proto.enums.label_status_pb2",
'LandingPageView':"google.ads.google_ads.v4.proto.resources.landing_page_view_pb2",
'LanguageCodeErrorEnum':"google.ads.google_ads.v4.proto.errors.language_code_error_pb2",
'LanguageConstant':"google.ads.google_ads.v4.proto.resources.language_constant_pb2",
'LanguageInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'LegacyAppInstallAdAppStoreEnum':"google.ads.google_ads.v4.proto.enums.legacy_app_install_ad_app_store_pb2",
'LegacyAppInstallAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'LegacyResponsiveDisplayAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'LinkedAccountTypeEnum':"google.ads.google_ads.v4.proto.enums.linked_account_type_pb2",
'ListAccessibleCustomersRequest':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'ListAccessibleCustomersResponse':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'ListBatchJobResultsRequest':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'ListBatchJobResultsResponse':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'ListCampaignDraftAsyncErrorsRequest':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'ListCampaignDraftAsyncErrorsResponse':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'ListCampaignExperimentAsyncErrorsRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'ListCampaignExperimentAsyncErrorsResponse':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'ListInvoicesRequest':"google.ads.google_ads.v4.proto.services.invoice_service_pb2",
'ListInvoicesResponse':"google.ads.google_ads.v4.proto.services.invoice_service_pb2",
'ListMerchantCenterLinksRequest':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'ListMerchantCenterLinksResponse':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'ListOperationErrorEnum':"google.ads.google_ads.v4.proto.errors.list_operation_error_pb2",
'ListPaymentsAccountsRequest':"google.ads.google_ads.v4.proto.services.payments_account_service_pb2",
'ListPaymentsAccountsResponse':"google.ads.google_ads.v4.proto.services.payments_account_service_pb2",
'ListPlannableLocationsRequest':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ListPlannableLocationsResponse':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ListPlannableProductsRequest':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ListPlannableProductsResponse':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ListingDimensionInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ListingGroupInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ListingGroupTypeEnum':"google.ads.google_ads.v4.proto.enums.listing_group_type_pb2",
'ListingScopeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'LocalAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'LocalPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.local_placeholder_field_pb2",
'LocationExtensionTargetingCriterionFieldEnum':"google.ads.google_ads.v4.proto.enums.location_extension_targeting_criterion_field_pb2",
'LocationFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'LocationGroupInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'LocationGroupRadiusUnitsEnum':"google.ads.google_ads.v4.proto.enums.location_group_radius_units_pb2",
'LocationInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'LocationPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.location_placeholder_field_pb2",
'LocationSourceTypeEnum':"google.ads.google_ads.v4.proto.enums.location_source_type_pb2",
'LocationView':"google.ads.google_ads.v4.proto.resources.location_view_pb2",
'LogicalUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'LogicalUserListOperandInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'ManagedPlacementView':"google.ads.google_ads.v4.proto.resources.managed_placement_view_pb2",
'ManagerLinkErrorEnum':"google.ads.google_ads.v4.proto.errors.manager_link_error_pb2",
'ManagerLinkStatusEnum':"google.ads.google_ads.v4.proto.enums.manager_link_status_pb2",
'ManualCpc':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'ManualCpm':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'ManualCpv':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'MatchingFunction':"google.ads.google_ads.v4.proto.common.matching_function_pb2",
'MatchingFunctionContextTypeEnum':"google.ads.google_ads.v4.proto.enums.matching_function_context_type_pb2",
'MatchingFunctionOperatorEnum':"google.ads.google_ads.v4.proto.enums.matching_function_operator_pb2",
'MaximizeConversionValue':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'MaximizeConversions':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'MediaAudio':"google.ads.google_ads.v4.proto.resources.media_file_pb2",
'MediaBundle':"google.ads.google_ads.v4.proto.resources.media_file_pb2",
'MediaBundleAsset':"google.ads.google_ads.v4.proto.common.asset_types_pb2",
'MediaBundleErrorEnum':"google.ads.google_ads.v4.proto.errors.media_bundle_error_pb2",
'MediaFile':"google.ads.google_ads.v4.proto.resources.media_file_pb2",
'MediaFileErrorEnum':"google.ads.google_ads.v4.proto.errors.media_file_error_pb2",
'MediaFileOperation':"google.ads.google_ads.v4.proto.services.media_file_service_pb2",
'MediaImage':"google.ads.google_ads.v4.proto.resources.media_file_pb2",
'MediaTypeEnum':"google.ads.google_ads.v4.proto.enums.media_type_pb2",
'MediaUploadErrorEnum':"google.ads.google_ads.v4.proto.errors.media_upload_error_pb2",
'MediaVideo':"google.ads.google_ads.v4.proto.resources.media_file_pb2",
'MerchantCenterLink':"google.ads.google_ads.v4.proto.resources.merchant_center_link_pb2",
'MerchantCenterLinkOperation':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'MerchantCenterLinkStatusEnum':"google.ads.google_ads.v4.proto.enums.merchant_center_link_status_pb2",
'MessagePlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.message_placeholder_field_pb2",
'Metrics':"google.ads.google_ads.v4.proto.common.metrics_pb2",
'MimeTypeEnum':"google.ads.google_ads.v4.proto.enums.mime_type_pb2",
'MinuteOfHourEnum':"google.ads.google_ads.v4.proto.enums.minute_of_hour_pb2",
'MobileAppCategoryConstant':"google.ads.google_ads.v4.proto.resources.mobile_app_category_constant_pb2",
'MobileAppCategoryInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'MobileAppVendorEnum':"google.ads.google_ads.v4.proto.enums.mobile_app_vendor_pb2",
'MobileApplicationInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'MobileDeviceConstant':"google.ads.google_ads.v4.proto.resources.mobile_device_constant_pb2",
'MobileDeviceInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'MobileDeviceTypeEnum':"google.ads.google_ads.v4.proto.enums.mobile_device_type_pb2",
'Money':"google.ads.google_ads.v4.proto.common.feed_common_pb2",
'MonthOfYearEnum':"google.ads.google_ads.v4.proto.enums.month_of_year_pb2",
'MonthlySearchVolume':"google.ads.google_ads.v4.proto.common.keyword_plan_common_pb2",
'MoveManagerLinkRequest':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'MoveManagerLinkResponse':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'MultiplierErrorEnum':"google.ads.google_ads.v4.proto.errors.multiplier_error_pb2",
'MutateAccountBudgetProposalRequest':"google.ads.google_ads.v4.proto.services.account_budget_proposal_service_pb2",
'MutateAccountBudgetProposalResponse':"google.ads.google_ads.v4.proto.services.account_budget_proposal_service_pb2",
'MutateAccountBudgetProposalResult':"google.ads.google_ads.v4.proto.services.account_budget_proposal_service_pb2",
'MutateAccountLinkRequest':"google.ads.google_ads.v4.proto.services.account_link_service_pb2",
'MutateAccountLinkResponse':"google.ads.google_ads.v4.proto.services.account_link_service_pb2",
'MutateAccountLinkResult':"google.ads.google_ads.v4.proto.services.account_link_service_pb2",
'MutateAdGroupAdLabelResult':"google.ads.google_ads.v4.proto.services.ad_group_ad_label_service_pb2",
'MutateAdGroupAdLabelsRequest':"google.ads.google_ads.v4.proto.services.ad_group_ad_label_service_pb2",
'MutateAdGroupAdLabelsResponse':"google.ads.google_ads.v4.proto.services.ad_group_ad_label_service_pb2",
'MutateAdGroupAdResult':"google.ads.google_ads.v4.proto.services.ad_group_ad_service_pb2",
'MutateAdGroupAdsRequest':"google.ads.google_ads.v4.proto.services.ad_group_ad_service_pb2",
'MutateAdGroupAdsResponse':"google.ads.google_ads.v4.proto.services.ad_group_ad_service_pb2",
'MutateAdGroupBidModifierResult':"google.ads.google_ads.v4.proto.services.ad_group_bid_modifier_service_pb2",
'MutateAdGroupBidModifiersRequest':"google.ads.google_ads.v4.proto.services.ad_group_bid_modifier_service_pb2",
'MutateAdGroupBidModifiersResponse':"google.ads.google_ads.v4.proto.services.ad_group_bid_modifier_service_pb2",
'MutateAdGroupCriteriaRequest':"google.ads.google_ads.v4.proto.services.ad_group_criterion_service_pb2",
'MutateAdGroupCriteriaResponse':"google.ads.google_ads.v4.proto.services.ad_group_criterion_service_pb2",
'MutateAdGroupCriterionLabelResult':"google.ads.google_ads.v4.proto.services.ad_group_criterion_label_service_pb2",
'MutateAdGroupCriterionLabelsRequest':"google.ads.google_ads.v4.proto.services.ad_group_criterion_label_service_pb2",
'MutateAdGroupCriterionLabelsResponse':"google.ads.google_ads.v4.proto.services.ad_group_criterion_label_service_pb2",
'MutateAdGroupCriterionResult':"google.ads.google_ads.v4.proto.services.ad_group_criterion_service_pb2",
'MutateAdGroupExtensionSettingResult':"google.ads.google_ads.v4.proto.services.ad_group_extension_setting_service_pb2",
'MutateAdGroupExtensionSettingsRequest':"google.ads.google_ads.v4.proto.services.ad_group_extension_setting_service_pb2",
'MutateAdGroupExtensionSettingsResponse':"google.ads.google_ads.v4.proto.services.ad_group_extension_setting_service_pb2",
'MutateAdGroupFeedResult':"google.ads.google_ads.v4.proto.services.ad_group_feed_service_pb2",
'MutateAdGroupFeedsRequest':"google.ads.google_ads.v4.proto.services.ad_group_feed_service_pb2",
'MutateAdGroupFeedsResponse':"google.ads.google_ads.v4.proto.services.ad_group_feed_service_pb2",
'MutateAdGroupLabelResult':"google.ads.google_ads.v4.proto.services.ad_group_label_service_pb2",
'MutateAdGroupLabelsRequest':"google.ads.google_ads.v4.proto.services.ad_group_label_service_pb2",
'MutateAdGroupLabelsResponse':"google.ads.google_ads.v4.proto.services.ad_group_label_service_pb2",
'MutateAdGroupResult':"google.ads.google_ads.v4.proto.services.ad_group_service_pb2",
'MutateAdGroupsRequest':"google.ads.google_ads.v4.proto.services.ad_group_service_pb2",
'MutateAdGroupsResponse':"google.ads.google_ads.v4.proto.services.ad_group_service_pb2",
'MutateAdParameterResult':"google.ads.google_ads.v4.proto.services.ad_parameter_service_pb2",
'MutateAdParametersRequest':"google.ads.google_ads.v4.proto.services.ad_parameter_service_pb2",
'MutateAdParametersResponse':"google.ads.google_ads.v4.proto.services.ad_parameter_service_pb2",
'MutateAdResult':"google.ads.google_ads.v4.proto.services.ad_service_pb2",
'MutateAdsRequest':"google.ads.google_ads.v4.proto.services.ad_service_pb2",
'MutateAdsResponse':"google.ads.google_ads.v4.proto.services.ad_service_pb2",
'MutateAssetResult':"google.ads.google_ads.v4.proto.services.asset_service_pb2",
'MutateAssetsRequest':"google.ads.google_ads.v4.proto.services.asset_service_pb2",
'MutateAssetsResponse':"google.ads.google_ads.v4.proto.services.asset_service_pb2",
'MutateBatchJobRequest':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'MutateBatchJobResponse':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'MutateBatchJobResult':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'MutateBiddingStrategiesRequest':"google.ads.google_ads.v4.proto.services.bidding_strategy_service_pb2",
'MutateBiddingStrategiesResponse':"google.ads.google_ads.v4.proto.services.bidding_strategy_service_pb2",
'MutateBiddingStrategyResult':"google.ads.google_ads.v4.proto.services.bidding_strategy_service_pb2",
'MutateBillingSetupRequest':"google.ads.google_ads.v4.proto.services.billing_setup_service_pb2",
'MutateBillingSetupResponse':"google.ads.google_ads.v4.proto.services.billing_setup_service_pb2",
'MutateBillingSetupResult':"google.ads.google_ads.v4.proto.services.billing_setup_service_pb2",
'MutateCampaignBidModifierResult':"google.ads.google_ads.v4.proto.services.campaign_bid_modifier_service_pb2",
'MutateCampaignBidModifiersRequest':"google.ads.google_ads.v4.proto.services.campaign_bid_modifier_service_pb2",
'MutateCampaignBidModifiersResponse':"google.ads.google_ads.v4.proto.services.campaign_bid_modifier_service_pb2",
'MutateCampaignBudgetResult':"google.ads.google_ads.v4.proto.services.campaign_budget_service_pb2",
'MutateCampaignBudgetsRequest':"google.ads.google_ads.v4.proto.services.campaign_budget_service_pb2",
'MutateCampaignBudgetsResponse':"google.ads.google_ads.v4.proto.services.campaign_budget_service_pb2",
'MutateCampaignCriteriaRequest':"google.ads.google_ads.v4.proto.services.campaign_criterion_service_pb2",
'MutateCampaignCriteriaResponse':"google.ads.google_ads.v4.proto.services.campaign_criterion_service_pb2",
'MutateCampaignCriterionResult':"google.ads.google_ads.v4.proto.services.campaign_criterion_service_pb2",
'MutateCampaignDraftResult':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'MutateCampaignDraftsRequest':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'MutateCampaignDraftsResponse':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'MutateCampaignExperimentResult':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'MutateCampaignExperimentsRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'MutateCampaignExperimentsResponse':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'MutateCampaignExtensionSettingResult':"google.ads.google_ads.v4.proto.services.campaign_extension_setting_service_pb2",
'MutateCampaignExtensionSettingsRequest':"google.ads.google_ads.v4.proto.services.campaign_extension_setting_service_pb2",
'MutateCampaignExtensionSettingsResponse':"google.ads.google_ads.v4.proto.services.campaign_extension_setting_service_pb2",
'MutateCampaignFeedResult':"google.ads.google_ads.v4.proto.services.campaign_feed_service_pb2",
'MutateCampaignFeedsRequest':"google.ads.google_ads.v4.proto.services.campaign_feed_service_pb2",
'MutateCampaignFeedsResponse':"google.ads.google_ads.v4.proto.services.campaign_feed_service_pb2",
'MutateCampaignLabelResult':"google.ads.google_ads.v4.proto.services.campaign_label_service_pb2",
'MutateCampaignLabelsRequest':"google.ads.google_ads.v4.proto.services.campaign_label_service_pb2",
'MutateCampaignLabelsResponse':"google.ads.google_ads.v4.proto.services.campaign_label_service_pb2",
'MutateCampaignResult':"google.ads.google_ads.v4.proto.services.campaign_service_pb2",
'MutateCampaignSharedSetResult':"google.ads.google_ads.v4.proto.services.campaign_shared_set_service_pb2",
'MutateCampaignSharedSetsRequest':"google.ads.google_ads.v4.proto.services.campaign_shared_set_service_pb2",
'MutateCampaignSharedSetsResponse':"google.ads.google_ads.v4.proto.services.campaign_shared_set_service_pb2",
'MutateCampaignsRequest':"google.ads.google_ads.v4.proto.services.campaign_service_pb2",
'MutateCampaignsResponse':"google.ads.google_ads.v4.proto.services.campaign_service_pb2",
'MutateConversionActionResult':"google.ads.google_ads.v4.proto.services.conversion_action_service_pb2",
'MutateConversionActionsRequest':"google.ads.google_ads.v4.proto.services.conversion_action_service_pb2",
'MutateConversionActionsResponse':"google.ads.google_ads.v4.proto.services.conversion_action_service_pb2",
'MutateCustomInterestResult':"google.ads.google_ads.v4.proto.services.custom_interest_service_pb2",
'MutateCustomInterestsRequest':"google.ads.google_ads.v4.proto.services.custom_interest_service_pb2",
'MutateCustomInterestsResponse':"google.ads.google_ads.v4.proto.services.custom_interest_service_pb2",
'MutateCustomerClientLinkRequest':"google.ads.google_ads.v4.proto.services.customer_client_link_service_pb2",
'MutateCustomerClientLinkResponse':"google.ads.google_ads.v4.proto.services.customer_client_link_service_pb2",
'MutateCustomerClientLinkResult':"google.ads.google_ads.v4.proto.services.customer_client_link_service_pb2",
'MutateCustomerExtensionSettingResult':"google.ads.google_ads.v4.proto.services.customer_extension_setting_service_pb2",
'MutateCustomerExtensionSettingsRequest':"google.ads.google_ads.v4.proto.services.customer_extension_setting_service_pb2",
'MutateCustomerExtensionSettingsResponse':"google.ads.google_ads.v4.proto.services.customer_extension_setting_service_pb2",
'MutateCustomerFeedResult':"google.ads.google_ads.v4.proto.services.customer_feed_service_pb2",
'MutateCustomerFeedsRequest':"google.ads.google_ads.v4.proto.services.customer_feed_service_pb2",
'MutateCustomerFeedsResponse':"google.ads.google_ads.v4.proto.services.customer_feed_service_pb2",
'MutateCustomerLabelResult':"google.ads.google_ads.v4.proto.services.customer_label_service_pb2",
'MutateCustomerLabelsRequest':"google.ads.google_ads.v4.proto.services.customer_label_service_pb2",
'MutateCustomerLabelsResponse':"google.ads.google_ads.v4.proto.services.customer_label_service_pb2",
'MutateCustomerManagerLinkRequest':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'MutateCustomerManagerLinkResponse':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'MutateCustomerManagerLinkResult':"google.ads.google_ads.v4.proto.services.customer_manager_link_service_pb2",
'MutateCustomerNegativeCriteriaRequest':"google.ads.google_ads.v4.proto.services.customer_negative_criterion_service_pb2",
'MutateCustomerNegativeCriteriaResponse':"google.ads.google_ads.v4.proto.services.customer_negative_criterion_service_pb2",
'MutateCustomerNegativeCriteriaResult':"google.ads.google_ads.v4.proto.services.customer_negative_criterion_service_pb2",
'MutateCustomerRequest':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'MutateCustomerResponse':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'MutateCustomerResult':"google.ads.google_ads.v4.proto.services.customer_service_pb2",
'MutateErrorEnum':"google.ads.google_ads.v4.proto.errors.mutate_error_pb2",
'MutateExtensionFeedItemResult':"google.ads.google_ads.v4.proto.services.extension_feed_item_service_pb2",
'MutateExtensionFeedItemsRequest':"google.ads.google_ads.v4.proto.services.extension_feed_item_service_pb2",
'MutateExtensionFeedItemsResponse':"google.ads.google_ads.v4.proto.services.extension_feed_item_service_pb2",
'MutateFeedItemResult':"google.ads.google_ads.v4.proto.services.feed_item_service_pb2",
'MutateFeedItemTargetResult':"google.ads.google_ads.v4.proto.services.feed_item_target_service_pb2",
'MutateFeedItemTargetsRequest':"google.ads.google_ads.v4.proto.services.feed_item_target_service_pb2",
'MutateFeedItemTargetsResponse':"google.ads.google_ads.v4.proto.services.feed_item_target_service_pb2",
'MutateFeedItemsRequest':"google.ads.google_ads.v4.proto.services.feed_item_service_pb2",
'MutateFeedItemsResponse':"google.ads.google_ads.v4.proto.services.feed_item_service_pb2",
'MutateFeedMappingResult':"google.ads.google_ads.v4.proto.services.feed_mapping_service_pb2",
'MutateFeedMappingsRequest':"google.ads.google_ads.v4.proto.services.feed_mapping_service_pb2",
'MutateFeedMappingsResponse':"google.ads.google_ads.v4.proto.services.feed_mapping_service_pb2",
'MutateFeedResult':"google.ads.google_ads.v4.proto.services.feed_service_pb2",
'MutateFeedsRequest':"google.ads.google_ads.v4.proto.services.feed_service_pb2",
'MutateFeedsResponse':"google.ads.google_ads.v4.proto.services.feed_service_pb2",
'MutateGoogleAdsRequest':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'MutateGoogleAdsResponse':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'MutateKeywordPlanAdGroupKeywordResult':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_keyword_service_pb2",
'MutateKeywordPlanAdGroupKeywordsRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_keyword_service_pb2",
'MutateKeywordPlanAdGroupKeywordsResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_keyword_service_pb2",
'MutateKeywordPlanAdGroupResult':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_service_pb2",
'MutateKeywordPlanAdGroupsRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_service_pb2",
'MutateKeywordPlanAdGroupsResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_ad_group_service_pb2",
'MutateKeywordPlanCampaignKeywordResult':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_keyword_service_pb2",
'MutateKeywordPlanCampaignKeywordsRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_keyword_service_pb2",
'MutateKeywordPlanCampaignKeywordsResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_keyword_service_pb2",
'MutateKeywordPlanCampaignResult':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_service_pb2",
'MutateKeywordPlanCampaignsRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_service_pb2",
'MutateKeywordPlanCampaignsResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_campaign_service_pb2",
'MutateKeywordPlansRequest':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'MutateKeywordPlansResponse':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'MutateKeywordPlansResult':"google.ads.google_ads.v4.proto.services.keyword_plan_service_pb2",
'MutateLabelResult':"google.ads.google_ads.v4.proto.services.label_service_pb2",
'MutateLabelsRequest':"google.ads.google_ads.v4.proto.services.label_service_pb2",
'MutateLabelsResponse':"google.ads.google_ads.v4.proto.services.label_service_pb2",
'MutateMediaFileResult':"google.ads.google_ads.v4.proto.services.media_file_service_pb2",
'MutateMediaFilesRequest':"google.ads.google_ads.v4.proto.services.media_file_service_pb2",
'MutateMediaFilesResponse':"google.ads.google_ads.v4.proto.services.media_file_service_pb2",
'MutateMerchantCenterLinkRequest':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'MutateMerchantCenterLinkResponse':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'MutateMerchantCenterLinkResult':"google.ads.google_ads.v4.proto.services.merchant_center_link_service_pb2",
'MutateOperation':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'MutateOperationResponse':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'MutateRemarketingActionResult':"google.ads.google_ads.v4.proto.services.remarketing_action_service_pb2",
'MutateRemarketingActionsRequest':"google.ads.google_ads.v4.proto.services.remarketing_action_service_pb2",
'MutateRemarketingActionsResponse':"google.ads.google_ads.v4.proto.services.remarketing_action_service_pb2",
'MutateSharedCriteriaRequest':"google.ads.google_ads.v4.proto.services.shared_criterion_service_pb2",
'MutateSharedCriteriaResponse':"google.ads.google_ads.v4.proto.services.shared_criterion_service_pb2",
'MutateSharedCriterionResult':"google.ads.google_ads.v4.proto.services.shared_criterion_service_pb2",
'MutateSharedSetResult':"google.ads.google_ads.v4.proto.services.shared_set_service_pb2",
'MutateSharedSetsRequest':"google.ads.google_ads.v4.proto.services.shared_set_service_pb2",
'MutateSharedSetsResponse':"google.ads.google_ads.v4.proto.services.shared_set_service_pb2",
'MutateUserListResult':"google.ads.google_ads.v4.proto.services.user_list_service_pb2",
'MutateUserListsRequest':"google.ads.google_ads.v4.proto.services.user_list_service_pb2",
'MutateUserListsResponse':"google.ads.google_ads.v4.proto.services.user_list_service_pb2",
'NegativeGeoTargetTypeEnum':"google.ads.google_ads.v4.proto.enums.negative_geo_target_type_pb2",
'NewResourceCreationErrorEnum':"google.ads.google_ads.v4.proto.errors.new_resource_creation_error_pb2",
'NotEmptyErrorEnum':"google.ads.google_ads.v4.proto.errors.not_empty_error_pb2",
'NotWhitelistedErrorEnum':"google.ads.google_ads.v4.proto.errors.not_whitelisted_error_pb2",
'NullErrorEnum':"google.ads.google_ads.v4.proto.errors.null_error_pb2",
'OfflineUserAddressInfo':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'OfflineUserDataJob':"google.ads.google_ads.v4.proto.resources.offline_user_data_job_pb2",
'OfflineUserDataJobErrorEnum':"google.ads.google_ads.v4.proto.errors.offline_user_data_job_error_pb2",
'OfflineUserDataJobFailureReasonEnum':"google.ads.google_ads.v4.proto.enums.offline_user_data_job_failure_reason_pb2",
'OfflineUserDataJobOperation':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'OfflineUserDataJobStatusEnum':"google.ads.google_ads.v4.proto.enums.offline_user_data_job_status_pb2",
'OfflineUserDataJobTypeEnum':"google.ads.google_ads.v4.proto.enums.offline_user_data_job_type_pb2",
'OnTargetAudienceMetrics':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'Operand':"google.ads.google_ads.v4.proto.common.matching_function_pb2",
'OperatingSystemVersionConstant':"google.ads.google_ads.v4.proto.resources.operating_system_version_constant_pb2",
'OperatingSystemVersionInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'OperatingSystemVersionOperatorTypeEnum':"google.ads.google_ads.v4.proto.enums.operating_system_version_operator_type_pb2",
'OperationAccessDeniedErrorEnum':"google.ads.google_ads.v4.proto.errors.operation_access_denied_error_pb2",
'OperatorErrorEnum':"google.ads.google_ads.v4.proto.errors.operator_error_pb2",
'OptimizationGoalTypeEnum':"google.ads.google_ads.v4.proto.enums.optimization_goal_type_pb2",
'PaidOrganicSearchTermView':"google.ads.google_ads.v4.proto.resources.paid_organic_search_term_view_pb2",
'ParentalStatusInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ParentalStatusTypeEnum':"google.ads.google_ads.v4.proto.enums.parental_status_type_pb2",
'ParentalStatusView':"google.ads.google_ads.v4.proto.resources.parental_status_view_pb2",
'PartialFailureErrorEnum':"google.ads.google_ads.v4.proto.errors.partial_failure_error_pb2",
'PaymentModeEnum':"google.ads.google_ads.v4.proto.enums.payment_mode_pb2",
'PaymentsAccount':"google.ads.google_ads.v4.proto.resources.payments_account_pb2",
'PaymentsAccountErrorEnum':"google.ads.google_ads.v4.proto.errors.payments_account_error_pb2",
'PercentCpc':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'PlaceholderTypeEnum':"google.ads.google_ads.v4.proto.enums.placeholder_type_pb2",
'PlacementInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'PlacementTypeEnum':"google.ads.google_ads.v4.proto.enums.placement_type_pb2",
'PlannableLocation':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'PlannableTargeting':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'PlannedProduct':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'PolicyApprovalStatusEnum':"google.ads.google_ads.v4.proto.enums.policy_approval_status_pb2",
'PolicyFindingDetails':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'PolicyFindingErrorEnum':"google.ads.google_ads.v4.proto.errors.policy_finding_error_pb2",
'PolicyReviewStatusEnum':"google.ads.google_ads.v4.proto.enums.policy_review_status_pb2",
'PolicyTopicConstraint':"google.ads.google_ads.v4.proto.common.policy_pb2",
'PolicyTopicEntry':"google.ads.google_ads.v4.proto.common.policy_pb2",
'PolicyTopicEntryTypeEnum':"google.ads.google_ads.v4.proto.enums.policy_topic_entry_type_pb2",
'PolicyTopicEvidence':"google.ads.google_ads.v4.proto.common.policy_pb2",
'PolicyTopicEvidenceDestinationMismatchUrlTypeEnum':"google.ads.google_ads.v4.proto.enums.policy_topic_evidence_destination_mismatch_url_type_pb2",
'PolicyTopicEvidenceDestinationNotWorkingDeviceEnum':"google.ads.google_ads.v4.proto.enums.policy_topic_evidence_destination_not_working_device_pb2",
'PolicyTopicEvidenceDestinationNotWorkingDnsErrorTypeEnum':"google.ads.google_ads.v4.proto.enums.policy_topic_evidence_destination_not_working_dns_error_type_pb2",
'PolicyValidationParameter':"google.ads.google_ads.v4.proto.common.policy_pb2",
'PolicyValidationParameterErrorEnum':"google.ads.google_ads.v4.proto.errors.policy_validation_parameter_error_pb2",
'PolicyViolationDetails':"google.ads.google_ads.v4.proto.errors.errors_pb2",
'PolicyViolationErrorEnum':"google.ads.google_ads.v4.proto.errors.policy_violation_error_pb2",
'PolicyViolationKey':"google.ads.google_ads.v4.proto.common.policy_pb2",
'PositiveGeoTargetTypeEnum':"google.ads.google_ads.v4.proto.enums.positive_geo_target_type_pb2",
'Preferences':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'PreferredContentInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'PreferredContentTypeEnum':"google.ads.google_ads.v4.proto.enums.preferred_content_type_pb2",
'PriceExtensionPriceQualifierEnum':"google.ads.google_ads.v4.proto.enums.price_extension_price_qualifier_pb2",
'PriceExtensionPriceUnitEnum':"google.ads.google_ads.v4.proto.enums.price_extension_price_unit_pb2",
'PriceExtensionTypeEnum':"google.ads.google_ads.v4.proto.enums.price_extension_type_pb2",
'PriceFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'PriceOffer':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'PricePlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.price_placeholder_field_pb2",
'ProductAllocation':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ProductBiddingCategoryConstant':"google.ads.google_ads.v4.proto.resources.product_bidding_category_constant_pb2",
'ProductBiddingCategoryInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductBiddingCategoryLevelEnum':"google.ads.google_ads.v4.proto.enums.product_bidding_category_level_pb2",
'ProductBiddingCategoryStatusEnum':"google.ads.google_ads.v4.proto.enums.product_bidding_category_status_pb2",
'ProductBrandInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductChannelEnum':"google.ads.google_ads.v4.proto.enums.product_channel_pb2",
'ProductChannelExclusivityEnum':"google.ads.google_ads.v4.proto.enums.product_channel_exclusivity_pb2",
'ProductChannelExclusivityInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductChannelInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductConditionEnum':"google.ads.google_ads.v4.proto.enums.product_condition_pb2",
'ProductConditionInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductCustomAttributeIndexEnum':"google.ads.google_ads.v4.proto.enums.product_custom_attribute_index_pb2",
'ProductCustomAttributeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductGroupView':"google.ads.google_ads.v4.proto.resources.product_group_view_pb2",
'ProductImage':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ProductItemIdInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductMetadata':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ProductTypeInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProductTypeLevelEnum':"google.ads.google_ads.v4.proto.enums.product_type_level_pb2",
'ProductVideo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'PromoteCampaignDraftRequest':"google.ads.google_ads.v4.proto.services.campaign_draft_service_pb2",
'PromoteCampaignExperimentRequest':"google.ads.google_ads.v4.proto.services.campaign_experiment_service_pb2",
'PromotionExtensionDiscountModifierEnum':"google.ads.google_ads.v4.proto.enums.promotion_extension_discount_modifier_pb2",
'PromotionExtensionOccasionEnum':"google.ads.google_ads.v4.proto.enums.promotion_extension_occasion_pb2",
'PromotionFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'PromotionPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.promotion_placeholder_field_pb2",
'ProximityInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'ProximityRadiusUnitsEnum':"google.ads.google_ads.v4.proto.enums.proximity_radius_units_pb2",
'QualityScoreBucketEnum':"google.ads.google_ads.v4.proto.enums.quality_score_bucket_pb2",
'QueryErrorEnum':"google.ads.google_ads.v4.proto.errors.query_error_pb2",
'QuotaErrorEnum':"google.ads.google_ads.v4.proto.errors.quota_error_pb2",
'RangeErrorEnum':"google.ads.google_ads.v4.proto.errors.range_error_pb2",
'ReachCurve':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ReachForecast':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'ReachPlanAdLengthEnum':"google.ads.google_ads.v4.proto.enums.reach_plan_ad_length_pb2",
'ReachPlanAgeRangeEnum':"google.ads.google_ads.v4.proto.enums.reach_plan_age_range_pb2",
'ReachPlanErrorEnum':"google.ads.google_ads.v4.proto.errors.reach_plan_error_pb2",
'ReachPlanNetworkEnum':"google.ads.google_ads.v4.proto.enums.reach_plan_network_pb2",
'RealEstatePlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.real_estate_placeholder_field_pb2",
'RealTimeBiddingSetting':"google.ads.google_ads.v4.proto.common.real_time_bidding_setting_pb2",
'Recommendation':"google.ads.google_ads.v4.proto.resources.recommendation_pb2",
'RecommendationErrorEnum':"google.ads.google_ads.v4.proto.errors.recommendation_error_pb2",
'RecommendationTypeEnum':"google.ads.google_ads.v4.proto.enums.recommendation_type_pb2",
'RegionCodeErrorEnum':"google.ads.google_ads.v4.proto.errors.region_code_error_pb2",
'RemarketingAction':"google.ads.google_ads.v4.proto.resources.remarketing_action_pb2",
'RemarketingActionOperation':"google.ads.google_ads.v4.proto.services.remarketing_action_service_pb2",
'RemarketingSetting':"google.ads.google_ads.v4.proto.resources.customer_pb2",
'RequestErrorEnum':"google.ads.google_ads.v4.proto.errors.request_error_pb2",
'ResourceAccessDeniedErrorEnum':"google.ads.google_ads.v4.proto.errors.resource_access_denied_error_pb2",
'ResourceCountLimitExceededErrorEnum':"google.ads.google_ads.v4.proto.errors.resource_count_limit_exceeded_error_pb2",
'ResponsiveDisplayAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ResponsiveSearchAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'RestatementValue':"google.ads.google_ads.v4.proto.services.conversion_adjustment_upload_service_pb2",
'RuleBasedUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'RunBatchJobRequest':"google.ads.google_ads.v4.proto.services.batch_job_service_pb2",
'RunOfflineUserDataJobRequest':"google.ads.google_ads.v4.proto.services.offline_user_data_job_service_pb2",
'SearchEngineResultsPageTypeEnum':"google.ads.google_ads.v4.proto.enums.search_engine_results_page_type_pb2",
'SearchGoogleAdsFieldsRequest':"google.ads.google_ads.v4.proto.services.google_ads_field_service_pb2",
'SearchGoogleAdsFieldsResponse':"google.ads.google_ads.v4.proto.services.google_ads_field_service_pb2",
'SearchGoogleAdsRequest':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'SearchGoogleAdsResponse':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'SearchGoogleAdsStreamRequest':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'SearchGoogleAdsStreamResponse':"google.ads.google_ads.v4.proto.services.google_ads_service_pb2",
'SearchTermMatchTypeEnum':"google.ads.google_ads.v4.proto.enums.search_term_match_type_pb2",
'SearchTermTargetingStatusEnum':"google.ads.google_ads.v4.proto.enums.search_term_targeting_status_pb2",
'SearchTermView':"google.ads.google_ads.v4.proto.resources.search_term_view_pb2",
'Segments':"google.ads.google_ads.v4.proto.common.segments_pb2",
'ServedAssetFieldTypeEnum':"google.ads.google_ads.v4.proto.enums.served_asset_field_type_pb2",
'SettingErrorEnum':"google.ads.google_ads.v4.proto.errors.setting_error_pb2",
'SharedCriterion':"google.ads.google_ads.v4.proto.resources.shared_criterion_pb2",
'SharedCriterionErrorEnum':"google.ads.google_ads.v4.proto.errors.shared_criterion_error_pb2",
'SharedCriterionOperation':"google.ads.google_ads.v4.proto.services.shared_criterion_service_pb2",
'SharedSet':"google.ads.google_ads.v4.proto.resources.shared_set_pb2",
'SharedSetErrorEnum':"google.ads.google_ads.v4.proto.errors.shared_set_error_pb2",
'SharedSetOperation':"google.ads.google_ads.v4.proto.services.shared_set_service_pb2",
'SharedSetStatusEnum':"google.ads.google_ads.v4.proto.enums.shared_set_status_pb2",
'SharedSetTypeEnum':"google.ads.google_ads.v4.proto.enums.shared_set_type_pb2",
'ShoppingComparisonListingAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ShoppingPerformanceView':"google.ads.google_ads.v4.proto.resources.shopping_performance_view_pb2",
'ShoppingProductAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'ShoppingSmartAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'SimilarUserListInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'SimulationModificationMethodEnum':"google.ads.google_ads.v4.proto.enums.simulation_modification_method_pb2",
'SimulationTypeEnum':"google.ads.google_ads.v4.proto.enums.simulation_type_pb2",
'SiteSeed':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'SitelinkFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'SitelinkPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.sitelink_placeholder_field_pb2",
'SizeLimitErrorEnum':"google.ads.google_ads.v4.proto.errors.size_limit_error_pb2",
'SlotEnum':"google.ads.google_ads.v4.proto.enums.slot_pb2",
'SpendingLimitTypeEnum':"google.ads.google_ads.v4.proto.enums.spending_limit_type_pb2",
'StoreAttribute':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'StoreSalesMetadata':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'StoreSalesThirdPartyMetadata':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'StringFormatErrorEnum':"google.ads.google_ads.v4.proto.errors.string_format_error_pb2",
'StringLengthErrorEnum':"google.ads.google_ads.v4.proto.errors.string_length_error_pb2",
'StructuredSnippetFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'StructuredSnippetPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.structured_snippet_placeholder_field_pb2",
'SuggestGeoTargetConstantsRequest':"google.ads.google_ads.v4.proto.services.geo_target_constant_service_pb2",
'SuggestGeoTargetConstantsResponse':"google.ads.google_ads.v4.proto.services.geo_target_constant_service_pb2",
'SummaryRowSettingEnum':"google.ads.google_ads.v4.proto.enums.summary_row_setting_pb2",
'SystemManagedResourceSourceEnum':"google.ads.google_ads.v4.proto.enums.system_managed_entity_source_pb2",
'TagSnippet':"google.ads.google_ads.v4.proto.common.tag_snippet_pb2",
'TargetCpa':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'TargetCpaOptInRecommendationGoalEnum':"google.ads.google_ads.v4.proto.enums.target_cpa_opt_in_recommendation_goal_pb2",
'TargetCpaSimulationPoint':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'TargetCpaSimulationPointList':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'TargetCpm':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'TargetImpressionShare':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'TargetImpressionShareLocationEnum':"google.ads.google_ads.v4.proto.enums.target_impression_share_location_pb2",
'TargetRestriction':"google.ads.google_ads.v4.proto.common.targeting_setting_pb2",
'TargetRestrictionOperation':"google.ads.google_ads.v4.proto.common.targeting_setting_pb2",
'TargetRoas':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'TargetRoasSimulationPoint':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'TargetRoasSimulationPointList':"google.ads.google_ads.v4.proto.common.simulation_pb2",
'TargetSpend':"google.ads.google_ads.v4.proto.common.bidding_pb2",
'Targeting':"google.ads.google_ads.v4.proto.services.reach_plan_service_pb2",
'TargetingDimensionEnum':"google.ads.google_ads.v4.proto.enums.targeting_dimension_pb2",
'TargetingSetting':"google.ads.google_ads.v4.proto.common.targeting_setting_pb2",
'TextAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'TextAsset':"google.ads.google_ads.v4.proto.common.asset_types_pb2",
'TextLabel':"google.ads.google_ads.v4.proto.common.text_label_pb2",
'TextMessageFeedItem':"google.ads.google_ads.v4.proto.common.extensions_pb2",
'ThirdPartyAppAnalyticsLink':"google.ads.google_ads.v4.proto.resources.third_party_app_analytics_link_pb2",
'ThirdPartyAppAnalyticsLinkIdentifier':"google.ads.google_ads.v4.proto.resources.account_link_pb2",
'TimeTypeEnum':"google.ads.google_ads.v4.proto.enums.time_type_pb2",
'TimeZoneErrorEnum':"google.ads.google_ads.v4.proto.errors.time_zone_error_pb2",
'TopicConstant':"google.ads.google_ads.v4.proto.resources.topic_constant_pb2",
'TopicInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'TopicView':"google.ads.google_ads.v4.proto.resources.topic_view_pb2",
'TrackingCodePageFormatEnum':"google.ads.google_ads.v4.proto.enums.tracking_code_page_format_pb2",
'TrackingCodeTypeEnum':"google.ads.google_ads.v4.proto.enums.tracking_code_type_pb2",
'TransactionAttribute':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'TravelPlaceholderFieldEnum':"google.ads.google_ads.v4.proto.enums.travel_placeholder_field_pb2",
'UnknownListingDimensionInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'UploadCallConversionsRequest':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'UploadCallConversionsResponse':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'UploadClickConversionsRequest':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'UploadClickConversionsResponse':"google.ads.google_ads.v4.proto.services.conversion_upload_service_pb2",
'UploadConversionAdjustmentsRequest':"google.ads.google_ads.v4.proto.services.conversion_adjustment_upload_service_pb2",
'UploadConversionAdjustmentsResponse':"google.ads.google_ads.v4.proto.services.conversion_adjustment_upload_service_pb2",
'UploadUserDataRequest':"google.ads.google_ads.v4.proto.services.user_data_service_pb2",
'UploadUserDataResponse':"google.ads.google_ads.v4.proto.services.user_data_service_pb2",
'UrlCollection':"google.ads.google_ads.v4.proto.common.url_collection_pb2",
'UrlFieldErrorEnum':"google.ads.google_ads.v4.proto.errors.url_field_error_pb2",
'UrlSeed':"google.ads.google_ads.v4.proto.services.keyword_plan_idea_service_pb2",
'UserData':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'UserDataErrorEnum':"google.ads.google_ads.v4.proto.errors.user_data_error_pb2",
'UserDataOperation':"google.ads.google_ads.v4.proto.services.user_data_service_pb2",
'UserIdentifier':"google.ads.google_ads.v4.proto.common.offline_user_data_pb2",
'UserInterest':"google.ads.google_ads.v4.proto.resources.user_interest_pb2",
'UserInterestInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'UserInterestTaxonomyTypeEnum':"google.ads.google_ads.v4.proto.enums.user_interest_taxonomy_type_pb2",
'UserList':"google.ads.google_ads.v4.proto.resources.user_list_pb2",
'UserListAccessStatusEnum':"google.ads.google_ads.v4.proto.enums.user_list_access_status_pb2",
'UserListActionInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListClosingReasonEnum':"google.ads.google_ads.v4.proto.enums.user_list_closing_reason_pb2",
'UserListCombinedRuleOperatorEnum':"google.ads.google_ads.v4.proto.enums.user_list_combined_rule_operator_pb2",
'UserListCrmDataSourceTypeEnum':"google.ads.google_ads.v4.proto.enums.user_list_crm_data_source_type_pb2",
'UserListDateRuleItemInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListDateRuleItemOperatorEnum':"google.ads.google_ads.v4.proto.enums.user_list_date_rule_item_operator_pb2",
'UserListErrorEnum':"google.ads.google_ads.v4.proto.errors.user_list_error_pb2",
'UserListInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'UserListLogicalRuleInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListLogicalRuleOperatorEnum':"google.ads.google_ads.v4.proto.enums.user_list_logical_rule_operator_pb2",
'UserListMembershipStatusEnum':"google.ads.google_ads.v4.proto.enums.user_list_membership_status_pb2",
'UserListNumberRuleItemInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListNumberRuleItemOperatorEnum':"google.ads.google_ads.v4.proto.enums.user_list_number_rule_item_operator_pb2",
'UserListOperation':"google.ads.google_ads.v4.proto.services.user_list_service_pb2",
'UserListPrepopulationStatusEnum':"google.ads.google_ads.v4.proto.enums.user_list_prepopulation_status_pb2",
'UserListRuleInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListRuleItemGroupInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListRuleItemInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListRuleTypeEnum':"google.ads.google_ads.v4.proto.enums.user_list_rule_type_pb2",
'UserListSizeRangeEnum':"google.ads.google_ads.v4.proto.enums.user_list_size_range_pb2",
'UserListStringRuleItemInfo':"google.ads.google_ads.v4.proto.common.user_lists_pb2",
'UserListStringRuleItemOperatorEnum':"google.ads.google_ads.v4.proto.enums.user_list_string_rule_item_operator_pb2",
'UserListTypeEnum':"google.ads.google_ads.v4.proto.enums.user_list_type_pb2",
'UserLocationView':"google.ads.google_ads.v4.proto.resources.user_location_view_pb2",
'Value':"google.ads.google_ads.v4.proto.common.value_pb2",
'VanityPharmaDisplayUrlModeEnum':"google.ads.google_ads.v4.proto.enums.vanity_pharma_display_url_mode_pb2",
'VanityPharmaTextEnum':"google.ads.google_ads.v4.proto.enums.vanity_pharma_text_pb2",
'Video':"google.ads.google_ads.v4.proto.resources.video_pb2",
'VideoAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'VideoBumperInStreamAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'VideoNonSkippableInStreamAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'VideoOutstreamAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'VideoTrueViewDiscoveryAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'VideoTrueViewInStreamAdInfo':"google.ads.google_ads.v4.proto.common.ad_type_infos_pb2",
'WebpageConditionInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'WebpageConditionOperandEnum':"google.ads.google_ads.v4.proto.enums.webpage_condition_operand_pb2",
'WebpageConditionOperatorEnum':"google.ads.google_ads.v4.proto.enums.webpage_condition_operator_pb2",
'WebpageInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'YouTubeChannelInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'YouTubeVideoInfo':"google.ads.google_ads.v4.proto.common.criteria_pb2",
'YoutubeVideoAsset':"google.ads.google_ads.v4.proto.common.asset_types_pb2",
'YoutubeVideoRegistrationErrorEnum':"google.ads.google_ads.v4.proto.errors.youtube_video_registration_error_pb2",
}
# Well-known protobuf/gRPC support modules whose message classes are also
# exposed lazily from this package (see _populate_dependent_classes below).
DEPENDENT_MODULE_LIST = [
    "google.longrunning.operations_pb2",
    "google.protobuf.any_pb2",
    "google.protobuf.empty_pb2",
    "google.protobuf.field_mask_pb2",
    "google.protobuf.wrappers_pb2",
    "google.rpc.status_pb2",
]
def _get_class_from_module(module_name):
    """Yield the names of the protobuf message classes found in a module.

    Args:
        module_name: a str with the importable name of a _pb2 module.

    Yields:
        str names of each message class defined in the module.
    """
    # get_messages comes from google.api_core.protobuf_helpers; iterating
    # the returned dict yields its keys (the class names).
    yield from get_messages(importlib.import_module(module_name))
def _populate_dependent_classes(module_list=DEPENDENT_MODULE_LIST):
    """Build a mapping of message class name -> defining module name.

    Args:
        module_list: iterable of importable module names to scan.

    Returns:
        dict mapping each message class name to the module that defines it
        (later modules win on duplicate class names, as before).
    """
    return {
        class_name: module_name
        for module_name in module_list
        for class_name in _get_class_from_module(module_name)
    }


_lazy_dependent_class_to_package_map = _populate_dependent_classes()
def _load_module(module_name):
    """Load a module by its name.

    Args:
        module_name: a str of the name of a sub-module to load.

    Returns:
        A module class instance.

    Raises:
        AttributeError: if the given module can't be found.
    """
    if module_name in _lazy_name_to_package_map:
        module_path = (
            f"{_lazy_name_to_package_map[module_name]}.{module_name}"
        )
    else:
        module_path = module_name
    try:
        return importlib.import_module(module_path)
    except ModuleNotFoundError:
        # Bug fix: the original caught KeyError, which can never be raised
        # here (map membership is checked above and import_module raises
        # ModuleNotFoundError). Converting the import failure to
        # AttributeError honors the documented contract and is what the
        # PEP 562 module __getattr__ protocol expects.
        raise AttributeError(f"unknown sub-module {module_name!r}.")
def _get_module_by_name(module_name):
    """Get a module containing one or more message classes.

    For example: google.ads.google_ads.v2.proto.services.video_service_pb2.

    Args:
        module_name: a str of the name of a module.

    Returns:
        a module class instance.
    """
    module = _load_module(module_name)
    # Cache the module on this package so __getattr__ isn't hit again.
    globals()[module_name] = module
    for name, message in get_messages(module).items():
        # NOTE(review): message class names never end with "_service_pb2",
        # so this branch looks dead; perhaps "Service" was intended (compare
        # _get_message_class_by_name) — confirm against the code generator.
        if name.endswith("_service_pb2"):
            # NOTE(review): "v2" inside a v4 package looks like a copy-paste
            # leftover — confirm before relying on this value.
            message.__module__ = "google.ads.google_ads.v2.types"
        globals()[name] = message
    return module
def _get_message_class_by_name(class_name):
    """Get a message class instance by name.

    For example: VideoService

    Args:
        class_name: a str of the name of a protobuf class to load.

    Returns:
        a protobuf message class definition that inherits from
        google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType.

    Raises:
        AttributeError: if no module is known for the class, or the module
            does not actually define the class.
    """
    # Dependent (protobuf/longrunning/rpc) classes take precedence over the
    # generated Google Ads classes when both define the same name.
    if class_name in _lazy_dependent_class_to_package_map:
        module_path = _lazy_dependent_class_to_package_map[class_name]
    elif class_name in _lazy_class_to_package_map:
        module_path = _lazy_class_to_package_map[class_name]
    else:
        raise AttributeError(f"unknown sub-module {class_name!r}.")
    try:
        module = _load_module(module_path)
        message = getattr(module, class_name)
    except AttributeError:
        raise AttributeError(f"unknown message class {class_name!r}.")
    if class_name.endswith("Service"):
        # NOTE(review): "v2" inside a v4 package looks like a copy-paste
        # leftover from an older generated file — confirm.
        message.__module__ = "google.ads.google_ads.v2.types"
    # Cache the class at module level so __getattr__ isn't hit again.
    globals()[class_name] = message
    return message
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name):  # Requires Python >= 3.7
    """Lazily perform imports and class definitions on first demand."""
    if name == "__all__":
        # Build and cache the full public name list on first access.
        names = sorted(
            util.convert_snake_case_to_upper_case(key)
            for key in chain(
                _lazy_name_to_package_map,
                _lazy_class_to_package_map,
                _lazy_dependent_class_to_package_map,
            )
        )
        globals()["__all__"] = names
        return names
    if name.endswith("_pb2"):
        return _get_module_by_name(name)
    if name.endswith("Pb2"):
        return _get_module_by_name(util.convert_upper_case_to_snake_case(name))
    return _get_message_class_by_name(name)
def __dir__():
    """Expose the lazily computed __all__ for dir() on this module."""
    cached = globals().get("__all__")
    return cached if cached else __getattr__("__all__")
if sys.version_info < (3, 7):
    # Backport PEP 562 module-level __getattr__/__dir__ support on 3.6.
    from pep562 import Pep562

    Pep562(__name__)
| 152,330 | 57,306 |
import logging
import asyncio
from concurrent.futures import CancelledError
from discord.ext import commands
from utils import Config, permission_node
log = logging.getLogger('charfred')

# Wire-format -> Discord-markdown templates, keyed by the message type tag
# (the first '::'-delimited field of each relayed line). MSG fields are
# origin::author::text (see on_message); the other tags' field meanings are
# inferred from their names — confirm against the Minecraft-side plugin.
formats = {
    'MSG': '[**{}**] {}: {}',
    'STF': '**{}**: {}',
    'DTH': '[**{}**] {} {}',
    'ME': '[**{}**] {}: {}',
    'SAY': '[**{}**] {}: {}',
    'SYS': '{}'
}
def escape(string):
    """Sanitize a string for the '::'-delimited relay wire format.

    Strips surrounding whitespace, escapes newlines, and breaks up '::'
    sequences so user text cannot be parsed as a field delimiter. The '::'
    replacement runs twice on purpose: one pass can leave a fresh '::'
    behind (e.g. ':::' -> ':\\::'), which the second pass cleans up.

    Args:
        string: the raw text to sanitize.

    Returns:
        the escaped str.
    """
    # Raw strings fix the invalid '\:' escape sequence the original used
    # (a DeprecationWarning in modern Python); the produced text is the
    # same two characters, backslash + colon.
    return string.strip().replace('\n', '\\n').replace('::', r':\:').replace('::', r':\:')
class ChatRelay(commands.Cog):
    """Relays chat between Minecraft servers and Discord channels.

    Runs a TCP server that Minecraft-side clients connect to; lines are
    exchanged using the '::'-delimited wire format described by `formats`.
    """

    def __init__(self, bot):
        self.bot = bot
        self.loop = bot.loop
        self.server = None  # asyncio server; created by the 'init' command
        self.inqueue = asyncio.Queue(maxsize=64, loop=self.loop)
        # client name -> {'queue': outgoing PriorityQueue,
        #                 'workers': (incoming_task, outgoing_task)}
        self.clients = {}
        self.inqueue_worker_task = None
        self.relaycfg = Config(f'{bot.dir}/configs/chatrelaycfg.toml',
                               load=True, loop=self.loop)
        # Two-way mapping between Discord channels and relay clients: one
        # channel may feed many clients, a client reports to one channel.
        if 'ch_to_clients' not in self.relaycfg:
            self.relaycfg['ch_to_clients'] = {}
            self.relaycfg._save()
        if 'client_to_ch' not in self.relaycfg:
            self.relaycfg['client_to_ch'] = {}
            self.relaycfg._save()

    def cog_unload(self):
        """Close the relay server and cancel all worker tasks on unload."""
        if self.server:
            log.info('CR: Closing relay server.')
            self.server.close()
            if self.inqueue_worker_task:
                self.inqueue_worker_task.cancel()
            if self.clients:
                for client in self.clients.values():
                    try:
                        client['workers'][0].cancel()
                        client['workers'][1].cancel()
                    except KeyError:
                        pass
            self.loop.create_task(self.server.wait_closed())

    @commands.Cog.listener()
    async def on_message(self, message):
        """Forward eligible Discord messages to all clients of the channel."""
        if self.server is None:  # Don't even do anything if the server isn't running.
            return
        if message.author.bot or (message.guild is None):
            return
        ch_id = str(message.channel.id)
        if message.content and (ch_id in self.relaycfg['ch_to_clients']):
            # Check whether the message is a command, as determined
            # by having a valid prefix, and don't proceed if it is.
            prefix = await self.bot.get_prefix(message)
            if isinstance(prefix, str):
                if message.content.startswith(prefix):
                    return
            else:
                try:
                    if message.content.startswith(tuple(prefix)):
                        return
                except TypeError:
                    # If we get here, then the prefixes are borked.
                    raise
            content = f'MSG::Discord::{escape(message.author.display_name)}:' \
                      f':{escape(message.clean_content)}::\n'
            for client in self.relaycfg['ch_to_clients'][ch_id]:
                try:
                    self.clients[client]['queue'].put_nowait((5, content))
                except KeyError:
                    # Client is configured but not currently connected.
                    pass
                except asyncio.QueueFull:
                    # Client's outgoing queue is full; drop rather than block.
                    pass

    @commands.group(invoke_without_command=True)
    async def chatrelay(self, ctx):
        """Minecraft chat relay commands.

        This returns a list of all Minecraft servers currently
        connected and what channel they're linked to.
        """
        info = ['# Chat Relay Status:']
        if self.server and self.server.sockets:
            info.append('\n# Relay server is online.\n')
        else:
            info.append('\n< Relay server is offline! >\n')
        if self.clients:
            info.append('\n# Currently connected clients:')
            for client in self.clients:
                info.append(f'- {client}')
        if self.relaycfg['ch_to_clients']:
            info.append('\n# Relay configuration:')
            for channel_id, clients in self.relaycfg['ch_to_clients'].items():
                channel = self.bot.get_channel(int(channel_id))
                info.append(f'{channel.name if channel else channel_id}:')
                if clients:
                    for client in clients:
                        info.append(f'- {client}')
                else:
                    info.append('\n')
        else:
            info.append('> No clients configured.\n')
        if len(info) == 2:
            info.append('> No clients connected, nothing configured.')
        await ctx.sendmarkdown('\n'.join(info))

    async def incoming_worker(self, reader, client):
        """Read lines from a connected client into the shared inqueue."""
        log.info(f'CR-Incoming: Worker for {client} started.')
        try:
            while True:
                data = await reader.readline()
                if not data:
                    # EOF: the peer closed its end of the connection.
                    log.info(f'CR-Incoming: {client} appears to have disconnected!')
                    break
                try:
                    data = data.decode()
                except UnicodeDecodeError as e:
                    log.info(f'CR-Incoming: {e}')
                    continue
                try:
                    self.inqueue.put_nowait((client, data))
                except asyncio.QueueFull:
                    log.warning(f'CR-Incoming: Incoming queue full, message dropped!')
        except CancelledError:
            # NOTE(review): CancelledError here is concurrent.futures'; on
            # Python 3.8+ asyncio raises asyncio.CancelledError (a
            # BaseException) instead — confirm the target Python version.
            raise
        finally:
            log.info(f'CR-Incoming: Worker for {client} exited.')

    async def outgoing_worker(self, writer, client):
        """Drain the client's priority queue onto its socket."""
        log.info(f'CR-Outgoing: Worker for {client} started.')
        try:
            while True:
                try:
                    _, data = await self.clients[client]['queue'].get()
                except (KeyError, AttributeError):
                    log.error(f'CR-Outgoing: Outqueue for {client} is gone!'
                              ' Connection shutting down!')
                    break
                else:
                    data = data.encode()
                    writer.write(data)
                    await writer.drain()
        except CancelledError:
            raise
        finally:
            log.info(f'CR-Outgoing: Worker for {client} exited.')

    async def connection_handler(self, reader, writer):
        """Handle the full lifetime of one client connection.

        Expects a 'HSHK::<name>' handshake line; falls back to the peer
        address as the client name if the handshake is invalid.
        """
        peer = str(writer.get_extra_info("peername"))
        log.info(f'CR-Connection: New connection established with {peer}!')
        handshake = await reader.readline()
        if not handshake:
            log.warning(f'CR-Connection: No handshake from {peer} recieved!'
                        ' Connection shutting down!')
            writer.close()
            return
        handshake = handshake.decode()
        hshk = handshake.split('::')
        if hshk[0] == 'HSHK':
            try:
                client = hshk[1]
            except IndexError:
                log.warning(f'CR-Connection: Invalid handshake: {handshake}')
                client = None
        else:
            log.warning(f'CR-Connection: Invalid handshake: {handshake}')
            client = None
        if client is None:
            log.warning(f'CR-Connection: Using client address as name.')
            client = peer
        await self.inqueue.put((client, f'SYS::```markdown\n# {client} connected!\n```'))
        if client in self.clients and self.clients[client]:
            # Bug fix: the key is 'workers' (plural); the original checked
            # 'worker', so stale tasks were never cancelled on reconnect.
            if 'workers' in self.clients[client]:
                log.warning(f'CR-Connection: {client} reconnecting after messy exit, cleaning up!')
                for worker in self.clients[client]['workers']:
                    worker.cancel()
        self.clients[client] = {}
        self.clients[client]['queue'] = asyncio.PriorityQueue(maxsize=24, loop=self.loop)
        in_task = self.loop.create_task(self.incoming_worker(reader, client))
        out_task = self.loop.create_task(self.outgoing_worker(writer, client))
        self.clients[client]['workers'] = (in_task, out_task)
        # When either direction finishes, tear the other one down too.
        _, waiting = await asyncio.wait([in_task, out_task],
                                        return_when=asyncio.FIRST_COMPLETED)
        for task in waiting:
            task.cancel()
        try:
            baggage = self.clients.pop(client)
        except KeyError:
            pass
        else:
            log.info(f'CR-Connection: Outqueue for {client} removed with'
                     f' {baggage["queue"].qsize()} items.')
        writer.close()
        log.info(f'CR-Connection: Connection with {client} closed!')
        await self.inqueue.put((client, f'SYS::```markdown\n< {client} disconnected! >\n```'))

    async def inqueue_worker(self):
        """Distribute inbound client messages to other clients and Discord."""
        log.info('CR-Inqueue: Worker started!')
        try:
            while True:
                client, data = await self.inqueue.get()
                # Check if the data has a valid format.
                _data = data.split('::')
                if _data[0] not in formats:
                    log.debug(f'CR-Inqueue: Data from {client} with invalid format: {data}')
                    continue
                # If we get here, then the format is valid and we can relay
                # to other clients (SYS notices are Discord-only).
                if _data[0] != 'SYS':
                    for other in self.clients:
                        if other == client:
                            continue
                        try:
                            self.clients[other]['queue'].put_nowait((5, data))
                        except KeyError:
                            pass
                        except asyncio.QueueFull:
                            pass
                # Check if we have a channel to send this message to.
                if client not in self.relaycfg['client_to_ch']:
                    log.debug(f'CR-Inqueue: No channel for: "{client} : {data}", dropping!')
                    continue
                # If we get here, we have a channel and can process according
                # to the format map.
                channel = self.bot.get_channel(int(self.relaycfg['client_to_ch'][client]))
                if not channel:
                    log.warning(f'CR-Inqueue: {_data[0]} message from {client} could not be sent.'
                                ' Registered channel does not exist!')
                    continue
                try:
                    await channel.send(formats[_data[0]].format(*_data[1:]))
                except IndexError as e:
                    # Too few fields for the template; drop the message.
                    log.debug(f'{e}: {data}')
                    pass
        except CancelledError:
            raise
        finally:
            log.info('CR-Inqueue: Worker exited.')

    @chatrelay.command(aliases=['start', 'init'])
    @permission_node(f'{__name__}.init')
    async def initialize(self, ctx, port):
        """This initializes the relay server on the given port,
        allowing connections from Minecraft servers to be established.

        Be sure to also set up at least one channel to relay chat
        to and from, using the 'register' subcommand, otherwise
        chat recieved from clients will just be dropped!
        """
        if self.server:
            log.warning('CR: Server already established!')
            await ctx.sendmarkdown('> Relay server already running!')
            return
        self.inqueue_worker_task = self.loop.create_task(self.inqueue_worker())
        self.server = await asyncio.start_server(self.connection_handler, '127.0.0.1', port,
                                                 loop=self.loop)
        log.info('CR: Server started!')
        await ctx.sendmarkdown('# Relay server started.')

    @chatrelay.command(aliases=['stop'])
    @permission_node(f'{__name__}.init')
    async def close(self, ctx):
        """This closes the relay server, disconnecting all clients.
        """
        if not self.server:
            log.info('CR: No server to be closed.')
            await ctx.sendmarkdown('> No relay server to be closed.')
            return
        self.server.close()
        if self.inqueue_worker_task:
            self.inqueue_worker_task.cancel()
        if self.clients:
            for client in self.clients.values():
                try:
                    client['workers'][0].cancel()
                    client['workers'][1].cancel()
                except KeyError:
                    pass
        await self.server.wait_closed()
        log.info('CR: Server closed!')
        self.server = None
        await ctx.sendmarkdown('# Relay server closed, all clients disconnected!')

    @chatrelay.command(aliases=['listen'])
    @permission_node(f'{__name__}.register')
    async def register(self, ctx, client: str):
        """Registers a channel to recieve chat from a given client,
        and send chat from the channel to the client.

        The channel you run this in will be the registered channel.
        You can get a list of clients by just running 'chatrelay'
        without a subcommand.
        """
        channel_id = str(ctx.channel.id)
        if client not in self.clients:
            await ctx.sendmarkdown('< Client unknown, registering anyway. >\n'
                                   '< Please check if you got the name right,'
                                   ' when the client eventually connects. >')
        log.info(f'CR: Trying to register {ctx.channel.name} for {client}.')
        if client in self.relaycfg['client_to_ch'] and self.relaycfg['client_to_ch'][client]:
            channel = self.bot.get_channel(int(self.relaycfg['client_to_ch'][client]))
            if channel == ctx.channel:
                await ctx.sendmarkdown(f'> {client} is already registered with this channel!')
            else:
                await ctx.sendmarkdown(f'< {client} is already registered with {channel.name}! >\n'
                                       '> A client can only be registered to one channel.\n'
                                       '> Please unregister the other channel first!')
            return
        else:
            self.relaycfg['client_to_ch'][client] = channel_id
            if channel_id in self.relaycfg['ch_to_clients']:
                self.relaycfg['ch_to_clients'][channel_id].append(client)
            else:
                self.relaycfg['ch_to_clients'][channel_id] = [client]
            await self.relaycfg.save()
            await ctx.sendmarkdown(f'# {ctx.channel.name} is now registered for'
                                   f' recieving chat from, and sending chat to {client}.')

    @chatrelay.command(aliases=['unlisten'])
    @permission_node(f'{__name__}.register')
    async def unregister(self, ctx, client: str):
        """Unregisters a channel from recieving chat from a given
        client or sending chat to that client.

        The channel you run this in will be the unregistered channel.
        You can get a list of clients by just running 'chatrelay'
        without a subcommand.
        """
        channel_id = str(ctx.channel.id)
        log.info(f'CR: Trying to unregister {ctx.channel.name} for {client}.')
        if client in self.relaycfg['client_to_ch']:
            if self.relaycfg['client_to_ch'][client] == channel_id:
                del self.relaycfg['client_to_ch'][client]
            else:
                await ctx.sendmarkdown(f'< {client} is not registered for this channel! >')
                return
            try:
                self.relaycfg['ch_to_clients'][channel_id].remove(client)
            except ValueError:
                # Both mappings should always agree; a miss means the config
                # got out of sync somewhere.
                log.critical(f'CR: Relay mapping inconsistency detected!')
                raise
            else:
                await ctx.sendmarkdown('# This channel will no longer send chat to'
                                       f' or recieve chat from {client}!')
            finally:
                await self.relaycfg.save()
        else:
            await ctx.sendmarkdown(f'> {client} is not registered with any channel.')
def setup(bot):
    """Register this cog's permission nodes and attach it to the bot."""
    bot.register_nodes([f'{__name__}.{node}' for node in ('init', 'register')])
    bot.add_cog(ChatRelay(bot))
| 15,841 | 4,350 |
# This file has all the functions required to load the information of a city.
# - Definition of the class Station
# - Definition of the class CityInfo
# - Read functions from files
# - Structure of the information
#
__authors__='TO_BE_FILLED'
__group__='DL01'
# _________________________________________________________________________________________
# Intel·ligència Artificial
# Grau en Enginyeria Informàtica
# Curs 2016-2017
# Universitat Autònoma de Barcelona
# _________________________________________________________________________________________
class Station:
    """A single metro station node of the city graph."""

    def __init__(self, id, name, line, x, y):
        """Create a station.

        Args:
            id: unique station id.
            name: human-readable station name.
            line: metro line identifier; coerced to int (input may be str).
            x: X coordinate of the station.
            y: Y coordinate of the station.
        """
        self.id = id
        # Maps a reachable station id -> time cost between the two stations;
        # filled in later by CityInfo.setNextStations.
        self.destinationDic = {}
        self.name = name
        self.line = int(line)
        self.x = x
        self.y = y

    def __repr__(self):
        # Added for debuggability; callers relying on the default repr are
        # unaffected functionally.
        return (f"Station(id={self.id!r}, name={self.name!r}, "
                f"line={self.line!r}, x={self.x!r}, y={self.y!r})")
class CityInfo:
    """Aggregated information about a city's metro network."""

    def __init__(self, vel_lines, station_list, connection_time, multipleLines=0):
        """Build the city description and wire up station connections.

        Args:
            vel_lines: list with the velocity of each metro line.
            station_list: list of Station objects of this city.
            connection_time: dict of station id -> {destination id: cost}.
            multipleLines: optional dict relating stations that share
                coordinates but belong to different lines.
        """
        self.num_lines = len(vel_lines)        # number of different lines
        self.velocity_lines = vel_lines        # velocity of each line
        self.max_velocity = max(vel_lines)     # fastest subway line
        self.min_velocity = min(vel_lines)     # slowest subway line
        self.max_transfer = 20                 # slowest transfer time (default)
        self.min_transfer = 6                  # fastest transfer time (default)
        self.multipleLines = multipleLines
        self.StationList = station_list
        self.setNextStations(connection_time)
        self.walking_velocity = 4

    def setNextStations(self, connections):
        """Fill each station's destinationDic with its connection costs.

        Args:
            connections: dict of station id -> {destination id: cost}.
        """
        for station in self.StationList:
            if int(station.id) in connections:
                station.destinationDic.update(connections[int(station.id)])

    def getTransfers(self):
        """Update max/min transfer times from inter-line connections.

        Bug fix: the original indexed ``self.StationList`` (a list) with a
        Station object and treated destination ids (ints) as Station
        objects, raising TypeError/AttributeError on any call. Stations are
        now looked up by id before comparing lines.
        """
        stations_by_id = {station.id: station for station in self.StationList}
        for station in self.StationList:
            for dest_id, cost in station.destinationDic.items():
                dest = stations_by_id.get(dest_id)
                if dest is not None and station.line != dest.line:
                    self.max_transfer = max(self.max_transfer, cost)
                    self.min_transfer = min(self.min_transfer, cost)
def search_multiple_lines(stationList):
    """Search the set of stations that have different lines.

    Two stations with distinct ids but identical coordinates are the same
    physical stop served by different metro lines.

    Bug fix: the original's symmetric branch tested
    ``j.id not in multipleLines[i.id]`` before appending to
    ``multipleLines[j.id]`` (wrong list), which could produce duplicate
    entries. Since the double loop visits every ordered pair, one guarded
    append per ordered pair is sufficient.

    Args:
        stationList: list of stations of the current city
            (-id, destinationDic, name, line, x, y-).

    Returns:
        dict relating each station id to the ids of the co-located
        stations on other lines.
    """
    multipleLines = {}
    for a in stationList:
        for b in stationList:
            if a.id != b.id and a.x == b.x and a.y == b.y:
                bucket = multipleLines.setdefault(a.id, [])
                if b.id not in bucket:
                    bucket.append(b.id)
    return multipleLines
def readStationInformation(filename):
    """Read station definitions from a tab-separated file.

    Each line must follow the format:
        id <tab> name <tab> line <tab> x <tab> y

    Args:
        filename: path of the stations file.

    Returns:
        list of Station objects (destinationDic left empty).
    """
    stationList = []
    # 'with' guarantees the file is closed even if a line is malformed
    # (the original leaked the handle on any parse error).
    with open(filename, 'r') as fileMetro:
        for line in fileMetro:
            information = line.split('\t')
            stationList.append(Station(
                int(information[0]),
                information[1],
                information[2],
                int(information[3]),
                int(information[4].replace('\n', '').replace(' ', '')),
            ))
    return stationList
def readInformation(filename):
    """Read 'label : value' lines from *filename* and return the integer values.

    Lines that do not match the expected ``label : int`` format are skipped.
    The last two parsed values (the min and max entries at the end of the
    file) are discarded before returning.
    """
    vector = []
    with open(filename, 'r') as fp:
        for line in fp:
            try:
                value = line.split(" : ")[1].split("\n")[0]
                vector.append(int(value))
            except (IndexError, ValueError):
                # Malformed line: ignore it and continue with the next one.
                continue
    del vector[-1]  # remove min value
    del vector[-1]  # remove max value
    return vector
# readCostTable: Given a filename, it reads the information of this file.
# The file should be an inferior (lower-triangular) matrix with the cost
# between two different stations.
def readCostTable(filename):
    """Parse a tab-separated lower-triangular cost matrix into a symmetric
    dict-of-dicts: connections[origin][destination] = cost."""
    connections = {}
    origin = 1  # station IDs start at '1' instead of '0'
    with open(filename, 'r') as fileCorrespondencia:
        for row in fileCorrespondencia:
            destination = 1
            for cell in row.split('\t'):
                cell = cell.replace('\n', '')
                if cell != '':
                    # '0' means "no direct connection".
                    if cell != '0':
                        connections.setdefault(origin, {}).setdefault(destination, float(cell))
                        # The matrix is lower-triangular, so mirror the cost
                        # into the missing superior part.
                        connections.setdefault(destination, {}).setdefault(origin, float(cell))
                    destination = destination + 1
            origin = origin + 1
    return connections
# print_stationList: Given a stationList (- id, name, line, x, y - information),
# it prints the information by terminal
def print_stationList(stationList):
    print("\n")
    print(" ______________ STATION LIST________________")
    print("\n")
    for station in stationList:
        print(" ID : {} - {} linea: {} pos: ({},{})".format(
            station.id, station.name, station.line, station.x, station.y))
    print("\n")
    print("\n")
# print_connections: Given a connections dictionary, it prints the information
# by terminal
def print_connections(connections):
    print("\n")
    print(" ______________ CONNECTIONS ________________")
    print("\n")
    for origin in connections.keys():
        print(" ID : {} ".format(origin))
        for destination in connections[origin]:
            print("      {} : {}".format(destination, connections[origin][destination]))
    # print ("\n")
    # print ("\n")
def print_dictionary(stationList):
    """Print each station id together with its destination dictionary."""
    print("\n")
    print(" ______________ DICTIONARY ________________")
    print("\n")
    for station in stationList:
        print(" ID : {} --> {}".format(station.id, station.destinationDic))
    print("\n")
    print("\n")
import json
import sys
import os
from tqdm import tqdm
from mdf_refinery.validator import Validator
from mdf_refinery.parsers.tab_parser import parse_tab
# VERSION 0.3.0
# This is the converter for the GW100 dataset.
def convert(input_path, metadata=None, verbose=False):
    """Convert the GW100 dataset into MDF feedstock records.

    Arguments:
    input_path (string): The file or directory where the data resides.
        NOTE: Do not hard-code the path to the data in the converter.
        The converter should be portable.
    metadata (string or dict): The path to the JSON dataset metadata file, a
        dict or json.dumps string containing the dataset metadata, or None to
        use the metadata defined inline below. Default None.
    verbose (bool): Should the script print status messages to standard
        output? Default False.
        NOTE: The converter should have NO output if verbose is False,
        unless there is an error.
    """
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "mdf": {
                "title": "Benchmark of G0W0 on 100 Molecules",
                "acl": ["public"],
                "source_name": "gw100",
                "citation": ["M.J. van Setten, F. Caruso, S. Sharifzadeh, X. Ren, M. Scheffler, F. Liu, J. Lischner, L. Lin, J.R. Deslippe, S.G. Louie, C. Yang, F. Weigend, J.B. Neaton, F. Evers, and P. Rinke, GW100: Benchmarking G0W0 for Molecular Systems, J. Chem. Theory Comput. 11, 5665 (2015).", "M. Govoni et al., (2016). In preparation.", "P.J. Linstrom and W.G. Mallard, Eds., NIST Chemistry WebBook, NIST Standard Reference Database Number 69, National Institute of Standards and Technology, Gaithersburg MD, 20899, http://webbook.nist.gov."],
                "data_contact": {
                    "given_name": "Michiel",
                    "family_name": "van Setten",
                    "email": "michiel.vansetten@uclouvain.be",
                    "institution": "Université catholique de Louvain",
                },
                # "author":
                # "license": ,
                "collection": "GW100",
                # "tags": ,
                "description": "This is a benchmark of G0W0 on 100 molecules.",
                "year": 2015,
                "links": {
                    "landing_page": "http://www.west-code.org/database/gw100/index.php",
                    "publication": "https://dx.doi.org/10.1021/acs.jctc.5b00453",
                    # "dataset_doi": ,
                    # "related_id": ,
                    # data links: {
                    # "globus_endpoint": ,
                    # "http_host": ,
                    # "path": ,
                    # }
                },
                # "mrr": ,
                "data_contributor": {
                    "given_name": "Jonathon",
                    "family_name": "Gaff",
                    "email": "jgaff@uchicago.edu",
                    "institution": "The University of Chicago",
                    "github": "jgaff"
                }
            }
        }
    elif isinstance(metadata, str):
        # Accept either a json.dumps string or a path to a JSON file.
        try:
            dataset_metadata = json.loads(metadata)
        except Exception:
            try:
                with open(metadata, 'r') as metadata_file:
                    dataset_metadata = json.load(metadata_file)
            except Exception as e:
                sys.exit("Error: Unable to read metadata: " + repr(e))
    elif isinstance(metadata, dict):
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    dataset_validator = Validator(dataset_metadata)

    # Get the data
    with open(os.path.join(input_path, "gw100.csv")) as in_file:
        data = in_file.read()
    for record in tqdm(parse_tab(data), desc="Processing records", disable=not verbose):
        record_metadata = {
            "mdf": {
                "title": "GW100 - " + record["name"],
                "acl": ["public"],
                # "tags": ,
                # "description": ,
                "composition": record["formula"],
                # "raw": ,
                "links": {
                    "landing_page": "http://www.west-code.org/database/gw100/pag/" + record["cas"] + ".php",
                    # "publication": ,
                    # "dataset_doi": ,
                    # "related_id": ,
                    # data links: {
                    # "globus_endpoint": ,
                    # "http_host": ,
                    # "path": ,
                    # },
                },
                # "citation": ,
                # "data_contact": {
                #     "given_name": ,
                #     "family_name": ,
                #     "email": ,
                #     "institution": ,
                #     IDs
                # },
                # "author": ,
                # "license": ,
                # "collection": ,
                # "data_format": ,
                # "data_type": ,
                # "year": ,
                # "mrr":
                # "processing": ,
                # "structure": ,
            }
        }
        # Pass each individual record to the Validator.
        result = dataset_validator.write_record(record_metadata)
        # If the Validator returns "success" == True, the record was written
        # successfully; otherwise report the error.
        if result["success"] is not True:
            print("Error:", result["message"])

    if verbose:
        print("Finished converting")
| 5,290 | 1,586 |
from abstract_keyboard import KeyData, AbstractKeyboard, Colours
from physical_keyboard import PhysicalKeyboard
| 112 | 28 |
import json
import os
import re
import time
import uuid

import requests
import yaml
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from nltk.tokenize import RegexpTokenizer
from pydantic import BaseModel
from vkaudiotoken import (
    TokenReceiverOfficial,
    CommonParams,
    TokenException,
    TwoFAHelper,
    supported_clients
)

from get_root_access_token_for_sp import get_token
app = FastAPI()
# Allow requests from any origin (the SPA frontend during development).
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Static credentials/settings; presumably contains sp_basic_auth, sp_user_id,
# playlist options and vk_version — see usages below. TODO confirm schema.
with open('creds.yaml', 'r') as c:
    config = yaml.safe_load(c)
SPOTIFY_REDIRECT_URL = os.environ.get('SPOTIFY_REDIRECT_URL', 'http://localhost:3000/spotify-callback')
VK_API_DEFAULT_VERSION = '5.95'
# Module-level mutable state shared across request handlers (single-user app).
sp_code = ''  # Spotify authorization code from the OAuth callback
sp_access_token = ''  # current Spotify user access token
sp_refresh_token = ''  # Spotify refresh token
sp_playlist_id =''  # id of the playlist created for the transfer
vk_session = None  # requests.Session authorised against VK
vk_access_token = ''  # VK audio access token
vk_total_tracks = 0  # number of tracks in the user's VK library
last_iteration = False
batch = 0
offset = 0
page_size=200  # VK audio.get page size
class SpotifyLoginInputDto(BaseModel):
    """Request body for /login/spotify: the OAuth authorization code."""
    code: str
class VkLoginInputDto(BaseModel):
    """Request body for /login/vk: the user's VK credentials."""
    vkLogin: str
    vkPass: str  # fixed: was annotated with the undefined name `strt`
class BatchSizeDto(BaseModel):
    """Request body for /get-batch."""
    # NOTE(review): `size` arrives as a string and is passed straight to
    # getTracksFromVK as the VK offset — confirm whether it should be an int.
    size: str
@app.post("/login/spotify", status_code=200)
def login_to_spotify(dto: SpotifyLoginInputDto):
    """Exchange the OAuth authorization code for Spotify access/refresh
    tokens and cache them in module-level globals."""
    print("Code " + dto.code)
    global sp_code
    sp_code = dto.code
    # Authorization-code token exchange, authenticated with the app's
    # Basic client credentials from creds.yaml.
    response = requests.post(
        url='https://accounts.spotify.com/api/token',
        data={
            'grant_type': 'authorization_code',
            'code': dto.code,
            'redirect_uri': SPOTIFY_REDIRECT_URL
        },
        headers={
            "Authorization": 'Basic {}'.format(config.get('sp_basic_auth'))
        }).json()
    try:
        global sp_access_token
        sp_access_token = response['access_token']
        global sp_refresh_token
        sp_refresh_token = response['refresh_token']
    except KeyError:
        # The token endpoint returns no access_token when the code is invalid.
        raise HTTPException(status_code=400, detail='Invalid code provided')
@app.post("/login/vk", status_code=200)
def login_to_vk(dto: VkLoginInputDto):
    """Log into VK with username/password via vkaudiotoken, cache the audio
    token and an authorised requests session in module globals.

    When VK demands 2FA, an SMS is requested and the call fails; the
    commented-out block below sketches the interactive completion flow.
    """
    print("Login: " + dto.vkLogin + ", pass: " + dto.vkPass)
    params = CommonParams(supported_clients.VK_OFFICIAL.user_agent)
    receiver = TokenReceiverOfficial(dto.vkLogin, dto.vkPass, params)
    try:
        credentials_from_vk = receiver.get_token()
    except TokenException as err:
        if err.code == TokenException.TWOFA_REQ and 'validation_sid' in err.extra:
            # Trigger the SMS with the validation code, then re-raise (the
            # interactive completion below is disabled).
            TwoFAHelper(params).validate_phone(err.extra['validation_sid'])
            print('2FA auth enabled. SMS should be sent')
            """ auth_code = input('Please, wait for SMS and insert your authorization code below: \n')
            receiver = TokenReceiverOfficial(self._config.get('vk_login'), self._config.get('vk_password'), params, auth_code)
            try:
                credentials_from_vk = receiver.get_token()
            except Exception as e:
                raise """
        else:
            raise
    token = credentials_from_vk['access_token']
    print("VK token: " + token)
    # VK requires the official client's User-Agent on subsequent API calls.
    session = requests.session()
    session.headers.update({'User-Agent': supported_clients.VK_OFFICIAL.user_agent})
    # NOTE(review): plain assignments cannot raise KeyError — this try/except
    # appears to be dead code copied from login_to_spotify.
    try:
        global vk_session
        vk_session = session
        global vk_access_token
        vk_access_token = token
    except KeyError:
        raise HTTPException(status_code=400, detail='Invalid code provided')
@app.post("/init-transfer", status_code=200)
def init_process():
    """Prepare the transfer: count the user's VK tracks and create the target
    Spotify playlist, caching both in module globals."""
    print("Process has started")
    global vk_total_tracks
    vk_total_tracks = get_total_tracks()
    print("VK total tracks: ")
    print(vk_total_tracks)
    global sp_playlist_id
    sp_playlist_id = create_playlist_in_spotify()
    print("SP playlist id: " + sp_playlist_id)
@app.get('/get-batch', status_code=200)
def process_batch(dto: BatchSizeDto):
    """Fetch one page of VK tracks, resolve them on Spotify, and append the
    matches to the prepared playlist."""
    print("yee " + dto.size)
    # NOTE(review): dto.size is a *string* and is passed to getTracksFromVK as
    # the VK API offset; this local `batch` also shadows the module-level
    # `batch` global — confirm both are intended.
    batch = getTracksFromVK(dto.size)
    print(batch)
    tracks = batch_track_search(batch)
    add_tracks_to_playlist([track['id'] for track in tracks], sp_playlist_id)
def get_total_tracks() -> int:
    """Return the total number of audio tracks in the user's VK library."""
    response = vk_session.get(
        url="https://api.vk.com/method/audio.get",
        params=[
            ('access_token', vk_access_token),
            ('v', config.get('vk_version', VK_API_DEFAULT_VERSION))
        ]
    )
    return response.json()['response']['count']
def _revoke_root_token():
    """Refresh the cached Spotify root (client) token used by track search."""
    config['sp_root_token'] = get_token()
def revoke_user_token():
    """Refresh the Spotify *user* access token from the stored refresh token
    and update the sp_access_token global."""
    response = requests.post(
        url='https://accounts.spotify.com/api/token',
        data={
            'refresh_token': sp_refresh_token,
            'grant_type': 'refresh_token'
        },
        headers={
            # Fixed: the token endpoint requires the client's Basic
            # credentials (as used in login_to_spotify), not the one-time
            # authorization code that was passed here before.
            "Authorization": 'Basic {}'.format(config.get('sp_basic_auth'))
        }
    ).json()
    global sp_access_token
    sp_access_token = response['access_token']
def create_playlist_in_spotify(level=0) -> str:
    """Create the configured playlist for the Spotify user and return its id.

    Retries up to twice after refreshing an expired user token (HTTP 401).
    Raises Exception when retries are exhausted or the response carries no
    playlist id.
    """
    if level > 2:
        raise Exception("Could not create Spotify playlist: retries exhausted")
    result = requests.post(
        url='https://api.spotify.com/v1/users/{}/playlists'.format(config.get('sp_user_id')),
        json={
            "name": config.get("sp_playlist_name"),
            "description": config.get("sp_playlist_description"),
            "public": config.get("sp_is_playlist_public")
        },
        headers={
            "Authorization": 'Bearer {}'.format(sp_access_token)
        }
    )
    if result.status_code == 401:
        # Access token expired: refresh it and retry.
        revoke_user_token()
        return create_playlist_in_spotify(level + 1)
    try:
        playlist_id = result.json()['id']
    except Exception as e:
        raise Exception("Unexpected response while creating playlist") from e
    return playlist_id
def getTracksFromVK(offset):
    """Fetch one page (page_size tracks) of the user's VK audio library
    starting at *offset*; return a list of {'artist', 'title'} dicts."""
    response = vk_session.get(
        url="https://api.vk.com/method/audio.get",
        params=[
            ('access_token', vk_access_token),
            ('v', config.get('vk_version', VK_API_DEFAULT_VERSION)),
            ('count', page_size),
            ('offset', offset)
        ])
    items = response.json()['response']['items']
    # Fixed: removed the dead `offset += page_size` — it only mutated the
    # local parameter (never the module global) and raised TypeError whenever
    # the caller passed the offset as a string.
    return [{'artist': track['artist'], 'title': track['title']} for track in items]
def batch_track_search(track_list) -> list:
    """Resolve VK tracks to Spotify tracks.

    For each {'artist', 'title'} entry, search for "title artist" first and
    fall back to a title-only search; tracks that cannot be found are logged
    and skipped. Returns a list of {'Track name', 'id'} dicts.

    Requires `import time` at module level (the original called time.sleep
    without importing it, raising NameError at runtime).
    """
    track_list_spotify = []
    for song in track_list:
        cleaned_title = clean(song['title'])
        cleaned_artist = clean(song['artist'])
        try:
            track_id, track_name = search_track_on_spotify(cleaned_title + " " + cleaned_artist)
        except Exception:
            # Fall back to a title-only search.
            try:
                track_id, track_name = search_track_on_spotify(cleaned_title)
            except Exception as ex:
                print(cleaned_title + " " + cleaned_artist + ' not found! ' + str(ex))
            else:
                track_list_spotify.append({'Track name': track_name, 'id': track_id})
        else:
            track_list_spotify.append({'Track name': track_name, 'id': track_id})
        # Throttle every iteration to stay under Spotify's rate limits.
        time.sleep(0.2)
    return track_list_spotify
def search_track_on_spotify(query, level=0) -> (str, str):
    """Search Spotify for *query* and return (track_uri, track_name).

    Retries up to twice after refreshing the root token on HTTP 401. Raises
    Exception when the track is not found, the response cannot be parsed, or
    retries are exhausted.
    """
    if level > 2:
        # Fixed: previously raised the undefined name SpotifyAuthException.
        raise Exception("Spotify auth failed: retries exhausted")
    response = requests.get(
        url='https://spclient.wg.spotify.com/searchview/km/v4/search/{}'.format(query),
        params={
            'catalogue': '',
            'country': 'RU'
        },
        headers={
            # Fixed: this is a module-level function — use the module config
            # instead of the undefined `self._config`.
            'Authorization': "Bearer {}".format(config.get('sp_root_token')),
            'Host': "spclient.wg.spotify.com"
        }
    )
    if response.status_code == 401:
        # Fixed: the refresh helper is named _revoke_root_token.
        _revoke_root_token()
        return search_track_on_spotify(query, level + 1)
    elif response.status_code == 404:
        raise Exception("Track not found")
    try:
        results = response.json()
        track_id = results['results']['tracks']['hits'][0]['uri']
        track_returned_name = results['results']['tracks']['hits'][0]['name']
    except Exception as e:
        raise Exception("Could not parse Spotify search response") from e
    return track_id, track_returned_name
def add_tracks_to_playlist(tracks, id, level=0) -> None:
    """Add *tracks* (list of Spotify track URIs) to playlist *id*, refreshing
    the user token and retrying (at most twice) on HTTP 401."""
    if level > 2:
        raise Exception("Could not add tracks to playlist: retries exhausted")
    tracks_str = ','.join(tracks)
    res = requests.post(
        url='https://api.spotify.com/v1/playlists/{}/tracks?uris={}'.format(id, tracks_str),
        headers={
            # Fixed: module-level function — use the global user access token
            # instead of the undefined `self._config`.
            "Authorization": 'Bearer {}'.format(sp_access_token)
        }
    )
    if res.status_code == 401:
        revoke_user_token()
        return add_tracks_to_playlist(tracks, id, level + 1)
def clean(clean_sting) -> str:
    """Strip search noise (parenthesised/bracketed text, feat. credits, dates,
    numbers, punctuation) from a track title or artist name.

    Fixed: removed the stray `@staticmethod` decorator — this is a
    module-level function called directly as `clean(...)`, and decorating it
    made it uncallable on Python < 3.10.
    """
    # Remove "()"
    clean_sting = re.sub(r'\([^)]*\)', '', clean_sting)
    # Remove "[]"
    clean_sting = re.sub(r'\[[^)]*\]', '', clean_sting)
    # Remove "feat." / "ft." / "featuring" credits through end of string
    clean_sting = re.sub(r'(?i)(\s*)f(?:ea)?t(?:(?:\.?|\s)|uring)(?=\s).*$', '', clean_sting)
    # Remove dd.mm.yyyy-style dates
    clean_sting = re.sub(r'(0[1-9]|[12][0-9]|3[01])[- /.](0[1-9]|1[012])[- /.](19|20)\d\d', '', clean_sting)
    # Remove numbers, but only if something non-numeric would remain
    if re.match(r'\s*[^0-9]+\s*', clean_sting):
        clean_sting = re.sub(r'[0-9]+', '', clean_sting)
    # Remove other garbage by keeping word tokens only
    tokenizer = RegexpTokenizer(r'\w+')
    return " ".join(tokenizer.tokenize(clean_sting))
import math
# TODO: Implement acceptibility tests
class Appendix13_7_cParams:
    """Input parameters for the Appendix 13-7(c) stress calculations of a
    rectangular vessel with rounded corners (Figure 13-2(a) Sketch 3 —
    presumably ASME Section VIII; see the equation references in
    Appendix13_7_cCalcs)."""
    def __init__(
        self,
        internal_pressure,
        corner_radius,
        short_side_half_length,
        long_side_half_length,
        thickness,
        eval_at_outer_walls = False):
        self.P = internal_pressure  # internal design pressure
        self.R = corner_radius  # corner radius
        self.L_1 = short_side_half_length  # half-length of the short side
        self.L_2 = long_side_half_length  # half-length of the long side
        self.t_1 = thickness  # plate thickness
        # When True, stresses are evaluated at the outer wall surface
        # (flips the sign of the extreme-fiber distance c).
        self.eval_at_outer_walls = eval_at_outer_walls
class Appendix13_7_cCalcs:
    """Appendix 13-7 membrane/bending/total stress calculations for a
    Figure 13-2(a) Sketch 3 vessel (rectangular cross section with rounded
    corners), evaluated per unit width of plate.

    Stresses are reported at the points A/B (long side), C/D (short side)
    and in the corner region B-C; S_m_* are membrane stresses, S_b_* bending
    stresses and S_T_* their sums.
    """
    def __init__(self, params: Appendix13_7_cParams):
        self.P = params.P
        self.R = params.R
        self.L_1 = params.L_1
        self.L_2 = params.L_2
        self.t_1 = params.t_1
        self.isOuterWallEval = params.eval_at_outer_walls
    def c(self):
        """
        :return: The distance from the neutral axis of cross section to extreme fibers. Will return c_i or c_o for its thickness, depending on pressure
        """
        # Inner fiber: +t/2; outer fiber: -t/2 (sign flips the bending term).
        sign = 1
        if self.isOuterWallEval:
            sign = -1
        return 0.5 * sign * self.t_1
    def I_1(self):
        # Second moment of area of a unit-width plate strip: t^3 / 12.
        return (1 / 12.0) * self.t_1 ** 3
    def alpha3(self):
        # Aspect ratio L_2 / L_1.
        return self.L_2 / self.L_1
    def phi(self):
        # Corner radius ratio R / L_1.
        return self.R / self.L_1
    def K_3(self):
        """
        :return: Equation 40
        """
        return (-1.0) * (self.L_1 ** 2) * (
            6.0 * (self.phi() ** 2) * self.alpha3()
            - 3.0 * math.pi * (self.phi() ** 2)
            + 6.0 * (self.phi() ** 2)
            + (self.alpha3() ** 3)
            + (3.0 * self.alpha3() ** 2)
            - 6.0 * self.phi()
            - 2.0
            + 1.5 * math.pi * self.phi() * (self.alpha3() ** 2)
            + 6.0 * self.phi() * self.alpha3()
        ) / (3.0 * (2.0 * self.alpha3() + math.pi * self.phi() + 2.0))
    def M_A(self):
        """
        :return: Equation 38
        """
        return self.P * self.K_3()
    def M_r(self):
        """
        :return: equation 39
        """
        raise ValueError("Looks like it's time to implement M_r")
    def S_m_C(self):
        """
        :return: Short side membrane stress at point C for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 25
        """
        return (self.P * (self.R + self.L_2)) / self.t_1
    def S_m_D(self):
        """
        :return: Same as S_m_C
        """
        return self.S_m_C()
    def S_m_A(self):
        """
        :return: Long side membrane stress at point A for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 26
        """
        return (self.P *(self.L_1 + self.R)) / self.t_1
    def S_m_B(self):
        """
        :return: Same as S_m_A
        """
        return self.S_m_A()
    def S_m_BC(self):
        """
        :return: Membrane stress in radius, between points B and C for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 27
        """
        return (self.P / self.t_1) * (math.sqrt((self.L_2 ** 2) + self.L_1 ** 2) + self.R)
    def S_b_C(self):
        """
        :return: Bending stress at C for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 28
        """
        return (self.c() / (2.0 * self.I_1())) * (2.0 * self.M_A() + self.P * (2 * self.R * self.L_2 - 2.0 * self.R * self.L_1 + self.L_2 ** 2))
    def S_b_D(self):
        """
        :return: Bending stress at D for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 29
        """
        # NOTE(review): (self.L_2 ** 2) appears twice in this sum, unlike
        # S_b_C — confirm against equation 29 before relying on this value.
        return (self.c() / (2.0 * self.I_1())) * (2.0 * self.M_A() + self.P * ((self.L_2 ** 2) + 2 * self.R * self.L_2 - 2.0 * self.R * self.L_1 + self.L_2 ** 2))
    def S_b_A(self):
        """
        :return: Bending stress at point A for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 30
        """
        return self.M_A() * self.c() / self.I_1()
    def S_b_B(self):
        """
        :return: Bending stress at point B for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 31
        """
        return (self.c() / (2 * self.I_1())) * (2 * self.M_A() + self.P * self.L_2 ** 2)
    def S_b_BC(self):
        """
        :return: Max bending stress between points B and C for corner sections for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 32
        """
        # Angle (from the corner arc) at which the bending moment peaks.
        maxStressTheta = math.atan(self.L_1 / self.L_2)
        geom = self.c() / self.I_1()
        moment = 0.5 * (2 * self.M_A() + self.P * (2 * self.R * (self.L_2 * math.cos(maxStressTheta) - self.L_1 * (1 - math.sin(maxStressTheta))) + self.L_2 ** 2))
        return geom * moment
    def S_T_C(self):
        """
        :return: Total stress at point C for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 33
        """
        return self.S_m_C() + self.S_b_C()
    def S_T_D(self):
        """
        :return: Total stress at point D for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 34
        """
        return self.S_m_D() + self.S_b_D()
    def S_T_A(self):
        """
        :return: Total stress at point A for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 35
        """
        return self.S_m_A() + self.S_b_A()
    def S_T_B(self):
        """
        :return: Total stress at point B for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 36
        """
        return self.S_m_B() + self.S_b_B()
    def S_T_BC(self):
        """
        :return: Total stress between points B and C for corner sections for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 37
        """
        return self.S_m_BC() + self.S_b_BC()
if __name__ == "__main__":
    import copy

    # Demo run with sample geometry; the same results are reported for the
    # inner- and outer-wall evaluations.
    params_inner = Appendix13_7_cParams(
        internal_pressure=100,
        corner_radius=3,
        short_side_half_length=5,
        long_side_half_length=10,
        thickness=1
    )
    calc_inner = Appendix13_7_cCalcs(params_inner)

    params_outer = copy.deepcopy(params_inner)
    params_outer.eval_at_outer_walls = True
    calc_outer = Appendix13_7_cCalcs(params_outer)

    print("*** Input ***")
    for label in ("P", "R", "L_1", "L_2", "t_1"):
        print(label + " = " + str(getattr(params_inner, label)))
    print("")
    print("*** Output ***")
    print("")

    # M_r is intentionally skipped: it is not implemented yet.
    result_names = ("c", "I_1", "alpha3", "phi", "K_3", "M_A",
                    "S_m_C", "S_m_D", "S_m_A", "S_m_B", "S_m_BC",
                    "S_b_C", "S_b_D", "S_b_A", "S_b_B", "S_b_BC",
                    "S_T_C", "S_T_D", "S_T_A", "S_T_B", "S_T_BC")

    sections = (("*** Inner Walls ***", calc_inner),
                ("*** Outer Walls ***", calc_outer))
    for index, (title, calc) in enumerate(sections):
        if index:
            print("")
        print(title)
        for name in result_names:
            print(name + " = " + str(getattr(calc, name)()))
from __future__ import absolute_import, division, print_function
import stripe
import pytest
# Run every coroutine test in this module under the asyncio marker.
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "link_123"
class TestFileLink(object):
    """Exercises the async stripe.FileLink resource (list, retrieve, create,
    save-after-mutation, modify), asserting via the request_mock fixture that
    the expected HTTP verb and path were issued."""
    async def test_is_listable(self, request_mock):
        resources = await stripe.FileLink.list()
        request_mock.assert_requested("get", "/v1/file_links")
        assert isinstance(resources.data, list)
        assert isinstance(resources.data[0], stripe.FileLink)
    async def test_is_retrievable(self, request_mock):
        resource = await stripe.FileLink.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/file_links/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.FileLink)
    async def test_is_creatable(self, request_mock):
        resource = await stripe.FileLink.create(file="file_123")
        request_mock.assert_requested("post", "/v1/file_links")
        assert isinstance(resource, stripe.FileLink)
    async def test_is_saveable(self, request_mock):
        # Mutating metadata then saving should POST to the resource URL.
        resource = await stripe.FileLink.retrieve(TEST_RESOURCE_ID)
        resource.metadata["key"] = "value"
        await resource.save()
        request_mock.assert_requested(
            "post", "/v1/file_links/%s" % TEST_RESOURCE_ID
        )
    async def test_is_modifiable(self, request_mock):
        resource = await stripe.FileLink.modify(
            TEST_RESOURCE_ID, metadata={"key": "value"}
        )
        request_mock.assert_requested(
            "post", "/v1/file_links/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.FileLink)
| 1,619 | 524 |
import FWCore.ParameterSet.Config as cms
# EDFilter configuration that selects events by beam mode; by default only
# events taken with beam mode 11 (STABLE) pass the filter.
l1GtBeamModeFilter = cms.EDFilter("L1GtBeamModeFilter",
    # input tag for input tag for ConditionInEdm products
    CondInEdmInputTag = cms.InputTag("conditionsInEdm"),
    # input tag for the L1 GT EVM product
    L1GtEvmReadoutRecordTag = cms.InputTag("gtEvmDigis"),
    #
    # vector of allowed beam modes
    # default value: 11 (STABLE)
    AllowedBeamMode = cms.vuint32(11),
    # return the inverted result, to be used instead of NOT
    #   normal result:   true if filter true
    #                    false if filter false or error (no product found)
    #   inverted result: true if filter false
    #                    false if filter true or error (no product found)
    InvertResult = cms.bool( False )
)
| 762 | 245 |
def kernel_01_hydro(LEN_1D):
    """Hydro fragment kernel: computes x[k] = q + y[k]*(r*zx[k+10] + t*zx[k+11])
    over random data, first as an explicit loop, then as a NumPy array op."""
    #! init
    import numpy as np
    r = np.random.randn(1)
    t = np.random.randn(1)
    q = np.random.randn(1)
    x = np.zeros(LEN_1D)
    y = np.random.randn(LEN_1D)
    zx = np.random.randn(LEN_1D + 11)
    #! loop
    for idx in range(LEN_1D):
        x[idx] = q + y[idx] * (r * zx[idx + 10] + t * zx[idx + 11])
    #! array_op
    x = q + y * (r * zx[10:LEN_1D + 10] + t * zx[11:LEN_1D + 11])
| 421 | 230 |
import copy
def _direction():
# If array index start at 0, 0 and we say that is top left, (x, y)
yield -1, -1 # UL
yield -1, 0 # L
yield -1, 1 # UR
yield 0, -1 # U
yield 0, 1 # D
yield 1, -1 # DL
yield 1, 0 # R
yield 1, 1 # DR
# def _in_matrix(pos, seats):
# return 0 <= pos[0] < len(seats[0]) and 0 <= pos[1] < len(seats)
class Seating:
    """Cellular-automaton simulation over a grid of seats read from a file.

    Cell symbols: 'L' = free seat, '#' = occupied seat, '.' = floor.
    Positions are (x, y) tuples indexing self._seats[y][x].
    """
    def __init__(self, file):
        with open(file) as f:
            # A list of char arrays.
            self._seats = [list(x) for x in f.read().splitlines()]
    def _valid_position(self, pos):
        # Assumes a rectangular grid (width taken from row 0) — TODO confirm
        # the input files are always rectangular.
        return 0 <= pos[0] < len(self._seats[0]) and 0 <= pos[1] < len(self._seats)
    def _calc_pos(self, pos, d, ignore_floor):
        """Step from pos in direction d; with ignore_floor, keep stepping over
        floor cells until a non-floor cell or the grid edge is reached."""
        n_pos = (pos[0] + d[0], pos[1] + d[1])
        if ignore_floor:
            while True:
                if not self._valid_position(n_pos) or not self._floor(self._seats[n_pos[1]][n_pos[0]]):
                    break
                n_pos = (n_pos[0] + d[0], n_pos[1] + d[1])
        return n_pos
    def _get_neighbor_seats(self, pos, ignore_floor):
        """Return the cell values visible from pos in the 8 directions
        (adjacent cells, or first non-floor cells when ignore_floor)."""
        ns_pos = [self._calc_pos(pos, d, ignore_floor) for d in _direction()]
        # Off-grid positions are dropped, so edge cells have fewer neighbors.
        ns_pos_valid = filter(self._valid_position, ns_pos)
        return [self._seats[x[1]][x[0]] for x in ns_pos_valid]
    @staticmethod
    def _free(seat):
        return seat == 'L'
    @staticmethod
    def _floor(seat):
        return seat == '.'
    @staticmethod
    def _occupied(seat):
        return seat == '#'
    def _seat_change(self, pos, neighbors, tolerant):
        """Apply the update rules to one cell and return its next value:
        a free seat with no occupied neighbors becomes occupied; an occupied
        seat with >= 4 (or >= 5 when tolerant) occupied neighbors frees up."""
        curr = self._seats[pos[1]][pos[0]]
        occupied_cnt = len([n for n in neighbors if self._occupied(n)])
        if self._free(curr) and occupied_cnt == 0:
            curr = '#'
        elif self._occupied(curr):
            if not tolerant:
                if occupied_cnt >= 4:
                    curr = 'L'
            else:
                if occupied_cnt >= 5:
                    curr = 'L'
        return curr
    def _iterate(self, ignore_floor, tolerant):
        """Advance the whole grid one generation (all cells updated from the
        previous state). Returns True when the grid has reached a fixed point."""
        new_seats = copy.deepcopy(self._seats)
        for y, row in enumerate(self._seats):
            for x, seat in enumerate(row):
                # Neighbors are read from the *old* grid so updates are
                # simultaneous; results are written into new_seats.
                neighbors = self._get_neighbor_seats((x, y), ignore_floor)
                seat = self._seat_change((x, y), neighbors, tolerant)
                if seat != self._seats[y][x]:
                    new_seats[y][x] = seat
        if self._seats == new_seats:
            return True
        else:
            self._seats = copy.deepcopy(new_seats)
            return False
    def iterate_until_stable(self, ignore_floor, tolerant):
        """Run generations until the grid stops changing."""
        while True:
            if self._iterate(ignore_floor, tolerant):
                break
        return
    def iterate_times(self, iterations, ignore_floor, tolerant):
        """Run at most *iterations* generations, stopping early on a fixed point."""
        while True:
            if iterations == 0 or self._iterate(ignore_floor, tolerant):
                break
            iterations -= 1
        return
    def count_occupied(self):
        """Return the number of occupied ('#') cells in the current grid."""
        cnt = 0
        for r in self._seats:
            for s in r:
                cnt += self._occupied(s)
        return cnt
    def get_seats(self):
        """Return a deep copy of the current grid."""
        return copy.deepcopy(self._seats)
| 3,177 | 1,084 |
import os
import json
from challenge import FileReader, Product, Listing, MatchSearch
import challenge
# Match the first 1000 listings against the known products, write the matches
# and non-matches to disk, then diff the result against a known-good partial
# solution.
reader = FileReader()
search = MatchSearch()

products = reader.read_products('products.txt')
listings = reader.read_listings('listings.txt')
listings = listings[0:1000]

result = search.match_listings(listings, products, debug=lambda c: print(c))

# Write one JSON object per product, sorted case-insensitively by name.
key_list = sorted(result.keys(), key=lambda s: s.lower())
with open('output.txt', 'w') as f:
    for key in key_list:
        f.write(json.dumps({"product_name": key, "listings": result[key]}))
        f.write('\n')

print("non matches: " + str(len(search.non_matches)))
with open('output_non_matches.txt', 'w') as f:
    for non_match in search.non_matches:
        f.write(json.dumps(non_match.dict_without_tags()))
        f.write('\n')

# verify solution: compare our product names against the reference output
to_verify_list = reader.read_json_list('correct_partial_solution.txt')
products_expected = [item['product_name'] for item in to_verify_list]

# Reference products we failed to produce.
expected_missing = [correct for correct in products_expected if correct not in key_list]
print("expected to be on output:")
for error in expected_missing:
    print(error)

# Products we produced that the reference does not contain.
non_expected_list = [o for o in key_list if o not in products_expected]
print("Non expected to be on output:")
for error in non_expected_list:
    print(error)
| 1,408 | 481 |
# Mapping between numeric sex codes and their human-readable names.
SEX_ID_TO_NAME = {
    1: "male",
    2: "female",
    3: "both",
}
# Inverse lookup: name -> numeric id.
SEX_NAME_TO_ID = {v: k for (k, v) in SEX_ID_TO_NAME.items()}
| 130 | 70 |
import paramiko
from django.conf import settings
# Remote directories holding the squid access logs and the pptp messages logs.
remotepath = settings.SQUID_LOGDIR_REMOTE
remotepath_messages = settings.PPTP_LOGDIR_REMOTE
# SSH credentials for the log host.
username = settings.SQUID_USERNAME
password = settings.SQUID_PASSWORD
# local path for both log types and programs
localpath = settings.SQUID_LOGDIR
log_filename = settings.LOG_FILENAME
def download_logs_sftp():
    """Download squid access logs and poptop messages logs from the remote
    host over SFTP into the local log directory.

    Remote files are selected by matching the directory listing against the
    configured ``log_filename`` patterns.
    """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # NOTE(review): host is hard-coded; consider moving it into settings.
    client.connect('10.87.250.12', username=username, password=password)
    try:
        # Fixed: a single SFTP session is opened and reused (the original
        # opened a second session and never closed the first), and both the
        # session and the SSH client are closed even if a transfer fails.
        sftp = client.open_sftp()
        try:
            # download squid access.log
            _fetch_matching_logs(client, sftp, remotepath)
            # download poptop messages.log
            _fetch_matching_logs(client, sftp, remotepath_messages)
        finally:
            sftp.close()
    finally:
        client.close()


def _fetch_matching_logs(client, sftp, remote_dir):
    """Copy every file in *remote_dir* whose name contains one of the
    configured log filenames into the local log directory."""
    stdin, stdout, stderr = client.exec_command('cd {} && ls'.format(remote_dir))
    for line in stdout:
        for logfile in log_filename:
            if logfile in line:
                name = line.rstrip()
                sftp.get(remote_dir + name, localpath + name)
import sys
import datetime
# Sample program to be initiated by the Simio Step RunExecutable with "Python" ArgumentLogic.
# This runs python scripts with argument convention of: 1st arg is the script name, followed
# by arguments. All args are surrounded with a double-quote.
# The script append-prints the arguments it finds and redirects to a file.
def logit( message ):
    """Print *message* prefixed with a [HH:MM:SS.microseconds] timestamp."""
    now = datetime.datetime.now()
    print(now.strftime("[%H:%M:%S.%f] "), message)
# redirect stdout to a file
from contextlib import redirect_stdout

try:
    # Append so repeated Simio runs accumulate in the same log file.
    # (Fixed: the path previously mixed '\\t' escapes with a bare '\P';
    # every backslash is now explicitly doubled.)
    with open('c:\\test\\testRunExecutable\\PythonScriptTakingArgumentsOutput.txt', 'a') as f:
        with redirect_stdout(f):
            logit('Name of the script: ' + sys.argv[0])
            numArgs = len(sys.argv)
            logit('Number of arguments: ' + str(numArgs))
            for index, arg in enumerate(sys.argv):
                logit("Arg[" + str(index) + "]=" + arg)
            logit('The list of arguments: ' + str(sys.argv))
except Exception as e:
    # Fixed: narrow the bare except and report the exception itself rather
    # than just its type (sys.exc_info()[0]), so the caller sees the message.
    print("Error= %s" % e)
# Copyright 2021 Drexel University
# Author: Geoffrey Mainland <mainland@drexel.edu>
# Best-effort re-export of the native _dragonradio network bindings: if the
# compiled extension is unavailable for any reason, leave this module empty
# instead of failing at import time.
try:
    from _dragonradio.net import *
except:
    pass
| 139 | 54 |
import logging
from contextlib import suppress
from math import fabs
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery, Message, ReplyKeyboardRemove
from aiogram.utils.exceptions import (MessageToDeleteNotFound,
MessageToEditNotFound)
from app.__main__ import bot
from ..database.base import Item, Shop, User
from ..handlers.user_handlers import user_inventory
from ..helpers.dev_text import gear_info_text
from ..helpers.keyboards import (CONFIRM_Kb, CRAFT_Kb, EQUIPMENT_Kb, IDLE_Kb,
UNDRESS_Kb)
from ..utils.states import MainStates
async def gear_info_check(m: Message):
    """Reply with an item info card when the user sends a message whose tail
    (after the first character) is an item id; silently ignore other text."""
    try:
        gear = await Item.get(int(m.text[1:]))
        if gear:
            await m.answer(text=gear_info_text(gear))
        else:
            with suppress(MessageToDeleteNotFound):
                await m.delete()
            await m.answer('❗ Такого предмета не существует')
    except ValueError:
        # The message tail was not a number — not an item query, ignore it.
        return
async def gear_equip(c: CallbackQuery, user: User):
    """Handle the equip-keyboard callback: either return to the inventory
    ('back') or equip the selected item, moving it out of the inventory and
    applying its stat bonuses."""
    if c.data[6:] == 'back':
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        await user_inventory(c.message, user)
    else:
        gear = await Item.get(int(c.data[6:]))
        if gear.id in user.inventory:
            # The matching slot attribute (user.weapon / user.armor, named by
            # gear.item_class) must be empty before equipping.
            if getattr(user, gear.item_class) is None:
                user.inventory.remove(gear.id)
                await user.update(inventory=user.inventory, defence=user.defence + gear.defence_boost,
                                  max_defence=user.max_defence + gear.defence_boost,
                                  damage=user.damage + gear.attack_boost).apply()
                # Store the item id in the slot that matches its class.
                await user.update(weapon=gear.id).apply() if gear.item_class == 'weapon' else await user.update(armor=gear.id).apply()
                await c.message.delete()
                await c.message.answer(text="❕ Вы надели экипировку", reply_markup=IDLE_Kb())
            else:
                await c.message.delete()
                await c.message.answer(text="❗ Сначала снимите экипировку", reply_markup=EQUIPMENT_Kb())
        else:
            await c.message.delete()
            await c.message.answer(text="❗ У вас нету такого предмета", reply_markup=IDLE_Kb())
async def gear_unequip(m: Message, user: User):
    """Show the unequip keyboard listing the user's weapon and armor slots
    (occupied slots show the item name; empty ones show a placeholder)."""
    # Fixed: the original tested `(user.weapon or user.armor) != None` /
    # `eq[i] != None`, which misreads any falsy-but-present slot value;
    # explicit `is not None` checks each slot correctly.
    if user.weapon is not None or user.armor is not None:
        data = []
        for item_id in (user.weapon, user.armor):
            if item_id is not None:
                gear = await Item.get(item_id)
                data.extend([gear.name, gear.id])
            else:
                data.extend(['- Пусто -', 'empty'])
        await m.answer('❔ Выбери какую экипировку снимать:',
                       reply_markup=UNDRESS_Kb(data))
    else:
        await m.answer('❗ У тебя нету экипировки', reply_markup=IDLE_Kb())
async def gear_unequip_query(c: CallbackQuery, user: User):
    """Take off the piece of gear selected on the UNDRESS keyboard.

    Callback data is 'undress_<id>' (8-char prefix).
    """
    gear = await Item.get(int(c.data[8:]))
    # user.weapon => Common Sword (example)
    if gear:
        user.inventory.append(gear.id)
        # Current defence is clamped at zero so removing a boost cannot go
        # negative; max_defence and damage are reduced without clamping.
        await user.update(defence=user.defence - gear.defence_boost if user.defence - gear.defence_boost >= 0 else 0,
                          max_defence=user.max_defence - gear.defence_boost,
                          damage=user.damage - gear.attack_boost, inventory=user.inventory).apply()
        # Clear whichever equipment slot the item occupied.
        await user.update(weapon=None).apply() if gear.item_class == 'weapon' else await user.update(armor=None).apply()
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        await c.message.answer(f"❕ Вы сняли \"{gear.name}\"", reply_markup=IDLE_Kb())
    else:
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        await c.message.answer('❗ У тебя нету экипировки', reply_markup=IDLE_Kb())
async def gear_craft(m: Message, user: User):
    """List craftable pairs: each item id owned 2+ times yields one craft
    option per pair.

    Fixes: removes a leftover debug print and uses integer floor division
    instead of `int(count / 2)` for the pair count (same result, clearer).
    """
    raw = []
    if not user.inventory:
        await m.answer(text='❗ Инвентарь пуст', reply_markup=IDLE_Kb())
        return
    # item id -> number of craftable pairs (only ids owned more than once)
    inv = {x: user.inventory.count(x) // 2 for x in set(user.inventory) if user.inventory.count(x) != 1}
    if not inv:
        await m.answer(text='❗ У вас нету подходящих предметов', reply_markup=IDLE_Kb())
        return
    for item_id, pairs in inv.items():
        raw_item = await Item.get(int(item_id))
        if raw_item:
            # One keyboard entry per owned pair of this item.
            raw.extend([raw_item] * pairs)
    await m.answer(text='🧳❕ Выберите какую пару предметов крафтить:', reply_markup=CRAFT_Kb(raw))
async def gear_craft_query(c: CallbackQuery, user: User):
    """Craft two copies of the selected item into the next-tier item.

    Callback data is 'craft_<id>' (6-char prefix). The craft result is
    assumed to be the item with the next database id of the same class.
    """
    curr_gear = await Item.get(int(c.data[6:]))
    if curr_gear:
        # Consume exactly two copies of the source item from the inventory.
        for _ in range(2):
            if curr_gear.id in user.inventory:
                user.inventory.remove(curr_gear.id)
            else:
                # NOTE(review): if only one copy existed, it was already
                # removed from the in-memory list above and is neither
                # restored nor persisted - confirm this is intended.
                with suppress(MessageToDeleteNotFound):
                    await c.message.delete()
                await c.message.answer('❕ В вашем инвентаре больше нету такого предмета', reply_markup=IDLE_Kb())
                return
        craft_result = await Item.get(curr_gear.id + 1)
        if curr_gear.item_class == craft_result.item_class:
            user.inventory.append(craft_result.id)
            await user.update(inventory=user.inventory).apply()
            with suppress(MessageToDeleteNotFound):
                await c.message.delete()
            await c.message.answer(
                text=f"❕ Вы успешно скрафтили предмет:\n\n{gear_info_text(craft_result)}",
                reply_markup=IDLE_Kb())
        else:
            # The next id belongs to a different class => already top tier.
            with suppress(MessageToDeleteNotFound):
                await c.message.delete()
            await c.message.answer('❗ Предметы уже максимального качества', reply_markup=IDLE_Kb())
    else:
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        await c.message.answer('<b>Error:</b> Broken item (Свяжитесь с администрацией)', reply_markup=IDLE_Kb())
        raise NameError("Broken item")
async def gear_sell_confirm(c: CallbackQuery, user: User):
    """Show the sale disclaimer with a confirm/cancel keyboard.

    The item id from 'sell_<id>' callback data (5-char prefix) is carried
    into the 'sell_register_<id>' confirmation callback.
    """
    await c.message.edit_text(f'💸 <b>Продажа предмета.</b>\n\n<i> - Продажа предмета осуществляется между игроками, без участия администрации. Советуем ставить разумную цену\n\n'
                              f' - Продавая предмет вы не получите прибыль <u>моментально</u>! Вы лишь регистрируете его \"в очередь\" где другие пользователи могут купить его. </i>',
                              reply_markup=CONFIRM_Kb(text=('💸 Продолжить', '🔚 Отменить'), callback=f'sell_register_{c.data[5:]}'))
async def gear_sell_register(c: CallbackQuery, user: User, state: FSMContext):
    """Enter the selling FSM state and prompt the user for a price.

    Callback data is 'sell_register_<id>' (14-char prefix). The item and
    the prompt message are stashed in FSM storage for the next step.
    """
    item = await Item.get(int(c.data[14:]))
    if not item:
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        await c.message.answer('<b>Error:</b> Broken item (Свяжитесь с администрацией)', reply_markup=IDLE_Kb())
        raise NameError("Broken item")
    await MainStates.selling.set()
    with suppress(MessageToDeleteNotFound):
        await c.message.delete()
    prompt = await c.message.answer('❔ <b>Как зарегистрировать предмет:</b>\n\n<i> - На данном этапе всё просто ведь Башня делает почти всё за вас, '
                                    'вам же нужно отправить боту <u>стоимость</u> предмета</i>. \n\nПример: '
                                    '\"999\"', reply_markup=ReplyKeyboardRemove())
    async with state.proxy() as data:
        data['sell_item'] = item
        data['trash'] = prompt
async def gear_sell_registered(m: Message, user: User, state: FSMContext):
    """Finish the sale: the user's message text is the asking price.

    Creates a Shop lot, removes the item from the seller's inventory and
    always clears the FSM state afterwards.
    """
    async with state.proxy() as data:
        item = data['sell_item']
        trash = data['trash']
    try:
        # fabs() makes a negative entered price positive before storing it.
        request = await Shop.create(item_id=item.id, item=item.name, rank=item.rank, price=int(fabs(int(m.text))), user_id=user.id)
        # removing from the inventory
        # NOTE(review): if the item is absent from the inventory, .remove
        # raises ValueError *after* the lot was created, and the except
        # branch then reports "not a number" - confirm this is acceptable.
        user.inventory.remove(request.item_id)
        await m.delete()
        with suppress(MessageToDeleteNotFound):
            await trash.delete()
        await m.answer(text=f'❕ Лот №{request.id} на продажу создан:\n\n{request.item}: /{request.item_id}\n'
                            f'🏆 Ранг предмета: {request.rank}\n💸 Цена: {request.price}', reply_markup=IDLE_Kb())
        await user.update(inventory=user.inventory).apply()
    except (ValueError):
        await m.delete()
        with suppress(MessageToDeleteNotFound):
            await trash.delete()
        await m.answer(text='❗️ Вы не ввели число.', reply_markup=IDLE_Kb())
    finally:
        # Always leave the "selling" state and drop the stored data.
        await state.reset_data()
        await state.reset_state()
| 8,712 | 2,854 |
# Alternative approach kept for reference as an inert string literal:
# pygame's mixer could also play the file (never executed).
'''from pygame import mixer
mixer.init()
mixer.music.load('ex021.mp3')
mixer.music.play()
input('Agora dá para escutar')'''
# It can also be done this way (plays the mp3 file):
import playsound
playsound.playsound('ex021.mp3')
| 206 | 81 |
"""
project.conf
Configuration module holding all the options
"""
DEBUG = True
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
MONGO_DBNAME = os.environ.get("MONGOHQ_URL") or "mongodb://localhost:27017/shakuni"
THREADS_PER_PAGE = 2
CSRF_ENABLED = True
CSRF_SESSION_KEY = "secret"
SECRET_KEY = "secret"
STATIC_FOLDER = 'app/static'
TEMPLATES_FOLDER = 'app/templates'
FACEBOOK_APP_ID = os.environ.get("FACEBOOK_APP_ID") or '672966529447612'
FACEBOOK_APP_SECRET = os.environ.get("FACEBOOK_APP_SECRET") or '8e4a083bb66fc0e81d18e3acbd3b52aa'
# supported currencies
CURRENCIES = (
('INR', 'Indian Rupee'),
('USD', 'US Dollar'),
('GBP', 'Pound'),
('EUR', 'Euro'),
)
| 701 | 325 |
# -*- coding: utf-8 -*-
__about__ = """
This project comes with the bare minimum set of applications and templates
to get you started. It includes no extra tabs, only the profile and notices
tabs are included by default. From here you can add any extra functionality
and applications that you would like.
"""
| 310 | 78 |
# -*- coding: utf-8 -*-
from base import BaseHandler, LanguageHandler, NullHandler
from text import SingleSentenceHandler
# Public API of this package.
# NOTE(review): BaseHandler is imported above but omitted here - confirm it
# is meant to stay internal.
__all__ = [
    "LanguageHandler",
    "NullHandler",
    "SingleSentenceHandler",
]
| 209 | 66 |
import os
import sys
import numpy as np
import pandas as pd
import logging
import gc
import tqdm
import pickle
import json
import time
import tempfile
from gensim.models import Word2Vec
# All embedding artifacts (corpora and trained models) live under ./embed_artifact.
cwd = os.getcwd()
embed_path = os.path.join(cwd, 'embed_artifact')
# Training corpus for w2v model
# Maps a target name to the pickled id-sequence corpus used to train it.
corpus_dic = {
    'creative': os.path.join(embed_path, 'embed_train_creative_id_seq.pkl'),
    'ad': os.path.join(embed_path, 'embed_train_ad_id_seq.pkl'),
    'advertiser': os.path.join(embed_path, 'embed_train_advertiser_id_seq.pkl'),
    'product': os.path.join(embed_path, 'embed_train_product_id_seq.pkl')
}
def initiate_logger(log_path):
    """
    Initialize and return a module logger that writes INFO records to both
    `log_path` and stdout.

    Bug fix: handlers are attached only on the first call; previously every
    call added a fresh file and stream handler to the same logger object,
    duplicating each subsequent log line.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # attach handlers once; re-use on later calls
        formatter = logging.Formatter('%(asctime)s %(levelname)-s: %(message)s', datefmt='%H:%M:%S')
        fh = logging.FileHandler(log_path)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        sh = logging.StreamHandler(sys.stdout)
        sh.setLevel(logging.INFO)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    logger.info('===================================')
    logger.info('Begin executing at {}'.format(time.ctime()))
    logger.info('===================================')
    return logger
def train(target, embed_size, logger=None):
    """
    Train a skip-gram Word2Vec model on the corpus registered for `target`
    and save the artifact to a uniquely named temp file under `embed_path`.

    Fix: drops the pointless `global corpus_dic, embed_path` declaration -
    both names are only read, never rebound, so `global` had no effect.

    :param target: key into the module-level `corpus_dic` registry
    :param embed_size: embedding vector dimensionality
    :param logger: optional logger for progress messages
    :return: path of the saved model file
    """
    assert target in corpus_dic
    start = time.time()
    with open(corpus_dic[target], 'rb') as f:
        corpus = pickle.load(f)
    if logger: logger.info('{} corpus is loaded after {:.2f}s'.format(target.capitalize(), time.time()-start))
    # sg=1/hs=1: skip-gram with hierarchical softmax; min_count=1 keeps every id.
    model = Word2Vec(sentences=corpus, size=embed_size, window=175, sg=1, hs=1, min_count=1, workers=16)
    if logger: logger.info('{} w2v training is done after {:.2f}s'.format(target.capitalize(), time.time()-start))
    save_path = os.path.join(embed_path, '{}_sg_embed_s{}_'.format(target, embed_size))
    # NamedTemporaryFile guarantees a unique file name with `save_path` as prefix.
    with tempfile.NamedTemporaryFile(prefix=save_path, delete=False) as tmp:
        tmp_file_path = tmp.name
    model.save(tmp_file_path)
    if logger: logger.info('{} w2v model is saved to {} after {:.2f}s'.format(target.capitalize(), tmp_file_path, time.time()-start))
    return tmp_file_path
if __name__=='__main__':
    # Usage: python train_w2v.py <target> <embed_size>
    assert len(sys.argv)==3
    target, embed_size = sys.argv[1], int(sys.argv[2])
    # Set up w2v model registry
    registry_path = os.path.join(embed_path, 'w2v_registry.json')
    if os.path.isfile(registry_path):
        with open(registry_path, 'r') as f:
            w2v_registry = json.load(f)
    else:
        w2v_registry = {}
    logger = initiate_logger('train_w2v.log')
    # Train w2v model if there hasn't been one registered
    if target not in w2v_registry:
        w2v_path = train(target, embed_size, logger=logger)
        w2v_registry[target] = w2v_path
    else:
        logger.info('{} w2v model found, skip'.format(target.capitalize()))
    # Save w2v model registry
    with open(registry_path, 'w') as f:
        json.dump(w2v_registry, f)
| 2,934 | 1,121 |
import os
import gc
import glob
import time
import random
import imageio
import logging
from functools import wraps
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision.utils as torch_utils
from postprocess import SegDetectorRepresenter
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): inference device is pinned to CPU; re-enable the line above
# to auto-select CUDA.
device = 'cpu'
def setup_determinism(seed=42):
    """Seed every RNG (hash, python, numpy, torch) and force deterministic cuDNN.

    https://github.com/pytorch/pytorch/issues/7068#issuecomment-487907668
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def setup_logger(logger_name='dbtext', log_file_path=None):
    """Return a DEBUG-level named logger, optionally logging to a file.

    Bug fix: a FileHandler is attached only if the logger has none yet;
    previously every call with a path added another handler to the same
    logger, duplicating each record.
    """
    logging._warn_preinit_stderr = 0  # silence the pre-init stderr warning
    logger = logging.getLogger(logger_name)
    formatter = logging.Formatter(
        '%(asctime)s %(name)s %(levelname)s: %(message)s')
    if log_file_path is not None and not logger.handlers:
        file_handle = logging.FileHandler(log_file_path)
        file_handle.setFormatter(formatter)
        logger.addHandler(file_handle)
    logger.setLevel(logging.DEBUG)
    return logger
def timer(func):
    """Decorator that prints the wall-clock duration of each call."""
    @wraps(func)
    def timed(*args, **kwargs):
        began = time.time()
        outcome = func(*args, **kwargs)
        elapsed = time.time() - began
        print(">>> Function {}: {}'s".format(func.__name__, elapsed))
        return outcome
    return timed
def to_device(batch, device='cuda'):
    """Return a new list with every tensor in `batch` moved to `device`;
    non-tensor entries are passed through unchanged."""
    return [
        item.to(device) if isinstance(item, torch.Tensor) else item
        for item in batch
    ]
def dict_to_device(batch, device='cuda'):
    """Move tensor values of `batch` to `device` in place and return it."""
    for key in batch:
        value = batch[key]
        if isinstance(value, torch.Tensor):
            batch[key] = value.to(device)
    return batch
def to_list_tuples_coords(anns):
    """Convert nested annotations of (x, y) array pairs into plain lists of
    (x, y) value tuples, taking the first element of each coordinate array."""
    return [
        [(x[0].tolist(), y[0].tolist()) for x, y in ann]
        for ann in anns
    ]
def matplotlib_imshow(img, one_channel=False):
    """Display a CHW tensor normalized to [-1, 1] with matplotlib."""
    if one_channel:
        img = img.mean(dim=0)
    img = img / 2 + 0.5  # unnormalize back to [0, 1]
    arr = img.numpy()
    if one_channel:
        plt.imshow(arr, cmap="Greys")
        return
    plt.imshow(np.transpose(arr, (1, 2, 0)))
def str_to_bool(value):
    """Parse a yes/no style string into a bool, case-insensitively.

    Fix: the sets previously contained 'False' and 'True' with capital
    letters, which could never match the lower-cased input - dead members
    removed.

    :raises ValueError: for unrecognized input
    """
    lowered = value.lower()
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    raise ValueError('{} is not a valid boolean value'.format(value))
def minmax_scaler_img(img):
    """Linearly rescale `img` to the [0, 255] range and cast to uint8."""
    scaled = (img - img.min()) * (1 / (img.max() - img.min()) * 255)
    return scaled.astype('uint8')
def visualize_tfb(tfb_writer,
                  imgs,
                  preds,
                  global_steps,
                  thresh=0.5,
                  mode="TRAIN"):
    """Log input images and binarized probability/threshold maps to
    TensorBoard.

    NOTE(review): `preds` is thresholded in place (channel 0); callers that
    reuse `preds` afterwards see the binarized values - confirm intended.
    """
    # origin img
    # imgs.shape = (batch_size, 3, image_size, image_size)
    imgs = torch.stack([
        torch.Tensor(
            minmax_scaler_img(img_.to('cpu').numpy().transpose((1, 2, 0))))
        for img_ in imgs
    ])
    # Back to NCHW for make_grid.
    imgs = torch.Tensor(imgs.numpy().transpose((0, 3, 1, 2)))
    imgs_grid = torch_utils.make_grid(imgs)
    imgs_grid = torch.unsqueeze(imgs_grid, 0)
    # imgs_grid.shape = (3, image_size, image_size * batch_size)
    tfb_writer.add_images('{}/origin_imgs'.format(mode), imgs_grid,
                          global_steps)
    # pred_prob_map / pred_thresh_map
    pred_prob_map = preds[:, 0, :, :]
    pred_thred_map = preds[:, 1, :, :]
    # Binarize the probability map at `thresh`.
    pred_prob_map[pred_prob_map <= thresh] = 0
    pred_prob_map[pred_prob_map > thresh] = 1
    # make grid
    pred_prob_map = pred_prob_map.unsqueeze(1)
    pred_thred_map = pred_thred_map.unsqueeze(1)
    probs_grid = torch_utils.make_grid(pred_prob_map, padding=0)
    probs_grid = torch.unsqueeze(probs_grid, 0)
    probs_grid = probs_grid.detach().to('cpu')
    thres_grid = torch_utils.make_grid(pred_thred_map, padding=0)
    thres_grid = torch.unsqueeze(thres_grid, 0)
    thres_grid = thres_grid.detach().to('cpu')
    tfb_writer.add_images('{}/prob_imgs'.format(mode), probs_grid,
                          global_steps)
    tfb_writer.add_images('{}/thres_imgs'.format(mode), thres_grid,
                          global_steps)
def test_resize(img, size=640, pad=False):
    """Scale `img` so that its longer side equals `size` (aspect ratio
    preserved); with pad=True, place the result on a (size, size) canvas."""
    h, w, c = img.shape
    scale = min(size / w, size / h)
    new_h = int(h * scale)
    new_w = int(w * scale)
    if not pad:
        return cv2.resize(img, (new_w, new_h))
    canvas = np.zeros((size, size, c), img.dtype)
    canvas[:new_h, :new_w] = cv2.resize(img, (new_w, new_h))
    return canvas
def read_img(img_fp):
    """Load the image at `img_fp` and return (RGB image, height, width)."""
    bgr = cv2.imread(img_fp)
    rgb = bgr[:, :, ::-1]  # OpenCV loads BGR; flip channel order to RGB
    height, width, _ = rgb.shape
    return rgb, height, width
def test_preprocess(img,
                    mean=[103.939, 116.779, 123.68],
                    to_tensor=True,
                    pad=False):
    """Resize to 640, subtract the per-channel mean, add a batch axis and
    optionally return an NCHW torch tensor instead of an NHWC array."""
    img = test_resize(img, size=640, pad=pad)
    img = img.astype(np.float32)
    for channel, channel_mean in enumerate(mean):
        img[..., channel] -= channel_mean
    img = np.expand_dims(img, axis=0)
    if to_tensor:
        return torch.Tensor(img.transpose(0, 3, 1, 2))
    return img
def draw_bbox(img, result, color=(255, 0, 0), thickness=3):
    """
    :input: RGB img (or a path to load); each polygon in `result` is
    outlined on a copy of the image, which is returned.
    """
    if isinstance(img, str):
        img = cv2.imread(img)
    canvas = img.copy()
    for polygon in result:
        cv2.polylines(canvas, [polygon.astype(int)], True, color, thickness)
    return canvas
def visualize_heatmap(args, img_fn, tmp_img, tmp_pred):
    """Overlay the binarized probability map on the input image and save
    the figure as "heatmap_result_<img_fn>" in args.save_dir.

    NOTE(review): `tmp_pred[0]` is thresholded in place at args.prob_thred.
    """
    pred_prob = tmp_pred[0]
    pred_prob[pred_prob <= args.prob_thred] = 0
    pred_prob[pred_prob > args.prob_thred] = 1
    # CHW tensor -> HWC uint8 image for matplotlib.
    np_img = minmax_scaler_img(tmp_img[0].to(device).numpy().transpose(
        (1, 2, 0)))
    plt.imshow(np_img)
    plt.imshow(pred_prob, cmap='jet', alpha=args.alpha)
    img_fn = "heatmap_result_{}".format(img_fn)
    plt.savefig(os.path.join(args.save_dir, img_fn),
                dpi=200,
                bbox_inches='tight')
    gc.collect()
def visualize_polygon(args, img_fn, origin_info, batch, preds, vis_char=False):
    """Run DB post-processing on `preds`, draw the resulting boxes/polygons
    over the original image plus the probability heatmap, and save the
    figure to args.save_dir. With vis_char=True (rectangles only), each
    detected box is also perspective-warped and dumped to ./tmp/reconized.
    """
    img_origin, h_origin, w_origin = origin_info
    seg_obj = SegDetectorRepresenter(thresh=args.thresh,
                                     box_thresh=args.box_thresh,
                                     unclip_ratio=args.unclip_ratio)
    box_list, score_list = seg_obj(batch,
                                   preds,
                                   is_output_polygon=args.is_output_polygon)
    box_list, score_list = box_list[0], score_list[0]
    if len(box_list) > 0:
        # Drop all-zero (empty) detections, keeping scores aligned.
        if args.is_output_polygon:
            idx = [x.sum() > 0 for x in box_list]
            box_list = [box_list[i] for i, v in enumerate(idx) if v]
            score_list = [score_list[i] for i, v in enumerate(idx) if v]
        else:
            idx = box_list.reshape(box_list.shape[0], -1).sum(axis=1) > 0
            box_list, score_list = box_list[idx], score_list[idx]
    else:
        box_list, score_list = [], []
    tmp_img = draw_bbox(img_origin, np.array(box_list))
    # Upsample the probability map back to the original image size.
    tmp_pred = cv2.resize(preds[0, 0, :, :].cpu().numpy(),
                          (w_origin, h_origin))
    # https://stackoverflow.com/questions/42262198
    h_, w_ = 32, 100  # target size of each warped word crop
    if not args.is_output_polygon and vis_char:
        # Clear previous crops, then warp every detected box to (w_, h_).
        char_img_fps = glob.glob(os.path.join("./tmp/reconized", "*"))
        for char_img_fp in char_img_fps:
            os.remove(char_img_fp)
        for index, (box_list_,
                    score_list_) in enumerate(zip(box_list,
                                                  score_list)):  # noqa
            src_pts = np.array(box_list_.tolist(), dtype=np.float32)
            dst_pts = np.array([[0, 0], [w_, 0], [w_, h_], [0, h_]],
                               dtype=np.float32)
            M = cv2.getPerspectiveTransform(src_pts, dst_pts)
            warp = cv2.warpPerspective(img_origin, M, (w_, h_))
            imageio.imwrite("./tmp/reconized/word_{}.jpg".format(index), warp)
    plt.imshow(tmp_img)
    plt.imshow(tmp_pred, cmap='inferno', alpha=args.alpha)
    if args.is_output_polygon:
        img_fn = "poly_result_{}".format(img_fn)
    else:
        img_fn = "rect_result_{}".format(img_fn)
    plt.savefig(os.path.join(args.save_dir, img_fn),
                dpi=200,
                bbox_inches='tight')
    gc.collect()
| 8,632 | 3,239 |
# -*- coding: utf-8 -*-
from random import random
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.views.generic import TemplateView
from uncharted.chart import *
class Area100PercentStacked(TemplateView):
    """Demo view: 100%-stacked area chart of traffic incidents per year.

    Subclasses below reuse `chartData` and override `get_context_data`.
    NOTE(review): the first data point repeats year 2000 (also present
    later in the series) - looks like it should be 1994; confirm.
    """
    template_name = 'area/chart.html'
    chartData = [
        {
            'year': 2000,
            'cars': 1587,
            'motorcycles': 650,
            'bicycles': 121
        }, {
            'year': 1995,
            'cars': 1567,
            'motorcycles': 683,
            'bicycles': 146
        }, {
            'year': 1996,
            'cars': 1617,
            'motorcycles': 691,
            'bicycles': 138
        }, {
            'year': 1997,
            'cars': 1630,
            'motorcycles': 642,
            'bicycles': 127
        }, {
            'year': 1998,
            'cars': 1660,
            'motorcycles': 699,
            'bicycles': 105
        }, {
            'year': 1999,
            'cars': 1683,
            'motorcycles': 721,
            'bicycles': 109
        }, {
            'year': 2000,
            'cars': 1691,
            'motorcycles': 737,
            'bicycles': 112
        }, {
            'year': 2001,
            'cars': 1298,
            'motorcycles': 680,
            'bicycles': 101
        }, {
            'year': 2002,
            'cars': 1275,
            'motorcycles': 664,
            'bicycles': 97
        }, {
            'year': 2003,
            'cars': 1246,
            'motorcycles': 648,
            'bicycles': 93
        }, {
            'year': 2004,
            'cars': 1218,
            'motorcycles': 637,
            'bicycles': 101
        }, {
            'year': 2005,
            'cars': 1213,
            'motorcycles': 633,
            'bicycles': 87
        }, {
            'year': 2006,
            'cars': 1199,
            'motorcycles': 621,
            'bicycles': 79
        }, {
            'year': 2007,
            'cars': 1110,
            'motorcycles': 210,
            'bicycles': 81
        }, {
            'year': 2008,
            'cars': 1165,
            'motorcycles': 232,
            'bicycles': 75
        }, {
            'year': 2009,
            'cars': 1145,
            'motorcycles': 219,
            'bicycles': 88
        }, {
            'year': 2010,
            'cars': 1163,
            'motorcycles': 201,
            'bicycles': 82
        }, {
            'year': 2011,
            'cars': 1180,
            'motorcycles': 285,
            'bicycles': 87
        }, {
            'year': 2012,
            'cars': 1159,
            'motorcycles': 277,
            'bicycles': 71
        }]

    def get_context_data(self, *args, **kwargs):
        """Build the amSerialChart object and expose it as context['chart']."""
        context = super(Area100PercentStacked, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        chart.zoomOutButton = {
            'backgroundColor': "#000000",
            'backgroundAlpha': 0.15,
        }
        chart.addTitle("Traffic incidents per year", 15)
        # AXES
        # Category
        chart.categoryAxis.gridAlpha = 0.07
        chart.categoryAxis.axisColor = "#DADADA"
        chart.categoryAxis.startOnAxis = True
        # Value
        valueAxis = amValueAxis(title="percent", stackType="100%", gridAlpha=0.07)
        chart.addValueAxis(valueAxis)
        # GRAPHS
        # first graph
        graph = amGraph(
            type="line",
            title="Cars",
            valueField="cars",
            balloonText="[[value]] ([[percents]]%)",
            lineAlpha=0,
            fillAlphas=0.6,
        )
        chart.addGraph(graph)
        # second graph
        graph = amGraph(
            type="line",
            title="Motorcycles",
            valueField="motorcycles",
            balloonText="[[value]] ([[percents]]%)",
            lineAlpha=0,
            fillAlphas=0.6,
        )
        chart.addGraph(graph)
        # third graph
        graph = amGraph(
            type="line",
            title="Bicycles",
            valueField="bicycles",
            balloonText="[[value]] ([[percents]]%)",
            lineAlpha=0,
            fillAlphas=0.6,
        )
        chart.addGraph(graph)
        # LEGEND
        legend = amLegend(align="center")
        chart.addLegend(legend)
        # CURSOR
        chartCursor = amChartCursor(zoomable=False, cursorAlpha=0)
        chart.addChartCursor(chartCursor)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
area100PercentStacked = Area100PercentStacked.as_view()
class AreaStacked(Area100PercentStacked):
    """Regular (non-percent) stacked area chart with two event guides."""

    def get_context_data(self, *args, **kwargs):
        """Build the stacked-area amSerialChart as context['chart']."""
        context = super(AreaStacked, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            marginTop=10,
            dataProvider=self.chartData,
            categoryField="year",
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        chart.zoomOutButton = {
            'backgroundColor': "#000000",
            'backgroundAlpha': 0.15,
        }
        # AXES
        # Category
        chart.categoryAxis.gridAlpha = 0.07
        chart.categoryAxis.axisColor = "#DADADA"
        chart.categoryAxis.startOnAxis = True
        # Value
        valueAxis = amValueAxis(
            title="Traffic incidents",
            stackType="regular", # this line makes the chart "stacked"
            gridAlpha=0.07,
        )
        chart.addValueAxis(valueAxis)
        # GUIDES are vertical (can also be horizontal) lines (or areas) marking some event.
        # first guide
        guide1 = amGuide(
            category="2001",
            lineColor="#CC0000",
            lineAlpha=1,
            dashLength=2,
            inside=True,
            labelRotation=90,
            label="fines for speeding increased",
        )
        chart.categoryAxis.addGuide(guide1);
        # second guide
        guide2 = amGuide(
            category="2007",
            lineColor="#CC0000",
            lineAlpha=1,
            dashLength=2,
            inside=True,
            labelRotation=90,
            label="motorcycle maintenance fee introduced",
        )
        chart.categoryAxis.addGuide(guide2);
        # GRAPHS
        # first graph
        graph = amGraph(
            type="line",
            title="Cars",
            valueField="cars",
            balloonText="[[value]] ([[percents]]%)",
            lineAlpha=1,
            fillAlphas=0.6, # setting fillAlphas to > 0 value makes it area graph
            hidden=True,
        )
        chart.addGraph(graph)
        # second graph
        graph = amGraph(
            type="line",
            title="Motorcycles",
            valueField="motorcycles",
            balloonText="[[value]] ([[percents]]%)",
            lineAlpha=1,
            fillAlphas=0.6,
        )
        chart.addGraph(graph)
        # third graph
        graph = amGraph(
            type="line",
            title="Bicycles",
            valueField="bicycles",
            balloonText="[[value]] ([[percents]]%)",
            lineAlpha=1,
            fillAlphas=0.6,
        )
        chart.addGraph(graph)
        # LEGEND
        legend = amLegend(position="top")
        chart.addLegend(legend)
        # CURSOR
        chartCursor = amChartCursor(zoomable=False, cursorAlpha=0)
        chart.addChartCursor(chartCursor)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
areaStacked = AreaStacked.as_view()
class AreaWithTimeBasedData(Area100PercentStacked):
    """Area chart over 1000 minutes of synthetic per-minute visit counts."""

    @property
    def chartData(self):
        # NOTE(review): `xrange` is Python 2 only (consistent with the
        # old-style super() calls in this file); under Python 3 this would
        # raise NameError - confirm the target interpreter.
        output = []
        d = timezone.now() - timedelta(minutes=1000)
        for i in xrange(0, 1000):
            d = d + timedelta(minutes=1)
            value = int((random() * 40) + 10)  # random visits in [10, 50)
            output.append({
                'date': d,#.isoformat(),
                'visits': value,
            })
        return output

    def get_context_data(self, *args, **kwargs):
        """Build a date-based amSerialChart with cursor and scrollbar."""
        context = super(AreaWithTimeBasedData, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            marginRight=30,
            dataProvider=self.chartData,
            categoryField="date",
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        chart.zoomOutButton = {
            'backgroundColor': "#000000",
            'backgroundAlpha': 0.15,
        }
        chart.addListener("dataUpdated", "zoomChart");
        # AXES
        # Category
        chart.categoryAxis.parseDates = True
        chart.categoryAxis.minPeriod = "mm"
        chart.categoryAxis.gridAlpha = 0.07
        chart.categoryAxis.axisColor = "#DADADA"
        # Value
        valueAxis = amValueAxis(
            title="Unique visitors",
            gridAlpha=0.07,
        )
        chart.addValueAxis(valueAxis)
        # GRAPHS
        # first graph
        graph = amGraph(
            type="line",
            title="red line",
            valueField="visits",
            lineAlpha=1,
            lineColor="#d1cf2a",
            fillAlphas=0.3, # setting fillAlphas to > 0 value makes it area graph
        )
        chart.addGraph(graph)
        # CURSOR
        chartCursor = amChartCursor(
            cursorPosition="mouse",
            categoryBalloonDateFormat="JJ:NN, DD MMMM",
        )
        chart.addChartCursor(chartCursor)
        # SCROLLBAR
        chartScrollbar = amChartScrollbar()
        chart.addChartScrollbar(chartScrollbar)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
areaWithTimeBasedData = AreaWithTimeBasedData.as_view()
class Bar3D(TemplateView):
    """3D rotated bar chart of yearly income; base class for the bar demos."""
    template_name = 'bar/chart.html'
    chartData = [
        {
            'year': 2005,
            'income': 23.5
        }, {
            'year': 2006,
            'income': 26.2
        }, {
            'year': 2007,
            'income': 30.1
        }, {
            'year': 2008,
            'income': 29.5
        }, {
            'year': 2009,
            'income': 24.6
        }]

    def get_context_data(self, *args, **kwargs):
        """Build the 3D bar amSerialChart as context['chart']."""
        context = super(Bar3D, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            rotate=True,
            depth3D=20,
            angle=30,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # AXES
        # Category
        chart.categoryAxis.gridPosition = "start"
        chart.categoryAxis.axisColor = "#DADADA"
        chart.categoryAxis.fillAlpha = 1
        chart.categoryAxis.gridAlpha = 0
        chart.categoryAxis.fillColor = "#FAFAFA"
        # Value
        valueAxis = amValueAxis(title="Income in millions, USD", axisColor="#DADADA", gridAlpha=0.1)
        chart.addValueAxis(valueAxis)
        # GRAPHS
        graph = amGraph(
            type="column",
            title="Income",
            valueField="income",
            balloonText="Income in [[category]]:[[value]]",
            lineAlpha=0,
            fillColors=["#bf1c25"],
            fillAlphas=1,
        )
        chart.addGraph(graph)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
bar3D = Bar3D.as_view()
class BarAndLineMix(Bar3D):
    """Mixed chart: income as columns, expenses as a bulleted line."""
    chartData = [
        {
            'year': 2005,
            'income': 23.5,
            'expenses': 18.1
        }, {
            'year': 2006,
            'income': 26.2,
            'expenses': 22.8
        }, {
            'year': 2007,
            'income': 30.1,
            'expenses': 23.9
        }, {
            'year': 2008,
            'income': 29.5,
            'expenses': 25.1
        }, {
            'year': 2009,
            'income': 24.6,
            'expenses': 25.0
        }]

    def get_context_data(self, *args, **kwargs):
        """Build the column+line amSerialChart as context['chart']."""
        context = super(BarAndLineMix, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            startDuration=1,
            rotate=True,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # AXES
        # Category
        chart.categoryAxis.gridPosition = "start"
        chart.categoryAxis.axisColor = "#DADADA"
        chart.categoryAxis.dashLength = 5
        # Value
        valueAxis = amValueAxis(
            title="Million USD",
            dashLength=5,
            axisAlpha=0.2,
            position="top",
        )
        chart.addValueAxis(valueAxis)
        # GRAPHS
        # column graph
        graph1 = amGraph(
            type="column",
            title="Income",
            valueField="income",
            lineAlpha=0,
            fillColors=["#ADD981"],
            fillAlphas=1,
        )
        chart.addGraph(graph1)
        # line graph
        graph2 = amGraph(
            type="line",
            title="Expenses",
            valueField="expenses",
            lineThickness=2,
            bullet="round",
            fillAlphas=0,
        )
        chart.addGraph(graph2)
        # LEGEND
        legend = amLegend()
        chart.addLegend(legend)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
barAndLineMix = BarAndLineMix.as_view()
class BarClustered(BarAndLineMix):
    """Clustered bar chart: income and expenses as side-by-side columns."""

    def get_context_data(self, *args, **kwargs):
        """Build the clustered-column amSerialChart as context['chart']."""
        context = super(BarClustered, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            startDuration=1,
            plotAreaBorderColor="#DADADA",
            plotAreaBorderAlpha=1,
            rotate=True,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # AXES
        # Category
        chart.categoryAxis.gridPosition = "start"
        chart.categoryAxis.gridAlpha = 0.1
        chart.categoryAxis.axisAlpha = 0
        # Value
        valueAxis = amValueAxis(
            axisAlpha=0,
            gridAlpha=0.1,
            position="top",
        )
        chart.addValueAxis(valueAxis)
        # GRAPHS
        # first graph
        graph1 = amGraph(
            type="column",
            title="Income",
            valueField="income",
            balloonText="Income:[[value]]",
            lineAlpha=0,
            fillColors=["#ADD981"],
            fillAlphas=1,
        )
        chart.addGraph(graph1)
        # second graph
        graph2 = amGraph(
            type="column",
            title="Expenses",
            valueField="expenses",
            balloonText="Expenses:[[value]]",
            lineAlpha=0,
            fillColors=["#81acd9"],
            fillAlphas=1,
        )
        chart.addGraph(graph2)
        # LEGEND
        legend = amLegend()
        chart.addLegend(legend)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
barClustered = BarClustered.as_view()
class BarFloating(BarClustered):
    """Floating bar chart: per-person working hours as open/close columns."""
    template_name = 'area/chart.html'
    chartData = [
        {
            'name': "John",
            'startTime': 8,
            'endTime': 11,
            'color': "#FF0F00"
        }, {
            'name': "Joe",
            'startTime': 10,
            'endTime': 13,
            'color': "#FF9E01"
        }, {
            'name': "Susan",
            'startTime': 11,
            'endTime': 18,
            'color': "#F8FF01"
        }, {
            'name': "Eaton",
            'startTime': 15,
            'endTime': 19,
            'color': "#04D215"
        }]

    def get_context_data(self, *args, **kwargs):
        """Build the floating-column amSerialChart as context['chart']."""
        context = super(BarFloating, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="name",
            startDuration=1,
            columnWidth=0.9,
            rotate=True,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # AXES
        # Category
        chart.categoryAxis.gridPosition = "start"
        chart.categoryAxis.gridAlpha = 0.1
        chart.categoryAxis.axisAlpha = 0
        # Value
        valueAxis = amValueAxis(
            axisAlpha=0,
            gridAlpha=0.1,
            unit=":00",
        )
        chart.addValueAxis(valueAxis)
        # GRAPHS
        # openField/valueField make each column span startTime..endTime.
        graph1 = amGraph(
            type="column",
            valueField="endTime",
            openField="startTime",
            balloonText="Income:[[value]]",
            lineAlpha=0,
            colorField="color",
            fillAlphas=0.8,
        )
        chart.addGraph(graph1)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
barFloating = BarFloating.as_view()
class BarStacked(BarFloating):
    """Stacked bar chart of regional values per year, one graph per region."""
    template_name = 'bar/3d.html'
    chartData = [
        {
            'year': "2003",
            'europe': 2.5,
            'namerica': 2.5,
            'asia': 2.1,
            'lamerica': 0.3,
            'meast': 0.2,
            'africa': 0.1
        }, {
            'year': "2004",
            'europe': 2.6,
            'namerica': 2.7,
            'asia': 2.2,
            'lamerica': 0.3,
            'meast': 0.3,
            'africa': 0.1
        }, {
            'year': "2005",
            'europe': 2.8,
            'namerica': 2.9,
            'asia': 2.4,
            'lamerica': 0.3,
            'meast': 0.3,
            'africa': 0.1
        }]

    def get_context_data(self, *args, **kwargs):
        """Build the regular-stacked bar amSerialChart as context['chart']."""
        context = super(BarStacked, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            plotAreaBorderAlpha=0.2,
            rotate=True,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # AXES
        # Category
        chart.categoryAxis.gridPosition = "start"
        chart.categoryAxis.gridAlpha = 0.1
        chart.categoryAxis.axisAlpha = 0
        # Value
        valueAxis = amValueAxis(
            axisAlpha=0,
            gridAlpha=0.1,
            stackType="regular",
        )
        chart.addValueAxis(valueAxis)
        # GRAPHS
        # first graph
        graph1 = amGraph(
            type="column",
            title="Europe",
            labelText="[[value]]",
            valueField="europe",
            lineAlpha=0,
            fillAlphas=1,
            lineColor="#C72C95",
        )
        chart.addGraph(graph1)
        # second graph
        graph2 = amGraph(
            type="column",
            title="North America",
            labelText="[[value]]",
            valueField="namerica",
            lineAlpha=0,
            fillAlphas=1,
            lineColor="#D8E0BD",
        )
        chart.addGraph(graph2)
        # third graph
        graph3 = amGraph(
            type="column",
            title="Asia-Pacific",
            labelText="[[value]]",
            valueField="asia",
            lineAlpha=0,
            fillAlphas=1,
            lineColor="#B3DBD4",
        )
        chart.addGraph(graph3)
        # forth graph
        graph4 = amGraph(
            type="column",
            title="Latin America",
            labelText="[[value]]",
            valueField="lamerica",
            lineAlpha=0,
            fillAlphas=1,
            lineColor="#69A55C",
        )
        chart.addGraph(graph4)
        # fifth graph
        graph5 = amGraph(
            type="column",
            title="Middle-East",
            labelText="[[value]]",
            valueField="meast",
            lineAlpha=0,
            fillAlphas=1,
            lineColor="#B5B8D3",
        )
        chart.addGraph(graph5)
        # sixth graph
        graph6 = amGraph(
            type="column",
            title="Africa",
            labelText="[[value]]",
            valueField="africa",
            lineAlpha=0,
            fillAlphas=1,
            lineColor="#F4E23B",
        )
        chart.addGraph(graph6)
        # LEGEND
        legend = amLegend()
        legend.position = "right"
        legend.borderAlpha = 0.3
        legend.horizontalGap = 10
        legend.switchType = "v"
        chart.addLegend(legend)
        context['chart'] = chart
        return context

# URLconf entry point for this view.
barStacked = BarStacked.as_view()
class BarWithBackgroundImage(BarStacked):
    """Bar chart of beer consumption drawn over a background image."""
    template_name = 'bar/bg.html'
    chartData = [
        {
            'country': "Czech Republic",
            'litres': 156.90,
            'short': "CZ"
        }, {
            'country': "Ireland",
            'litres': 131.10,
            'short': "IR"
        }, {
            'country': "Germany",
            'litres': 115.80,
            'short': "DE"
        }, {
            'country': "Australia",
            'litres': 109.90,
            'short': "AU"
        }, {
            'country': "Austria",
            'litres': 108.30,
            'short': "AT"
        }, {
            'country': "UK",
            'litres': 99.00,
            'short': "UK"
        }, {
            'country': "Belgium",
            'litres': 93.00,
            'short': "BE"
        }]

    def get_context_data(self, *args, **kwargs):
        """Build the background-image bar amSerialChart as context['chart']."""
        context = super(BarWithBackgroundImage, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="country",
            color="#FFFFFF",
            rotate=True,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # this line makes the chart to show image in the background
        chart.backgroundImage = "%simages/bg.jpg" % settings.STATIC_URL
        # sometimes we need to set margins manually
        # autoMargins should be set to false in order chart to use custom margin values
        chart.autoMargins = False
        chart.marginTop = 100
        chart.marginLeft = 50
        chart.marginRight = 30
        chart.startDuration = 2
        # AXES
        # Category
        chart.categoryAxis.labelsEnabled = False
        chart.categoryAxis.gridAlpha = 0
        chart.categoryAxis.axisAlpha = 0
        # Value
        valueAxis = amValueAxis(
            axisAlpha=0,
            gridAlpha=0,
            labelsEnabled=False,
            minimum=0,
        )
        chart.addValueAxis(valueAxis)
        # GRAPHS
        graph = amGraph(
            type="column",
            valueField="litres",
            lineAlpha=0,
            fillAlphas=0.5,
            # you can pass any number of colors in array to create more fancy gradients
            fillColors=["#000000", "#FF6600"],
            gradientOrientation="horizontal",
            labelPosition="bottom",
            labelText="[[category]]: [[value]] Litres",
            balloonText="[[category]]: [[value]] Litres",
        )
        chart.addGraph(graph)
        # LABEL
        chart.addLabel(50, 40, "Beer Consumption by country", "left", 15, "#000000", 0, 1, True);
        context['chart'] = chart
        return context

# URLconf entry point for this view.
barWithBackgroundImage = BarWithBackgroundImage.as_view()
class Column100PercentStacked(TemplateView):
    """100% stacked column chart: energy demand share per region per year."""

    template_name = 'column/stacked.html'
    chartData = [
        {"year": "2003", "europe": 2.5, "namerica": 2.5, "asia": 2.1,
         "lamerica": 0.3, "meast": 0.2, "africa": 0.1},
        {"year": "2004", "europe": 2.6, "namerica": 2.7, "asia": 2.2,
         "lamerica": 0.3, "meast": 0.3, "africa": 0.1},
        {"year": "2005", "europe": 2.8, "namerica": 2.9, "asia": 2.4,
         "lamerica": 0.3, "meast": 0.3, "africa": 0.1}]

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with a 100%-stacked ``chart`` entry."""
        context = super(Column100PercentStacked, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # Manual margins: autoMargins must be off for these to take effect.
        chart.autoMargins = False
        chart.marginLeft = 0
        chart.marginRight = 0
        chart.marginTop = 30
        chart.marginBottom = 40
        # Category axis: invisible grid/axis, gridlines at column starts.
        chart.categoryAxis.gridAlpha = 0
        chart.categoryAxis.axisAlpha = 0
        chart.categoryAxis.gridPosition = "start"
        # Value axis -- stackType "100%" is what makes the chart 100% stacked.
        valueAxis = amValueAxis(
            stackType="100%",
            gridAlpha=0,
            axisAlpha=0,
            labelsEnabled=False,
        )
        chart.addValueAxis(valueAxis)
        # One column series per region, added in drawing order.
        for series_title, field_name, outline_color in (
                ("Europe", "europe", "#C72C95"),
                ("North America", "namerica", "#D8E0BD"),
                ("Asia-Pacific", "asia", "#B3DBD4"),
                ("Latin America", "lamerica", "#69A55C"),
                ("Middle-East", "meast", "#B5B8D3"),
                ("Africa", "africa", "#F4E23B")):
            chart.addGraph(amGraph(
                title=series_title,
                labelText="[[percents]]%",
                balloonText="[[value]] ([[percents]]%)",
                valueField=field_name,
                type="column",
                lineAlpha=0,
                fillAlphas=1,
                lineColor=outline_color,
            ))
        # Legend with manual margins to line up with the plot area.
        legend = amLegend(
            borderAlpha=0.2,
            horizontalGap=10,
            autoMargins=False,
            marginLeft=30,
            marginRight=30,
            switchType="v",
        )
        chart.addLegend(legend)
        context['chart'] = chart
        return context


column100PercentStacked = Column100PercentStacked.as_view()
class Column3D(Column100PercentStacked):
    """3D column chart of site visits per country, one color per column."""

    template_name = 'column/chart.html'
    chartData = [
        {"country": "USA", "visits": 4025, "color": "#FF0F00"},
        {"country": "China", "visits": 1882, "color": "#FF6600"},
        {"country": "Japan", "visits": 1809, "color": "#FF9E01"},
        {"country": "Germany", "visits": 1322, "color": "#FCD202"},
        {"country": "UK", "visits": 1122, "color": "#F8FF01"},
        {"country": "France", "visits": 1114, "color": "#B0DE09"},
        {"country": "India", "visits": 984, "color": "#04D215"},
        {"country": "Spain", "visits": 711, "color": "#0D8ECF"},
        {"country": "Netherlands", "visits": 665, "color": "#0D52D1"},
        {"country": "Russia", "visits": 580, "color": "#2A0CD0"},
        {"country": "South Korea", "visits": 443, "color": "#8A0CCF"},
        {"country": "Canada", "visits": 441, "color": "#CD0D74"},
        {"country": "Brazil", "visits": 395, "color": "#754DEB"},
        {"country": "Italy", "visits": 386, "color": "#DDDDDD"},
        {"country": "Australia", "visits": 384, "color": "#999999"},
        {"country": "Taiwan", "visits": 338, "color": "#333333"},
        {"country": "Poland", "visits": 328, "color": "#000000"}]

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with a 3D ``chart`` entry."""
        context = super(Column3D, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="country",
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # depth3D and angle together give the chart its 3D look.
        chart.depth3D = 20
        chart.angle = 30
        # Category axis: vertical labels, dashed gridlines at column starts.
        chart.categoryAxis.labelRotation = 90
        chart.categoryAxis.dashLength = 5
        chart.categoryAxis.gridPosition = "start"
        # Value axis with dashed gridlines.
        chart.addValueAxis(amValueAxis(dashLength=5))
        # Single column series, colored per data point via colorField.
        chart.addGraph(amGraph(
            type="column",
            valueField="visits",
            colorField="color",
            lineAlpha=0,
            fillAlphas=1,
            balloonText="[[category]]: [[value]]",
        ))
        context['chart'] = chart
        return context


column3D = Column3D.as_view()
class Column3DStacked(Column100PercentStacked):
    """3D stacked columns: GDP growth rate per country, 2004 vs 2005."""

    template_name = 'column/3d.html'
    chartData = [
        {"country": "USA", "year2004": 3.5, "year2005": 4.2},
        {"country": "UK", "year2004": 1.7, "year2005": 3.1},
        {"country": "Canada", "year2004": 2.8, "year2005": 2.9},
        {"country": "Japan", "year2004": 2.6, "year2005": 2.3},
        {"country": "France", "year2004": 1.4, "year2005": 2.1},
        {"country": "Brazil", "year2004": 2.6, "year2005": 4.9},
        {"country": "Russia", "year2004": 6.4, "year2005": 7.2},
        {"country": "India", "year2004": 8.0, "year2005": 7.1},
        {"country": "China", "year2004": 9.9, "year2005": 10.1}]

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with a 3D-stacked ``chart`` entry."""
        context = super(Column3DStacked, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="country",
            color="#FFFFFF",
            startDuration=1,
            plotAreaFillAlphas=0.2,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # angle + depth3D give the 3D perspective.
        chart.angle = 30
        chart.depth3D = 60
        # Category axis: white, semi-transparent, dashed gridlines.
        chart.categoryAxis.gridAlpha = 0.2
        chart.categoryAxis.gridPosition = "start"
        chart.categoryAxis.gridColor = "#FFFFFF"
        chart.categoryAxis.axisColor = "#FFFFFF"
        chart.categoryAxis.axisAlpha = 0.5
        chart.categoryAxis.dashLength = 5
        # Value axis -- stackType "3d" places the columns one behind another.
        chart.addValueAxis(amValueAxis(
            stackType="3d",
            gridAlpha=0.2,
            gridColor="#FFFFFF",
            axisColor="#FFFFFF",
            axisAlpha=0.5,
            dashLength=5,
            title="GDP growth rate",
            titleBold=False,
            unit="%",
        ))
        # One column series per year.
        for year_label, field_name, outline_color in (
                ("2004", "year2004", "#D2CB00"),
                ("2005", "year2005", "#BEDF66")):
            chart.addGraph(amGraph(
                title=year_label,
                valueField=field_name,
                type="column",
                lineAlpha=0,
                lineColor=outline_color,
                fillAlphas=1,
                balloonText="GDP grow in [[category]] (%s): [[value]]" % year_label,
            ))
        context['chart'] = chart
        return context


column3DStacked = Column3DStacked.as_view()
class ColumnAndLineMix(Column100PercentStacked):
    """Mixed chart: income as columns combined with expenses as a line."""

    chartData = [
        {"year": 2005, "income": 23.5, "expenses": 18.1},
        {"year": 2006, "income": 26.2, "expenses": 22.8},
        {"year": 2007, "income": 30.1, "expenses": 23.9},
        {"year": 2008, "income": 29.5, "expenses": 25.1},
        {"year": 2009, "income": 24.6, "expenses": 25.0}]

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with the mixed column/line ``chart``."""
        context = super(ColumnAndLineMix, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            startDuration=1,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # Category axis gridlines at column starts.
        chart.categoryAxis.gridPosition = "start"
        # Value axis without an axis line or ticks.
        chart.addValueAxis(amValueAxis(axisAlpha=0, tickLength=0))
        # Column series for income ...
        chart.addGraph(amGraph(
            type="column",
            title="Income",
            valueField="income",
            lineAlpha=0,
            fillAlphas=1,
        ))
        # ... mixed with a bulleted line series for expenses.
        chart.addGraph(amGraph(
            type="line",
            title="Expenses",
            valueField="expenses",
            lineThickness=2,
            bullet="round",
        ))
        # Default legend.
        chart.addLegend(amLegend())
        context['chart'] = chart
        return context


columnAndLineMix = ColumnAndLineMix.as_view()
class ColumnWithRotatedSeries(Column100PercentStacked):
    """Column chart whose category labels are rotated 45 degrees."""

    template_name = 'column/chart.html'
    chartData = [
        {"country": "USA", "visits": 3025, "color": "#FF0F00"},
        {"country": "China", "visits": 1882, "color": "#FF6600"},
        {"country": "Japan", "visits": 1809, "color": "#FF9E01"},
        {"country": "Germany", "visits": 1322, "color": "#FCD202"},
        {"country": "UK", "visits": 1122, "color": "#F8FF01"},
        {"country": "France", "visits": 1114, "color": "#B0DE09"},
        {"country": "India", "visits": 984, "color": "#04D215"},
        {"country": "Spain", "visits": 711, "color": "#0D8ECF"},
        {"country": "Netherlands", "visits": 665, "color": "#0D52D1"},
        {"country": "Russia", "visits": 580, "color": "#2A0CD0"},
        {"country": "South Korea", "visits": 443, "color": "#8A0CCF"},
        {"country": "Canada", "visits": 441, "color": "#CD0D74"}]

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with the rotated-labels ``chart``."""
        context = super(ColumnWithRotatedSeries, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="country",
            startDuration=1,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # labelRotation is the point of this example: slanted category labels.
        chart.categoryAxis.labelRotation = 45
        chart.categoryAxis.gridAlpha = 0
        chart.categoryAxis.fillAlpha = 1
        chart.categoryAxis.fillColor = "#FAFAFA"
        chart.categoryAxis.gridPosition = "start"
        # Value axis: dashed gridlines, titled, no axis line.
        chart.addValueAxis(amValueAxis(
            dashLength=5,
            title="Visitors from country",
            axisAlpha=0,
        ))
        # Single column series, colored per data point.
        chart.addGraph(amGraph(
            valueField="visits",
            colorField="color",
            balloonText="[[category]]: [[value]]",
            type="column",
            lineAlpha=0,
            fillAlphas=1,
        ))
        context['chart'] = chart
        return context


columnWithRotatedSeries = ColumnWithRotatedSeries.as_view()
class ColumnSimple(Column3D):
    """Minimal column chart; reuses the visit data inherited from Column3D."""

    template_name = 'column/chart.html'

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with a plain column ``chart``."""
        context = super(ColumnSimple, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="country",
            startDuration=1,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # Category axis: vertical labels, gridlines at column starts.
        chart.categoryAxis.labelRotation = 90
        chart.categoryAxis.gridPosition = "start"
        # No explicit value axis: default settings are fine, and one value
        # axis is created automatically when none is added.
        chart.addGraph(amGraph(
            valueField="visits",
            balloonText="[[category]]: [[value]]",
            type="column",
            lineAlpha=0,
            fillAlphas=0.8,
        ))
        context['chart'] = chart
        return context


columnSimple = ColumnSimple.as_view()
class ColumnStacked(Column100PercentStacked):
    """Regular (absolute-value) stacked column chart of the region data."""

    template_name = 'column/chart.html'

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with a regular-stacked ``chart``."""
        context = super(ColumnStacked, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="year",
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # Category axis: faint grid, no axis line.
        chart.categoryAxis.gridAlpha = 0.1
        chart.categoryAxis.axisAlpha = 0
        chart.categoryAxis.gridPosition = "start"
        # stackType "regular" stacks raw values (contrast with "100%").
        chart.addValueAxis(amValueAxis(
            stackType="regular",
            gridAlpha=0.1,
            axisAlpha=0,
        ))
        # One column series per region, added in drawing order.
        for series_title, field_name, outline_color in (
                ("Europe", "europe", "#C72C95"),
                ("North America", "namerica", "#D8E0BD"),
                ("Asia-Pacific", "asia", "#B3DBD4")):
            chart.addGraph(amGraph(
                title=series_title,
                labelText="[[value]]",
                balloonText="[[value]]",
                valueField=field_name,
                type="column",
                lineAlpha=0,
                fillAlphas=1,
                lineColor=outline_color,
            ))
        # Default legend.
        chart.addLegend(amLegend())
        context['chart'] = chart
        return context


columnStacked = ColumnStacked.as_view()
class ColumnWithGradient(BarWithBackgroundImage):
    """Gradient-filled columns; reuses the inherited beer-consumption data."""

    template_name = 'column/chart.html'

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with a gradient-fill ``chart``."""
        context = super(ColumnWithGradient, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="country",
            startDuration=2,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # Balloon text would otherwise use the default (light) color.
        chart.balloon.color = "#000000"
        # Category axis completely hidden.
        chart.categoryAxis.gridAlpha = 0
        chart.categoryAxis.axisAlpha = 0
        chart.categoryAxis.labelsEnabled = False
        # Value axis: invisible, anchored at zero.
        chart.addValueAxis(amValueAxis(
            gridAlpha=0,
            axisAlpha=0,
            labelsEnabled=False,
            minimum=0,
        ))
        # Single gradient-filled column series labelled by the short code.
        chart.addGraph(amGraph(
            balloonText="[[category]]: [[value]] Litres",
            valueField="litres",
            descriptionField="short",
            type="column",
            lineAlpha=0,
            fillAlphas=1,
            fillColors=["#ffe78e", "#bf1c25"],
            labelText="[[description]]",
        ))
        context['chart'] = chart
        return context


columnWithGradient = ColumnWithGradient.as_view()
class ColumnWithImagesOnTop(Column100PercentStacked):
    """Columns topped with per-row bullet images (player avatars)."""

    template_name = 'column/chart.html'
    chartData = [
        {"name": "John", "points": 35654, "color": "#7F8DA9",
         "bullet": "%simages/0.gif" % settings.STATIC_URL},
        {"name": "Damon", "points": 65456, "color": "#FEC514",
         "bullet": "%simages/1.gif" % settings.STATIC_URL},
        {"name": "Patrick", "points": 45724, "color": "#DB4C3C",
         "bullet": "%simages/2.gif" % settings.STATIC_URL},
        {"name": "Mark", "points": 13654, "color": "#DAF0FD",
         "bullet": "%simages/3.gif" % settings.STATIC_URL}]

    def get_context_data(self, *args, **kwargs):
        """Extend the parent context with the image-bullet ``chart``."""
        context = super(ColumnWithImagesOnTop, self).get_context_data(*args, **kwargs)
        chart = amSerialChart(
            name='chart',
            dataProvider=self.chartData,
            categoryField="name",
            startDuration=1,
            pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
        )
        # Manual margins: autoMargins must be off for these to take effect.
        chart.autoMargins = False
        chart.marginRight = 0
        chart.marginLeft = 0
        # Category axis drawn inside the plot area, fully flat.
        chart.categoryAxis.inside = True
        chart.categoryAxis.axisAlpha = 0
        chart.categoryAxis.gridAlpha = 0
        chart.categoryAxis.tickLength = 0
        # Fixed value range so the bullets always fit above the columns.
        chart.addValueAxis(amValueAxis(
            minimum=0,
            axisAlpha=0,
            gridAlpha=0,
            maximum=80000,
        ))
        chart.addGraph(amGraph(
            valueField="points",
            customBulletField="bullet",  # field of the bullet in the data provider
            bulletOffset=16,  # distance from the top of the column to the bullet
            colorField="color",
            bulletSize=34,  # bullet image should be square (width == height)
            type="column",
            fillAlphas=0.8,
            cornerRadiusTop=8,
            lineAlpha=0,
        ))
        context['chart'] = chart
        return context


columnWithImagesOnTop = ColumnWithImagesOnTop.as_view()
| 45,021 | 14,230 |
from dotenv import load_dotenv
import os
import discord
from generator import (
GeneratorProcess,
GenerateRequest,
StopRequest,
ResponseType,
)
from multiprocessing import Pipe
import util
import asyncio
# Load environment variables (e.g. DISCORD_TOKEN) from a local .env file.
load_dotenv()
# Channel names the bot is allowed to respond in.
ALLOWED_CHANNELS = {"secret-channel-name", "beyond-ideas"}
TEST_SERVER_ID = 748228407472423015
MATHNEWS_SERVER_ID = 739575273443426305
# Guild (server) ids the bot is allowed to respond in.
ALLOWED_SERVER_IDS = {TEST_SERVER_ID, MATHNEWS_SERVER_ID}
# Message prefix that triggers an idea-generation request.
COMMAND = "!idea"
# Seconds between polls of the generator worker's pipe.
RESP_CHECK_INTERVAL_S = 1
class IdeaBotClient(discord.Client):
    """Discord client that forwards "!idea" requests to a generator worker.

    Generation runs in a separate process (``GeneratorProcess``); requests
    and responses travel over a multiprocessing ``Pipe`` that is polled by a
    background asyncio task, so the event loop is never blocked.
    """

    def __init__(self):
        super().__init__()
        self.logger = util.create_logger("idea-bot")
        # Parent end stays here; the child end goes to the worker process.
        parent_conn, child_conn = Pipe()
        self.conn = parent_conn
        self.generator_process = GeneratorProcess(conn=child_conn)
        self.generator_process.start()
        self.loop.create_task(self.check_responses())

    def should_respond(self, message):
        """Return True for "!idea" messages in an allowed guild channel.

        Fix: direct messages have ``message.guild is None`` and their
        channels carry no ``name`` attribute, so the original expression
        raised AttributeError on any DM. Guard both lookups.
        """
        if message.guild is None or message.guild.id not in ALLOWED_SERVER_IDS:
            return False
        return (
            getattr(message.channel, "name", None) in ALLOWED_CHANNELS
            and message.content.startswith(COMMAND)
        )

    def terminate_worker_process(self):
        """Ask the generator worker process to shut down cleanly."""
        self.conn.send(StopRequest())

    async def on_message(self, message):
        # Never react to our own messages, nor anything out of scope.
        if message.author == self.user:
            return
        if not self.should_respond(message):
            return
        # Everything after the first space is an optional prompt prefix.
        space_idx = message.content.find(" ")
        initial_text = None
        if space_idx != -1:
            initial_text = message.content[space_idx + 1 :]
        self.logger.info(
            f"{message.author} ({message.id}) requested message with prefix: {initial_text}"
        )
        # Post a placeholder immediately; it is edited once generation finishes.
        sent_message = await message.channel.send("Let me think...")
        self.logger.info(f"Scheduling generation for {message.id}...")
        self.conn.send(
            GenerateRequest(
                initial_text,
                sent_message.channel.id,
                sent_message.id,
                message.author.id,
            )
        )

    async def check_responses(self):
        """Background task: poll the worker pipe, edit placeholder messages."""
        while True:
            while self.conn.poll():
                resp = self.conn.recv()
                if resp.type == ResponseType.GENERATE:
                    self.logger.info(
                        f"Response found, responding in message {resp.message_id}"
                    )
                    channel = await self.fetch_channel(resp.channel_id)
                    message = await channel.fetch_message(resp.message_id)
                    await message.edit(content=f"How about:\n{resp.generated}")
                else:
                    self.logger.error("Invalid message type received")
            await asyncio.sleep(RESP_CHECK_INTERVAL_S)
# Script entry point: start the bot, then shut the worker down afterwards.
if __name__ == "__main__":
    print("Creating client...")
    client = IdeaBotClient()
    print("Starting bot...")
    # client.run() blocks until the bot is stopped or disconnected.
    client.run(os.environ.get("DISCORD_TOKEN"))
    print("Terminating worker process...")
    client.terminate_worker_process()
| 2,930 | 892 |
'''
This module contains a medley of sklearn transformers which can be integrated
into a pipeline.
'''
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA
from scipy.stats import kstat
from homcv import betti_numbers
class CumulantsExtractor(BaseEstimator, TransformerMixin):
    '''Scikit-Learn transformer computing cumulants of the features.

    Cumulants are universal numerical invariants of probability
    distributions. Their interpretation is context dependent. For example,
    if the input is an image, these cumulants may be conceptualized as
    "textural" features.

    Note that this transformer can only compute the first 4 cumulants
    (``scipy.stats.kstat`` supports n = 1..4).

    Example
    -------
    >>> X = np.ones(shape=(1, 100))

    This distribution is entirely "deterministic", and we should therefore
    expect all cumulants beyond the first to vanish, and the expectation
    value to be 1.

    >>> cumulants_extractor = CumulantsExtractor()
    >>> cumulants_extractor.transform(X)
    array([[1., 0., 0., 0.]])

    Attributes
    ----------
    highest_cumulant_ : int
        highest cumulant to be computed by the transform method (at most 4).
    '''

    def __init__(self, highest_cumulant_=4):
        # NOTE(review): asserts are stripped under ``python -O``; a raised
        # ValueError would be sturdier, but the exception type is kept as-is.
        assert highest_cumulant_ <= 4, 'cannot compute cumulant higher than 4'
        self.highest_cumulant_ = highest_cumulant_

    def fit(self, X, y=None):
        '''Do nothing and return the estimator unchanged.

        This method is just there to implement the usual API and hence
        work in pipelines.
        '''
        return self

    def _get_cumulants(self, v):
        # k-statistics are unbiased estimators of the cumulants of ``v``.
        kstats = np.array([kstat(data=v, n=k)
                           for k in range(1, self.highest_cumulant_ + 1)])
        return kstats

    def transform(self, X, y=None):
        '''Compute the cumulants of the features, row by row.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        cumulants : ndarray, shape (n_samples, highest_cumulant)
            cumulants of the empirical distribution determined by the data
            along axis=1
        '''
        cumulants = np.apply_along_axis(
            func1d=self._get_cumulants,
            axis=1,
            arr=X,
        )
        return cumulants
class GrayScaler(BaseEstimator, TransformerMixin):
    '''Transform a batch of color images into grayscale.

    Fits a 1-component PCA on the color distribution of all pixels in the
    batch and projects every pixel onto that single component.
    '''

    def __init__(self):
        # Fix: dropped a stray ``pass`` statement that followed this line.
        self.pca = PCA(n_components=1)

    def _flatten(self, X):
        '''Flatten a (n_samples, n_x, n_y, n_channels) batch into a
        (n_pixels, n_channels) array that PCA can consume.
        '''
        assert X.ndim == 4, "batch must be 4 dimensional"
        n_color_channels = X.shape[-1]
        X_flat = X.reshape(-1, n_color_channels)
        return X_flat

    def _unflatten(self, X_grayscale_flat, n_samples, image_dimensions):
        '''Unflatten projected pixels back to shape (n_samples, n_x, n_y).'''
        X_unflat = X_grayscale_flat.reshape(n_samples,
                                            image_dimensions[0],
                                            image_dimensions[1])
        return X_unflat

    def fit(self, X, y=None):
        '''Fit a 1-component PCA on the colors of every pixel in the
        entire batch of images; return self (pipeline API).
        '''
        X_flat = self._flatten(X)
        self.pca.fit(X_flat)
        return self

    def transform(self, X, y=None):
        '''Find a gray-scale approximation to a batch of images using
        1-component PCA in color space.

        Parameters
        ----------
        X : ndarray, shape (n_samples, x_dim, y_dim, n_color_channels)
            Array of n_samples images, of size (x_dim, y_dim) with
            n_color_channels.

        Returns
        -------
        X_grayscaled : ndarray, shape (n_samples, x_dim, y_dim)
            Array of n_samples grayscale images of the same size as the
            input X.
        '''
        image_dimensions = (X.shape[1], X.shape[2])
        n_samples = X.shape[0]
        X_flat = self._flatten(X)
        X_grayscale_flat = self.pca.transform(X_flat)
        X_grayscaled = self._unflatten(
            X_grayscale_flat,
            n_samples,
            image_dimensions,
        )
        return X_grayscaled
class Reshaper(BaseEstimator, TransformerMixin):
    '''Reshape a 2d array into an ndarray of a specified shape.

    Attributes
    ----------
    output_shape_ : tuple of int
        per-sample shape of the output array
    '''

    def __init__(self, output_shape_):
        self.output_shape_ = output_shape_

    def fit(self, X, y=None):
        '''Check that sizes are compatible; return the estimator unchanged.

        Exists only to satisfy the usual estimator API for pipelines.
        '''
        expected_size = np.prod(np.array(self.output_shape_))
        assert X.shape[1] == expected_size, ('output '
            'size does not match input size')
        return self

    def transform(self, X, y=None):
        '''Reshape every row of X to ``self.output_shape_``.

        Parameters
        ----------
        X : ndarray, shape (n_samples, input_dim)
            input data to be transformed

        Returns
        -------
        X_reshaped : ndarray, shape (n_samples,) + self.output_shape_
            Reshaped array
        '''
        target_shape = (X.shape[0],) + self.output_shape_
        return X.reshape(target_shape)
class Bettier(BaseEstimator, TransformerMixin):
    '''Compute the Betti numbers of the dark regions of a batch of images.

    Attributes
    ----------
    threshold_ : float, optional
        ``transform`` computes the Betti numbers of the region formed by
        every pixel darker than this threshold.
    '''

    def __init__(self, threshold_=.5):
        self.threshold_ = threshold_

    def fit(self, X, y=None):
        '''Do nothing and return the estimator unchanged (pipeline API).'''
        return self

    def transform(self, X, y=None):
        '''Return the Betti numbers of the dark region of each image.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_x, n_y)
            Batch of grayscale images.

        Returns
        -------
        X_transformed : ndarray, shape (n_samples, 2)
            Zeroth and first Betti numbers of each image in the batch.
        '''
        rows = [betti_numbers(image, self.threshold_)[None, :]
                for image in X]
        return np.concatenate(rows, axis=0)
| 7,090 | 2,091 |
# -*- coding: utf-8 -*-
import time
import numpy
from krypy.linsys import LinearSystem, Cg
from krypy.deflation import DeflatedCg, DeflatedGmres, Ritz
from krypy.utils import Arnoldi, ritz, BoundCG
from krypy.recycling import RecyclingCg
from krypy.recycling.factories import RitzFactory,RitzFactorySimple
from krypy.recycling.evaluators import RitzApriori,RitzApproxKrylov
from scipy import random, linalg
def find_deflation_subspace(A, b, k, ortho='dmgs', ritz_type='ritz'):
    """Run k Arnoldi steps on (A, b) and return the resulting Ritz vectors."""
    arnoldi = Arnoldi(A, b, ortho=ortho)
    for _ in range(k):
        arnoldi.advance()
    V, H = arnoldi.get()
    _theta, _U, _resnorm, Z = ritz(H, V, type=ritz_type)
    return Z
def reuse_deflation_subspace(sol, ritz_type='ritz'):
    """Recompute Ritz vectors from a previous solver's Arnoldi data."""
    _theta, _U, _resnorm, Z = ritz(sol.H, sol.V, type=ritz_type)
    return Z
# Accumulated per-trial wall-clock times (ms) for each solver variant.
cgt = []
dft = []
rct = []
# Repeat the whole benchmark 99 times to average out timing noise.
# NOTE(review): the inner loops below rebind ``i``; harmless for a
# range-based outer loop, but confusing to read.
for i in range(1, 100):
    matrixSize = 100
    # NOTE(review): ``scipy.random`` (imported at the top of the file) is a
    # deprecated alias of numpy.random and was removed in newer SciPy
    # releases -- confirm the pinned SciPy version.
    R = random.rand(matrixSize, matrixSize)
    # R @ R.T is symmetric positive (semi)definite.
    A = numpy.dot(R, R.transpose())
    b = numpy.ones((matrixSize, 1))
    k = 10
    numSystems = 10
    rank = 1  # rank of each update used to build the next system
    # Build a sequence of nearby systems via successive rank-1 updates.
    Asys = [A]
    for i in range(1, numSystems):
        u = random.rand(matrixSize, rank)
        Asys.append(Asys[i - 1] + numpy.dot(u, u.T))
    systems = []
    for i in range(0, len(Asys)):
        systems.append(LinearSystem(A=Asys[i], b=b, self_adjoint=True, positive_definite=True))
    # Plain CG over every system in the sequence.
    ts = time.time()
    for i in range(0, len(Asys)):
        cg_sol = Cg(systems[i], maxiter=1000)
    te = time.time()
    cgt.append((te - ts) * 1000)
    # Deflated CG: recompute a fresh deflation subspace per system.
    ts = time.time()
    for i in range(0, len(Asys)):
        U = find_deflation_subspace(Asys[i], b, k)
        deflated_sol = DeflatedCg(systems[i], U=U, maxiter=1000)
    te = time.time()
    dft.append((te - ts) * 1000)
    # Recycled CG: reuse Ritz vectors across the sequence of systems.
    vector_factory = RitzFactorySimple(n_vectors=k, which='sm')
    ts = time.time()
    recycler = RecyclingCg(vector_factory=vector_factory)
    for i in range(0, len(Asys)):
        recycled_sol = recycler.solve(systems[i], maxiter=1000)
    te = time.time()
    rct.append((te - ts) * 1000)
print('Mean time taken for CG (ms):', sum(cgt)/len(cgt))
print('Mean time taken for Deflated CG (ms):', sum(dft)/len(dft))
print('Mean time taken for Recycled CG (ms):', sum(rct)/len(rct))
| 2,220 | 944 |
#!/usr/bin/env python
#need to point to classes inorder to import
import rospy
from blackboard.Robot import Robot
from blackboard.RosCommunication import Talker
from rosnode import rosnode_ping
from blackboard.Blackboard import Blackboard
from blackboard.Battery import Battery
# Instantiate the robot's battery model; the meaning of the three
# positional arguments is not visible here -- TODO confirm against
# Battery.__init__.
bat = Battery(100,500,100)
# ROS communication helper publishing as 'robot2'.
talker = Talker('robot2')
# Register robot1 on the shared blackboard with its numeric parameters
# (semantics defined by Robot.__init__ -- confirm there) and its battery.
r = Robot('blackboard','robot1',2,2,2,2,5,10,10,bat,'robot2',talker)
# Keep the node alive until ROS shutdown.
rospy.spin()
| 417 | 163 |
from __future__ import unicode_literals
import frappe
from datetime import datetime, date
from club_crm.club_crm.utils.sms_notification import send_sms
from club_crm.club_crm.utils.push_notification import send_push
from frappe.utils import getdate, get_time, flt
from frappe.utils import escape_html
from frappe import throw, msgprint, _
@frappe.whitelist()
def todays_order():
    """Return every Food Order Entry dated today that is still in flight
    (Ordered / Ready / Delivered), with its line items, in the response.
    """
    today = date.today()
    orders = []
    order_list = frappe.get_all(
        'Food Order Entry',
        filters={'date': today, 'order_status': ['in', {'Ordered', 'Ready', 'Delivered'}]},
        fields=['*'],
    )
    for entry in (order_list or []):
        order = frappe.get_doc('Food Order Entry', entry.name)
        items = [
            {
                'item_name': row.item_name,
                'qty': row.qty,
                'rate': row.rate,
                'amount': row.amount,
            }
            for row in (order.order_items or [])
        ]
        orders.append({
            'order_id': order.name,
            'client_name': order.client_name,
            'order_status': order.order_status,
            'mobile_no': order.mobile_number,
            'total_quantity': order.total_quantity,
            'total_amount': order.total_amount,
            'order_type': order.order_type,
            'items': items,
        })
    frappe.response["message"] = {
        "orders": orders
    }
@frappe.whitelist()
def order_ready(order_id):
    """Mark a food order as Ready and notify the client once.

    Sends an SMS (and a push notification when an FCM token exists) only
    the first time; ``ready_notify`` is the per-order sent flag.
    """
    order = frappe.get_doc('Food Order Entry', order_id)
    frappe.db.set_value("Food Order Entry",order_id,"order_status","Ready")
    frappe.db.commit()
    # Notify at most once per order (the doc was fetched before the update,
    # so ready_notify reflects the pre-call state).
    if order.ready_notify==0:
        client = frappe.get_doc('Client', order.client_id)
        msg = "Your food order from Grams is ready."
        receiver_list='"'+str(order.mobile_number)+'"'
        send_sms(receiver_list,msg)
        if client.fcm_token:
            title = "Grams at Katara Club"
            send_push(client.name,title,msg)
        frappe.db.set_value("Food Order Entry",order_id,"ready_notify",1)
        frappe.db.commit()
    # Re-fetch so the response reflects the committed status change.
    order = frappe.get_doc('Food Order Entry', order_id)
    items = []
    if order.order_items:
        for row in order.order_items:
            items.append({
                'item_name': row.item_name,
                'qty': row.qty,
                'rate': row.rate,
                'amount': row.amount
            })
    frappe.response["message"] = {
        'status': 1,
        'status_message': 'Order is marked as Ready',
        'order_id': order.name,
        'client_name': order.client_name,
        'order_status': order.order_status,
        'mobile_no': order.mobile_number,
        'total_quantity': order.total_quantity,
        'total_amount': order.total_amount,
        'order_type': order.order_type,
        'items': items
    }
@frappe.whitelist()
def order_delivered(order_id):
    """Mark a food order as Delivered and echo the updated order back."""
    order = frappe.get_doc('Food Order Entry', order_id)
    frappe.db.set_value("Food Order Entry", order_id, "order_status", "Delivered")
    frappe.db.commit()
    # Re-fetch so the response reflects the committed change.
    order = frappe.get_doc('Food Order Entry', order_id)
    items = [
        {
            'item_name': row.item_name,
            'qty': row.qty,
            'rate': row.rate,
            'amount': row.amount,
        }
        for row in (order.order_items or [])
    ]
    frappe.response["message"] = {
        "status": 1,
        "status_message": 'Order is marked as Delivered',
        'order_id': order.name,
        'client_name': order.client_name,
        'order_status': order.order_status,
        'mobile_no': order.mobile_number,
        'total_quantity': order.total_quantity,
        'total_amount': order.total_amount,
        'order_type': order.order_type,
        'items': items,
    }
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras.initializers import he_normal
from keras.initializers import Zeros
from keras.activations import relu
from keras.layers import Flatten
from keras.activations import softmax
from keras import optimizers
from keras.losses import categorical_crossentropy
from keras.metrics import top_k_categorical_accuracy
from keras.applications import VGG16, VGG19
import os
import cv2
import numpy as np
# select GPU number to use
os.environ["CUDA_VISIBLE_DEVICES"]="3"
# select data to train
image_path = '/datahdd/workdir/donghyun/faster_rcnn_kdh/PascalDataSetReduced/'
filenumber = 0
X_train = list()
Y_train = list()
while(1):
path = image_path + 'pascal_voc_'+str(filenumber)
if os.path.isfile(path+'.jpg') is True & os.path.isfile(path+'.txt') is True:
X_image = cv2.imread(path+'.jpg')
Y_label = np.loadtxt(path+'.txt', delimiter = ' ')
X_train.append(X_image)
Y_train.append(Y_label)
#print(str(filenumber) + ' is loaded')
else:
print('image loading stopped at ' + str(filenumber-1))
break
filenumber += 1
# Convert to arrays, shuffle the sample order once, and carve off the
# first 10% of the shuffled order as a held-out test split.
X_train = np.array(X_train)
Y_train = np.array(Y_train)
shuffled_indexes = np.arange(len(X_train))
np.random.shuffle(shuffled_indexes)
n_test = int(float(0.1 * len(X_train)))
# Test split, plus its index file for reproducibility.
shuffle_indexes = shuffled_indexes[0:n_test]
X_test = X_train[shuffle_indexes, :]
Y_test = Y_train[shuffle_indexes, :]
np.savetxt('test_shuffled_index_reduced.txt', shuffle_indexes, delimiter=' ', fmt='%i')
print('TEST SET INDEX saved')
# Train split: the remaining 90%, with its own index file.
shuffle_indexes = shuffled_indexes[n_test:len(X_train)]
X_train = X_train[shuffle_indexes, :]
Y_train = Y_train[shuffle_indexes, :]
np.savetxt('train_shuffled_index_reduced.txt', shuffle_indexes, delimiter=' ', fmt='%i')
print('TRAIN SET INDEX saved')
# VGG16-style convolutional backbone, built block by block.
model = Sequential()
conv_kwargs = dict(kernel_size=(3, 3), strides=1, padding="same", activation='relu')
pool_kwargs = dict(pool_size=(2, 2), strides=(2, 2), padding='same', data_format=None)
# (filters, number of conv layers) for each of the five VGG16 blocks.
vgg_blocks = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
for block_idx, (n_filters, n_convs) in enumerate(vgg_blocks):
    for conv_idx in range(n_convs):
        if block_idx == 0 and conv_idx == 0:
            # Only the very first layer declares the input shape.
            model.add(Conv2D(filters=n_filters, input_shape=(224, 224, 3), **conv_kwargs))
        else:
            model.add(Conv2D(filters=n_filters, **conv_kwargs))
    model.add(MaxPooling2D(**pool_kwargs))
# Classifier head: one hidden dense layer, 20-way softmax output.
model.add(Flatten())
model.add(Dense(units=1024, activation='relu'))
model.add(Dense(units=20, activation='softmax'))
model.summary()
##---OPTIMIZERS---##
# NOTE(review): ``momentum`` is defined but never used -- the model
# compiles with ``adam`` below. ``optimizers.adam`` (lowercase) is the
# legacy Keras 2.x spelling; confirm against the pinned Keras version.
adam = optimizers.adam(lr=0.0001, beta_1 = 0.9, beta_2 = 0.999, epsilon = None, decay= 0, amsgrad = False)
momentum = optimizers.SGD(lr=0.01, momentum = 0.9, decay=1e-6)
model.compile(optimizer = adam, loss = categorical_crossentropy, metrics=['accuracy'])
# when using the categorical_crossentropy loss, your targets should be in categorical format (one- hot encoding)
model.fit(X_train, Y_train, batch_size = 64, epochs = 100, validation_data=(X_test, Y_test))
#score = model.evaluate(X_test, Y_test, batch_size = 64)
| 5,448 | 1,967 |
from .experiment import Experiment
from .experiment1 import Experiment1
from .experiment2 import Experiment21, Experiment22, Experiment23
from .experiment3 import Experiment31, Experiment32, Experiment33, Experiment34, Experiment35, Experiment36
| 246 | 76 |
# Generated by Django 2.1.7 on 2019-03-24 05:27
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the posts app: Post and Comment models.

    Auto-generated by Django 2.1.7; do not edit field definitions by hand
    once this migration has been applied anywhere.
    """

    initial = True

    dependencies = [
        # Category must exist first: Post.tags (M2M) references it.
        ('categories', '0001_initial'),
        # Depend on whichever user model AUTH_USER_MODEL resolves to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('timestamp', models.DateTimeField(auto_now_add=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Comment',
                'verbose_name_plural': 'Comments',
            },
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField()),
                ('content', ckeditor_uploader.fields.RichTextUploadingField()),
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('thumbnail', models.ImageField(upload_to='images')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('tags', models.ManyToManyField(to='categories.Category')),
            ],
            options={
                'verbose_name': 'Post',
                'verbose_name_plural': 'Posts',
            },
        ),
        # Comment.post is added after both models exist to break the
        # create-order dependency between Comment and Post.
        migrations.AddField(
            model_name='comment',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
        ),
    ]
| 2,149 | 614 |
import unittest
from app import db
from app.models import User
class TestUser(unittest.TestCase):
    '''
    Class that tests the User class
    '''

    def setUp(self):
        """Create and persist a fresh test user before each test."""
        self.new_user = User(username="Diana", fullname="Diana", email="diana@gmail.com",
                             bio="A girl", profile_pic_url="imageurl", password="diana")
        db.session.add(self.new_user)
        db.session.commit()

    def tearDown(self):
        """Delete all users after each test so tests stay independent."""
        User.query.delete()
        db.session.commit()

    def test_password_setter(self):
        """Setting a password stores a hash in pass_secure."""
        self.assertTrue(self.new_user.pass_secure is not None)

    def test_save_user(self):
        """save_user persists the user to the database."""
        self.new_user.save_user()
        self.assertTrue(len(User.query.all()) > 0)

    def test_check_instance_variables(self):
        """Constructor arguments land on the expected attributes."""
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(self.new_user.username, 'Diana')
        self.assertEqual(self.new_user.fullname, 'Diana')
        self.assertEqual(self.new_user.email, 'diana@gmail.com')
        self.assertEqual(self.new_user.bio, 'A girl')
        self.assertEqual(self.new_user.profile_pic_url, 'imageurl')
        self.assertTrue(self.new_user.verify_password('diana'))

    def test_no_access_password(self):
        """Reading the raw password attribute must raise AttributeError."""
        with self.assertRaises(AttributeError):
            self.new_user.password
import http
from pathlib import Path
from urllib.parse import urlencode
from hargreaves.search.models import InvestmentCategoryTypes
from hargreaves.session.mocks import MockSessionClient
from hargreaves.orders.manual.clients import ManualOrderClient
from hargreaves.orders.manual.models import ManualOrder, ManualOrderConfirmation, ManualOrderPosition
from hargreaves.orders.manual.parsers import parse_manual_order_confirmation_page
from hargreaves.orders.models import OrderPositionType, OrderAmountType
from hargreaves.utils import clock
from hargreaves.utils.logs import LogHelper
from requests_tracker.mocks import MockWebSession
# Module-level test setup: send logs to stdout and pin the clock so any
# time-dependent behavior in the clients is deterministic.
LogHelper.configure_std_out()
clock.freeze_time()
def test_parse_manual_sell_order_confirmation_uk_equity_ok():
    """Parsing the canned UK-equity page yields a pending sell of 100 PDG
    shares with no limit price."""
    confirm_html = Path(Path(__file__).parent / 'files/sell/manual-sell-order-confirmation-uk-equity.html') \
        .read_text()
    order_confirmation = parse_manual_order_confirmation_page(confirm_html=confirm_html,
                                                              amount_type=OrderAmountType.Quantity)
    assert order_confirmation.order_date.strftime('%d/%m/%Y') == '21/03/2022'
    assert order_confirmation.stock_code == 'PDG'
    assert order_confirmation.quantity == 100.0
    assert order_confirmation.order_type == 'Sell'
    assert order_confirmation.limit_price is None
    assert order_confirmation.order_status == 'Pending'
def test_parse_manual_sell_order_confirmation_us_equity_ok():
    """Parsing the canned US-equity page yields a pending sell of 500 TUSK
    shares at a 1.9 limit price."""
    confirm_html = Path(Path(__file__).parent / 'files/sell/manual-sell-order-confirmation-us-equity.html') \
        .read_text()
    order_confirmation = parse_manual_order_confirmation_page(confirm_html=confirm_html,
                                                              amount_type=OrderAmountType.Quantity)
    assert order_confirmation.order_date.strftime('%d/%m/%Y') == '23/03/2022'
    assert order_confirmation.stock_code == 'TUSK'
    assert order_confirmation.quantity == 500.0
    assert order_confirmation.order_type == 'Sell'
    assert order_confirmation.limit_price == 1.9
    assert order_confirmation.order_status == 'Pending'
def test_submit_manual_sell_order_confirmation_uk_equity():
    """Submitting a UK-equity sell order POSTs the expected form body to
    /my-accounts/manual_deal and returns a parsed confirmation."""
    confirm_html = Path(Path(__file__).parent / 'files/sell/manual-sell-order-confirmation-uk-equity.html') \
        .read_text()
    # Current holding the order is sold from (values mirror the canned page).
    current_position = ManualOrderPosition(
        hl_vt="1601575001",
        security_type="equity",
        out_of_hours=True,
        sedol="B1JQBT1",
        account_id=70,
        available=179681.27,
        holding=300,
        holding_value=77.40,
        transfer_units=0,
        remaining_units=300,
        remaining_units_value=77.40,
        isin="GB00B1JQBT10",
        epic="",
        currency_code="GBX",
        SD_Bid=0.00,
        SD_Ask=0.00,
        fixed_interest=False,
        category_code=InvestmentCategoryTypes.EQUITIES
    )
    order = ManualOrder(
        position=current_position,
        position_type=OrderPositionType.Sell,
        amount_type=OrderAmountType.Quantity,
        quantity=100,
        limit=None,
        earmark_orders_confirm=False)
    with MockWebSession() as web_session:
        # Exact urlencoded form body the client must send.
        expected_params = {
            'hl_vt': "1601575001",
            'type': "equity",
            'out_of_hours': "1",
            'sedol': "B1JQBT1",
            'product_no': "70",
            'available': "179681.27",
            'holding': "300",
            'holding_value': "77.4",
            'transfer_units': "0.0000",
            'remaining_units': "300",
            'remaining_units_value': "77.4",
            'isin': "GB00B1JQBT10",
            'epic': "",
            'currency_code': "GBX",
            'SD_Bid': "0.00",
            'SD_Ask': "0.00",
            'fixed_interest': "0",
            'bs': "Sell",
            'quantity': "100",
            'qs': "quantity",
            'limit': "",
            'earmark_orders_confirm': "false",
        }
        # Stub the deal endpoint to return the canned confirmation page.
        mock = web_session.mock_post(
            url='https://online.hl.co.uk/my-accounts/manual_deal',
            headers={
                'Referer': f'https://online.hl.co.uk/my-accounts/security_deal/sedol/{order.sedol}'
            },
            response_text=confirm_html,
            status_code=http.HTTPStatus.OK
        )
        session_client = MockSessionClient()
        client = ManualOrderClient(session_client)
        order_confirmation = client.submit_order(web_session=web_session, order=order)
        # The recorded request body must match the expected form encoding.
        actual_param = mock.request_history[0].text
        assert urlencode(expected_params) == actual_param
        assert type(order_confirmation) == ManualOrderConfirmation
        assert session_client.was_called is True
def test_submit_manual_sell_order_confirmation_us_equity():
    """Submitting a US (overseas) sell order POSTs to
    /my-accounts/manual_deal_overseas with qs=value and a limit price."""
    confirm_html = Path(Path(__file__).parent / 'files/sell/manual-sell-order-confirmation-us-equity.html') \
        .read_text()
    # Current holding the order is sold from (values mirror the canned page).
    current_position = ManualOrderPosition(
        hl_vt="1496180636",
        security_type="equity",
        out_of_hours=True,
        sedol="BDBFK59",
        account_id=70,
        available=164629.62,
        holding=7635,
        holding_value=11093.562582535,
        transfer_units=0,
        remaining_units=7635,
        remaining_units_value=11093.562582535,
        isin="US56155L1089",
        epic="",
        currency_code="USD",
        SD_Bid=0.00,
        SD_Ask=0.00,
        fixed_interest=False,
        category_code=InvestmentCategoryTypes.OVERSEAS
    )
    order = ManualOrder(
        position=current_position,
        position_type=OrderPositionType.Sell,
        amount_type=OrderAmountType.Value,
        quantity=500,
        limit=1.9,
        earmark_orders_confirm=False)
    with MockWebSession() as web_session:
        # Exact urlencoded form body the client must send.
        expected_params = {
            'hl_vt': "1496180636",
            'type': "equity",
            'out_of_hours': "1",
            'sedol': "BDBFK59",
            'product_no': "70",
            'available': "164629.62",
            'holding': "7635",
            'holding_value': "11093.562582535",
            'transfer_units': "0.0000",
            'remaining_units': "7635",
            'remaining_units_value': "11093.562582535",
            'isin': "US56155L1089",
            'epic': "",
            'currency_code': "USD",
            'SD_Bid': "0.00",
            'SD_Ask': "0.00",
            'fixed_interest': "0",
            'bs': "Sell",
            'quantity': "500",
            'qs': "value",
            'limit': "1.9",
            'earmark_orders_confirm': "false",
        }
        # Overseas equities go to a different endpoint than UK deals.
        mock = web_session.mock_post(
            url='https://online.hl.co.uk/my-accounts/manual_deal_overseas',
            headers={
                'Referer': f'https://online.hl.co.uk/my-accounts/security_deal/sedol/{order.sedol}'
            },
            response_text=confirm_html,
            status_code=http.HTTPStatus.OK
        )
        session_client = MockSessionClient()
        client = ManualOrderClient(session_client)
        order_confirmation = client.submit_order(web_session=web_session, order=order)
        # The recorded request body must match the expected form encoding.
        actual_param = mock.request_history[0].text
        assert urlencode(expected_params) == actual_param
        assert type(order_confirmation) == ManualOrderConfirmation
        assert session_client.was_called is True
| 7,284 | 2,464 |
#!/usr/bin/env python
"""Creates JADE configuration for stage 1 of pydss_simulation pipeline."""
import logging
import sys
import click
from jade.common import CONFIG_FILE
from jade.loggers import setup_logging
from jade.utils.utils import load_data
from PyDSS.reports.pv_reports import PF1_SCENARIO, CONTROL_MODE_SCENARIO
from disco.enums import SimulationType
from disco.extensions.pydss_simulation.pydss_configuration import PyDssConfiguration
from disco.extensions.pydss_simulation.estimate_run_minutes import generate_estimate_run_minutes
from disco.pydss.common import ConfigType
from disco.pydss.pydss_configuration_base import get_default_reports_file
logger = logging.getLogger(__name__)
def callback_is_enabled(_, __, value):
    """Click option callback: normalize "true"/"false" (any case) to bool.

    A missing option (None) passes through unchanged so callers can tell
    "not set" apart from an explicit false.  Any other string raises
    KeyError, but click.Choice already restricts the input.
    """
    if value is None:
        return None
    lookup = {"true": True, "false": False}
    return lookup[value.lower()]
# Shared click options applied to every time-series command through the
# common_time_series_options() decorator below.  The tri-state report
# toggles ("true"/"false"/unset) are normalized to True/False/None by
# callback_is_enabled; None means "use the value from --reports-filename".
COMMON_TIME_SERIES_OPTIONS = (
    click.option(
        "-c",
        "--config-file",
        default=CONFIG_FILE,
        show_default=True,
        help="JADE config file to create",
    ),
    click.option(
        "--feeder-losses",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Feeder Losses report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--pv-clipping",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the PV clipping report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--pv-curtailment",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the PV curtailment report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--thermal-metrics",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Thermal Metrics report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--voltage-metrics",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Voltage Metrics report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--capacitor-changes",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Capacitor State Changes report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--regcontrol-changes",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the RegControl Tap Number Changes report. If not set, use the "
        "value in --reports-filename.",
    ),
    click.option(
        "--export-data-tables",
        default=False,
        is_flag=True,
        show_default=True,
        help="Export collected circuit element properties as tables.",
    ),
    click.option(
        "--exports-filename",
        default=None,
        show_default=True,
        help="PyDSS export options, default is None.",
    ),
    click.option(
        "-r",
        "--reports-filename",
        default=get_default_reports_file(SimulationType.QSTS),
        show_default=True,
        help="PyDSS report options",
    ),
    click.option(
        "--skip-night/--no-skip-night",
        default=False,
        is_flag=True,
        show_default=True,
        help="Don't run controls or collect data during nighttime hours.",
    ),
    click.option(
        "--store-all-time-points/--no-store-all-time-points",
        is_flag=True,
        default=False,
        show_default=True,
        help="Store per-element data at all time points for thermal and voltage metrics.",
    ),
    click.option(
        "--store-per-element-data/--no-store-per-element-data",
        is_flag=True,
        default=False,
        show_default=True,
        help="Store per-element data in thermal and voltage metrics.",
    ),
    click.option(
        "-v",
        "--volt-var-curve",
        default=None,
        help="Update the PyDSS volt-var curve name. If not set, use the pre-configured curve.",
    ),
    click.option(
        "--verbose",
        is_flag=True,
        default=False,
        help="Enable debug logging",
    ),
)
def common_time_series_options(func):
    """Decorator: attach the shared time-series click options to *func*.

    The options are applied in reverse so they show up in their declared
    order in --help output.
    """
    decorated = func
    for opt in COMMON_TIME_SERIES_OPTIONS[::-1]:
        decorated = opt(decorated)
    return decorated
@click.command()
@click.argument("inputs")
@common_time_series_options
@click.option(
    "-e",
    "--estimated-run-minutes",
    type=int,
    help="Estimated per-job runtime. Default is None.",
)
@click.option(
    "--calc-estimated-run-minutes/--no-calc-estimated-run-minutes",
    is_flag=True,
    default=True,
    show_default=True,
    help="Calculate estimated per-job runtime by parsing the OpenDSS files.",
)
@click.option(
    "--dc-ac-ratio",
    default=None,
    type=float,
    help="Set a custom DC-AC ratio for PV Systems.",
)
@click.option(
    "--pf1/--no-pf1",
    is_flag=True,
    default=True,
    show_default=True,
    help="Include PF1 scenario or not",
)
@click.option(
    "--control-mode/--no-control-mode",
    is_flag=True,
    default=True,
    show_default=True,
    help="Include control_mode scenario or not",
)
@click.option(
    "--order-by-penetration/--no-order-by-penetration",
    default=False,
    show_default=True,
    help="Make jobs with higher penetration levels blocked by those with lower levels. This "
    "can be beneficial if you want the higher-penetration-level jobs to be "
    "canceled if a job with a lower penetration level fails. However, it can significantly "
    "reduce the number of jobs that can run simultaneously.",
)
def time_series(
    inputs,
    config_file,
    feeder_losses,
    pv_clipping,
    pv_curtailment,
    thermal_metrics,
    voltage_metrics,
    capacitor_changes,
    regcontrol_changes,
    export_data_tables,
    exports_filename,
    reports_filename,
    skip_night,
    store_all_time_points,
    store_per_element_data,
    volt_var_curve,
    verbose,
    estimated_run_minutes,
    calc_estimated_run_minutes,
    dc_ac_ratio,
    pf1,
    control_mode,
    order_by_penetration,
):
    """Create JADE configuration for time series simulations."""
    level = logging.DEBUG if verbose else logging.INFO
    setup_logging(__name__, None, console_level=level, packages=["disco"])
    # At least one scenario must be enabled or there is nothing to simulate.
    if not pf1 and not control_mode:
        logger.error("At least one of '--pf1' or '--control-mode' must be set.")
        sys.exit(1)
    simulation_config = PyDssConfiguration.get_default_pydss_simulation_config()
    simulation_config["project"]["simulation_type"] = SimulationType.QSTS.value
    simulation_config["reports"] = load_data(reports_filename)["reports"]
    simulation_config["exports"]["export_data_tables"] = export_data_tables
    # CLI tri-state flags (True/False) override the values loaded from
    # --reports-filename; None leaves the file's setting untouched.
    for report in simulation_config["reports"]["types"]:
        if report["name"] == "Feeder Losses" and feeder_losses is not None:
            report["enabled"] = feeder_losses
        if report["name"] == "PV Clipping" and pv_clipping is not None:
            report["enabled"] = pv_clipping
        if report["name"] == "PV Curtailment" and pv_curtailment is not None:
            report["enabled"] = pv_curtailment
        if report["name"] == "Thermal Metrics" and thermal_metrics is not None:
            report["enabled"] = thermal_metrics
        if report["name"] == "Voltage Metrics" and voltage_metrics is not None:
            report["enabled"] = voltage_metrics
        if report["name"] in ("Thermal Metrics", "Voltage Metrics"):
            report["store_all_time_points"] = store_all_time_points
            report["store_per_element_data"] = store_per_element_data
        if report["name"] == "Capacitor State Change Counts" and capacitor_changes is not None:
            report["enabled"] = capacitor_changes
        if report["name"] == "RegControl Tap Number Change Counts" and regcontrol_changes is not None:
            report["enabled"] = regcontrol_changes
    exports = {} if exports_filename is None else load_data(exports_filename)
    # control_mode is appended first so it can be removed by index 0 below
    # when the generated config turns out to have no pydss controllers.
    scenarios = []
    if control_mode:
        scenarios.append(
            PyDssConfiguration.make_default_pydss_scenario(CONTROL_MODE_SCENARIO, exports)
        )
    if pf1:
        scenarios.append(PyDssConfiguration.make_default_pydss_scenario(PF1_SCENARIO, exports))
    config = PyDssConfiguration.auto_config(
        inputs,
        simulation_config=simulation_config,
        scenarios=scenarios,
        order_by_penetration=order_by_penetration,
        estimated_run_minutes=estimated_run_minutes,
        dc_ac_ratio=dc_ac_ratio,
    )
    has_pydss_controllers = config.has_pydss_controllers()
    if control_mode and not has_pydss_controllers:
        scenarios_config = config.get_pydss_config(ConfigType.SCENARIOS)
        assert scenarios_config[0]["name"] == CONTROL_MODE_SCENARIO
        scenarios_config.pop(0)
        logger.info(
            "Excluding %s scenario because there are no pydss controllers.", CONTROL_MODE_SCENARIO
        )
        config.set_pydss_config(ConfigType.SCENARIOS, scenarios_config)
    if volt_var_curve is not None:
        if has_pydss_controllers and control_mode:
            config.update_volt_var_curve(volt_var_curve)
        else:
            # A volt-var curve only matters inside the control_mode scenario.
            logger.warning(
                "Setting a volt_var_curve has no effect when there is no %s scenario.",
                CONTROL_MODE_SCENARIO,
            )
    if calc_estimated_run_minutes:
        generate_estimate_run_minutes(config)
    if skip_night:
        # Restrict the simulation to daytime hours.
        pydss_sim_config = config.get_pydss_config(ConfigType.SIMULATION_CONFIG)
        pydss_sim_config["project"]["simulation_range"] = {"start": "06:00:00", "end": "18:00:00"}
        # Note that we are using the same convergence error threshold percent.
        config.set_pydss_config(ConfigType.SIMULATION_CONFIG, pydss_sim_config)
    config.dump(filename=config_file)
    print(f"Created {config_file} for TimeSeries Analysis")
| 10,675 | 3,335 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
#
# Author: Makoto Shimazu <makoto.shimaz@gmail.com>
# URL: https://github.com/Pizaid
# License: 2-Clause BSD License
# Created: 2014-08-09
#
import sys
sys.path.append('gen-py')
from Pizaid import ControllerService
from Pizaid.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# Query the local Pizaid controller over Thrift and print disk information.
# (Python 2 code: print statements and the "except X, e" clause.)
try:
    # Buffered socket transport + binary protocol — the standard Thrift stack.
    transport = TSocket.TSocket('localhost', 9090)
    transport = TTransport.TBufferedTransport(transport)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = ControllerService.Client(protocol)
    transport.open()
    # print client.network_ipv4()
    # print client.storage_storage_group_list()
    # print client.storage_join("main", "/dev/sda")
    # print client.storage_capacity_kb("main")
    # print client.storage_usage_kb("main")
    # print client.storage_usage_percent("main")
    print client.storage_disk_list("unused")
    print client.storage_disk_id("/dev/sda")
    print client.storage_disk_port("/dev/sda")
    print client.storage_disk_size("/dev/sda")
    # NOTE(review): the transport is never closed; acceptable for a one-shot
    # demo script, but a transport.close() here would be cleaner.
except Thrift.TException, tx:
    print '%s' % (tx.message)
| 1,236 | 414 |
from auxilliary_functions import *
def read_file(filename):
    """Return the full contents of *filename* decoded as UTF-8.

    :param filename: Filepath to the file to be read.
    """
    with open(filename, encoding='utf-8') as handle:
        return handle.read()
def moralize(input_text, output_format='pydict'):
    """Analyze *input_text* and return its keyword counts.

    :param input_text: Text you want to analyze.
    :param output_format: defaults to a Python dictionary; pass '.json' for
        output as a JSON object.
    """
    return count_keywords(word_frequency_dict(input_text), output_format)
| 660 | 199 |
import os
import json
from pprint import pprint
def convert(inputfile, outputfile):
    """Convert a JSON file holding a list of strings into a text file with
    one entry per line.

    :param inputfile: path of the JSON file to read.
    :param outputfile: path of the text file to write.
    """
    # `with` guarantees both handles are closed even on error; the original
    # leaked the output handle if a write failed.
    with open(inputfile) as json_data:
        data = json.load(json_data)
    with open(outputfile, 'w') as out:
        # Iterating a JSON list yields its items; note a JSON object would
        # yield its keys instead.
        for line in data:
            out.write(line + '\n')
#json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
if __name__ == '__main__':
    # Script entry point; input/output paths are hard-coded to the
    # author's machine.
    convert('/Users/cassiomelo/Downloads/formalcontext.json', '/Users/cassiomelo/Downloads/formalcontext.cxt')
#!/usr/bin/python
import os
import sys
import sklearn
from sklearn.naive_bayes import GaussianNB
from sklearn.externals import joblib
import argparse
import numpy as np
import fileUtils
import tools
def saveModel(modelData, fpath):
    """Persist a fitted model object to *fpath* via joblib."""
    joblib.dump(modelData, fpath)
def readfile(fpath):
    """Read a comma-delimited file and return, per line, the product of two
    of its integer columns (columns 3*4 when there are more than four
    fields, otherwise the last two)."""
    products = []
    for row in fileUtils.readTxtFile(fpath, ','):
        fields = row.split(',')
        if len(fields) > 4:
            value = fileUtils.str2int(fields[3]) * fileUtils.str2int(fields[4])
        else:
            value = fileUtils.str2int(fields[-1]) * fileUtils.str2int(fields[-2])
        products.append(value)
    return products
def computeFeature(fpath, rangeList):
    """Histogram one file's per-line products into interval buckets.

    :param fpath: data file to read.
    :param rangeList: (start, end, interval) triple describing the buckets.
    :return: list of bucket counts.
    """
    start, end, interval = rangeList[0], rangeList[1], rangeList[2]
    bucketEdges, counts = tools.getSectionList(start, end, interval)
    for value in readfile(fpath):
        counts[tools.computeRange(bucketEdges, value)] += 1
    return counts
def computeAllFeature(dpath, rangeList):
    """Compute histogram features for every file under *dpath*.

    :param dpath: directory containing the input files.
    :param rangeList: (start, end, interval) triple forwarded to
        computeFeature.  The original called computeFeature(fpath) without
        this required argument and therefore raised TypeError on every
        invocation; since no working caller could exist, adding the
        parameter is safe.
    :return: 2-D numpy array with one feature row per file.
    """
    fileList = fileUtils.genfilelist(dpath)
    allFeatures = []
    for fpath in fileList:
        allFeatures.append(computeFeature(fpath, rangeList))
    return np.array(allFeatures)
def train(trainData, trainLabel):
    """Fit a Gaussian Naive Bayes classifier.

    :param trainData: 2-D array-like of feature rows.
    :param trainLabel: 1-D array-like of labels.
    :return: the fitted GaussianNB estimator.  (The original bound this to
        the misleading name ``y_pred``; ``fit`` returns the estimator
        itself, not predictions.)
    """
    gnb = GaussianNB()
    model = gnb.fit(trainData, trainLabel)
    return model
def main(opts):
    """Train a model from the training-data directory and save it.

    NOTE(review): ``loadTrainData`` is not defined anywhere in this file and
    fileUtils/tools are imported as modules, not star-imported — presumably
    it should be a helper here or a module-qualified call; as written this
    raises NameError at runtime.  Confirm before relying on this entry point.
    """
    trainDataDir = opts.trainDataDir
    data, label = loadTrainData(trainDataDir)
    mymodel = train(data, label)
    saveModel(mymodel, opts.modelSaveDir)
    print('model saved at {}'.format(opts.modelSaveDir))
def parseOpts(argv):
    """Parse command-line options.

    :param argv: full argument vector (argv[0] is the program name).  The
        original accepted this parameter but ignored it and implicitly read
        sys.argv via parse_args().
    :return: argparse.Namespace with trainDataDir and modelSaveDir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--trainDataDir', help='path to training data dir')
    parser.add_argument('-m', '--modelSaveDir', help='path to model save dir')
    # Parse the argv that was passed in (skipping the program name) instead
    # of silently falling back to sys.argv.
    opts = parser.parse_args(argv[1:])
    return opts
if __name__ == "__main__":
    # Script entry point: parse CLI options, then train and save the model.
    opts = parseOpts(sys.argv)
    main(opts)
| 1,919 | 637 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import gspread
import config
from oauth2client.service_account import ServiceAccountCredentials as Account
api_url = 'https://api.leaseweb.com/invoices/v1/invoices'
def api_request(url, headers, params=None):
    """GET *url* and return its decoded JSON body.

    Any request failure (HTTP error status, connection problem, or anything
    unexpected) terminates the program via SystemExit, mirroring a CLI
    tool's fail-fast behavior.
    """
    try:
        response = requests.get(url=url, headers=headers, params=params)
        response.raise_for_status()
    except requests.exceptions.HTTPError as http_error:
        raise SystemExit(http_error)
    except requests.exceptions.RequestException as req_error:
        raise SystemExit(req_error)
    except Exception as error:
        raise SystemExit(error)
    return response.json()
def main(header):
    """Fetch all invoices from the Leaseweb API and flatten each into a
    dict keyed by spreadsheet column names."""
    field_map = (
        ('ContractId', 'id'),
        ('Date', 'date'),
        ('DueDate', 'dueDate'),
        ('TaxAmount', 'taxAmount'),
        ('Total', 'total'),
        ('OpenAmount', 'openAmount'),
        ('Currency', 'currency'),
        ('Status', 'status'),
    )
    return [
        {column: item[api_key] for column, api_key in field_map}
        for item in api_request(api_url, header)['invoices']
    ]
# Google sheet
# NOTE: these statements run at import time — reading the service-account
# key file and authorizing against the Google API are module-level side
# effects.
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
creds = Account.from_json_keyfile_name('google_sheet_secret.json', scope)
client = gspread.authorize(creds)
def update_google_table(parameter_list):
    """Write invoice rows into the 'All invoices' worksheet.

    :param parameter_list: list of dicts keyed by the column names below.
    """
    # Open the target spreadsheet and worksheet.
    worksheet = client.open("Leaseweb invoices").worksheet('All invoices')
    # Table header row.
    header = [
        'ContractId',
        'Date',
        'DueDate',
        'TaxAmount',
        'Total',
        'OpenAmount',
        'Currency',
        'Status',
    ]
    worksheet.update('A1', [header])
    # One row per invoice, columns A..H, starting under the header.
    start_cell = 'A2'
    end_cell = 'H' + str(len(parameter_list) + 1)
    cells = worksheet.range('{}:{}'.format(start_cell, end_cell))
    # Flatten the dicts into row-major cell values matching the range order.
    flattened = [row[column] for row in parameter_list for column in header]
    for cell, value in zip(cells, flattened):
        cell.value = value
    # Single batched write instead of one API call per cell.
    worksheet.update_cells(cells)
if __name__ == '__main__':
    # Collect invoices from every configured Leaseweb account, then push
    # the combined list into the Google sheet in one go.
    invoices_list = []
    for auth_key in config.lw_accounts:
        for invoice in main(config.lw_accounts[auth_key]):
            invoices_list.append(invoice)
    update_google_table(invoices_list)
| 2,472 | 785 |
# -*- coding: utf-8 -*-
import ast
import redis
import socket
import hashlib
import pymongo
from scrapy import Request
from w3lib.url import canonicalize_url
from scrapy.utils.python import to_bytes
def get_str_md5(string: str, encoding='utf-8'):
    """Return the hexadecimal MD5 digest of *string*.

    :param string: text to hash.
    :param encoding: encoding used to turn the text into bytes.
    :return: 32-character lowercase hex digest.
    """
    return hashlib.md5(string.encode(encoding=encoding)).hexdigest()
def get_request_md5(request: Request):
    """Return an MD5 fingerprint of a scrapy.Request.

    Hashes the method, the canonicalized URL and the body, mirroring
    scrapy.utils.request's request_fingerprint function.
    """
    digest = hashlib.md5()
    for chunk in (
        to_bytes(request.method),
        to_bytes(canonicalize_url(request.url)),
        request.body or b'',
    ):
        digest.update(chunk)
    return digest.hexdigest()
def get_redis_conn(settings):
    """Build a Redis connection from the project settings
    (REDIS_HOST, REDIS_PORT, plus keyword extras in REDIS_PARAMS)."""
    extra_params = settings.get('REDIS_PARAMS')
    return redis.Redis(
        host=settings.get('REDIS_HOST'),
        port=settings.get('REDIS_PORT'),
        **extra_params,
    )
def get_mongo_cli(settings):
    """Build a MongoDB client from the project settings
    (MONGO_URI plus keyword extras in MONGO_PARAMS)."""
    return pymongo.MongoClient(settings.get('MONGO_URI'), **settings.get('MONGO_PARAMS'))
def get_local_ip():
    """
    :return: this machine's LAN IP string, e.g. '192.168.0.1'
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # connect() on a UDP socket sends no packets; it only makes the OS
        # choose the outbound interface, whose address getsockname() reports.
        s.connect(('8.8.8.8', 80))
        local_ip = s.getsockname()[0]
    finally:
        # The original leaked the socket if connect() raised; always close.
        s.close()
    return local_ip
def cookie_str_to_dict(cookie_str):
    """Convert a browser-captured cookie string ('k1=v1; k2=v2') to a dict.

    A fragment without '=' maps to an empty-string value; only the first
    '=' in a fragment splits name from value.
    """
    result = {}
    for fragment in cookie_str.split(';'):
        name, _, value = fragment.strip().partition('=')
        result[name] = value
    return result
def run_func(argv, local_var):
    """Run as : run_func(sys.argv, locals())

    Dispatches to the callable named by argv[1] in *local_var*; the
    remaining arguments are ``k=v`` pairs whose values are parsed with
    ast.literal_eval (single-quoted values are taken as raw strings).
    Returns the callable's result, or None (after printing usage) when no
    function name was given.
    """
    argv_len = len(argv)
    warn_msg = f'Please run this program as [ python file_name.py function_name k1=v1 k2="\'str_v2\'" ... ] \n' \
               f'(Please use single quotes when passing strings)\n'
    if argv_len > 1:
        func_name = argv[1]
        func = local_var.get(func_name)
        assert func, f'Please check if [ {func_name} ] exists '
        params = dict()
        try:
            for arg in argv[2:]:
                k, v = arg.split('=', 1)
                v = v.strip("'") if v.startswith("'") else ast.literal_eval(v)
                params[k] = v
        except (ValueError, SyntaxError, TypeError) as exc:
            # The original bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit; catch only the parse errors that split() and
            # ast.literal_eval actually raise, and chain the cause.
            raise UserWarning(warn_msg) from exc
        return func(**params)
    else:
        print(warn_msg)
| 2,499 | 968 |
from __future__ import annotations
import decimal
from ctc.toolbox import validate_utils
from . import cpmm_spec
def trade(
    x_reserves: int | float,
    y_reserves: int | float,
    x_sold: int | float | None = None,
    x_bought: int | float | None = None,
    y_sold: int | float | None = None,
    y_bought: int | float | None = None,
    new_x_reserves: int | float | None = None,
    new_y_reserves: int | float | None = None,
    fee_rate: int | float | None = None,
) -> cpmm_spec.Trade:
    """perform trade with AMM

    ## Input Requirements
    - all input values must be positive
    - must always specify both x_reserves and y_reserves
    - must specify exactly one of:
        - x_sold
        - x_bought
        - y_sold
        - y_bought
        - new_x_reserves
        - new_y_reserves
    - values in this list can be scalars or numpy arrays

    Sign convention in the returned dict: a negative "bought" (or "sold")
    amount denotes the opposite direction of flow for that token.
    """
    # validate inputs
    if fee_rate is None:
        fee_rate = 0.003  # default matches the 0.3% fee used by the helpers below
    value = validate_utils._ensure_exactly_one(
        x_sold, x_bought, y_sold, y_bought, new_x_reserves, new_y_reserves
    )
    validate_utils._ensure_non_negative(value)
    kwargs = {
        'x_reserves': x_reserves,
        'y_reserves': y_reserves,
        'fee_rate': fee_rate,
    }
    # swapping the reserves lets the x-oriented helper functions compute the
    # symmetric y-oriented cases
    reverse_kwargs = {
        'y_reserves': x_reserves,
        'x_reserves': y_reserves,
        'fee_rate': fee_rate,
    }
    if x_sold is not None:
        # case: sell x for y, x specified
        x_bought = -x_sold
        y_bought = compute_y_bought_when_x_sold(x_sold=x_sold, **kwargs)
        y_sold = -y_bought
    elif y_sold is not None:
        # case: sell y for x, y specified
        y_bought = -y_sold
        x_bought = compute_y_bought_when_x_sold(x_sold=y_sold, **reverse_kwargs)
        x_sold = -x_bought
    elif x_bought is not None:
        # case: sell y for x, x specified
        x_sold = -x_bought
        y_sold = compute_x_sold_when_y_bought(
            y_bought=x_bought, **reverse_kwargs
        )
        y_bought = -y_sold
    elif y_bought is not None:
        # case: sell x for y, y specified
        y_sold = -y_bought
        x_sold = compute_x_sold_when_y_bought(y_bought=y_bought, **kwargs)
        x_bought = -x_sold
    else:
        # reached when only new_x_reserves / new_y_reserves were provided;
        # despite the docstring, those are handled by trade_to_target_reserves,
        # not here
        raise Exception('could not compute output')
    return {
        'x_bought': x_bought,
        'x_sold': x_sold,
        'y_bought': y_bought,
        'y_sold': y_sold,
        'fee_rate': fee_rate,
        'new_pool': {
            'x_reserves': x_reserves + x_sold,
            'y_reserves': y_reserves + y_sold,
        },
    }
def trade_to_target_reserves(
    x_reserves: int | float,
    y_reserves: int | float,
    new_x_reserves: int | float | None = None,
    new_y_reserves: int | float | None = None,
    fee_rate: float | None = None,
) -> cpmm_spec.Trade:
    """compute trade required to reach specific target token reserve amounts

    Exactly one of new_x_reserves / new_y_reserves should be given; the
    target is converted into a bought or sold amount and delegated to
    trade().
    """
    # convert reserve targets to bought or sold amounts
    if new_x_reserves is not None:
        # target below current reserves -> tokens must be bought out of the pool
        if validate_utils._ensure_positive(
            x_reserves - new_x_reserves, error=False
        ):
            x_bought = x_reserves - new_x_reserves
            return trade(
                x_bought=x_bought,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
        else:
            # target above current reserves -> tokens must be sold into the pool
            x_sold = new_x_reserves - x_reserves
            return trade(
                x_sold=x_sold,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
    elif new_y_reserves is not None:
        # same logic, applied to the y side
        if validate_utils._ensure_positive(
            y_reserves - new_y_reserves, error=False
        ):
            y_bought = y_reserves - new_y_reserves
            return trade(
                y_bought=y_bought,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
        else:
            y_sold = new_y_reserves - y_reserves
            return trade(
                y_sold=y_sold,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
    else:
        raise Exception('specify either new_x_reserves or new_y_reserves')
def trade_to_price(
    x_reserves: int | float,
    y_reserves: int | float,
    new_x_per_y: int | float | None = None,
    new_y_per_x: int | float | None = None,
    fee_rate: float | None = None,
) -> cpmm_spec.Trade:
    """compute trade required to reach specific price

    Exactly one of new_x_per_y / new_y_per_x should be given; the price is
    normalized to x-per-y and the required sell amount is delegated to
    trade().
    """
    validate_utils._ensure_exactly_one(new_x_per_y, new_y_per_x)
    # convert prices to x per y
    if new_x_per_y is None:
        # redundant given _ensure_exactly_one above, but kept as a guard
        if new_y_per_x is None:
            raise Exception('must specify x_per_y or y_per_x')
        new_x_per_y = new_y_per_x ** -1
    # compute trades
    if new_x_per_y >= x_reserves / y_reserves:
        # case: sell x to increase x per y
        x_sold = compute_x_sold_to_reach_price(
            new_x_per_y=new_x_per_y,
            x_reserves=x_reserves,
            y_reserves=y_reserves,
            fee_rate=fee_rate,
        )
        return trade(
            x_sold=x_sold,
            x_reserves=x_reserves,
            y_reserves=y_reserves,
            fee_rate=fee_rate,
        )
    else:
        # case: sell y to decrease x per y — reuse the same solver with the
        # price inverted and the reserves swapped
        y_sold = compute_x_sold_to_reach_price(
            new_x_per_y=(new_x_per_y ** -1),
            x_reserves=y_reserves,
            y_reserves=x_reserves,
            fee_rate=fee_rate,
        )
        return trade(
            y_sold=y_sold,
            x_reserves=x_reserves,
            y_reserves=y_reserves,
            fee_rate=fee_rate,
        )
def compute_x_sold_to_reach_price(
    x_reserves: int | float,
    y_reserves: int | float,
    new_x_per_y: int | float,
    fee_rate: float | None = None,
) -> float:
    """Solve for the amount of x to sell so the pool price becomes
    *new_x_per_y*.

    Uses the quadratic formula on
    g*a^2 + (1 + g)*a + C = 0  (a = fraction of x_reserves sold, g = 1 - fee);
    see wolframalpha.com/input/?i=g+x%5E2+%2B+%281+%2B+g%29+x+%2B+C+%3D+0
    """
    if fee_rate is None:
        fee_rate = 0.003
    gamma = 1 - fee_rate
    C = 1 - new_x_per_y * y_reserves / x_reserves
    discriminant = (gamma + 1) ** 2 - 4 * C * gamma
    # Decimal inputs must use Decimal.sqrt(); ** 0.5 would coerce to float.
    if isinstance(gamma, decimal.Decimal):
        root = discriminant.sqrt()
    else:
        root = discriminant ** 0.5
    fraction_sold = (root - gamma - 1) / 2 / gamma
    return fraction_sold * x_reserves
def compute_y_bought_when_x_sold(
    x_sold: int | float,
    x_reserves: int | float,
    y_reserves: int | float,
    fee_rate: float | None = None,
) -> float:
    """Amount of y received when selling *x_sold* units of x into the pool."""
    fee_rate = 0.003 if fee_rate is None else fee_rate
    validate_utils._ensure_non_negative(x_sold)
    gamma = 1 - fee_rate
    # fraction of the x reserves being sold, discounted by the fee
    effective = (x_sold / x_reserves) * gamma
    return effective / (1 + effective) * y_reserves
def compute_x_sold_when_y_bought(
    y_bought: int | float,
    x_reserves: int | float,
    y_reserves: int | float,
    fee_rate: float | None = None,
) -> float:
    """Amount of x that must be sold to withdraw *y_bought* units of y."""
    fee_rate = 0.003 if fee_rate is None else fee_rate
    validate_utils._ensure_non_negative(y_bought)
    gamma = 1 - fee_rate
    # fraction of the y reserves being withdrawn
    fraction_bought = y_bought / y_reserves
    return fraction_bought / (1 - fraction_bought) / gamma * x_reserves
| 7,392 | 2,687 |
#!/usr/bin/env python3
import boto3
import csv
import json
import re
import os
import logging
from multiprocessing import Pool
import sys
sys.path.insert(0, './lib')
from kafka import KafkaProducer
# Shared AWS Lambda client used by both handlers and the CLI worker.
lambda_client = boto3.client('lambda')
# Filled in by the CLI entry point below; read by worker_lambda as globals.
bucket_name = None
kafka_topic = None
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logger = logging.getLogger()
# DEBUG=true in the environment switches on verbose logging.
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'true':
    logger.setLevel(logging.DEBUG)
    logger.debug('debug mode enabled.')
else:
    logger.setLevel(logging.INFO)
def handler_file(event, context):
    """Lambda handler: stream one S3 CSV object to Kafka, round-robin over topics.

    Expects event keys 'key_name', 'bucket_name' and a comma-separated
    'kafka_topic'; broker list comes from the KAFKA_HOSTS env var.
    Returns the number of CSV rows produced.
    """
    key_name = event['key_name']
    source_bucket = event['bucket_name']
    topics = event['kafka_topic'].split(",")
    for topic in topics:
        logging.info("Sending data to topic \"%s\"." % topic)
    brokers = os.environ['KAFKA_HOSTS'].split(",")
    logging.info("Started handling %s." % key_name)
    body = boto3.resource('s3').Object(source_bucket, key_name).get()['Body']
    rows = csv.DictReader(body.read().decode('utf-8').splitlines())
    produced = 0
    producer = KafkaProducer(bootstrap_servers=brokers)
    logging.info("Producer created for %s." % key_name)
    cursor = 0
    for row in rows:
        producer.send(topics[cursor], json.dumps(row))
        # Advance round-robin over the topic list.
        cursor = (cursor + 1) % len(topics)
        produced += 1
    producer.flush()
    logging.info("Messages produced. Nr of messages: %d." % produced)
    return produced
def handler_load(event, context):
    """Lambda handler: fan out one async invocation per CSV object under a prefix.

    Logs counts of successfully started and failed invocations.
    """
    source_bucket = event['bucket_name']
    prefix = event['key_prefix']
    topic = event['kafka_topic']
    failures = 0
    successes = 0
    bucket = boto3.resource('s3').Bucket(source_bucket)
    for obj in bucket.objects.filter(Prefix=prefix):
        # Only CSV objects are ingested.
        if not re.search('\.csv$', obj.key):
            continue
        logging.info("File added %s" % obj.key)
        payload = {
            'bucket_name': source_bucket,
            'key_name': obj.key,
            'kafka_topic': topic
        }
        logger.info('Starting async processing of %s...' % obj.key)
        results = lambda_client.invoke_async(
            FunctionName='capstone-kafka-ingest-dev-send_file',
            InvokeArgs=json.dumps(payload)
        )
        logger.info("Async processing of %s started." % obj.key)
        if results['Status'] == 202:
            logger.info('Lambda invoked successfully.')
            successes += 1
        else:
            logger.error('Failed to start lambda for %s.' % obj.key)
            failures += 1
    logger.info('%d lambda started successfully' % successes)
    logger.info('%d lambda failed to start.' % failures)
def worker_lambda(key):
    """Synchronously invoke the send_file lambda for one S3 key.

    Reads the module globals ``bucket_name`` and ``kafka_topic`` (set by the
    CLI entry point). Returns a (key, succeeded) tuple for pool.map.
    """
    logger.info("Start processing of %s..." % key)
    payload = json.dumps({
        'bucket_name': bucket_name,
        'key_name': key,
        'kafka_topic': kafka_topic
    })
    results = lambda_client.invoke(
        FunctionName='capstone-kafka-ingest-dev-send_file',
        InvocationType='RequestResponse',
        Payload=payload)
    logging.info(str(results))
    if results['StatusCode'] != 200:
        logger.error('Failed to start lambda for %s.' % key)
        return (key, False)
    logger.info('Lambda completed successfully.')
    return (key, True)
if __name__ == '__main__':
    # CLI usage: script.py <bucket_name> <key_prefix> <kafka_topic>
    # The three globals are read by worker_lambda in the pool workers.
    bucket_name, key_prefix, kafka_topic = sys.argv[1:]
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    files_to_process = []
    for obj in bucket.objects.filter(Prefix=key_prefix):
        # Only ingest CSV objects; raw string avoids the invalid-escape warning.
        if re.search(r'\.csv$', obj.key):
            logger.info("File added %s" % obj.key)
            files_to_process.append(obj.key)
    # One synchronous lambda invocation per file, up to 100 in flight.
    pool = Pool(100)
    results = pool.map(worker_lambda, files_to_process)
    success = []
    failed = []
    for key, ok in results:
        if ok:
            success.append(key)
        else:
            failed.append(key)
    if failed:
        # BUG FIX: these were Python 2 print statements — a SyntaxError under
        # the python3 shebang this script declares.
        print("Not all files were processed successfully :(")
        print(str(failed))
    print("%d files completed successfully" % len(success))
| 4,245 | 1,352 |
from django.contrib import admin
from .models import Noticia
# Register your models here.
admin.site.register(Noticia) | 119 | 32 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 6 14:09:05 2020
@author: yannis
"""
import torch
import random
from pdb import set_trace as bp
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.utils import get_vec_normalize
import motion_imitation
import time
import numpy as np
def testPolicy(path, scales=None, pol_scales=None):
    """Roll out a saved PPO policy in the A1 gym environment.

    :param path: checkpoint file containing (actor_critic, ob_rms)
    :param scales: unused here; kept for call-site compatibility
    :param pol_scales: optional length-4 sequence written over the last four
        observation entries so the policy conditions on a fixed virtual scale
        (assumes the scale occupies obs[:, -4:] — TODO confirm)
    """
    processes = 1
    render = True
    seed = 1
    # Seed all RNGs for reproducible rollouts.
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    env = make_vec_envs(
        'A1GymEnv-v1',
        seed,
        processes,
        None,
        None,
        device='cpu',
        allow_early_resets=True, render=render)
    env_core = env.venv.venv.envs[0].env.env
    actor_critic, ob_rms = torch.load(path, map_location=torch.device('cpu'))
    # Install the observation-normalization statistics saved with the policy.
    vec_norm = get_vec_normalize(env)
    if vec_norm is not None:
        vec_norm.eval()
        vec_norm.ob_rms = ob_rms
    recurrent_hidden_states = torch.zeros(1, actor_critic.recurrent_hidden_state_size)
    masks = torch.zeros(1, processes)
    if processes == 1:
        # Single-env mode: run N_sim episodes sequentially with rendering.
        N_sim = 100
        Reward = np.zeros((N_sim,))
        input('press enter')
        n = 0
        R = 0
        obs = env.reset()
        while n < N_sim:
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)
            with torch.no_grad():
                value, action, _, recurrent_hidden_states = actor_critic.act(
                    obs, recurrent_hidden_states, masks, deterministic=True)
            obs, reward, done, _ = env.step(action[0])
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)
            R += reward
            # Slow playback to roughly real time (sim steps at 240 Hz).
            time.sleep(5 * 1.0 / 240.0)
            if done:
                # BUG FIX: record the episode return BEFORE advancing the
                # counter. The original did n += 1 first, which left
                # Reward[0] unset and indexed Reward[N_sim] (IndexError) on
                # the final episode.
                Reward[n] = R
                n += 1
                print('Reward: ', R)
                R = 0
            masks.fill_(0.0 if done else 1.0)
        input('press enter')
    else:
        # Vectorized mode: one episode per worker, evaluated twice.
        # NOTE(review): the original duplicated this pass verbatim —
        # presumably a warm-up plus a measured run; confirm the repetition
        # is intentional.
        for _ in range(2):
            N_sim = processes
            TotalReward = np.zeros((processes,))
            obs = env.reset()
            n = 0
            while n < N_sim:
                if pol_scales is not None:
                    # Replace the scale in the policy's input.
                    obs[:, -4:] = torch.FloatTensor(pol_scales)
                with torch.no_grad():
                    value, action, _, recurrent_hidden_states = actor_critic.act(
                        obs, recurrent_hidden_states, masks, deterministic=True)
                obs, reward, done, _ = env.step(action)
                if pol_scales is not None:
                    obs[:, -4:] = torch.FloatTensor(pol_scales)
                TotalReward += reward.numpy().flatten()
                # Count finished episodes across all workers.
                for D in done:
                    if D:
                        n += 1
                masks = torch.FloatTensor(
                    [[0.0] if done_ else [1.0] for done_ in done])
                print('TotalReward: ', TotalReward, flush=True)
            AverageTotalReward = np.mean(TotalReward)
            Std = np.std(TotalReward)
            print('Av. Total reward: ', AverageTotalReward, ', std: ', Std,
                  ', virtual scale: ', obs[0, -4:], flush=True)
    env.close()
if __name__ == '__main__':
    # No scale overrides by default: evaluate the nominal policy as trained.
    scales = None
    pol_scales = None
    #path = '/home/yannis/Repositories/motion_imitation/12_03_nominal_policy/ppo/A1GymEnv-v1.pt'
    #path = '/home/yannis/Repositories/motion_imitation/12_11_nominal_policy/ppo/A1GymEnv-v1.pt'
    # Checkpoint of the nominal PPO policy to evaluate.
    path = '/home/yannis/Repositories/motion_imitation/12_18_nominal_policy/ppo/A1GymEnv-v1.pt'
testPolicy(path,scales,pol_scales) | 5,144 | 1,762 |
# -*- coding: utf-8 -*-
import ast
import os
import re
from setuptools import find_packages, setup
# Distribution metadata for the hailstorms load-testing framework.
setup(
    name='hailstorms',
    version='1.0.5',
    description="Distributed load testing framework",
    long_description="""Hailstorm is a simplified config based, distributed load testing framework""",
    classifiers=[
        "Topic :: Software Development :: Testing :: Traffic Generation",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
    ],
    keywords=['loadtest', 'locustio', 'hailstorm', 'hailstorms'],
    author='Mikael Larsson',
    author_email='mikael.larsson@romram.se',
    url='https://github.com/romramse/hailstorms',
    license='MIT',
    # Ship only the package itself; exclude dev/test scaffolding.
    packages=find_packages(
        include=['hailstorms', 'hailstorms.start'],
        exclude=['ez_setup', 'examples', 'tests', 'graphs', 'generated', 'labs', 'scripts', 'venv']),
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies; locustio pin targets the pre-1.0 API.
    install_requires=[
        "locustio>=0.8.1",
        "gevent>=1.2.2",
        "flask>=0.10.1",
        "requests>=2.9.1",
        "msgpack>=0.4.2",
        "six>=1.10.0",
        "pyzmq>=16.0.2"
    ],
)
| 1,585 | 517 |
#
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import unittest
import mma_test.utils as utils
import shutil
import time
from typing import Dict
from mma_test.test_hive import TestHive
def get_test_suites_map() -> Dict[str, unittest.TestSuite]:
    """Map each test-suite name to the suite loaded from its TestCase class."""
    loader = unittest.defaultTestLoader
    return {TestHive.__name__: loader.loadTestsFromTestCase(TestHive)}
if __name__ == '__main__':
    suites = get_test_suites_map()
    parser = argparse.ArgumentParser(description='MMA FT runner')
    parser.add_argument(
        "--list_test_suites",
        required=False,
        const=True,
        action="store_const",
        default=False,
        help="list available test suites")
    parser.add_argument(
        "--list_test_cases",
        required=False,
        type=str,
        help="list test cases of specified test suite")
    parser.add_argument(
        "--run_test_suite",
        required=False,
        help="run specified test suite")
    parser.add_argument(
        "--run_test_case",
        required=False,
        help="run specified test case, should be in format suite.case")
    parser.add_argument(
        "--fail_fast",
        required=False,
        const=True,
        action="store_const",
        default=False,
        help="fail fast")
    args = parser.parse_args()
    # The two listing options exit early, before the MMA server is started.
    if args.list_test_suites:
        for suite in suites.keys():
            print(suite)
        exit(0)
    if args.list_test_cases is not None:
        suite_name = args.list_test_cases
        if suite_name in suites:
            suite = suites[suite_name]
            # Test ids look like "module.Class.test_name"; show the last part.
            for test in suite._tests:
                print(test.id().split(".")[-1])
            exit(0)
        else:
            raise Exception("Test suite not found: %s" % suite_name)
    if args.run_test_suite is not None and args.run_test_case is not None:
        err_msg = ("--run_test_suite and "
                   "--run_test_case cannot present at the same time")
        raise Exception(err_msg)
    os.makedirs(utils.get_test_temp_dir(), exist_ok=True)
    print("Start MMA server")
    mma_server_sp = utils.start_mma_server()
    print("MMA server pid: %s" % str(mma_server_sp.pid))
    # Give the server time to come up before running any tests.
    time.sleep(10)
    try:
        s = unittest.TestSuite()
        if args.run_test_suite is not None:
            if args.run_test_suite in suites:
                s.addTest(suites[args.run_test_suite])
            else:
                raise Exception("Invalid test suite")
        elif args.run_test_case is not None:
            splits = args.run_test_case.split(".")
            if len(splits) != 2:
                raise Exception("Invalid testcase: %s" % args.run_test_case)
            # Select the single matching test case from the named suite.
            for test in suites[splits[0]]._tests:
                if splits[1] == test.id().split(".")[-1]:
                    s.addTest(test)
        else:
            # No selection given: run every suite.
            s.addTests(suites.values())
        runner = unittest.TextTestRunner(
            verbosity=3, failfast=args.fail_fast, buffer=True)
        runner.run(s)
    finally:
        # Always stop the server and clean up temp files, even on failure.
        print("Stop MMA server")
        utils.stop_mma_server(mma_server_sp)
        shutil.rmtree(utils.get_test_temp_dir())
| 3,747 | 1,171 |
import csv
import logging
__all__ = (
'read_synonyms',
)
LOGGER = logging.getLogger(__name__)
def read_synonyms(path):
"""Read synonyms.
Read synonyms from the following format:
word_id;preferred_EN;variant1;variant2;variant3;variant4;variant5
1;Anatolia;anatolia;anatolie;anatolien;;
2;Assyria;assyria;assyrie;assyrien;;
3;Babylonia;babylonia;babylonie;babylonien;;
4;Byblos;;;;;
5;Crocodilopolis;;;;;
What we do:
- Remove first line (word_id, etc.)
- Remove first (numbered) elements from each line
- Remove empty elements (that are produced when reading the CSV)
:param path:
:return:
"""
data = []
try:
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
counter = 0 # Counter so that we skip the first line
for row in csv_reader:
# Skip the first line
if counter == 0:
counter += 1
continue
# Remove the first (numbered) element
row.pop(0)
# Remove empty elements
row = [__i.lower() for __i in row if __i]
if len(row) > 1:
# Append remaining (usable) elements separated by comma
# to the returned list.
data.append(
', '.join(row)
)
counter += 1
except OSError as err:
LOGGER.error("Can't read from file {}.".format(path))
LOGGER.error(err.message)
LOGGER.debug("Produced synonyms file for {}:".format(path))
LOGGER.debug(data)
return data
| 1,740 | 517 |
from selenium import webdriver
import selenium
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import re
# Template for Status.__repr__ output.
STATUS_OUTPUT = \
'''Video: {0}
Status: {1}
Time(sec): {2} / {3}'''

# Course page URLs decompose into class id / unit id / chapter id.
CLASS_REGEX = r'''https://bh3773.class.gaoxiaobang.com/class/(\d+)/unit/(\d+)/chapter/(\d+)'''
CLASS_STRING = '''https://bh3773.class.gaoxiaobang.com/class/{0}/unit/{1}/chapter/{2}'''
# Get VideoListIDs needs LOTS OF resources, cache them to lower CPU usage.
# Maps class id -> list of video chapter ids.
VLIDcache = {}


class Status:
    """Snapshot of a video page: title, play state and playback progress."""

    # Defaults indicate "not yet populated"; error flags a non-video page.
    title = "TITLE"
    playStatus = "PLAYSTATUS"
    ctime = -1
    duration = -1
    error = False

    def __repr__(self):
        if(not self.error):
            return STATUS_OUTPUT.format(self.title, self.playStatus, str(self.ctime), str(self.duration))
        else:
            return "Not valid video page."
def videoList(driver: webdriver.chrome.webdriver.WebDriver):
    """Return the chapter elements whose content_type is 'Video' ([] on failure)."""
    try:
        chapters = driver.find_elements_by_class_name("chapter-info")
        return [c for c in chapters if c.get_attribute('content_type') == 'Video']
    except:
        return []
def autoLogin(driver: webdriver.chrome.webdriver.WebDriver, loginLink: str, username: str, passwd: str):
    """Open the login page, fill in the credentials and submit the form.

    Returns False when any expected form element is missing, True otherwise.
    """
    try:
        driver.get(loginLink)
        for field_id, text in (('username', username), ('password', passwd)):
            driver.find_element_by_id(field_id).send_keys(text)
        driver.find_element_by_class_name('login_btn').click()
    except selenium.common.exceptions.NoSuchElementException:
        return False
    return True
def status(driver: webdriver.chrome.webdriver.WebDriver):
    '''
    Get current status of video page.
    :param driver: WebDriver, the WebDriver to get status
    :returns: Status, a Status object storing status information
    '''
    result = Status()
    try:
        player = driver.find_element_by_id('video_player_html5_api')
        result.title = driver.find_element_by_class_name('chapter-title').text
        shell_class = driver.find_element_by_id('video_player').get_attribute('class')
        # The video.js shell carries 'vjs-paused' while playback is paused.
        result.playStatus = 'paused' if shell_class.find('vjs-paused') + 1 else 'playing'
        result.duration = player.get_property('duration')
        result.ctime = player.get_property('currentTime')
    except Exception:
        # Any lookup failure means this is not a valid video page.
        result.error = True
    finally:
        return result
def triggerPlay(driver):
    '''
    Trigger current play status.
    :param driver: WebDriver, the WebDriver to trigger
    :returns: Bool, if the trigger is successful
    '''
    try:
        driver.find_element_by_class_name('video-js').click()
    except Exception:
        return False
    return True
def needAnswer(driver: selenium.webdriver.chrome.webdriver.WebDriver):
    '''
    Check if a question is shown.
    :param driver: WebDriver, the WebDriver to check
    :returns: Bool, if a question is shown.
    '''
    # A 'correctAnswer' element is only present while a question overlay is up.
    return bool(driver.find_elements_by_class_name('correctAnswer'))
def answer(driver: selenium.webdriver.chrome.webdriver.WebDriver):
    '''
    Answer in-video questions.
    :param driver: WebDriver, the WebDriver to answer
    :returns: Bool, if answer is successful
    '''
    try:
        correct = driver.find_element_by_class_name(
            'correctAnswer').get_attribute('data')
        # Answers come as letters, e.g. "AC" -> option indices 0 and 2.
        indices = [ord(letter) - ord('A') for letter in correct]
        # Radio buttons take precedence over checkboxes when present.
        option_class = 'gxb-icon-check'
        try:
            driver.find_element_by_class_name('gxb-icon-radio')
            option_class = 'gxb-icon-radio'
        except selenium.common.exceptions.NoSuchElementException:
            pass
        for idx in indices:
            driver.find_elements_by_class_name(option_class)[idx].click()
        driver.find_element_by_class_name('submit').click()
        # Resume playback once the player button reappears.
        WebDriverWait(driver, 2).until(
            EC.presence_of_element_located((By.CLASS_NAME, 'player'))).click()
        return True
    except:
        return False
def nextVideo(driver: webdriver.chrome.webdriver.WebDriver):
    """Navigate to the next video chapter of the current class; False at the end."""
    match = re.match(CLASS_REGEX, driver.current_url)
    if not match:
        return False
    chapter_ids = [v.get_attribute('chapter_id') for v in videoList(driver)]
    try:
        current_id = match.groups()[2]
        # When the current page is not a video, insert its id so the sorted
        # list places it next to the nearest video chapter.
        if current_id not in chapter_ids:
            chapter_ids.append(current_id)
        chapter_ids.sort()
        pos = chapter_ids.index(current_id)
        if pos == len(chapter_ids) - 1:
            return False
        driver.get(CLASS_STRING.format(*match.groups()[:-1], chapter_ids[pos + 1]))
        return True
    # TODO: When the class ends. Raise a custom error and start a new class.
    except:
        return False
def inVideoPage(driver: webdriver.chrome.webdriver.WebDriver):
    """Check whether the current URL points at a video chapter (cached per class)."""
    match = re.match(CLASS_REGEX, driver.current_url)
    if not match:
        return False
    class_id = match.groups()[0]
    if class_id not in VLIDcache:
        # Building the video list is expensive; cache the ids per class.
        VLIDcache[class_id] = [v.get_attribute('chapter_id') for v in videoList(driver)]
    return(match.groups()[2] in VLIDcache[class_id])
| 5,503 | 1,652 |
import abc
import dataclasses as dc
import enum
import types as pytypes
from collections import Counter
from functools import wraps, partial
from typing import Sequence, Callable, Type as PyType, Dict, Any, Optional
import networkx as nx
import statey as st
from statey import resource, task, exc
from statey.provider import Provider
from statey.syms import utils, types, Object, diff
class Transition(abc.ABC):
    """
    A transition defines the procedure for migrating a machine
    from one state to another (source and target may be the same state)
    """

    # Names of the source state, target state, and the transition's label.
    from_name: str
    to_name: str
    name: str

    @abc.abstractmethod
    async def plan(
        self,
        current: resource.BoundState,
        config: resource.BoundState,
        session: task.TaskSession,
    ) -> Object:
        """
        Same as Resource.plan(), except for planning
        a specific transition.
        """
        raise NotImplementedError
@dc.dataclass(frozen=True)
class FunctionTransition(Transition):
    """
    Transition class that simply wraps a function
    """

    from_name: str
    to_name: str
    name: str
    # Planning callable, invoked with keyword arguments (current, config, session).
    func: Callable[[Any], Any]

    async def plan(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        # Delegate planning directly to the wrapped function.
        return await self.func(current=current, config=config, session=session)
def transition(from_name: str, to_name: str, name: str = utils.MISSING) -> Any:
    """
    Generate a decorator to wrap a function as a transition

    :param from_name: name of the source state
    :param to_name: name of the target state
    :param name: transition label; defaults to the wrapped function's __name__
    """

    def dec(func):
        nonlocal name
        if name is utils.MISSING:
            name = getattr(func, "__name__", "<unknown>")

        @wraps(func)
        def get_transition(*args, **kwargs):
            # Bind any factory-time arguments now; forward call-time
            # arguments to the wrapped function later.
            new_func = lambda *args2, **kwargs2: func(
                *args, *args2, **kwargs, **kwargs2
            )
            return FunctionTransition(from_name, to_name, name, new_func)

        # Marker so MachineMeta can discover transition factories on a class.
        get_transition.transition_factory = True
        return get_transition

    return dec
class MachineMeta(type(resource.Resource)):
    """
    Special behavior for state machines
    """

    @classmethod
    def _validate_states(
        cls, old_states: Sequence[resource.State], new_states: Sequence[resource.State]
    ) -> Sequence[resource.State]:
        """Merge inherited and newly declared states, rejecting duplicate names."""
        new_names = Counter(state.name for state in new_states)
        if new_names and max(new_names.values()) > 1:
            multi = {k: v for k, v in new_names.items() if v > 1}
            raise ValueError(f"Duplicate states found: {multi}")
        # New declarations shadow inherited states with the same name.
        old_states = [state for state in old_states if state.name not in new_names]
        return old_states + list(new_states)

    def __new__(
        cls, name: str, bases: Sequence[PyType], attrs: Dict[str, Any]
    ) -> PyType:
        super_cls = super().__new__(cls, name, bases, attrs)
        # Collect State attributes declared on this class plus inherited ones
        # into __states__.
        states = super_cls.__states__ if hasattr(super_cls, "__states__") else ()
        new_states = [val for val in attrs.values() if isinstance(val, resource.State)]
        states = cls._validate_states(states, new_states)
        super_cls.__states__ = tuple(states)
        # Collect the names of transition factories (attributes marked by the
        # @transition decorator) into __transitions__.
        transitions = (
            super_cls.__transitions__
            if hasattr(super_cls, "__transitions__")
            else set()
        )
        new_transitions = {
            name
            for name, val in attrs.items()
            if hasattr(val, "transition_factory") and val.transition_factory
        }
        super_cls.__transitions__ = transitions | new_transitions
        return super_cls
class Machine(resource.Resource, metaclass=MachineMeta):
    """
    Class with a metaclass to automatically collect states and transitions into class variables.
    """

    def __init__(self, name: str, provider: Optional[Provider] = None) -> None:
        if provider is None:
            from statey.provider import default_provider as provider
        self.name = name
        self.provider = provider
        # This is temporary, should clean this up
        for state in self.__states__:
            self.set_resource_state(resource.ResourceState(state, name, provider.id))

    def set_resource_state(self, state: resource.ResourceState) -> None:
        # Expose each resource state as an attribute named after the state.
        setattr(self, state.state.name, state)

    @property
    def null_state(self) -> resource.ResourceState:
        # The state flagged as null represents "resource absent".
        state = next((s for s in self.__states__ if s.null))
        return resource.ResourceState(state, self.name, self.provider.id)

    async def plan(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        """Find the transition matching (current state -> config state) and plan it."""
        from_name = current.state.name
        to_name = config.state.name
        # Instantiate each registered transition factory and pick the first
        # whose endpoints match.
        transitions = (getattr(self, tran)() for tran in self.__transitions__)
        transition = next(
            (
                tran
                for tran in transitions
                if tran.from_name == from_name
                if tran.to_name == to_name
            ),
            None,
        )
        if transition is None:
            raise exc.PlanError(
                f"Unable to find transition from {from_name} to {to_name}."
            )
        return await transition.plan(current, config, session)

    def __call__(self, *args, **kwargs) -> resource.ResourceState:
        # Convenience: calling the machine configures its single non-null state.
        states = [state for state in self.__states__ if state != self.null_state.state]
        if len(states) > 1:
            raise TypeError(f'"{self.name}" has more than one non-null state.')
        if len(states) < 1:
            raise TypeError(f'"{self.name}" does not have any non-null states.')
        return resource.ResourceState(states[0], self.name, self.provider.id)(
            *args, **kwargs
        )

    @abc.abstractmethod
    async def refresh(self, current: resource.BoundState) -> resource.BoundState:
        """
        Same as Resource.refresh()
        """
        raise NotImplementedError

    async def finalize(self, current: resource.BoundState) -> resource.BoundState:
        # Default: no finalization step; return the state unchanged.
        return current
class ModificationAction(enum.Enum):
    """
    Actions to control simple machine behavior
    """

    # Configuration already satisfied; no task required.
    NONE = "none"
    # Update the resource in place.
    MODIFY = "modify"
    # Tear down and recreate (signalled via exc.NullRequired during planning).
    DELETE_AND_RECREATE = "delete_and_recreate"
class SingleStateMachine(Machine):
    """
    A simple machine is an FSM which can only have two states: UP and DOWN.

    Note that a SimpleMachine's UP state should have all of the same fields available
    in its output type as its input type.
    """

    # Concrete subclasses must declare UP; DOWN is the shared null state.
    UP: resource.State
    DOWN: resource.NullState = resource.NullState("DOWN")

    @abc.abstractmethod
    async def create(
        self, session: task.TaskSession, config: resource.StateConfig
    ) -> "Object":
        """
        Create this resource with the given configuration
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def delete(
        self, session: task.TaskSession, current: resource.StateSnapshot
    ) -> "Object":
        """
        Delete the resource with the given data
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def modify(
        self,
        session: task.TaskSession,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
    ) -> "Object":
        """
        Modify the resource from `data` to the given config. Default implementation
        is always to delete and recreate the resource.

        NOTE: if subclasses do not modify the get_action() implementation they can
        override this with a stub method, as it will never be called. It is defined
        as an abstract to avoid the case where it is omitted accidentally and
        NotImplementedError is raised during the task execution
        """
        raise NotImplementedError

    # Overridding this as an "optional" abstract method
    modify = NotImplemented

    @abc.abstractmethod
    async def refresh_state(self, data: Any) -> Optional[Any]:
        """
        Get a refreshed version of `data` (which is in the state UP). Return None
        to indicate the resource no longer exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def get_action(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> ModificationAction:
        """
        From the current, and config values, determine which modification action should be taken.
        """
        raise NotImplementedError

    async def refresh_config(self, config: "Object") -> "Object":
        """
        Transform a configuration before planning
        """
        return config

    async def refresh(self, current: resource.StateSnapshot) -> resource.StateSnapshot:
        # Nothing to refresh when the resource does not exist.
        if current.state.name == self.null_state.name:
            return current
        info = await self.refresh_state(current.data)
        if info is None:
            # Resource vanished out-of-band; report the null state.
            return resource.StateSnapshot({}, self.null_state)
        return resource.StateSnapshot(info, current.state)

    @transition("UP", "UP")
    async def modify_resource(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        config = config.clone(obj=await self.refresh_config(config.obj))
        action = await self.get_action(current, config, session)
        if action == ModificationAction.NONE:
            return current.obj
        if action == ModificationAction.MODIFY:
            if self.modify is NotImplemented:
                raise NotImplementedError(
                    f"`modify` has not been defined in {type(self).__name__}."
                )
            return await self.modify(session, current, config)
        if action == ModificationAction.DELETE_AND_RECREATE:
            # Signal the planner that this change needs a delete/create cycle.
            raise exc.NullRequired
        raise exc.InvalidModificationAction(action)

    @transition("DOWN", "UP")
    async def create_resource(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        config = config.clone(obj=await self.refresh_config(config.obj))
        return await self.create(session, config)

    @transition("UP", "DOWN")
    async def delete_resource(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        return await self.delete(session, current)

    @transition("DOWN", "DOWN")
    async def noop_down(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        # Absent -> absent: nothing to do.
        return current.obj
class SimpleMachine(SingleStateMachine):
    """
    A simple machine has only a single state and each transition only consists
    of a single task
    """

    async def get_expected(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Any:
        """
        Get the expected output for the given configuration. Default implementation
        is just passing through config fields and setting the rest as unknown
        """
        output = st.Unknown[config.state.output_type]
        if not current.state.null:
            output = current.obj
        return st.fill(config.obj, config.state.output_type, output)

    # Not defined as abstract methods because subclasses may want to just override
    # the top-level methods instead
    async def create_task(self, config: Any) -> Any:
        """
        Defines a single task called "create" that will create this resource
        """
        raise NotImplementedError

    async def delete_task(self, current: Any) -> Any:
        """
        Defines a single task called "delete" that will delete this resource
        """
        raise NotImplementedError

    async def modify_task(self, diff: diff.Diff, current: Any, config: Any) -> Any:
        """
        Defines a single task called "modify" that will modify this resource
        """
        raise NotImplementedError

    def _get_optional_method(self, name: str) -> Callable[[Any], Any]:
        # Treat a task method as "defined" only if the subclass overrode it.
        if getattr(type(self), name) is getattr(SimpleMachine, name):
            raise NotImplementedError(f"{name} has not been defined in this class.")
        return getattr(self, name)

    def get_action_from_diff(self, diff: diff.Diff) -> ModificationAction:
        """
        With the given diff, determine which action must be taken to get to the configured
        state. This is only called when both the current and configured state are UP.

        Overriding this method is optional, by default it will always delete and recreate
        the resource.
        """
        if not diff:
            return ModificationAction.NONE
        return ModificationAction.DELETE_AND_RECREATE

    def get_diff(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> diff.Diff:
        """
        Produce a diff given the current, config and session data
        """
        differ = session.ns.registry.get_differ(config.state.input_type)
        # Compare only the fields present in the config's type.
        current_as_config = st.filter_struct(current.obj, config.type)
        return differ.diff(current_as_config, config.obj, session)

    async def get_action(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> ModificationAction:
        """
        Split get_action into get_diff and get_action_from_diff
        """
        diff = self.get_diff(current, config, session)
        return self.get_action_from_diff(diff)

    async def create(
        self, session: task.TaskSession, config: resource.StateConfig
    ) -> "Object":
        current = resource.StateSnapshot({}, self.null_state.state)
        expected = await self.get_expected(current, config, session)
        create_task = self._get_optional_method("create_task")
        # Register the "create" task; its result should resolve to `expected`.
        return session["create"] << (task.new(create_task)(config.obj) >> expected)

    async def delete(
        self, session: task.TaskSession, current: resource.StateSnapshot
    ) -> "Object":
        delete_task = self._get_optional_method("delete_task")
        ref = session["delete"] << task.new(delete_task)(current.obj)
        # Deleting yields an empty object joined on the task's completion.
        return st.join(st.Object({}, st.EmptyType, session.ns.registry), ref)

    async def modify(
        self,
        session: task.TaskSession,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
    ) -> "Object":
        expected = await self.get_expected(current, config, session)
        modify_task = self._get_optional_method("modify_task")
        diff = self.get_diff(current, config, session)
        # Bind the diff now; current/config data are supplied at task run time.
        partial_modify = partial(modify_task, diff)
        return session["modify"] << (
            task.new(partial_modify)(current.obj, config.obj) >> expected
        )
# class MachineResource(resource.Resource):
# """
# Simple wrapper resource, for state machines all logic is really in the States
# implementation
# Example:
# rs = MachineResource(MyMachine('new_resource'))
# """
# # This will be set in the constructor
# States = None
# def __init__(
# self, name: str, machine_cls: PyType[Machine], provider: Provider
# ) -> None:
# self.States = self.machine_cls = machine_cls
# self.name = name
# self.provider = provider
# super().__init__()
# async def plan(
# self,
# current: resource.StateSnapshot,
# config: resource.StateConfig,
# session: task.TaskSession,
# ) -> Object:
# return await self.s.plan(current, config, session)
# async def refresh(self, current: resource.StateSnapshot) -> resource.StateSnapshot:
# return await self.s.refresh(current)
# async def finalize(self, current: resource.StateSnapshot) -> resource.StateSnapshot:
# return await self.s.finalize(current)
| 16,090 | 4,351 |
from .vec2_double import Vec2Double
from .vec2_double import Vec2Double
from .jump_state import JumpState
from .weapon import Weapon
class Unit:
    """A game unit and its (de)serialization to/from the game's binary stream."""

    def __init__(self, player_id, id, health, position, size, jump_state, walked_right, stand, on_ground, on_ladder, mines, weapon):
        self.player_id = player_id
        self.id = id
        self.health = health
        self.position = position
        self.size = size
        self.jump_state = jump_state
        self.walked_right = walked_right
        self.stand = stand
        self.on_ground = on_ground
        self.on_ladder = on_ladder
        self.mines = mines
        self.weapon = weapon

    @staticmethod
    def read_from(stream):
        """Deserialize a Unit from the stream (weapon is optional)."""
        player_id = stream.read_int()
        id = stream.read_int()
        health = stream.read_int()
        position = Vec2Double.read_from(stream)
        size = Vec2Double.read_from(stream)
        jump_state = JumpState.read_from(stream)
        walked_right = stream.read_bool()
        stand = stream.read_bool()
        on_ground = stream.read_bool()
        on_ladder = stream.read_bool()
        mines = stream.read_int()
        # A leading bool flags whether a weapon payload follows.
        weapon = Weapon.read_from(stream) if stream.read_bool() else None
        return Unit(player_id, id, health, position, size, jump_state,
                    walked_right, stand, on_ground, on_ladder, mines, weapon)

    def write_to(self, stream):
        """Serialize this Unit to the stream in field-declaration order."""
        stream.write_int(self.player_id)
        stream.write_int(self.id)
        stream.write_int(self.health)
        self.position.write_to(stream)
        self.size.write_to(stream)
        self.jump_state.write_to(stream)
        stream.write_bool(self.walked_right)
        stream.write_bool(self.stand)
        stream.write_bool(self.on_ground)
        stream.write_bool(self.on_ladder)
        stream.write_int(self.mines)
        if self.weapon is None:
            stream.write_bool(False)
        else:
            stream.write_bool(True)
            self.weapon.write_to(stream)

    def __repr__(self):
        fields = (self.player_id, self.id, self.health, self.position,
                  self.size, self.jump_state, self.walked_right, self.stand,
                  self.on_ground, self.on_ladder, self.mines, self.weapon)
        return "Unit(" + ",".join(repr(f) for f in fields) + ")"
| 2,555 | 809 |
from django.urls import path, re_path
from .views import (home, capitulo_detalle, buscador, app, app_ios)
from .api import CapitulosList, CapitulosDetail

# URL configuration for the 'web' app.
# NOTE: migrated from django.conf.urls.url (deprecated in Django 2.0, removed
# in 4.0) to django.urls.re_path, which takes the exact same regex arguments,
# so matching behavior is unchanged.
app_name = 'web'

urlpatterns = [
    re_path(r'^$', home, name='home'),
    re_path(r'^capitulo/(?P<slug>[-\w]+)/$', capitulo_detalle, name='capitulo_detalle'),
    re_path(r'^buscador/', buscador, name='buscador'),
    re_path(r'^app/', app, name='app'),
    re_path(r'^app-ios/', app_ios, name='app_ios'),
]
| 457 | 187 |
class Solution:
def luckyNumbers (self, matrix: List[List[int]]) -> List[int]:
nums = []
for row in matrix:
num = min(row)
i = row.index(num)
if num == max([line[i] for line in matrix]):
nums.append(num)
return nums
| 305 | 91 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='metadata.proto',
package='',
serialized_pb='\n\x0emetadata.proto\"\xa3\x03\n\x05Track\x12\n\n\x02id\x18\x02 \x01(\t\x12\x10\n\x08\x63reation\x18\x03 \x01(\x05\x12\x12\n\nlastPlayed\x18\x04 \x01(\x05\x12\r\n\x05title\x18\x06 \x01(\t\x12\x0e\n\x06\x61rtist\x18\x07 \x01(\t\x12\x10\n\x08\x63omposer\x18\x08 \x01(\t\x12\r\n\x05\x61lbum\x18\t \x01(\t\x12\x13\n\x0b\x61lbumArtist\x18\n \x01(\t\x12\x0c\n\x04year\x18\x0b \x01(\x05\x12\x0f\n\x07\x63omment\x18\x0c \x01(\t\x12\r\n\x05track\x18\r \x01(\x05\x12\r\n\x05genre\x18\x0e \x01(\t\x12\x10\n\x08\x64uration\x18\x0f \x01(\x05\x12\x16\n\x0e\x62\x65\x61tsPerMinute\x18\x10 \x01(\x05\x12\x11\n\tplayCount\x18\x14 \x01(\x05\x12\x13\n\x0btotalTracks\x18\x1a \x01(\x05\x12\x0c\n\x04\x64isc\x18\x1b \x01(\x05\x12\x12\n\ntotalDiscs\x18\x1c \x01(\x05\x12\x0b\n\x03u11\x18\x1f \x01(\x05\x12\x10\n\x08\x66ileSize\x18 \x01(\x05\x12\x0b\n\x03u13\x18% \x01(\x05\x12\x0b\n\x03u14\x18& \x01(\x05\x12\x0f\n\x07\x62itrate\x18, \x01(\x05\x12\x0b\n\x03u15\x18\x35 \x01(\t\x12\x0b\n\x03u16\x18= \x01(\x05\":\n\x0fMetadataRequest\x12\x16\n\x06tracks\x18\x01 \x03(\x0b\x32\x06.Track\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\"8\n\x0cQueuedUpload\x12\n\n\x02id\x18\x01 \x01(\t\x12\n\n\x02u0\x18\x02 \x01(\x05\x12\x10\n\x08serverId\x18\x03 \x01(\t\"P\n\x06Status\x12\n\n\x02u0\x18\x01 \x01(\x05\x12\n\n\x02u1\x18\x02 \x01(\x05\x12\n\n\x02u2\x18\x03 \x01(\x05\x12\n\n\x02u3\x18\x04 \x01(\x05\x12\n\n\x02u4\x18\x05 \x01(\x05\x12\n\n\x02u5\x18\x06 \x01(\x05\"<\n\rTrackResponse\x12\x0b\n\x03ids\x18\x02 \x03(\t\x12\x1e\n\x07uploads\x18\x03 \x03(\x0b\x32\r.QueuedUpload\"X\n\x10MetadataResponse\x12\n\n\x02u0\x18\x01 \x01(\x05\x12 \n\x08response\x18\x02 \x01(\x0b\x32\x0e.TrackResponse\x12\x16\n\x05state\x18\x06 \x01(\x0b\x32\x07.Status\"/\n\nUploadAuth\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x10\n\x08hostname\x18\x02 \x01(\t\"L\n\x05Quota\x12\x15\n\rmaximumTracks\x18\x01 \x01(\x05\x12\x17\n\x0f\x61vailableTracks\x18\x02 \x01(\x05\x12\x13\n\x0btotalTracks\x18\x03 
\x01(\x05\"\x1e\n\x0b\x43lientState\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\"Q\n\x13\x43lientStateResponse\x12\n\n\x02u0\x18\x01 \x01(\x05\x12\x17\n\x06status\x18\x06 \x01(\x0b\x32\x07.Status\x12\x15\n\x05quota\x18\x08 \x01(\x0b\x32\x06.Quota\"Q\n\x12UploadAuthResponse\x12\n\n\x02u0\x18\x01 \x01(\x05\x12\x17\n\x06status\x18\x06 \x01(\x0b\x32\x07.Status\x12\n\n\x02u1\x18\x0b \x01(\x05\x12\n\n\x02u2\x18\x0c \x01(\x05')
_TRACK = descriptor.Descriptor(
name='Track',
full_name='Track',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='Track.id', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='creation', full_name='Track.creation', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='lastPlayed', full_name='Track.lastPlayed', index=2,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='title', full_name='Track.title', index=3,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='artist', full_name='Track.artist', index=4,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='composer', full_name='Track.composer', index=5,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='album', full_name='Track.album', index=6,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='albumArtist', full_name='Track.albumArtist', index=7,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='year', full_name='Track.year', index=8,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='comment', full_name='Track.comment', index=9,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='track', full_name='Track.track', index=10,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='genre', full_name='Track.genre', index=11,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='duration', full_name='Track.duration', index=12,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='beatsPerMinute', full_name='Track.beatsPerMinute', index=13,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='playCount', full_name='Track.playCount', index=14,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='totalTracks', full_name='Track.totalTracks', index=15,
number=26, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disc', full_name='Track.disc', index=16,
number=27, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='totalDiscs', full_name='Track.totalDiscs', index=17,
number=28, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u11', full_name='Track.u11', index=18,
number=31, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fileSize', full_name='Track.fileSize', index=19,
number=32, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u13', full_name='Track.u13', index=20,
number=37, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u14', full_name='Track.u14', index=21,
number=38, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='bitrate', full_name='Track.bitrate', index=22,
number=44, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u15', full_name='Track.u15', index=23,
number=53, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u16', full_name='Track.u16', index=24,
number=61, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=19,
serialized_end=438,
)
_METADATAREQUEST = descriptor.Descriptor(
name='MetadataRequest',
full_name='MetadataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='tracks', full_name='MetadataRequest.tracks', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='address', full_name='MetadataRequest.address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=440,
serialized_end=498,
)
_QUEUEDUPLOAD = descriptor.Descriptor(
name='QueuedUpload',
full_name='QueuedUpload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='QueuedUpload.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u0', full_name='QueuedUpload.u0', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='serverId', full_name='QueuedUpload.serverId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=500,
serialized_end=556,
)
_STATUS = descriptor.Descriptor(
name='Status',
full_name='Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='u0', full_name='Status.u0', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u1', full_name='Status.u1', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u2', full_name='Status.u2', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u3', full_name='Status.u3', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u4', full_name='Status.u4', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u5', full_name='Status.u5', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=558,
serialized_end=638,
)
_TRACKRESPONSE = descriptor.Descriptor(
name='TrackResponse',
full_name='TrackResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='ids', full_name='TrackResponse.ids', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uploads', full_name='TrackResponse.uploads', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=640,
serialized_end=700,
)
_METADATARESPONSE = descriptor.Descriptor(
name='MetadataResponse',
full_name='MetadataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='u0', full_name='MetadataResponse.u0', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='response', full_name='MetadataResponse.response', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='state', full_name='MetadataResponse.state', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=702,
serialized_end=790,
)
_UPLOADAUTH = descriptor.Descriptor(
name='UploadAuth',
full_name='UploadAuth',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='address', full_name='UploadAuth.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='hostname', full_name='UploadAuth.hostname', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=792,
serialized_end=839,
)
_QUOTA = descriptor.Descriptor(
name='Quota',
full_name='Quota',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='maximumTracks', full_name='Quota.maximumTracks', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='availableTracks', full_name='Quota.availableTracks', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='totalTracks', full_name='Quota.totalTracks', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=841,
serialized_end=917,
)
_CLIENTSTATE = descriptor.Descriptor(
name='ClientState',
full_name='ClientState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='address', full_name='ClientState.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=919,
serialized_end=949,
)
_CLIENTSTATERESPONSE = descriptor.Descriptor(
name='ClientStateResponse',
full_name='ClientStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='u0', full_name='ClientStateResponse.u0', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='status', full_name='ClientStateResponse.status', index=1,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='quota', full_name='ClientStateResponse.quota', index=2,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=951,
serialized_end=1032,
)
_UPLOADAUTHRESPONSE = descriptor.Descriptor(
name='UploadAuthResponse',
full_name='UploadAuthResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='u0', full_name='UploadAuthResponse.u0', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='status', full_name='UploadAuthResponse.status', index=1,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u1', full_name='UploadAuthResponse.u1', index=2,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='u2', full_name='UploadAuthResponse.u2', index=3,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1034,
serialized_end=1115,
)
# Resolve cross-message references: message-typed fields get their concrete
# Descriptor wired in. protoc emits this linking step after all descriptors
# are defined because the messages reference one another.
_METADATAREQUEST.fields_by_name['tracks'].message_type = _TRACK
_TRACKRESPONSE.fields_by_name['uploads'].message_type = _QUEUEDUPLOAD
_METADATARESPONSE.fields_by_name['response'].message_type = _TRACKRESPONSE
_METADATARESPONSE.fields_by_name['state'].message_type = _STATUS
_CLIENTSTATERESPONSE.fields_by_name['status'].message_type = _STATUS
_CLIENTSTATERESPONSE.fields_by_name['quota'].message_type = _QUOTA
_UPLOADAUTHRESPONSE.fields_by_name['status'].message_type = _STATUS
class Track(message.Message):
    """Generated message class for the ``Track`` protobuf type (metadata.proto)."""
    # Python-2-era API: the metaclass reads DESCRIPTOR at class-creation time
    # and injects all field properties and (de)serialization machinery.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TRACK
    # @@protoc_insertion_point(class_scope:Track)
class MetadataRequest(message.Message):
    """Generated message class for the ``MetadataRequest`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _METADATAREQUEST
    # @@protoc_insertion_point(class_scope:MetadataRequest)
class QueuedUpload(message.Message):
    """Generated message class for the ``QueuedUpload`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _QUEUEDUPLOAD
    # @@protoc_insertion_point(class_scope:QueuedUpload)
class Status(message.Message):
    """Generated message class for the ``Status`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _STATUS
    # @@protoc_insertion_point(class_scope:Status)
class TrackResponse(message.Message):
    """Generated message class for the ``TrackResponse`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TRACKRESPONSE
    # @@protoc_insertion_point(class_scope:TrackResponse)
class MetadataResponse(message.Message):
    """Generated message class for the ``MetadataResponse`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _METADATARESPONSE
    # @@protoc_insertion_point(class_scope:MetadataResponse)
class UploadAuth(message.Message):
    """Generated message class for the ``UploadAuth`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _UPLOADAUTH
    # @@protoc_insertion_point(class_scope:UploadAuth)
class Quota(message.Message):
    """Generated message class for the ``Quota`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _QUOTA
    # @@protoc_insertion_point(class_scope:Quota)
class ClientState(message.Message):
    """Generated message class for the ``ClientState`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CLIENTSTATE
    # @@protoc_insertion_point(class_scope:ClientState)
class ClientStateResponse(message.Message):
    """Generated message class for the ``ClientStateResponse`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CLIENTSTATERESPONSE
    # @@protoc_insertion_point(class_scope:ClientStateResponse)
class UploadAuthResponse(message.Message):
    """Generated message class for the ``UploadAuthResponse`` protobuf type."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _UPLOADAUTHRESPONSE
    # @@protoc_insertion_point(class_scope:UploadAuthResponse)
# @@protoc_insertion_point(module_scope)
| 26,402 | 10,170 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from pages.mobile.base import Base
class Article(Base):
    """Page object for the vote bar on the mobile KB article page."""

    _helpful_button_locator = (By.NAME, 'helpful')
    _helpful_header_text_locator = (By.CSS_SELECTOR, 'div.vote-bar header')
    _vote_message_text_locator = (By.CSS_SELECTOR, 'div.vote-bar p')

    @property
    def helpful_header_text(self):
        """Text currently shown in the vote-bar header."""
        header = self.selenium.find_element(*self._helpful_header_text_locator)
        return header.text

    def wait_for_vote_message_text(self, text):
        """Block until the vote message equals *text* (times out per Base.timeout)."""
        wait = WebDriverWait(self.selenium, self.timeout)
        wait.until(
            lambda driver: driver.find_element(*self._vote_message_text_locator).text == text)

    def click_helpful_button(self):
        """Press the 'helpful' vote button."""
        button = self.selenium.find_element(*self._helpful_button_locator)
        button.click()
| 992 | 318 |
from response import ResponseObj
from response import RequestHandler
from request import RequestObjNew
import tornado.web
import traceback
import tornado.gen
import tornado.ioloop
import tornado.concurrent
import logging
from lib.customException import ApplicationException
import globalsObj
import re
import jwtoken.lib.jwtoken
import asyncio
class jwtokenHandler(RequestHandler):
def __init__(self, *args, **kwds):
super(RequestHandler, self).__init__(*args, **kwds)
self.dbobjJwt = globalsObj.DbConnections['jwtDb']
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
#self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', ' POST, GET, OPTIONS')
# gestione errore generico
def write_error(self, status_code, **kwargs):
# debug info
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
debugTmp = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
debugTmp += line
getResponse = ResponseObj(debugMessage=debugTmp,httpcode=status_code,devMessage=self._reason)
else:
getResponse = ResponseObj(httpcode=status_code,devMessage=self._reason)
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_status(status_code)
# inserisci codice errore personalizzato
getResponse.setError('3')
getResponse.setResult()
self.write(getResponse.jsonWrite())
self.finish()
#get
async def get(self):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_default_headers()
if re.match("/api/jwt/getByType", self.request.path):
#task da eseguire per il get
response_obj = await asyncio.get_event_loop().run_in_executor(None, self.getByType)
#response_obj = await tornado.platform.asyncio.to_tornado_future(fut)
elif re.match("/api/jwt/verify", self.request.path):
#task da eseguire per il get
response_obj = await asyncio.get_event_loop().run_in_executor(None, self.verify)
#response_obj = await tornado.platform.asyncio.to_tornado_future(fut)
self.writeLog(response_obj)
self.writeResponse(response_obj)
#@tornado.gen.coroutine
async def post(self):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_default_headers()
if re.match("/api/jwt/verify", self.request.path):
response_obj = await asyncio.get_event_loop().run_in_executor(None, self.verify)
#response_obj = await tornado.platform.asyncio.to_tornado_future(fut)
self.writeLog(response_obj)
self.writeResponse(response_obj)
    def options(self):
        """Answer CORS preflight (OPTIONS) requests with 204 No Content."""
        # no body
        self.set_status(204)
        self.finish()
    def writeResponse(self, response_obj):
        """Send *response_obj*: HTTP status from its error code, body as JSON."""
        self.set_status(response_obj.error.httpcode)
        self.write(response_obj.jsonWrite())
        self.finish()
def writeLog(self, response_obj):
x_real_ip = self.request.headers.get("X-Real-IP")
remote_ip = x_real_ip or self.request.remote_ip
#insert log
if str(self.request.body, 'utf-8') == '':
body = None
else:
body = str(self.request.body, 'utf-8')
log_request = self.dbobjJwt.makeQuery("EXECUTE log_request(%s, %s, %s, %s)",
[self.request.method,
self.request.protocol + "://" + self.request.host + self.request.uri,
body,
remote_ip],
type = self.dbobjJwt.stmts['log_request']['pool'], close = True, fetch=False)
log_response = self.dbobjJwt.makeQuery("EXECUTE log_response(%s, %s, %s, %s)",
[response_obj.error.httpcode,
self.request.protocol + "://" + self.request.host + self.request.uri,
response_obj.jsonWrite(),
remote_ip],
type = self.dbobjJwt.stmts['log_response']['pool'], close = True, fetch=False)
return
#@tornado.concurrent.run_on_executor
def getByType(self):
try:
jwtCode = super(self.__class__, self).get_argument('type')
""" This will be executed in `executor` pool. """
#connJwt = jwtoken.lib.database.Database(globalsObj.DbConnections['jwtMasterdsn'])
#newcod_token = connJwt.createTokenByType(jwtCode)
newcod_cod_token = self.dbobjJwt.makeQuery("EXECUTE create_token_by_type(%s)",
[jwtCode],type = self.dbobjJwt.stmts['create_token_by_type']['pool'], close = True)
newcod_token = self.dbobjJwt.makeQuery("EXECUTE get_token_by_cod(%s)",
[newcod_cod_token['result']['cod_token']],type = self.dbobjJwt.stmts['get_token_by_cod']['pool'], close = True)
if newcod_token['error'] == 0 and newcod_token['result'] is not None:
# genera risposta tutto ok
response_obj = ResponseObj(httpcode=200)
response_obj.setError('200')
response_obj.setResult(token = newcod_token['result']['token'])
elif newcod_token['error'] == 0 and newcod_token['result'] is None:
response_obj = ResponseObj(httpcode=404)
response_obj.setError('jwtoken102')
elif newcod_token['error'] > 1:
response_obj = ResponseObj(debugMessage=newcod_token['result'].pgerror, httpcode=500,
devMessage=("PostgreSQL error code: %s" % newcod_token['result'].pgcode))
response_obj.setError('jwtoken105')
except tornado.web.MissingArgumentError as error:
response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
devMessage=error.log_message)
response_obj.setError(str(error.status_code))
logging.getLogger(__name__).error('%s'% error,exc_info=True)
except ApplicationException as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError(inst.code)
#responsejson = response_obj.jsonWrite()
logging.getLogger(__name__).error('Exception',exc_info=True)
except Exception as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError('500')
logging.getLogger(__name__).error('Exception',exc_info=True)
finally:
logging.getLogger(__name__).warning('jwt/getByType handler executed')
return response_obj
def verify(self):
    """Verify a JWT supplied via GET ('token' argument) or POST (JSON body).

    Returns:
        ResponseObj: 200 with the decoded JOSE payload, 401 for an invalid
        token, 404 when unknown, 400/503 for malformed POST bodies, 500 on
        database errors.
    """
    temp = None  # parsed POST request body, if any
    try:
        if self.request.method == 'GET':
            token = super(self.__class__, self).get_argument('token')
        elif self.request.method == 'POST':
            # leggi il json della richiesta (parse the JSON request body)
            temp = RequestObjNew(self.request.body)
            if temp.error["code"] == 2:
                response_obj = ResponseObj(debugMessage=temp.error["message"], httpcode=400)
                response_obj.setError('400')
                logging.getLogger(__name__).error('Validation error. Json input error')
                return response_obj
            elif temp.error["code"] > 0:
                # BUGFIX: HTTPError has no 'httpcode' keyword; the original
                # call silently kept the default status 500.
                raise tornado.web.HTTPError(status_code=503, log_message=temp.error["message"])
            token = temp.request['token']
        verifica = self.dbobjJwt.makeQuery("EXECUTE verify_token(%s)",
            [token], type=self.dbobjJwt.stmts['verify_token']['pool'], close=True)
        if verifica['error'] == 0:
            if verifica['result'][0] is None:
                # Token not found.
                response_obj = ResponseObj(httpcode=404)
                response_obj.setError('jwtoken101')
            elif verifica['result'][0]['error'] == 0:
                # Token valid: return the decoded payload.
                response_obj = ResponseObj(httpcode=200)
                response_obj.setError('200')
                response_obj.setResult(jose=verifica['result'][0]['message'])
            else:
                # Token invalid/expired.
                response_obj = ResponseObj(httpcode=401, devMessage=(verifica['result'][0]['message']))
                response_obj.setError('jwtoken100')
        else:
            # BUGFIX: the original tested "error == 1", which left error > 1
            # with no response_obj and caused UnboundLocalError on return.
            response_obj = ResponseObj(debugMessage=verifica['result'].pgerror, httpcode=500,
                devMessage=("PostgreSQL error code: %s" % verifica['result'].pgcode))
            response_obj.setError('jwtoken105')
    except tornado.web.MissingArgumentError as error:
        response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
            devMessage=error.log_message)
        response_obj.setError(str(error.status_code))
        logging.getLogger(__name__).error('%s' % error, exc_info=True)
    except ApplicationException as inst:
        response_obj = ResponseObj(httpcode=500)
        response_obj.setError(inst.code)
        logging.getLogger(__name__).error('Exception', exc_info=True)
    except Exception as inst:
        response_obj = ResponseObj(httpcode=500)
        response_obj.setError('500')
        logging.getLogger(__name__).error('Exception', exc_info=True)
    finally:
        logging.getLogger(__name__).warning('jwt/verify handler executed')
    # BUGFIX: guard against 'temp' being unset (e.g. parsing raised early).
    if self.request.method == 'POST' and temp is not None:
        response_obj.setID(temp.id)
    return response_obj
| 10,008 | 2,940 |
"""Route websocket connections to their consumers (Django Channels URL config)."""
from django.conf.urls import url
from django.urls import path
from . import consumers

# NOTE(review): consumers are passed directly (Channels 2 style); Channels 3+
# requires `.as_asgi()` -- confirm the installed channels version.
websocket_urlpatterns = [
    # url(r'^ws/chat/(?P<room_name>[^/]+)/$', consumers.ChatConsumer),
    path('ws/chat/<room_name>/', consumers.ChatConsumer),
    path('wss/chat/<room_name>/', consumers.ChatConsumer),
    path('ws/group_chat/<room_name>/', consumers.GroupChatConsumer),
    path('wss/group_chat/<room_name>/', consumers.GroupChatConsumer),
]
| 481 | 191 |
import boto3
import typing
import functools
from ._bases import BaseMixin
from ..utils import logging
@functools.lru_cache(maxsize=1)
def ssmClient():
    """Return a lazily-created, process-wide singleton boto3 SSM client."""
    return boto3.client('ssm')
class ParameterStore(BaseMixin):
    """Read-only access to AWS SSM Parameter Store values."""

    def __init__(self):
        super().__init__(name='')

    @classmethod
    def get(cls, key: str) -> str:
        """Fetch and decrypt the SSM parameter named *key*.

        BUGFIX: the return annotation said ``bytes``, but the code returns
        ``response['Parameter']['Value']``, which SSM delivers as ``str``.
        """
        logging.debug(f'Fetching SSM Parameter {key}')
        response = ssmClient().get_parameter(Name=key, WithDecryption=True)
        return response['Parameter']['Value']
| 510 | 166 |
import logging
import os
import sys
import traceback
from itertools import cycle
import discord
from discord.ext import commands, tasks
# Log information about bot operations.
logging.basicConfig(level=logging.INFO)
# Get Discord token from environmental variable.
# NOTE(review): os.getenv returns None when unset; POMELO_CLIENT.run() below
# will then fail at startup -- confirm deployment always sets these.
DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
# Google Maps API token for searching places
GOOGLE_API_TOKEN = os.getenv("GOOGLE_API_TOKEN")
# MongoDB connection string
MONGODB_CONNECTION_STRING = os.getenv("MONGODB_CONNECTION_STRING")
# Default prefix for bot commands.
DEFAULT_PREFIX = "."
# Path to file tracking number of Google API requests.
REQUESTS_COUNTER_FILE = "data/google_api_requests.txt" # todo
# Set the bot client with '.' (dot) as a command prefix.
POMELO_CLIENT = commands.Bot(command_prefix=DEFAULT_PREFIX)
# Status text to be displayed in bot description (rotated by change_status).
STATUS_LIST = cycle(
    (
        "Powered by fruit energy.",
        "Fresh, ripe and juicy.",
        "Don't trust Pancake!",
        "Completely insect-free!",
        'Type: ".help"',
    )
)
# EVENT LISTENERS
@POMELO_CLIENT.event
async def on_ready():
    """Once the bot is connected, start the status-rotation loop and log readiness."""
    change_status.start()
    print("[ONLINE] Pomelo is fresh and ripe, lads!")
@POMELO_CLIENT.event
async def on_command_error(ctx, error):
    """Reply with a themed message for known command errors.

    Every error, handled or not, is also reported with a traceback on stderr.
    """
    # Ordered (exception type, reply) pairs; the first match wins.
    replies = (
        (commands.MissingRequiredArgument,
         "You're okay there pal? Because you've _clearly_ missed some of the arguments in your command... "
         "_shakes head_ Type '.help <command_name> to learn more about command."),
        (commands.CommandNotFound,
         "Are you delusional? Such command **doesn't exist** AT ALL. Type '.help' if you are feeling little _stale_."),
        (commands.MissingPermissions,
         "You do not have permissions to use such command. Do not try to be tricky with me, kid."),
        (commands.NotOwner,
         "Only The Creator Himself can call such spells on me."),
    )
    for exc_type, message in replies:
        if isinstance(error, exc_type):
            await ctx.send(message)
            break
    # All other Exceptions not returned come here and the default traceback is then printed.
    print(f"Ignoring exception in command {ctx.command}:", file=sys.stderr)
    traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
# LOOPS
@tasks.loop(seconds=15)
async def change_status():
    """Rotate the bot's presence text every 15 seconds (cycles STATUS_LIST)."""
    await POMELO_CLIENT.change_presence(activity=discord.Game(next(STATUS_LIST)))
if __name__ == "__main__":
    """Check 'cogs' directory for cog files (which are basically bot modules) and load them."""
    for filename in os.listdir(os.path.join("src", "cogs")):
        # BUGFIX: match the '.py' extension exactly; endswith("py") also
        # matched extensionless names such as 'foo_copy'.
        if filename.endswith(".py"):
            POMELO_CLIENT.load_extension(f"cogs.{filename[:-3]}")
    POMELO_CLIENT.run(DISCORD_BOT_TOKEN)
| 3,020 | 940 |
from termcolor import cprint, colored
# Reference: https://ufal.mff.cuni.cz/pdt/Morphology_and_Tagging/Doc/hmptagqr.html
# Morphodita online demo: http://lindat.mff.cuni.cz/services/morphodita/
# The 15 positional categories of a Prague positional morphological tag,
# in tag-character order.
categories = [
    {'POS': 'Part of Speech'},
    {'SUBPOS': 'Detailed Part of Speech'},
    {'GENDER': 'Gender'},
    {'NUMBER': 'Number'},
    {'CASE': 'Case'},
    {'POSSGENDER': 'Possessor\'s Gender'},
    {'POSSNUMBER': 'Possessor\'s Number'},
    {'PERSON': 'Person'},
    {'TENSE': 'Tense'},
    {'GRADE': 'Degree of comparison'},
    {'NEGATION': 'Negation'},
    {'VOICE': 'Voice'},
    {'RESERVE1': 'Unused'},
    # BUGFIX: removed stray leading space (' Unused'), inconsistent with RESERVE1.
    {'RESERVE2': 'Unused'},
    {'VAR': 'Variant, Style, Register, Special Usage'}
]
# Allowed characters per tag position, with human-readable descriptions.
# Index i corresponds to categories[i].
allowed_values = [
    # 1) POS
    {
        'A': 'Adjective',
        'C': 'Numeral',
        'D': 'Adverb',
        'I': 'Interjection',
        'J': 'Conjunction',
        'N': 'Noun',
        'P': 'Pronoun',
        'V': 'Verb',
        'R': 'Preposition',
        'T': 'Particle',
        'X': 'Unknown, Not Determined, Unclassifiable',
        'Z': 'Punctuation (also used for the Sentence Boundary token)'
    },
    # 2) SUBPOS
    {
        '!': 'Abbreviation used as an adverb (now obsolete)',
        '#': 'Sentence boundary (for the virtual word ###)',
        '*': 'Word krát (lit.: times) (POS: C, numeral)',
        ',': 'Conjunction subordinate (incl. aby, kdyby in all forms)',
        '.': 'Abbreviation used as an adjective (now obsolete)',
        '0': 'Preposition with attached -ň (pronoun něj, lit. him); proň, naň, .... (POS: P, pronoun)',
        '1': 'Relative possessive pronoun jehož, jejíž, ... (lit. whose in subordinate relative clause)',
        '2': 'Hyphen (always as a separate token)',
        '3': 'Abbreviation used as a numeral (now obsolete)',
        '4': 'Relative/interrogative pronoun with adjectival declension of both types (soft and hard) (jaký, který, čí, ..., lit. what, which, whose, ...)',
        '5': 'The pronoun he in forms requested after any preposition (with prefix n-: něj, něho, ..., lit. him in various cases)',
        '6': 'Reflexive pronoun se in long forms (sebe, sobě, sebou, lit. myself / yourself / herself / himself in various cases; se is personless)',
        '7': 'Reflexive pronouns se (CASE = 4), si (CASE = 3), plus the same two forms with contracted -s: ses, sis (distinguished by PERSON = 2; also number is singular only)',
        '8': 'Possessive reflexive pronoun svůj (lit. my/your/her/his when the possessor is the subject of the sentence)',
        '9': 'Relative pronoun jenž, již, ... after a preposition (n-: něhož, niž, ..., lit. who)',
        ':': 'Punctuation (except for the virtual sentence boundary word ###, which uses the SUBPOS #)',
        ';': 'Abbreviation used as a noun (now obsolete)',
        '=': 'Number written using digits (POS: C, numeral)',
        '?': 'Numeral kolik (lit. how many/how much)',
        '@': 'Unrecognized word form (POS: X, unknown)',
        'A': 'Adjective, general',
        'B': 'Verb, present or future form',
        'C': 'Adjective, nominal (short, participial) form rád, schopen, ...',
        'D': 'Pronoun, demonstrative (ten, onen, ..., lit. this, that, that ... over there, ...)',
        'E': 'Relative pronoun což (corresponding to English which in subordinate clauses referring to a part of the preceding text)',
        'F': 'Preposition, part of; never appears isolated, always in a phrase (nehledě (na), vzhledem (k), ..., lit. regardless, because of)',
        'G': 'Adjective derived from present transgressive form of a verb',
        'H': 'Personal pronoun, clitical (short) form (mě, mi, ti, mu, ...); these forms are used in the second position in a clause (lit. me, you, her, him), even though some of them (mě) might be regularly used anywhere as well',
        'I': 'Interjections (POS: I)',
        'J': 'Relative pronoun jenž, již, ... not after a preposition (lit. who, whom)',
        'K': 'Relative/interrogative pronoun kdo (lit. who), incl. forms with affixes -ž and -s (affixes are distinguished by the category VAR (for -ž) and PERSON (for -s))',
        'L': 'Pronoun, indefinite všechnen, sám (lit. all, alone)',
        'M': 'Adjective derived from verbal past transgressive form',
        'N': 'Noun (general)',
        'O': 'Pronoun svůj, nesvůj, tentam alone (lit. own self, not-in-mood, gone)',
        'P': 'Personal pronoun já, ty, on (lit. I, you, he) (incl. forms with the enclitic -s, e.g. tys, lit. you\'re); gender position is used for third person to distinguish on/ona/ono (lit. he/she/it), and number for all three persons',
        'Q': 'Pronoun relative/interrogative co, copak, cožpak (lit. what, isn\'t-it-true-that)',
        'R': 'Preposition (general, without vocalization)',
        'S': 'Pronoun possessive můj, tvůj, jeho (lit. my, your, his); gender position used for third person to distinguish jeho, její, jeho (lit. his, her, its), and number for all three pronouns',
        'T': 'Particle (POS: T, particle)',
        'U': 'Adjective possessive (with the masculine ending -ův as well as feminine -in)',
        'V': 'Preposition (with vocalization -e or -u): (ve, pode, ku, ..., lit. in, under, to)',
        'W': 'Pronoun negative (nic, nikdo, nijaký, žádný, ..., lit. nothing, nobody, not-worth-mentioning, no/none)',
        'X': '(temporary) Word form recognized, but tag is missing in dictionary due to delays in (asynchronous) dictionary creation',
        'Y': 'Pronoun relative/interrogative co as an enclitic (after a preposition) (oč, nač, zač, lit. about what, on/onto what, after/for what)',
        'Z': 'Pronoun indefinite (nějaký, některý, číkoli, cosi, ..., lit. some, some, anybody\'s, something)',
        '^': 'Conjunction (connecting main clauses, not subordinate)',
        'a': 'Numeral, indefinite (mnoho, málo, tolik, několik, kdovíkolik, ..., lit. much/many, little/few, that much/many, some (number of), who-knows-how-much/many)',
        'b': 'Adverb (without a possibility to form negation and degrees of comparison, e.g. pozadu, naplocho, ..., lit. behind, flatly); i.e. both the NEGATION as well as the GRADE attributes in the same tag are marked by - (Not applicable)',
        'c': 'Conditional (of the verb být (lit. to be) only) (by, bych, bys, bychom, byste, lit. would)',
        'd': 'Numeral, generic with adjectival declension ( dvojí, desaterý, ..., lit. two-kinds/..., ten-...)',
        'e': 'Verb, transgressive present (endings -e/-ě, -íc, -íce)',
        'f': 'Verb, infinitive',
        'g': 'Adverb (forming negation (NEGATION set to A/N) and degrees of comparison GRADE set to 1/2/3 (comparative/superlative), e.g. velký, za\-jí\-ma\-vý, ..., lit. big, interesting',
        'h': 'Numeral, generic; only jedny and nejedny (lit. one-kind/sort-of, not-only-one-kind/sort-of)',
        'i': 'Verb, imperative form',
        'j': 'Numeral, generic greater than or equal to 4 used as a syntactic noun (čtvero, desatero, ..., lit. four-kinds/sorts-of, ten-...)',
        'k': 'Numeral, generic greater than or equal to 4 used as a syntactic adjective, short form (čtvery, ..., lit. four-kinds/sorts-of)',
        'l': 'Numeral, cardinal jeden, dva, tři, čtyři, půl, ... (lit. one, two, three, four); also sto and tisíc (lit. hundred, thousand) if noun declension is not used',
        'm': 'Verb, past transgressive; also archaic present transgressive of perfective verbs (ex.: udělav, lit. (he-)having-done; arch. also udělaje (VAR = 4), lit. (he-)having-done)',
        'n': 'Numeral, cardinal greater than or equal to 5',
        'o': 'Numeral, multiplicative indefinite (-krát, lit. (times): mnohokrát, tolikrát, ..., lit. many times, that many times)',
        'p': 'Verb, past participle, active (including forms with the enclitic -s, lit. \'re (are))',
        'q': 'Verb, past participle, active, with the enclitic -ť, lit. (perhaps) -could-you-imagine-that? or but-because- (both archaic)',
        'r': 'Numeral, ordinal (adjective declension without degrees of comparison)',
        's': 'Verb, past participle, passive (including forms with the enclitic -s, lit. \'re (are))',
        't': 'Verb, present or future tense, with the enclitic -ť, lit. (perhaps) -could-you-imagine-that? or but-because- (both archaic)',
        'u': 'Numeral, interrogative kolikrát, lit. how many times?',
        'v': 'Numeral, multiplicative, definite (-krát, lit. times: pětkrát, ..., lit. five times)',
        'w': 'Numeral, indefinite, adjectival declension (nejeden, tolikátý, ..., lit. not-only-one, so-many-times-repeated)',
        'x': 'Abbreviation, part of speech unknown/indeterminable (now obsolete)',
        'y': 'Numeral, fraction ending at -ina (POS: C, numeral); used as a noun (pětina, lit. one-fifth)',
        'z': 'Numeral, interrogative kolikátý, lit. what (at-what-position-place-in-a-sequence)',
        '}': 'Numeral, written using Roman numerals (XIV)',
        '~': 'Abbreviation used as a verb (now obsolete)'
    },
    # 3) GENDER
    {
        '-': 'Not applicable',
        'F': 'Feminine',
        'H': 'Feminine or Neuter',
        'I': 'Masculine inanimate',
        'M': 'Masculine animate',
        'N': 'Neuter',
        'Q': 'Feminine (with singular only) or Neuter (with plural only); used only with participles and nominal forms of adjectives',
        'T': 'Masculine inanimate or Feminine (plural only); used only with participles and nominal forms of adjectives',
        'X': 'Any of the basic four genders',
        'Y': 'Masculine (either animate or inanimate)',
        # BUGFIX: corrected typo 'fenimine' -> 'feminine'.
        'Z': 'Not feminine (i.e., Masculine animate/inanimate or Neuter); only for (some) pronoun forms and certain numerals'
    },
    # 4) NUMBER
    {
        '-': 'Not applicable',
        'D': 'Dual',
        'P': 'Plural',
        'S': 'Singular',
        'W': 'Singular for feminine gender, plural with neuter; can only appear in participle or nominal adjective form with gender value Q',
        'X': 'Any'
    },
    # 5) CASE
    {
        '-': 'Not applicable',
        '1': 'Nominative',
        '2': 'Genitive',
        '3': 'Dative',
        '4': 'Accusative',
        '5': 'Vocative',
        '6': 'Locative',
        '7': 'Instrumental',
        'X': 'Any'
    },
    # 6) POSSGENDER
    {
        '-': 'Not applicable',
        'F': 'Feminine possessor',
        'M': 'Masculine animate possessor (adjectives only)',
        'X': 'Any gender',
        'Z': 'Not feminine (both masculine or neuter)'
    },
    # 7) POSSNUMBER
    {
        '-': 'Not applicable',
        'P': 'Plural (possessor)',
        'S': 'Singular (possessor)'
    },
    # 8) PERSON
    {
        '-': 'Not applicable',
        '1': '1st person',
        '2': '2nd person',
        '3': '3rd person',
        'X': 'Any person'
    },
    # 9) TENSE
    {
        '-': 'Not applicable',
        'F': 'Future',
        'H': 'Past or Present',
        'P': 'Present',
        'R': 'Past',
        'X': 'Any (Past, Present, or Future)'
    },
    # 10) GRADE
    {
        '-': 'Not applicable',
        '1': 'Positive',
        '2': 'Comparative',
        '3': 'Superlative'
    },
    # 11) NEGATION
    {
        '-': 'Not applicable',
        'A': 'Affirmative (not negated)',
        'N': 'Negated'
    },
    # 12) VOICE
    {
        '-': 'Not applicable',
        'A': 'Active',
        'P': 'Passive'
    },
    # 13) RESERVE1
    {
        '-': 'Not applicable'
    },
    # 14) RESERVE2
    {
        '-': 'Not applicable'
    },
    # 15) VAR
    {
        '-': 'Not applicable (basic variant, standard contemporary style; also used for standard forms allowed for use in writing by the Czech Standard Orthography Rules despite being marked there as colloquial)',
        '1': 'Variant, second most used (less frequent), still standard',
        '2': 'Variant, rarely used, bookish, or archaic',
        '3': 'Very archaic, also archaic + colloquial',
        '4': 'Very archaic or bookish, but standard at the time',
        '5': 'Colloquial, but (almost) tolerated even in public',
        '6': 'Colloquial (standard in spoken Czech)',
        '7': 'Colloquial (standard in spoken Czech), less frequent variant',
        '8': 'Abbreviations',
        '9': 'Special uses, e.g. personal pronouns after prepositions etc.'
    }
]
def validate(pos_tag: str) -> list:
    """Return the positions in *pos_tag* holding a character not allowed there.

    Raises:
        Exception: if the tag length differs from the number of categories.
    """
    expected_len = len(allowed_values)
    if len(pos_tag) != expected_len:
        raise Exception('POS tag length incorrect. Expected {} characters, got {}.'.format(expected_len, len(pos_tag)))
    # A position is invalid when its character is not a key of that
    # position's allowed-values dict.
    return [pos for pos, symbol in enumerate(pos_tag)
            if symbol not in allowed_values[pos]]
def explain(pos_tag: str, errors: list) -> None:
    """Pretty-print a per-position breakdown of *pos_tag*.

    *errors* is the list of invalid positions produced by validate();
    invalid positions are shown in red, valid ones in green.
    """
    if len(errors) == 0:
        print('\nPOS tag syntax valid.')
        highlighted_tag = colored(pos_tag, 'green')
    else:
        print('\nInvalid POS tag syntax!')
        # Rebuild the tag char by char so bad positions can be colored red.
        highlighted_tag = ''
        for i, char in enumerate(pos_tag):
            if i in errors:
                print('Invalid value: {} at position {}'.format(char, i))
                highlighted_tag += colored(char, 'red')
            else:
                highlighted_tag += colored(char, 'green')
    print('Full tag:', highlighted_tag, '\n')
    # Table header.
    cprint('{:<6}{:<11}{:<6}{}'.format('Index', 'Category', 'Value', 'Description'), 'yellow', 'on_grey')
    for i, char in enumerate(pos_tag):
        if i in errors:
            print(colored('{:<6}{:<11}{:<6}{}'.format(i, str(list(categories[i].keys())[0]), char, 'INVALID'), 'red'))
        else:
            if char == '-':
                # 'Not applicable' rows are dimmed.
                print(colored('{:<6}{:<11}{:<6}{}'.format(i, str(list(categories[i].keys())[0]), char, allowed_values[i][char]), attrs=['dark']))
            else:
                print('{:<6}{:<11}{:<6}{}'.format(i, str(list(categories[i].keys())[0]), char, allowed_values[i][char]))
    print('\n')
def __main__():
    """CLI entry point: validate a Morphodita POS tag and print an explanation."""
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("pos_tag", help="POS tag in Morphodita format.")
    cli_args = arg_parser.parse_args()
    tag_errors = validate(cli_args.pos_tag)
    explain(cli_args.pos_tag, tag_errors)


if __name__ == '__main__':
    __main__()
| 14,209 | 4,863 |
# Real-time human segmentation with a web camera
# Modules
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import time
import torch
from torchvision import transforms
# Use GPU if available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load pretrained DeepLabV3 and switch to inference mode.
model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)
model.eval()
model.to(device)

# Preprocess image: to tensor + ImageNet normalization (model expects RGB).
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Start camera capture
capture = cv2.VideoCapture(0)

while True:
    # Capture mirror image video frame
    _, frame = capture.read()
    frame = cv2.flip(frame, 1)
    # BUGFIX: OpenCV delivers BGR frames, but the normalization above is for
    # RGB input; convert before preprocessing so segmentation is accurate.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_tensor = preprocess(rgb_frame).unsqueeze(0).to(device)
    # Predict per-pixel class labels.
    with torch.no_grad():
        output = model(frame_tensor)['out'][0].argmax(0)
    # Collapse classes into a binary mask (class 15 = person -- TODO confirm
    # against the model's label set).
    output[output != 15] = 0
    output[output == 15] = 1
    # Resize mask to the frame's (width, height) and expand to 3 channels.
    output = output.byte().cpu().numpy()
    output = np.stack((output, output, output), -1)
    output = cv2.resize(output, frame.shape[1::-1]).astype(bool)
    # Split the frame into human and background pixels.
    human = (frame * output).astype(float)
    background = frame * np.invert(output)
    # Apply a semi-transparent colored overlay to the human pixels.
    overlay = output * np.array([[255, 0, 0]])
    human = 0.66 * human + 0.33 * overlay
    # Display frame with overlay
    cv2.imshow('frame', human.astype('uint8') + background.astype('uint8'))
    # Exit with q key
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release camera capture
capture.release()
cv2.destroyAllWindows()
| 1,796 | 635 |
from __future__ import unicode_literals
from telesign.rest import RestClient
TELEBUREAU_CREATE_RESOURCE = "/v1/telebureau/event"
TELEBUREAU_RETRIEVE_RESOURCE = "/v1/telebureau/event/{reference_id}"
TELEBUREAU_DELETE_RESOURCE = "/v1/telebureau/event/{reference_id}"


class TelebureauClient(RestClient):
    """Client for TeleSign's TeleBureau fraud-event API.

    TeleBureau is based on TeleSign's watchlist, a proprietary database of
    verified phone numbers of users known to have committed online fraud,
    crowd-sourced from TeleSign customers. Participation is voluntary, but
    you have to contribute in order to benefit.
    """

    def __init__(self, customer_id, api_key, rest_endpoint='https://rest-ww.telesign.com', **kwargs):
        super(TelebureauClient, self).__init__(customer_id, api_key, rest_endpoint=rest_endpoint, **kwargs)

    def create_event(self, phone_number, fraud_type, occurred_at, **params):
        """Create a telebureau event from the supplied data.

        See https://developer.telesign.com/docs/telebureau-api for detailed API documentation.
        """
        return self.post(TELEBUREAU_CREATE_RESOURCE,
                         phone_number=phone_number,
                         fraud_type=fraud_type,
                         occurred_at=occurred_at,
                         **params)

    def retrieve_event(self, reference_id, **params):
        """Retrieve the status of a previously created fraud event.

        See https://developer.telesign.com/docs/telebureau-api for detailed API documentation.
        """
        resource = TELEBUREAU_RETRIEVE_RESOURCE.format(reference_id=reference_id)
        return self.get(resource, **params)

    def delete_event(self, reference_id, **params):
        """Delete a previously submitted fraud event.

        See https://developer.telesign.com/docs/telebureau-api for detailed API documentation.
        """
        resource = TELEBUREAU_DELETE_RESOURCE.format(reference_id=reference_id)
        return self.delete(resource, **params)
| 2,276 | 638 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 11:26:20 2018
@author: nbaya
"""
import os
import glob
import re
import pandas as pd
from subprocess import call
from joblib import Parallel, delayed
import multiprocessing
import sys
import numpy as np
v3_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/imputed-v3-results/"
# Get saved phenotypes
malefiles = (list(map(os.path.basename,glob.glob(v3_path+"*.male*.gz")))) #restrict to male files to prevent counting phenotype twice
find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
savedphenotypes = list(map(lambda filename: re.search(find,filename).group(1), malefiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
# Get all phenotypes
allphenotypes = pd.Series.tolist(pd.read_table(v3_path+"phenotypes.both_sexes.tsv").iloc[:]["phenotype"]) #list of all phenotypes (male & female)
allphenotypes = pd.DataFrame({'phenotype':allphenotypes})
# Persist the full phenotype list alongside the result files for reference.
allphenotypes.to_csv(v3_path+"allphenotypeslist.tsv",sep = "\t")
# TEMPORARY -------------------------------------------------------------------
#savedFiles= (list(map(os.path.basename,glob.glob(chrX_path+"*.gz")))) #restrict to male files to prevent counting phenotype twice
#find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
#newphenotypes = list(map(lambda filename: re.search(find,filename).group(1), savedFiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#
#nextphenotypes = list(set(savedphenotypes).difference(set(newphenotypes)))
#
#len(nextphenotypes)
# -----------------------------------------------------------------------------
n_cores = multiprocessing.cpu_count()
#old method of extracting chrX
def prev_chrX_from_saved_phenotypes(ph):
    """Merge male/female chrX GWAS results for phenotype *ph* into one TSV.

    Reads the per-sex imputed-v3 result files, keeps variants whose ID starts
    with 'X', and writes the merged table under chrX_path (module-level;
    defined further down, so this must only be called after that assignment).
    """
    tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t') #read files
    tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')
    chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:] #get chrX variants for males
    chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:] #get chrX variants for females
    chrX = pd.merge(chrX_male,chrX_female, on = 'variant',suffixes = ("_male","_female"))
    chrX.to_csv(chrX_path+ph+".chrX.tsv.gz",sep = '\t', compression = 'gzip')
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in savedphenotypes)
# TEMPORARY -------------------------------------------------------------------
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in nextphenotypes)
# -----------------------------------------------------------------------------
#def chrX_from_new_phenotypes(ph):
#
## call(["gsutil" ,"cp","gs://ukbb-gwas-imputed-v3-results/export1/"+ph+".**male*",
## "~/Documents/lab/ukbb-sexdiff/chrX/"])
#
#
# call('gsutil ls gs://ukbb-gwas-imputed-v3-results/export1/'+ph+'.**male*', shell=True)
## "~/Documents/lab/ukbb-sexdiff/chrX/',)
## call(["paste","<(cat", ph, ".imputed_v3.results.female.tsv.gz","|","zcat",
## "|" , "cut -f 1,2,3,5,6,8)", "<(cat", ph,".imputed_v3.results.male.tsv.gz" ,
## "|", "zcat", "|", "cut", "-f", "1,2,3,5,6,8)", "|", "awk" ,"\'", "NR==1{",
## "print", "\"variant\",\"n_female\",\"n_male\",\"frq_female\",\"frq_male\",\"beta_female\",\"se_female\",\"p_female\",\"beta_male\",\"se_male\",\"p_male\"",
## "}NR>1", "&&", "$1==$7{", "maff=$3/(2*$2);" , "mafm=$9/(2*$8);" ,
## "if(maff > .05 && maff<.95 && mafm > .05 && mafm < .95){",
## "print $1,$2,$8,maff,mafm,$4,$5,$6,$10,$11,$12} }\' | gzip >", ph, ".sexdiff.gz]"])
#
#testph = ['46','47']
#
#for ph in testph:
# chrX_from_new_phenotypes(ph)
#for ph in set(allphenotypes).difference(set(savedphenotypes)): #for all phenotypes not saved
# -----------------------------------------------------------------------------
chrX_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/chrX/data/"
ph = "1757"

# Standardized column names for the exported chrX tables (shared by both sexes).
_CHRX_COLUMNS = {0: "CHR", 1: "POS", 3: "EFFECT_ALLELE", 2: "NON_EFFECT_ALLELE",
                 "variant": "SNP", "nCompleteSamples": "N", "beta": "BETA",
                 "se": "SE", "pval": "P_VAL"}

# Males
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t')  # read files
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:]  # get chrX variants for males
chrX_male = chrX_male.reset_index()  # necessary for upcoming concat between chrX_male and a3
a1 = np.asarray(chrX_male.iloc[:, 0])
# Split colon-delimited variant IDs into their 4 components -- presumably
# chrom:pos:allele1:allele2; verify against the source file format.
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2), 4)))
chrX_male2 = pd.concat([a3[[0, 1, 3, 2]], chrX_male], axis=1).drop(['index', 'tstat', 'AC', 'ytx'], axis=1)
# BUGFIX: DataFrame.rename returns a new frame (it is not in-place); the
# original discarded the result, so files kept the old column names.
chrX_male2 = chrX_male2.rename(index=str, columns=_CHRX_COLUMNS)
chrX_male2.to_csv(chrX_path+ph+".chrX.male.tsv.gz", sep='\t', compression='gzip')

# Females (same pipeline)
tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')  # read files
chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:]  # get chrX variants for females
chrX_female = chrX_female.reset_index()  # necessary for upcoming concat between chrX_female and a3
a1 = np.asarray(chrX_female.iloc[:, 0])
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2), 4)))
chrX_female2 = pd.concat([a3[[0, 1, 3, 2]], chrX_female], axis=1).drop(['index', 'tstat', 'AC', 'ytx'], axis=1)
chrX_female2 = chrX_female2.rename(index=str, columns=_CHRX_COLUMNS)
chrX_female2.to_csv(chrX_path+ph+".chrX.female.tsv.gz", sep='\t', compression='gzip')
| 6,099 | 2,439 |
import time
def f(x):
    """No-op placeholder: accepts *x*, does nothing, returns None."""
    return None


if __name__ == "__main__":
    # Execute only when run as a script.
    f("oo")
class Solution(object):
    def countComponents(self, n, edges):
        """
        Count connected components in an undirected graph of n nodes
        using union-find with path halving.

        :type n: int
        :type edges: List[List[int]]
        :rtype: int
        """
        # id_[i] is the parent of node i; a root points to itself.
        # (range works on both Python 2 and 3; removed the unused 'start' var.)
        id_ = [i for i in range(n)]
        for edge in edges:
            i = self.root(id_, edge[0])
            j = self.root(id_, edge[1])
            id_[i] = j  # union the two components
        count = 0
        for i in range(len(id_)):
            if id_[i] == i:  # each remaining root is one component
                count += 1
        return count

    def root(self, id_, i):
        """Return the root of i, halving the path along the way."""
        while i != id_[i]:
            id_[i] = id_[id_[i]]
            i = id_[i]
        return i
# Quick manual checks (Python 2 print-statement syntax).
a = Solution()
print a.countComponents(5, [[0, 1], [1, 2], [2, 3], [3, 4]])
print a.countComponents(5, [[0,1],[1,2],[0,2],[3,4]])
| 749 | 277 |
#!/usr/bin/python3
# 10-divisible_by_2.py
def divisible_by_2(my_list=[]):
    """Find all multiples of 2 in a list.

    Args:
        my_list: list of integers (read-only; the default is never mutated).

    Returns:
        A parallel list of booleans: True where the element is even.
    """
    # Idiomatic comprehension replacing the index-based loop/append version.
    return [number % 2 == 0 for number in my_list]
| 315 | 121 |
import codegen
from arm.insts import arm_insts
# Generate the ARM instruction bindings and matching C wrappers from the
# declarative instruction table in arm.insts.
with open('InstSema.arm.cpp', 'w') as f:
    codegen.emit_instruction_bindings(arm_insts, 'ArmInsts', f)
with open('InstWrappers.arm.c', 'w') as f:
    # The wrappers call NEON intrinsics, so the header must come first.
    f.write('#include <arm_neon.h>\n')
    codegen.emit_wrappers(arm_insts, f)
| 270 | 115 |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.http import HttpResponse
from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from catalyst_utils.models import Person, Survey, Gradebook
from catalyst_utils.dao.file import read_file, build_archive
from userservice.user import UserService
from logging import getLogger
import json
import re
logger = getLogger(__name__)
@method_decorator(login_required, name='dispatch')
class APIView(View):
    """Base view: resolves the logged-in Person and builds JSON/file responses."""

    @property
    def person(self):
        """The Person record for the logged-in user, cached on the view."""
        if not hasattr(self, '_person'):
            username = UserService().get_user()
            self._person = Person.objects.get(login_name=username)
        return self._person

    @staticmethod
    def json_response(content='', status=200):
        """Serialize *content* as JSON with the given HTTP status."""
        return HttpResponse(json.dumps(content, sort_keys=True),
                            status=status,
                            content_type='application/json')

    @staticmethod
    def error_response(status, message='', content=None):
        """JSON error payload with an 'error' key describing *message*."""
        # BUGFIX: the default was a mutable dict shared across calls and
        # mutated below, leaking the 'error' key between requests.
        if content is None:
            content = {}
        content['error'] = str(message)
        return HttpResponse(json.dumps(content),
                            status=status,
                            content_type='application/json')

    @staticmethod
    def file_response(content, filename, content_type='text/csv'):
        """Attachment response; commas/slashes in *filename* become dashes."""
        response = HttpResponse(content=content, status=200,
                                content_type=content_type)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            re.sub(r'[,/]', '-', filename))
        return response

    @staticmethod
    def sorted_tools(tools):
        """Sort tool dicts newest-first, tie-broken by case-insensitive name."""
        return sorted(tools,
                      key=lambda t: (t['created_date'], t['name'].upper()),
                      reverse=True)
class SurveyList(APIView):
    """List surveys grouped by the caller's relationship to them."""

    def get(self, request, *args, **kwargs):
        try:
            surveys_by_role = {
                'owned_surveys': Survey.objects.by_owner(self.person),
                'netid_surveys': Survey.objects.by_netid_admin(self.person),
                'admin_surveys': Survey.objects.by_administrator(self.person),
            }
        except Person.DoesNotExist:
            # No Person record for this login: nothing to list.
            return self.json_response(status=204)

        data = {role: self.sorted_tools([s.json_data() for s in surveys])
                for role, surveys in surveys_by_role.items()}
        return self.json_response(data)
class GradebookList(APIView):
    """List gradebooks grouped by the caller's relationship to them."""

    def get(self, request, *args, **kwargs):
        try:
            gradebooks_by_role = {
                'owned_gradebooks': Gradebook.objects.by_owner(self.person),
                'netid_gradebooks': Gradebook.objects.by_netid_admin(self.person),
                'admin_gradebooks': Gradebook.objects.by_administrator(self.person),
            }
        except Person.DoesNotExist:
            # No Person record for this login: nothing to list.
            return self.json_response(status=204)

        data = {role: self.sorted_tools([g.json_data() for g in gradebooks])
                for role, gradebooks in gradebooks_by_role.items()}
        return self.json_response(data)
class SurveyFile(APIView):
    """Download a zip archive of a survey's exported files."""

    def get(self, request, *args, **kwargs):
        survey_id = kwargs.get('survey_id')
        try:
            survey = Survey.objects.get(survey_id=survey_id)
        except Survey.DoesNotExist:
            return self.error_response(404, 'Not Found')

        if not survey.is_administrator(self.person):
            return self.error_response(401, 'Not Authorized')

        try:
            # Path attributes are accessed inside the try: any of them (or the
            # archive build) may raise ObjectDoesNotExist.
            archive = build_archive([survey.export_path,
                                     survey.responses_path,
                                     survey.code_translation_path])
        except ObjectDoesNotExist:
            return self.error_response(404, 'Not Available')

        return self.file_response(archive, survey.filename,
                                  content_type='application/zip')
class GradebookFile(APIView):
    """Download a gradebook's export file as an Excel attachment."""

    def get(self, request, *args, **kwargs):
        try:
            gradebook = Gradebook.objects.get(
                gradebook_id=kwargs.get('gradebook_id'))
        except Gradebook.DoesNotExist:
            return self.error_response(404, 'Not Found')
        # Only gradebook administrators may download the export.
        if not gradebook.is_administrator(self.person):
            return self.error_response(401, 'Not Authorized')
        try:
            export = read_file(gradebook.export_path)
        except ObjectDoesNotExist:
            # Export file has not been generated yet.
            return self.error_response(404, 'Not Available')
        return self.file_response(export, gradebook.filename,
                                  content_type='application/vnd.ms-excel')
| 5,045 | 1,432 |
'''Constants for MachineThematicAnalysis Toolkit'''
import sys
import os
import shutil
import platform
import wx
#import wx.lib.agw.flatnotebook as FNB
# Project-local patched flatnotebook (replaces the stock wx.lib.agw version).
import External.wxPython.flatnotebook_fix as FNB
CUR_VER = '0.8.11'
#Variables to configure GUI
FNB_STYLE = FNB.FNB_DEFAULT_STYLE|FNB.FNB_HIDE_ON_SINGLE_TAB|FNB.FNB_NO_X_BUTTON|FNB.FNB_FF2
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
DATE_FORMAT = '%Y-%m-%d'
if getattr(sys, 'frozen', False):
    # this is a Pyinstaller bundle: bundled resources live in the unpack dir
    ROOT_PATH = sys._MEIPASS
else:
    # normal python process
    ROOT_PATH = os.getcwd()
FONTS_PATH = os.path.join(ROOT_PATH, 'Fonts')
IMAGES_PATH = os.path.join(ROOT_PATH, 'Images')
XSD_PATH = os.path.join(ROOT_PATH, 'External/XSD')
# Save data lives under Documents; the '.nosync' suffix presumably keeps
# cloud-sync services from syncing it -- TODO confirm. The blocks below
# migrate data from older directory layouts, so their order matters.
SAVE_DATA_PATH = os.path.realpath(os.path.expanduser('~/Documents/ComputationalThematicAnalysisToolkit.nosync'))
old_SAVE_DATA_PATH = os.path.realpath(os.path.expanduser('~/Documents/ComputationalThematicAnalysisToolkit'))
if not os.path.exists(SAVE_DATA_PATH):
    if os.path.exists(old_SAVE_DATA_PATH):
        # migrate the pre-'.nosync' save folder in place
        os.rename(old_SAVE_DATA_PATH, SAVE_DATA_PATH)
    else:
        os.makedirs(SAVE_DATA_PATH)
if platform.system() == 'Windows':
    APP_DATA_PATH = os.path.realpath(os.path.expanduser('~/AppData/Local/ComputationalThematicAnalysisToolkit'))
else:
    # NOTE(review): assumes any non-Windows platform is macOS ('~/Library') -- confirm
    APP_DATA_PATH = os.path.realpath(os.path.expanduser('~/Library/ComputationalThematicAnalysisToolkit'))
if not os.path.exists(APP_DATA_PATH):
    os.makedirs(APP_DATA_PATH)
SAVED_WORKSPACES_PATH = os.path.realpath(os.path.join(SAVE_DATA_PATH, 'Saved_Workspaces'))
if not os.path.exists(SAVED_WORKSPACES_PATH):
    os.makedirs(SAVED_WORKSPACES_PATH)
# Data used to live under APP_DATA_PATH; move it into SAVE_DATA_PATH once.
DATA_PATH = os.path.realpath(os.path.join(SAVE_DATA_PATH, 'Data'))
if not os.path.exists(DATA_PATH):
    old_DATA = os.path.realpath(os.path.join(APP_DATA_PATH, 'Data'))
    if os.path.exists(old_DATA):
        shutil.move(old_DATA, SAVE_DATA_PATH)
    else:
        os.makedirs(DATA_PATH)
# Current workspace moved the opposite way: from SAVE_DATA_PATH to APP_DATA_PATH.
CURRENT_WORKSPACE_PATH = os.path.realpath(os.path.join(APP_DATA_PATH, 'Current_Workspace'))
old_CURRENT_WORKSPACE = os.path.realpath(os.path.join(SAVE_DATA_PATH, 'Current_Workspace'))
if not os.path.exists(CURRENT_WORKSPACE_PATH):
    if os.path.exists(old_CURRENT_WORKSPACE):
        shutil.move(old_CURRENT_WORKSPACE, APP_DATA_PATH)
    else:
        os.makedirs(CURRENT_WORKSPACE_PATH)
AUTOSAVE_PATH = os.path.realpath(os.path.join(CURRENT_WORKSPACE_PATH, 'AutoSave'))
# Logs also moved from SAVE_DATA_PATH to APP_DATA_PATH.
LOG_PATH = os.path.realpath(os.path.join(APP_DATA_PATH, 'Logs'))
old_LOG = os.path.realpath(os.path.join(SAVE_DATA_PATH, 'Logs'))
if not os.path.exists(LOG_PATH):
    if os.path.exists(old_LOG):
        shutil.move(old_LOG, APP_DATA_PATH)
    else:
        os.makedirs(LOG_PATH)
#Menu Options
# removed to use built in id generator wx.ID_ANY
#Module Specific Variables
##Filtering
# Column indices into the per-token tuples used by the filtering module.
TOKEN_TEXT_IDX = 0
TOKEN_STEM_IDX = 1
TOKEN_LEMMA_IDX = 2
TOKEN_POS_IDX = 3
TOKEN_SPACY_STOPWORD_IDX = 4
TOKEN_TEXT_TFIDF_IDX = 5
TOKEN_STEM_TFIDF_IDX = 6
TOKEN_LEMMA_TFIDF_IDX = 7
# Dictionary keys for per-token statistics.
TOKEN_ENTRIES = 'entries'
TOKEN_WORDS = 'words'
TOKEN_POS = 'pos'
TOKEN_NUM_WORDS = 'num_of_words'
TOKEN_PER_WORDS = 'per_of_words'
TOKEN_NUM_DOCS = 'num_of_docs'
TOKEN_PER_DOCS = 'per_of_docs'
TOKEN_SPACY_STOPWORD = 'spacy_stopword'
TOKEN_REMOVE_FLG = 'removed_flg'
TOKEN_TFIDF = 'tfidf_range'
# Filter-rule identifiers (stored in saved workspaces; do not change values).
FILTER_RULE_ANY = '<ANY>'
FILTER_RULE_REMOVE = 'remove'
FILTER_RULE_INCLUDE = 'include'
FILTER_RULE_REMOVE_SPACY_AUTO_STOPWORDS = 'remove spacy auto stopwords'
FILTER_RULE_INCLUDE_SPACY_AUTO_STOPWORDS = 'include spacy auto stopwords'
FILTER_TFIDF_REMOVE = 'remove tokens where their tfidf is '
FILTER_TFIDF_INCLUDE = 'include tokens where their tfidf is '
FILTER_TFIDF_LOWER = ' in the lower '
FILTER_TFIDF_UPPER = ' in the upper '
###Token Filters
# Parallel lists: spaCy model codes and their display names.
AVAILABLE_DATASET_LANGUAGES1 = ['eng-sm', 'fre-sm'] #removed eng-trf and fre-trf due to difficulties with preparing installations -- Sept 21, 2021
AVAILABLE_DATASET_LANGUAGES2 = ['English', 'French']
###Usefulness
# Usefulness labels applied to documents during review.
NOT_SURE = "Not Sure"
USEFUL = "Useful"
NOT_USEFUL = "Not Useful"
# dialogs
TWITTER_DIALOG_SIZE = wx.Size(350, -1)
OPTIONS_DIALOG_SIZE = wx.Size(350, -1)
#definition of fields available for use from the retrievers
# Keyed by (source, data_type). Each field maps to: 'desc' (user-facing
# description), 'type', and whether it is selected by default for
# computation ('computation_fields_default') and labels ('label_fields_default').
# FIX: corrected typos in several user-facing 'desc' strings ("class f the",
# missing ")", "it's" -> "its", and comment scores described as submission scores).
available_fields = {
    ('Reddit', 'submission',): {
        'id': {
            'desc': "the unique Reddit Submission id (may not be unique across other sources/types)",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'url': {
            'desc': "a url link to the original source of the data",
            'type': 'url',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'created_utc': {
            'desc': "The UTC time stamp of when the submission was created",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'title': {
            'desc': "the raw title of the submission.",
            'type': 'string',
            'computation_fields_default': True,
            'label_fields_default': True,
        },
        'selftext': {
            'desc': "the raw text of the submission.",
            'type': 'string',
            'computation_fields_default': True,
            'label_fields_default': False,
        },
        'author': {
            'desc': "the account name of the poster",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'author_flair_css_class': {
            'desc': "the CSS class of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'author_flair_text': {
            'desc': "the text of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'num_comments': {
            'desc': "the number of comments made under this submission (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'num_crossposts': {
            'desc': "the number of crossposts of this submission (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'score': {
            'desc': "the submission's score (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'subreddit': {
            'desc': "the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'subreddit_id': {
            'desc': "The unique id of the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
    },
    ('Reddit', 'comment',): {
        'id': {
            'desc': 'unique Reddit Comment id (may not be unique across other sources/types)',
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'url': {
            'desc': "a url link to the original source of the data",
            'type': 'url',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'created_utc': {
            'desc': "The UTC time stamp of when the comment was created",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'body': {
            'desc': "the raw text of the comment.",
            'type': 'string',
            'computation_fields_default': True,
            'label_fields_default': True,
        },
        'author': {
            'desc': "the account name of the poster",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'author_flair_css_class': {
            'desc': "the CSS class of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'author_flair_text': {
            'desc': "the text of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'link_id': {
            'desc': "A reference id that can link a comment to its associated submission's id.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'parent_id': {
            'desc': "A reference id for the item (a comment or submission) that this comment is a reply to",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'score': {
            'desc': "the comment's score (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission_id':{
            'desc': 'the id of the submission that comment is a response to',
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'subreddit': {
            'desc': "the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'subreddit_id': {
            'desc': "The unique id of the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
    },
    # 'discussion' merges a submission with its comments; nested fields use
    # dotted 'submission.*' / 'comment.*' names.
    ('Reddit', 'discussion',): {
        'id': {
            'desc': 'unique Reddit Comment id (may not be unique across other sources/types)',
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'url': {
            'desc': "a url link to the original source of the data",
            'type': 'url',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'created_utc': {
            'desc': "The UTC time stamp of when the comment was created",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'title': {
            'desc': "the raw title of the discussion.",
            'type': 'string',
            'computation_fields_default': True,
            'label_fields_default': True,
        },
        'text': {
            'desc': "the raw text of the discussion.",
            'type': 'string',
            'computation_fields_default': True,
            'label_fields_default': False,
        },
        'submission.author': {
            'desc': "the account name of the poster",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.author_flair_css_class': {
            'desc': "the CSS class of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.author_flair_text': {
            'desc': "the text of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.created_utc': {
            'desc': "The UTC time stamp of when the submission was created",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.id': {
            'desc': "the unique Reddit Submission id (may not be unique across other sources/types)",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.num_comments': {
            'desc': "the number of comments made under this submission (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.num_crossposts': {
            'desc': "the number of crossposts of this submission (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.selftext': {
            'desc': "the raw text of the submission.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.score': {
            'desc': "the submission's score (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.subreddit': {
            'desc': "the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.subreddit_id': {
            'desc': "The unique id of the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'submission.title': {
            'desc': "the raw title of the submission.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.author': {
            'desc': "the account name of the poster",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.author_flair_css_class': {
            'desc': "the CSS class of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.author_flair_text': {
            'desc': "the text of the author's flair. subreddit specific",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.body': {
            'desc': "the raw text of the comment.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.created_utc': {
            'desc': "The UTC time stamp of when the comment was created",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.id': {
            'desc': 'unique Reddit Comment id (may not be unique across other sources/types)',
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.link_id': {
            'desc': "A reference id that can link a comment to its associated submission's id.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.parent_id': {
            'desc': "A reference id for the item (a comment or submission) that this comment is a reply to",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.score': {
            'desc': "the comment's score (may be out of date unless updated from Reddit API)",
            'type': 'integer',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.subreddit': {
            'desc': "the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'comment.subreddit_id': {
            'desc': "The unique id of the subreddit the comment is from.",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
    },
    ('Twitter', 'tweet',): {
        'created_utc': { # not a field in tweet object; created using 'created_at'
            'desc': "The UTC time stamp of when the tweet was posted.",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'url': { # not a field in tweet object; created using tweet 'id'
            'desc': "a url link to the original tweet",
            'type': 'url',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'full_text': {
            'desc': "The full text of this tweet.",
            'type': "string",
            'computation_fields_default': True,
            'label_fields_default': True,
        },
        'text': {
            'desc': "The text in the tweet, truncated to 140 characters.",
            'type': "string",
            'computation_fields_default': False,
            'label_fields_default': False,
        },
    },
    ('CSV', 'documents',): {
        'id': {
            'desc': "unique id of the row's data",
            'type': 'string',
            'computation_fields_default': False,
            'label_fields_default': True,
        },
        'url': {
            'desc': "a url link to the original source of the row's data",
            'type': 'url',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
        'created_utc': {
            'desc': "The UTC time stamp of when the row's data was created",
            'type': 'UTC-timestamp',
            'computation_fields_default': False,
            'label_fields_default': False,
        },
    }
}
| 19,172 | 5,691 |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
from .stats import Stats
from functools import partial
class Champion:
    """A champion's base stats plus level-based growth.

    Attribute lookups that miss on the instance fall through to
    ``current_stats`` (see ``__getattribute__``), so a Champion can be used
    wherever a Stats object is expected.
    """

    def __init__(self, name, level=1, ad=0, ap=0, base_aspd=0, as_ratio=0, cs=0, csd=175, ar=0, mr=0, hp=0,
                 ad_growth=0, ap_growth=0, aspd_growth=0, ar_growth=0, mr_growth=0, hp_growth=0) -> None:
        self.base_stats = Stats(ad=ad, ap=ap, cs=cs, csd=csd,
                                ar=ar, mr=mr, hp=hp)
        self.ad_growth = ad_growth
        self.ap_growth = ap_growth
        self.aspd_growth = aspd_growth
        self.ar_growth = ar_growth
        self.mr_growth = mr_growth
        self.hp_growth = hp_growth
        self.base_aspd = base_aspd
        # An unset (0) attack-speed ratio defaults to the base attack speed.
        self.as_ratio = base_aspd if as_ratio == 0 else as_ratio
        self.name = name
        self.level = level
        self.generate_bonus_stats(level)

    def generate_bonus_stats(self, level):
        """Recompute bonus_stats and current_stats for the given level."""
        # simplify function call: bind the level once
        f = partial(growth_formula, level)
        self.bonus_stats = Stats(ad=f(self.ad_growth), ap=f(self.ap_growth), aspd=f(self.aspd_growth),
                                 ar=f(self.ar_growth), mr=f(self.mr_growth), hp=f(self.hp_growth))
        self.current_stats = self.base_stats + self.bonus_stats

    def level_up(self):
        """Advance one level (capped at 18) and refresh the stats."""
        if self.level < 18:
            self.level += 1
            self.generate_bonus_stats(self.level)

    # Forward missing attributes from current_stats so the Champion can be
    # used as a stats object.
    # TODO: this feels hacky
    def __getattribute__(self, name: str) -> Any:
        try:
            return super().__getattribute__(name)
        except AttributeError:
            # BUG FIX: the original bare ``except:`` swallowed every exception
            # type and recursed infinitely if ``current_stats`` did not exist
            # yet; it also leaked KeyError instead of AttributeError, breaking
            # hasattr()/getattr() semantics.
            stats = super().__getattribute__('current_stats')
            try:
                return stats.__dict__[name]
            except KeyError:
                raise AttributeError(name) from None
def growth_formula(level, growth):
    """Per-level stat growth: growth * (level-1) * (0.7025 + 0.0175*(level-1)).

    Yields 0 at level 1 and exactly 17*growth at level 18.
    """
    steps = level - 1
    return growth * steps * (0.7025 + 0.0175 * steps)
from functools import partial  # NOTE(review): duplicate of the import above; harmless re-import
# Pre-configured champion factories: call e.g. Aatrox(level=5) to build one.
TargetDummy = partial(Champion, name="Target Dummy", hp=1000)
# create a partial function from the champion
# Numbers below are presumably hand-copied base stats/growths from game data -- verify on update.
Aatrox = partial(Champion,name="Aatrox", ad=60, ad_growth=5, hp=580, hp_growth=90, ar=38,
                 ar_growth=3.25, mr=32, mr_growth=1.25, base_aspd=0.651, aspd_growth=2.5)
Caitlynn = partial(Champion, name="Caitlynn", ad=62, ad_growth=3.8, hp=510, hp_growth=93, ar=28,
                   ar_growth=3.5, mr=30, mr_growth=0.5, base_aspd=0.681, aspd_growth=4, as_ratio=0.568)
#!/usr/bin/python
# Writer (c) 2012, MrStealth
# Rev. 1.1.1
# License: Attribution-NonCommercial-ShareAlike 3.0 Unported (CC BY-NC-SA 3.0)
# -*- coding: utf-8 -*-
import os
import sqlite3 as sqlite
import xbmcaddon
__addon__ = xbmcaddon.Addon(id='plugin.video.unified.search')
addon_path = __addon__.getAddonInfo('path')
class SearchDB:
def __init__(self):
self.filename = os.path.join(addon_path, 'resources/databases', 'searches.db')
self.connect()
def connect(self):
# Create directory if not exist
basedir = os.path.dirname(self.filename)
if not os.path.exists(basedir):
os.makedirs(basedir)
# Create DB file if not exist
if not os.path.isfile(self.filename):
print "Create new sqlite file %s" % self.filename
open(self.filename, 'w').close()
# Try to avoid OperationalError: database is locked
self.db = sqlite.connect(self.filename, timeout=1000, check_same_thread = False)
self.db.text_factory = str
self.cursor = self.db.cursor()
self.execute = self.cursor.execute
self.commit = self.db.commit()
self.create_if_not_exists()
def create_if_not_exists(self):
try:
self.execute("CREATE TABLE IF NOT EXISTS searches (id INT, keyword TEXT, counter INT default 0)")
self.db.commit()
except sqlite.OperationalError:
print "Database '%s' is locked" % self.filename
pass
def new(self, keyword):
search_id = self.search_id()
self.execute('INSERT INTO searches(id, keyword) VALUES(?,?)', (search_id, keyword))
self.db.commit()
return search_id
def search_id(self):
self.execute("SELECT MAX(id) FROM searches")
return self.increase_counter(self.cursor.fetchone()[0])
def increase_counter(self, counter):
counter = counter + 1 if counter or counter == 0 else 1
return counter
def get_latest_search_id(self):
self.execute("SELECT MAX(id) FROM searches")
return self.cursor.fetchone()[0]
def update_counter(self, search_id):
self.execute("UPDATE searches SET counter=counter+1 WHERE id=%d" % (search_id))
self.execute("SELECT MAX(counter) FROM searches WHERE id=%d" % search_id)
self.db.commit()
return self.cursor.fetchone()[0]
def all(self):
self.execute("SELECT * FROM searches ORDER BY id DESC")
return [{'id': x[0], 'keyword': x[1], 'counter': x[2]} for x in self.cursor.fetchall()]
def drop(self):
if os.path.isfile(self.filename):
self.connect()
self.execute('DELETE FROM searches')
self.db.commit()
def close(self):
self.cursor.close()
self.db.close()
| 2,816 | 880 |
'''
Author: Eunice Jun (@emjun)
Date created: November, 4, 2019
Purpose: Transform a wide format dataset into long format
Use: python3 longify.py <data_in_wide_format.csv>
'''
import sys
import csv
import pandas as pd
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Misusing script. Must include EXACTLY ONE parameter: python3 longify.py <data_in_wide_format.csv>")
    elif not sys.argv[1].endswith('.csv'):
        print("Data file must be a CSV file!")
    else:
        wide_csv = sys.argv[1]
        wide_df = pd.read_csv(wide_csv)
        # Wide columns to collapse into a single value column.
        cols_to_collapse = ['AR', 'TV']
        result_col = 'Score'
        # BUG FIX: long_df was never created (only a commented-out
        # wide_to_long attempt and a leftover pdb.set_trace()), so the
        # script crashed with NameError. Melt the wide columns instead,
        # keeping all remaining columns as identifiers.
        id_cols = [c for c in wide_df.columns if c not in cols_to_collapse]
        long_df = pd.melt(wide_df, id_vars=id_cols,
                          value_vars=cols_to_collapse,
                          var_name='Condition', value_name=result_col)
        # Write next to the input (to_csv() with no path just returns a
        # string, which the original discarded).
        out_csv = wide_csv[:-len('.csv')] + '_long.csv'
        long_df.to_csv(out_csv, index=False)
        print("Wrote long-format data to %s" % out_csv)
| 784 | 283 |
import autokeras as ak
from tensorflow.python.util import nest
from tf2cv.models.resnet import ResNet
# Candidate per-stage block counts for the ResNet search space; the last
# entry ([3, 4, 6, 3]) matches the standard ResNet-34/50 layout.
LAYER_OPTIONS = [[1, 1, 1, 1], [2, 1, 1, 1], [2, 2, 1, 1], [2, 2, 2, 1], [2, 2, 2, 2], [3, 3, 3, 3],
                 [3, 4, 6, 3]]
class CustomResnetBlock(ak.Block):
    """AutoKeras hyperblock that searches over tf2cv ResNet variants
    (stage depths, bottleneck on/off, width multiplier)."""

    def __init__(self, in_size=(224, 224), in_channels=3, layer_options=LAYER_OPTIONS, **kwargs):
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.in_size = in_size  # assumed (height, width) -- TODO confirm against tf2cv's in_size
        self.layers_options = layer_options

    def build(self, hp, inputs=None):
        """Build the searched ResNet feature extractor applied to `inputs`."""
        input_node = nest.flatten(inputs)[0]
        # Get HP Params for network
        bottleneck = hp.Boolean('hp_bottleneck', default=False)
        # Search over the *index* into layers_options (hp values must be scalars).
        layers_option_idx = list(range(len(self.layers_options)))
        layers_sel = hp.Choice('idx_layers', values=layers_option_idx)
        layers = self.layers_options[layers_sel]
        if self.in_size[0] < 100:
            # Small inputs (CIFAR-scale): narrower stem and only 3 stages.
            init_block_channels = 16
            channels_per_layers = [16, 32, 64]
            layers = layers[:3]
        else:
            init_block_channels = 64
            channels_per_layers = [64, 128, 256, 512]
        if bottleneck:
            # Bottleneck blocks expand output channels 4x.
            bottleneck_factor = 4
            channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
        # Expand to one channel count per residual block in each stage.
        channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
        width_scale = hp.Float('width_scale', min_value=0.5, max_value=1.5, step=0.1)
        if width_scale != 1.0:
            # it should not change the last block of last layer
            channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                         for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
            init_block_channels = int(init_block_channels * width_scale)
        # Create layers (feature extractor only; classification head added elsewhere)
        net = ResNet(
            channels=channels,
            init_block_channels=init_block_channels,
            bottleneck=bottleneck,
            conv1_stride=True,
            in_channels=self.in_channels,
            in_size=self.in_size,
            use_with_ak_classification=True).features
        output_node = net(input_node)
        return output_node
| 2,241 | 794 |
import os
import cv2
import numpy as np
# Convert text masks (one ASCII row of space-separated pixel values per line)
# into grayscale PNGs alongside the .txt files.
# TODO: make the directory a CLI argument instead of a hard-coded path.
directory = "/home/rider/DataSets/Images/Development/humanoid_soccer_dataset/ScreenshotMasks"
for filename in os.listdir(directory):
    if not filename.endswith(".txt"):
        continue
    # Fixed 480x640 single-channel canvas; assumes masks never exceed it.
    mask = np.zeros((480, 640), np.uint8)
    with open(os.path.join(directory, filename)) as f:
        for row, line in enumerate(f):
            values = line.split(' ')
            # The last token is the trailing newline/empty string, so skip it.
            for col in range(len(values) - 1):
                # BUG FIX: assigning the raw string relied on numpy's implicit
                # coercion and raises ValueError on modern numpy; cast to int.
                mask[row][col] = int(values[col])
    cv2.imwrite(os.path.join(directory, filename.replace(".txt", ".png")), mask)
    # BUG FIX: removed cv2.waitKey(0) -- no window is ever shown, so it only
    # blocked this batch loop waiting for a keypress after every file.
| 766 | 246 |
import sys, os
from serif.theory.serif_theory import SerifTheory
from serif.theory.enumerated_type import MentionType
from serif.util.serifxml_utils import CountryIdentifier
class SerifEntityTheory(SerifTheory):
    def num_mentions(self):
        """Return the number of mentions in this Entity."""
        return len(self.mentions)

    def representative_mention(self):
        """Pick the Mention that best represents this Entity.

        Preference order (ported from Java's DefaultRepresentativeMentionFinder):
        first country name encountered, then the longest name, then the
        earliest descriptor (longest text on exact ties), then the first
        mention of any kind.
        """
        longest_name = None
        longest_len = -1
        for candidate in self.mentions:
            if candidate.mention_type != MentionType.name:
                continue
            head = candidate.atomic_head.text.lower()
            if len(head) > longest_len:
                longest_name = candidate
                longest_len = len(head)
            if CountryIdentifier.is_country_string(head):
                return candidate
        if longest_name is not None:
            return longest_name
        descriptors = [m for m in self.mentions
                       if m.mention_type == MentionType.desc]
        if descriptors:
            # Earliest start offset wins; longer text breaks exact ties.
            return min(descriptors,
                       key=lambda m: (m.start_char, -len(m.text)))
        # Default, could happen with first person pronouns?
        return self.mentions[0] if len(self.mentions) > 0 else None

    def representative_name(self):
        """Return the representative mention if it is a name, else None."""
        rep = self.representative_mention()
        if rep is not None and rep.mention_type == MentionType.name:
            return rep
        return None

    def contains_mention(self, mention):
        """Return True if the given Mention belongs to this Entity."""
        return any(m == mention for m in self.mentions)

    def has_name_mention(self):
        """Return True if the Entity has at least one name Mention."""
        return any(m.mention_type == MentionType.name for m in self.mentions)

    def has_desc_mention(self):
        """Return True if the Entity has at least one desc Mention."""
        return any(m.mention_type == MentionType.desc for m in self.mentions)

    def has_name_or_desc_mention(self):
        """Return True if the Entity has a name or desc Mention."""
        return any(m.mention_type in (MentionType.name, MentionType.desc)
                   for m in self.mentions)
| 3,524 | 931 |
# URI Online Judge
# Read a CPF formatted XXX.YYY.ZZZ-DD and print each group on its own line.
pieces = input().split('.')
XXX = pieces[0]
YYY = pieces[1]
ZZZ, DD = pieces[2].split('-')
for group in (XXX, YYY, ZZZ, DD):
    print(group)
| 169 | 87 |
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import os
import base64
# Interactive scraper: fetch the first Bing image-results page for a query
# and download every thumbnail it contains into the chosen folder.
keyword = input('What do you want? ')
save_floder = input('Where do you want to save images?(Default as the current directory) ')
# Default to the current working directory; create the folder if missing.
if save_floder == '': save_floder = os.getcwd()
if not os.path.exists(save_floder): os.mkdir(save_floder)
url = 'https://cn.bing.com/images/search?q=%s&form=BESBTB&first=1&scenario=ImageBasicHover&ensearch=1' % keyword
# Spoof a desktop browser user-agent so Bing serves the normal results page.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36'
}
print('Starting fetching image urls...')
r = requests.get(url, headers=headers)
html = r.text
soup = BeautifulSoup(html, 'lxml')
# '.mimg' elements are the result thumbnails; lazily-loaded ones carry the
# URL in 'data-src' rather than 'src'.
img_elements = soup.select('.mimg')
img_urls = []
for img_element in img_elements:
    if 'src' in img_element.attrs:
        img_urls.append(img_element['src'])
    if 'data-src' in img_element.attrs:
        img_urls.append(img_element['data-src'])
print('Starting downloading images...')
for i in range(len(img_urls)):
    if 'data:image/' in img_urls[i]:
        # Inline base64 data URIs are skipped; the decoder below was never finished.
        print('Warning: Not support base64')
        continue
    # img_urls[i] += (4 - len(img_urls[i]) % 4) * '='
    # img_bytes = base64.b64decode(img_urls[i].split(',')[1])
    # file_name = save_floder + '/' + str(i) + '.' + img_urls[i].split(';')[0].split('/')[1]
    else:
        r = requests.get(img_urls[i])
        img_bytes = r.content
        # Infer the file extension from the response Content-Type header.
        file_name = save_floder + '/' + str(i) + '.' + r.headers['Content-Type'].split('/')[1]
    # Reached only via the else branch (the base64 branch continues above).
    with open(file_name, 'wb') as f:
        f.write(img_bytes)
    print('Downloaded %s' % file_name)
| 1,655 | 623 |
from flask import Flask, request
import requests
import json
import configparser
from api_interaction import *
# read variables from config
credential = configparser.ConfigParser()
credential.read('cred.prod')  # production credentials file -- presumably kept out of VCS
# Import credential
bearer_bot = credential['Webex']['WEBEX_TEAMS_TOKEN']  # bot access token
botEmail = credential['Webex']['WEBEX_BOT_EMAIL']  # used to ignore the bot's own messages
# WebhookUrl
webhookUrl = credential['Webex']['WEBEX_WEBHOOK_URL']  # public callback URL registered with Webex
Meraki_API_KEY = credential['Webex']['Meraki_API_KEY']
# Common headers for every Webex REST call made by this bot.
headers_bot = {
    "Accept": "application/json",
    "Content-Type": "application/json; charset=utf-8",
    "Authorization": "Bearer " + bearer_bot
}
app = Flask(__name__)
#### Functions
def createWebhook(bearer, webhookUrl):
    """Register message and attachmentAction webhooks for the bot unless a
    webhook already points at webhookUrl. (`bearer` is unused; the module
    headers carry the token.)"""
    existing = send_webex_get("https://webexapis.com/v1/webhooks")["items"]
    if any(hook["targetUrl"] == webhookUrl for hook in existing):
        return
    payloads = (
        {
            "name": "Messages collab bot Webhook",
            "resource": "messages",
            "event": "created",
            "targetUrl": webhookUrl,
        },
        {
            "name": "Card Report collab bot Webhook",
            "targetUrl": webhookUrl,
            "resource": "attachmentActions",
            "event": "created",
        },
    )
    for payload in payloads:
        send_webex_post("https://webexapis.com/v1/webhooks/", payload)
    print("Webhook status: done")
def deleteWebHooks(bearer, webhookUrl):
    """Delete every webhook registered for the bot (both arguments unused)."""
    base_url = "https://webexapis.com/v1/webhooks/"
    for hook in send_webex_get(base_url)["items"]:
        send_webex_delete(base_url + hook["id"])
def send_webex_get(url, payload=None, js=True):
    """GET against the Webex API.

    With js=True returns the decoded JSON on HTTP 200, or an "Error ..."
    string otherwise; with js=False returns the raw response object.
    """
    response = requests.get(url, headers=headers_bot, params=payload)
    if js != True:
        return response
    if response.status_code != 200:
        print(response)
        return "Error " + str(response.status_code)
    try:
        return response.json()
    except json.decoder.JSONDecodeError:
        print("Error JSONDecodeError")
        return "Error JSONDecodeError"
def send_webex_delete(url, payload=None):
    """DELETE against the Webex API; the response is intentionally ignored."""
    requests.delete(url, headers=headers_bot, params=payload)
def send_webex_post(url, data):
    """POST JSON-serialized data to the Webex API and return the decoded reply."""
    response = requests.post(url, json.dumps(data), headers=headers_bot)
    return response.json()
def postNotificationToPerson(reportText, personEmail):
    """Direct-message markdown text to a single user."""
    message = {
        "toPersonEmail": personEmail,
        "markdown": reportText,
        "text": "This text would be displayed by Webex Teams clients that do not support markdown.",
    }
    send_webex_post('https://webexapis.com/v1/messages', message)
def postCard(personEmail):
    """Send the default adaptive card to a user, substituting their email."""
    # open and read data from file as part of body for request
    with open("adaptiveCard.json", "r", encoding="utf-8") as template:
        body = template.read().replace('USER_EMAIL', personEmail)
    # Add encoding, if you use non-Latin characters
    payload = body.encode("utf-8")
    requests.post('https://webexapis.com/v1/messages', data=payload, headers=headers_bot).json()
    print("POST CARD TO ", personEmail)
def postCardDNAC(personEmail):
    """Send the DNA Center adaptive card to a user, substituting their email."""
    # open and read data from file as part of body for request
    with open("adaptiveCardDNAC.json", "r", encoding="utf-8") as template:
        body = template.read().replace('USER_EMAIL', personEmail)
    # Add encoding, if you use non-Latin characters
    payload = body.encode("utf-8")
    requests.post('https://webexapis.com/v1/messages', data=payload, headers=headers_bot).json()
    print("POST CARD TO ", personEmail)
def postCardMeraki(personEmail):
    """Send the Meraki adaptive card to a user, substituting their email."""
    # open and read data from file as part of body for request
    with open("adaptiveCardMeraki.json", "r", encoding="utf-8") as template:
        body = template.read().replace('USER_EMAIL', personEmail)
    # Add encoding, if you use non-Latin characters
    payload = body.encode("utf-8")
    requests.post('https://webexapis.com/v1/messages', data=payload, headers=headers_bot).json()
    print("POST CARD TO ", personEmail)
@app.route('/', methods=['GET', 'POST'])
def webex_webhook():
    # Flask endpoint: receives Webex webhook callbacks (POST) and serves a
    # simple status page (GET).
    if request.method == 'POST':
        webhook = request.get_json(silent=True)
        print("Webhook:")
        print(webhook)
        # Ignore messages sent by the bot itself to avoid an echo loop.
        if webhook['resource'] == 'messages' and webhook['data']['personEmail'] != botEmail:
            # The webhook only carries the message id; fetch the full body.
            result = send_webex_get('https://webexapis.com/v1/messages/{0}'.format(webhook['data']['id']))
            print("result messages", result)
            in_message = result.get('text', '').lower()
            print("in_message", in_message)
            # Simple slash-command dispatch on the message text.
            if in_message.startswith('/hi'):
                personEmail = webhook['data']['personEmail']
                postNotificationToPerson('Hi', personEmail)
            elif in_message.startswith('/dnac'):
                postCardDNAC(webhook['data']['personEmail'])
            elif in_message.startswith('/post'):
                postCardMeraki(webhook['data']['personEmail'])
            else:
                # Any other text gets the default adaptive card.
                postCard(webhook['data']['personEmail'])
        elif webhook['resource'] == 'attachmentActions':
            # A user submitted an adaptive card; fetch the submitted inputs.
            result = send_webex_get('https://webexapis.com/v1/attachment/actions/{}'.format(webhook['data']['id']))
            print("\n\n Result ", result)
            # attachmentActions carry a personId, not an email; resolve it.
            person = send_webex_get('https://webexapis.com/v1/people/{}'.format(result['personId']))
            personEmail = person["emails"][0]
            postNotificationToPerson("Bot received your answer", personEmail)
            # Each card embeds a 'type' input identifying which card was filled.
            if (result['inputs']['type'] == 'event_card'):
                responseText = "Your Email " + personEmail + "\n" + "Date in Adaptive Card: " + result['inputs']['date'] + "\n" + "Text in Adaptive Card: " + result['inputs']['input_text']
                postNotificationToPerson(responseText, personEmail)
            elif (result['inputs']['type'] == 'api_operation_card'):
                # NOTE(review): SimpleAPIoperation appears to return a
                # two-element sequence; sent in reverse order -- confirm.
                reportText = SimpleAPIoperation(dnac_url)
                postNotificationToPerson(reportText[1], personEmail)
                postNotificationToPerson(reportText[0], personEmail)
            elif (result['inputs']['type'] == 'api_operation_card_post'):
                reportText = merakiPostOperation(result['inputs']['admin_email'])
                postNotificationToPerson(reportText, personEmail)
            elif (result['inputs']['type'] == '3rd_party'):
                pass
        return "true"
    elif request.method == 'GET':
        # Plain status page confirming the bot is reachable.
        message = "<center><img src=\"http://bit.ly/SparkBot-512x512\" alt=\"Webex Bot\" style=\"width:256; height:256;\"</center>" \
                  "<center><h2><b>Congratulations! Your <i style=\"color:#ff8000;\"></i> bot is up and running.</b></h2></center>" \
                  "<center><b><i>Please don't forget to create Webhooks to start receiving events from Webex Teams!</i></b></center>" \
                  "<center><b>Generate meeting token <a href='/token'>/token</a></b></center>"
        return message
# Module start-up: clear any stale webhooks, then register a fresh one so
# Webex begins delivering events to this server.
print("Start Bot")
deleteWebHooks(bearer_bot, webhookUrl)
createWebhook(bearer_bot, webhookUrl)
from aiohttp import web, WSMsgType
import asyncio
from handlers.handler import Handler
class FileHandler(Handler):
    """Serve the contents of a single file as an HTML response."""

    def __init__(self, file):
        # Path of the file to serve on each request.
        self.file = file

    async def handle(self, request):
        """Return the file's contents as a text/html aiohttp response.

        Fix: ``@asyncio.coroutine`` generator-based coroutines were removed
        in Python 3.11; ``async def`` is the drop-in replacement and is
        awaited by aiohttp exactly the same way.
        """
        with open(self.file, "rt") as file:
            return web.Response(text=file.read(), content_type="text/html")

    def __enter__(self):
        # Fix: returning None made ``with FileHandler(p) as h:`` bind ``h``
        # to None; a context manager should yield itself.
        return self

    def __exit__(self, exit_type, value, traceback):
        # Nothing to release; the file is opened/closed per request.
        pass
| 453 | 141 |
from library.ftx.base import AsyncBaseApiClass
class Account(AsyncBaseApiClass):
    """https://docs.ftx.com/#account"""

    def __init__(self, api_key: str, secret_key: str, subaccount_name: str = None):
        super().__init__(api_key, secret_key, subaccount_name)

    async def get_account_information(self):
        """ https://docs.ftx.com/#get-account-information """
        return await self.get('/api/account')

    async def get_positions(self):
        """ https://docs.ftx.com/#get-positions """
        return await self.get('/api/positions')

    async def change_account_leverage(self, leverage: float):
        """ https://docs.ftx.com/#change-account-leverage

        :param leverage: Desired account-wide leverage.
        :raises ValueError: if ``leverage`` is not below 2.
        """
        # Fix: ``assert`` is stripped when Python runs with -O, silently
        # dropping this validation; raise explicitly instead.
        # NOTE(review): the "< 2" cap is preserved from the original code --
        # confirm it matches the intended account limit.
        if not leverage < 2:
            raise ValueError(f'leverage must be below 2, got {leverage}')
        return await self.post('/api/account/leverage', data={'leverage': leverage})
| 798 | 244 |
from torchexpo.nlp.sentiment_analysis.electra import (electra_imdb)
from torchexpo.nlp.sentiment_analysis.distilbert import (distilbert_imdb) | 141 | 52 |
import collections
import copy
import tensorflow as tf
class Pipeline(object):
    """ A linear sequence of stages that perform operations on an input. """

    # Monotonically increasing counter used to give every pipeline a unique
    # identity: two references are "equal" only if they point to the same
    # underlying pipeline instance.
    _instance_number = 0

    def __init__(self):
        # The pipeline output (a list of data points) after build().
        self.__output = None
        # The stages in this pipeline, in execution order.
        self.__stages = []
        # Pipelines that this one feeds into (created when a stage forks).
        self.__sub_pipelines = []
        # Local copy of this instance's unique number.
        self.__instance_number = Pipeline._instance_number
        Pipeline._instance_number += 1

    def __copy__(self):
        # Copying is deliberately unsupported, because invariably it isn't
        # going to work the way you want it to.
        raise NotImplementedError("Copying pipelines is not supported.")

    def __deepcopy__(self, memodict=None):
        # Fix: the original used a mutable default argument (memodict={});
        # the parameter is unused, but None is the safe convention.
        return self.__copy__()

    def __eq__(self, other):
        # Fix: comparing against a non-Pipeline used to raise AttributeError;
        # defer to the other operand instead (Python then falls back to
        # identity, so ``pipeline == 42`` is simply False).
        if not isinstance(other, Pipeline):
            return NotImplemented
        return self.__instance_number == other.__instance_number

    def __hash__(self):
        # Hash the underlying instance number, consistent with __eq__.
        return hash(self.__instance_number)

    def __build_stage(self, stage):
        """ Builds a single stage of the pipeline.
        Args:
          stage: The stage to build. """
        # For everything but the last stage, we should have only one output.
        assert len(self.__output) == 1
        # Run the stage on our current output.
        outputs = stage.build(self.__output[0])
        if isinstance(outputs, tf.Tensor):
            # Fix: use isinstance() instead of an exact type comparison so
            # tf.Tensor subclasses are also wrapped into a singleton list.
            outputs = [outputs]
        # Convert output images to data points.
        data_points = []
        for output in outputs:
            data_point = copy.copy(self.__output[0])
            data_point.image = output
            data_points.append(data_point)
        self.__output = data_points

    def __is_leaf(self):
        """ Returns: True if this pipeline has no descendents. """
        return len(self.__sub_pipelines) == 0

    def __get_outputs_and_leaves(self):
        """
        Returns:
          A tuple, the first element of which is a list of outputs, and the
          second of which is a list of leaf pipelines. """
        if self.__is_leaf():
            # Easy case: just this pipeline.
            return (self.__output, [self])
        # Otherwise, collect the output and leaves from every sub-pipeline.
        outputs = []
        leaves = []
        for pipeline in self.__sub_pipelines:
            pipe_outputs, pipe_leaves = pipeline.__get_outputs_and_leaves()
            outputs.extend(pipe_outputs)
            leaves.extend(pipe_leaves)
        return (outputs, leaves)

    def add(self, stage):
        """ Adds a new stage to the pipeline.
        Args:
          stage: The stage to add.
        Returns:
          If the stage has a single output, the current pipeline is returned.
          Otherwise, the pipeline splits, and multiple new pipelines are
          automatically created and returned. The exact behavior should be
          specified by the pipeline stage. """
        self.__stages.append(stage)
        # Figure out how many outputs we have from this stage.
        num_outputs = stage.get_num_outputs()
        if num_outputs == 1:
            # We can keep using the same pipeline.
            return self
        else:
            # The pipeline forks: create a new pipeline per output.
            pipelines = []
            for _ in range(0, num_outputs):
                pipeline = Pipeline()
                pipelines.append(pipeline)
                self.__sub_pipelines.append(pipeline)
            return pipelines

    def build(self, data):
        """ Builds the pipeline on a set of input data.
        Args:
          data: The data point to serve as input for the pipeline. """
        # Initially, the output equals the input, in case we have no stages.
        self.__output = [data]
        # Build every stage.
        for stage in self.__stages:
            self.__build_stage(stage)
        # Build the sub-pipelines, feeding each one output.
        if not self.__is_leaf():
            for pipeline, output in zip(self.__sub_pipelines, self.__output):
                pipeline.build(output)

    def get_outputs(self):
        """ Gets the ultimate output for this pipeline and any ones downstream.
        This should only be called after build().
        Returns:
          A list of data points corresponding to the "leaf" outputs from left
          to right. """
        outputs, _ = self.__get_outputs_and_leaves()
        return outputs

    def get_num_outputs(self):
        """ Gets the total number of outputs from this pipeline and any
        sub-pipelines. This is safe to call at any time.
        Returns:
          The total number of outputs. """
        if self.__is_leaf():
            # No sub-pipelines, so we just have our own output.
            return 1
        # Add up the number of outputs from each sub-pipeline.
        num_outputs = 0
        for pipeline in self.__sub_pipelines:
            num_outputs += pipeline.get_num_outputs()
        return num_outputs

    def get_leaf_pipelines(self):
        """
        Returns:
          List of all pipelines that are descendents of this pipeline, but
          which have no descendents of their own. This list can include the
          pipeline that this method was called on. Elements correspond to the
          elements returned by get_outputs(). """
        _, leaves = self.__get_outputs_and_leaves()
        return leaves
class PipelineStage(object):
    """Abstract base for preprocessing-pipeline stages.

    Subclasses override build() and get_num_outputs(); instances can then be
    added to data loaders to perform preprocessing."""

    def build(self, data_point):
        """Run this stage on a DataPoint.

        Args:
          data_point: The data point to process.
        Returns:
          The result of the pipeline stage."""
        raise NotImplementedError("build() must be implemented by subclass.")

    def get_num_outputs(self):
        """Returns the number of outputs this stage produces."""
        raise NotImplementedError(
            "get_num_outputs() must be implemented by subclass.")
class RandomCropStage(PipelineStage):
    """Pipeline stage that extracts a random crop of the image.

    Produces a single image output."""

    def __init__(self, crop_size):
        """
        Args:
          crop_size: Target crop size, as (h, w)."""
        self.__crop_h, self.__crop_w = crop_size

    def build(self, data_point):
        source = data_point.image
        # The crop keeps the channel count of the source image.
        channels = source.get_shape()[2]
        target_shape = [self.__crop_h, self.__crop_w, channels]
        return tf.random_crop(source, target_shape)

    def get_num_outputs(self):
        return 1
class CenterCropStage(PipelineStage):
    """Pipeline stage that extracts the central crop of the image.

    Produces a single image output."""

    def __init__(self, crop_fraction):
        """
        Args:
          crop_fraction: Fraction of the image to retain, in [0.0, 1.0]."""
        self.__crop_fraction = crop_fraction

    def build(self, data_point):
        # Delegate the actual cropping to TensorFlow.
        return tf.image.central_crop(data_point.image, self.__crop_fraction)

    def get_num_outputs(self):
        return 1
class RandomBrightnessStage(PipelineStage):
    """Pipeline stage that randomly perturbs image brightness.

    Produces a single image output."""

    def __init__(self, max_delta):
        """
        Args:
          max_delta: Maximum amount to add to or remove from pixel values."""
        self.__max_delta = max_delta

    def build(self, data_point):
        return tf.image.random_brightness(data_point.image, self.__max_delta)

    def get_num_outputs(self):
        return 1
class RandomContrastStage(PipelineStage):
    """Pipeline stage that randomly perturbs image contrast.

    Produces a single image output."""

    def __init__(self, min_factor, max_factor):
        """
        Args:
          min_factor: Minimum value of the contrast factor.
          max_factor: Maximum value of the contrast factor."""
        self.__min_factor = min_factor
        self.__max_factor = max_factor

    def build(self, data_point):
        return tf.image.random_contrast(data_point.image,
                                        self.__min_factor,
                                        self.__max_factor)

    def get_num_outputs(self):
        return 1
class RandomHueStage(PipelineStage):
    """Pipeline stage that randomly perturbs the image hue.

    Produces a single image output."""

    def __init__(self, max_delta):
        """
        Args:
          max_delta: Maximum amount to change the hue channel by."""
        self.__max_delta = max_delta

    def build(self, data_point):
        return tf.image.random_hue(data_point.image, self.__max_delta)

    def get_num_outputs(self):
        return 1
class RandomSaturationStage(PipelineStage):
    """Pipeline stage that randomly perturbs image saturation.

    Produces a single image output."""

    def __init__(self, min_factor, max_factor):
        """
        Args:
          min_factor: Minimum value of the saturation factor.
          max_factor: Maximum value of the saturation factor."""
        self.__min_factor = min_factor
        self.__max_factor = max_factor

    def build(self, data_point):
        return tf.image.random_saturation(data_point.image,
                                          self.__min_factor,
                                          self.__max_factor)

    def get_num_outputs(self):
        return 1
class GrayscaleStage(PipelineStage):
    """Pipeline stage that converts input images to grayscale.

    Produces a single image output."""

    def build(self, data_point):
        return tf.image.rgb_to_grayscale(data_point.image)

    def get_num_outputs(self):
        return 1
class ResizeStage(PipelineStage):
    """Pipeline stage that resizes input images.

    Produces a single image output."""

    def __init__(self, size):
        """
        Args:
          size: Size of the final image, as a tuple of (h, w)."""
        self.__size = size

    def build(self, data_point):
        return tf.image.resize_images(data_point.image, self.__size,
                                      align_corners=True)

    def get_num_outputs(self):
        return 1
class NormalizationStage(PipelineStage):
    """Per-image normalization stage: linearly scales each image to zero mean
    and unit norm.  Produces a single image output."""

    def build(self, data_point):
        return tf.image.per_image_standardization(data_point.image)

    def get_num_outputs(self):
        return 1
class EyeExtractionStage(PipelineStage):
    """ Extracts eye images from the face crop of the image. It outputs three
    images, in order: The left eye crop, the right eye crop, and the face crop.
    """

    def __convert_box(self, box):
        """ Converts a bounding box from the x, y, w, h format to the y1, x1, y2, x2
        format.
        Args:
          box: The bounding box to convert.
        Returns:
          The converted box. """
        x = box[0]
        y = box[1]
        w = box[2]
        h = box[3]
        # Compute the other corners.
        y2 = y + h
        x2 = x + w
        # Create the new tensor in the (y1, x1, y2, x2) order expected by
        # tf.image.crop_and_resize.
        return tf.stack([y, x, y2, x2], axis=0)

    def build(self, data_point):
        image = data_point.image
        leye_box = data_point.leye_box
        reye_box = data_point.reye_box
        # Convert the bounding boxes to a form that TensorFlow understands.
        # NOTE(review): crop_and_resize interprets boxes as normalized [0, 1]
        # coordinates -- assumes the eye boxes are frame fractions; confirm
        # against the data loader.
        leye_box = self.__convert_box(leye_box)
        reye_box = self.__convert_box(reye_box)
        boxes = tf.stack([leye_box, reye_box], axis=0)
        # Duplicate the input image so that we can crop it twice.
        image_dup = tf.stack([image] * 2, axis=0)
        # Extract the crops using the bounding boxes; each box indexes into the
        # duplicated image batch.
        indices = tf.constant([0, 1])
        # The crops should be resized to the same size as the image.
        crop_size = image.shape[0:2]
        crops = tf.image.crop_and_resize(image_dup, boxes, indices, crop_size)
        leye_crop = crops[0]
        reye_crop = crops[1]
        # Pass the original face crop through as the third output.
        return (leye_crop, reye_crop, image)

    def get_num_outputs(self):
        return 3
class FaceMaskStage(PipelineStage):
    """ Creates face mask images. It outputs 2 images, in order: The face mask
    image, and the original face crop. """

    def build(self, data_point):
        image = data_point.image
        grid_box = data_point.grid_box
        # The box is in frame fractions initially, so we have to convert it.
        # NOTE(review): 25x25 matches the GazeCapture "face grid" resolution;
        # assumes grid_box is (x, y, w, h) in frame fractions -- confirm.
        box_sq = grid_box * 25
        box_sq = tf.cast(box_sq, tf.int32)
        # The GazeCapture data is one-indexed. Convert to zero-indexed.
        box_sq -= tf.constant([1, 1, 0, 0])
        # Create the inner section.
        mask_x = box_sq[0]
        mask_y = box_sq[1]
        mask_w = box_sq[2]
        mask_h = box_sq[3]
        # Keep the padding in range: origin within the grid, and extent not
        # running off the far edge.
        mask_x = tf.clip_by_value(mask_x, 0, 24)
        mask_y = tf.clip_by_value(mask_y, 0, 24)
        mask_w = tf.clip_by_value(mask_w, 0, 25 - mask_x)
        mask_h = tf.clip_by_value(mask_h, 0, 25 - mask_y)
        # The mask interior (all ones) has the clipped box's shape.
        inner_shape = tf.stack((mask_h, mask_w), axis=0)
        inner = tf.ones(inner_shape, dtype=tf.float32)
        # Compute how much we have to pad by on each side so the interior sits
        # at (mask_x, mask_y) within the 25x25 grid.
        pad_l = mask_x
        pad_r = 25 - (pad_l + mask_w)
        pad_t = mask_y
        pad_b = 25 - (pad_t + mask_h)
        # Pad the inner section to create the mask.
        pad_x = tf.stack((pad_l, pad_r), axis=0)
        pad_y = tf.stack((pad_t, pad_b), axis=0)
        paddings = tf.stack((pad_y, pad_x), axis=0)
        mask = tf.pad(inner, paddings)
        # Explicitly define the shape of the mask.
        mask = tf.reshape(mask, (25, 25))
        return (mask, image)

    def get_num_outputs(self):
        return 2
class HeadPoseStage(PipelineStage):
    """Passes the head pose through as an extra input.

    Outputs, in order: the pose and the unchanged face crop."""

    def build(self, data_point):
        pose = data_point.pose
        face = data_point.image
        return (pose, face)

    def get_num_outputs(self):
        return 2
class SessionNumStage(PipelineStage):
    """Passes the session number through as an extra input.

    Outputs, in order: the session number (as float32) and the unchanged
    face crop."""

    def build(self, data_point):
        # The pipeline machinery expects float tensors, so cast the integer
        # session number.
        session = tf.cast(data_point.session_num, tf.float32)
        return (session, data_point.image)

    def get_num_outputs(self):
        return 2
| 14,185 | 4,537 |
import pprint
def parse_input(data):
    """Parse a FASTA string into a {label: sequence} dict.

    Fixes two defects in the original: the header length was hard-coded to
    14 characters (so keys kept a trailing newline and any label of another
    length was split incorrectly), and the empty element produced by
    splitting a leading '>' was inserted as a '' key.

    :param data: FASTA text, records introduced by '>'.
    :returns: Dict mapping record label to its concatenated sequence.
    """
    result = {}
    for record in data.split('>'):
        record = record.strip()
        # Skip the empty fragment before the first '>'.
        if not record:
            continue
        # The label is everything up to the first newline; the rest is the
        # (possibly multi-line) sequence.
        label, _, seq = record.partition('\n')
        result[label.strip()] = seq.replace('\n', '')
    pprint.pprint(result)
    return result
def make_profile(data):
    """Build a per-position nucleotide count profile and print the consensus.

    :param data: Mapping of label -> DNA string (as from parse_input()).
    :returns: The profile dict mapping 'A'/'C'/'G'/'T' to per-column count
        lists.  (Fix: the original computed the profile but returned None,
        forcing print-only use.)
    """
    # Keep only non-empty sequences; all are assumed to be the same length.
    dna_strings = [list(v) for v in data.values() if v]
    dna_len = len(dna_strings[0])
    # Initialize each base's count row with zeros.
    profile = {base: [0] * dna_len for base in 'ACGT'}
    # Tally each base per column.
    for col in range(dna_len):
        for row in dna_strings:
            profile[row[col]][col] += 1
    output(profile, dna_len)
    return profile
def output(data, dna_len):
    """Print the consensus string, then one per-base count row per base.

    :param data: Profile dict mapping base -> list of per-column counts.
    :param dna_len: Number of columns in the profile.
    """
    consensus = []
    for i in range(dna_len):
        # Pick the base with the strictly highest count in this column.
        best_base, best_count = '', 0
        for base, counts in data.items():
            if counts[i] > best_count:
                best_count, best_base = counts[i], base
        consensus.append(best_base)
    print(''.join(consensus))
    # Emit the profile rows in "X: n n n ..." form.
    for base, counts in data.items():
        print(base, end=': ')
        for count in counts:
            print(count, end=' ')
        print()
def main():
    """Rosalind 'Consensus and Profile' (CONS) driver.

    Parses the embedded FASTA dataset below, builds the per-position base
    profile, and prints the consensus string plus the profile rows.
    """
    # The dataset is embedded verbatim; parse_input() splits it on '>'.
    data = """
>Rosalind_7877
TACGATTCGGGTACATTAGTCCGCTTGTGGACTTAGCTTAGATTAGTAAACATTTTTCGA
GGACTGATCGACCTCTCTAGAACTGAATAGCCGGGAACTAGCTTCGCGACAACTTGTACT
GGGGCACCTTATTGACGTTAGGGTACGAACCCTATTACCGGTGTTCACCGATTAGACCGC
CCTAATCGAGCACGAAGCGGCATACGAACTAAAAGAACATTAAAGGATGAAGTTCTGGCA
TTAGATGTGTGTAACGTCTCGGTCGCTCAGTGGGCCAAGTAGGGTCACGGAGAGGCCTCT
TAAGCGACGTTTTATAGCATTTTTGGTCTCCATGAGTACGCGTAACGTATAGCGTCCCAC
TCACAGCCATCGTCACGATTAGCAATTTAACACTCGCTCCATAGGGTCTCGCGTGTCTGA
GCGCTGCGTGTTTCCCCCCTGTTCACTTGAACTAGTAGATCGTGTAGGGGACACTTCTGG
AGAGACTTGATATAGGTCAAAAGGAAAACCTCGTCATGACGGACCAAACCCGGATAACTT
GGACTAGGCCCAACAAATAGGGCTTTACTTAGACCTTAAGAGTATAACGGTATCTACGTC
AATATGTGGACATCTATGCTATAAACGTCTACAAAGGCTCGAAGCGTGGTTTGCCCATTT
CATCCGAGAATCCTCATGTCGGTGTGGCCTAAACTTGCGGTATTGGGAGGGGGCTGATCT
GTCCCAGACGTCCAAAACGATTGTGCAGGTCGCAGGCACGAGGTTAGATTTAACACGCCT
TTCCCCTTCAGCTCTTGCGTGTCATTCGAGTCTAATGCTGATGCGGTAGACGGCCATATA
AGGCGGAACCCGTGACCTTCGAGACAGCCGAGAATCGTTACTAGGACTATCTAAATAACC
AAAACCTGGTGGTCGCCAAACGCATTGCAAACCATACGAGGGTTTATCCA
>Rosalind_9115
CAAAAGCGCTCCAGCTACGCACAGATCGCTTGATACGCACCCCACTGATTAATATTCTCA
GACCGTTACGTTAAACCTTGCAGGGTAAGATTATTCAGCGTAGCACTGCGCCTGGCGCAA
CCCAGGCCAGTTGAAATCTCCTATGTTGTACGCACGCACTCCATGGTAGTCGTCCCTAAG
TATCCACTGGGAAAGGTGCTCTAACCAAGGCCCCAAGGAAGCGGTGCTCGTTGGTAGTAG
TAGAGCGGCGACATTCATCTAGCAGCGTCAAAGATCCTTGTTTAGGACTCTTTGGTCAGG
CATCAACAGCCACCCTGGTGCCTGGCGATAATAAATCGCGGGCCCGCTAAAGTTGTTCAA
GGTTACAATTGCGAGTTCCCAGTGGTATGTCACCTCAAACGCCCACTGTGACGAAAATAG
GCAGGGCGTTTTCGCGATACCCTCTTGCCGTTGTGTGCGAACCATCTCACGTAGACGCGA
CACGGAAATGACACATATTATAACCGTCACTTTCGCGATATTGTAGCAGCCTCTTACGCA
CTTACACGTAATCCATCACCCGTATTGCCTGTCATTACGCTGCGCCATGAGTCACGTAAT
AAACTGGAATCCTTCCCGATGGGATCCGCTCAAGGAACAAAACACCGCTTTACAGTTTTG
GCAAAGCCAGAAACTAGAACAGTCAATACTGCCATTCACGGGGCAAAACGCCGACGACGA
GCATATGCATCTGGACTTAGAATATGGCGACTCCCAATCTCCATCGCGAGCCGAACCTAA
GCAGCGGCGCTTGACCTTCCGAGTCAGGACACAATTGTGGAAAGACATAAGAGGAGGTTC
CTTCCGATGCTATCCCGAGATGGCACACCTCCCAGAATATTTCCTAAACCCCTGACGTTA
GGCGCGGCGTAGGATGGTGAGGTCACCTGCCCATGACCGCATAGATTGCG
>Rosalind_0640
CGAATGATACTCGTACTCTCCAACATTTCATAAGCAAATAGATACTCCCACATTTGCGAA
TTCACGAGTAGCGAGCAGGCCTATAACGCTGCTTGGTTAGTTGCTTCGGTAACGTACCGG
ATCTGGCGTAACTCAATAATTGTGCTACCTATGCTCAAATGCTATTCACAGACTCTCCAT
CACGTCGGGACCCCGAATATGTTTTTATACAGCTAAGTACGCCAGCAAAACGACGTAACG
AGTTTCGGTTATTCAATGAGCAGACCCTGATACGGATCGACTACCGTAACTGTCAACGTG
AGGGTGAAAGAGAAGGTAATTGTCGTATGCTAAGGCGGGTATGCGACGGGGTTGCGAACT
CCGGAGGAGTTAGACTGTCGCGATATCTTCACGTACTGCACAAAGCCTACCAGTTATAAG
GTAAAGGTCCCCCGTTGTCAAATCTGAGAGGCGCTCCCAAAGATGGCTAGACACCACCTT
AGCGCACGGCTCGGATTATATACTTAAGAGACTAAACCCTCCCCGTAGAGACGCAGGCGG
TTAAAACTAGAACAGGCACTTGAAGTTACCCGGAACGCTACCTGCTAATTTCAGCTGTTC
TTGGTACCACTTCAGGCAGCTCCGGCAACAAGGCCTCTCCGTTAGTCAATATGGACACTG
CATTAGGCGTAGGGATGTCAGGGAGCACTTGTGCAGACGGATAGCTCGAAGCCGCTGGCG
TCCGAGAATCTCCTAGAGGATACGATCGTTAATGCAGTAAGCACACCCTCCTAGACCTCT
CTTGCGGTCGCTGGCCCTTGGGCAGTGCAACCAACACCATCCGATCTTATAGCCCGCGCA
TACACAATGCTCGCCAGTGAATACCGGAGGCTAGGCCTGCAAAACTCTGGCGATGGTGGA
GACAATTGTTCTCCCGGGAGGGGCTGGGTTAAGCGCTAATCTGACCCTAC
>Rosalind_9012
TTCCTACCTTGATCGTGGTTATCAGCGGTCGTGGGTAGGGAACTGGAGAGTTACAATCAT
ACACAACCGCTACTAATCACAACTCTCGTTTTGAACCGCTGTCCAGCCGGCGGATGACCA
CGTAAGTGAACTTCGGAATCACCTGCGGTGCATTGTAAAAGAGCAGCTCAGCAAACACAG
CCTGCAAGGGTCCATAATAAGGCCAAGGCCACCAACCACCCAGACTAAGATCACATCCGG
AACGGGCCTTAAACGTTTTGTGCCTGTCCAGGTCGCGCTCTTTTTAGGAAATACGAAATT
CCTGGGTAGAATTTCGCCAGATCGTTCGGTAAAGTAAAGAGGTACCTTGGATCGAATCAA
GAATAGCGCTTTGTTTTGCGACTCAGAACGGGTAATTTTTTTTTGCACGCAATTGCCACA
GAAAGACAGGTGGTGCGGTGGGCATTACTTTAGTGTACTGGGACGGACTCGCTTCCTCCA
AGAAGCCTTCAATATCATTGGCTGCGTGGTTTGTTCAGGCTCGCGGACCCGACTGCTCGG
AGTAACGCACGGCTGTTGTCATCGACACGGGAGAACGATTGTCTCTAGCTTGTTATCCGG
ATCTGGAGGCCCGATAGTGTATCATCCCTTACCCCCCCGACGTGAATCAACCAGTGTATA
GTTGAAGAACAAGGGACCACATGGTAAGATCCGAAGAACTTGCCCCCGAACTACAGAGAA
GACGACCGTCTTCGGCTCGTATGAAAAGTCTGTAGCAAGCGATGTATGGCTGTGCAGTAG
TAGGTTTGCTATCCACGTGATAGTCGCCCATGACACAGAGTAGGGTACGAGGGGAGGCGG
TGACGTTACGGCGTAACGTCACCCCGGGTCATGACGATATGGGTCGCCATTGATTTGATT
GTGCCTTGCATCTGCAGTTGGTTCGACAACGGTGGTTGACGCATCTCATG
>Rosalind_6116
AGCAGGATTGAGCGCACGGTGGGTACGTTTACACTATCAGCGTCAGTAGAGTGAGGTCGG
CACTAGTACATCGTAGAGTTGAAAACAAGGCCTGCACGTCGGCGTGCCATTTGCACTCAT
AGTCCTTCGCTACGAGCTAATAGGAATTTCGGGGGATCAAACTCCGCACCATACGATAGT
TTATATAGGTCAGGCGTCTCACTAATCTTTAACCGACACATATAATCACAAATAGAGATT
GTCGATCTCGCAGTATAATATACGATCAGAACAGTGGGGCCCGGCGCCAGTTCCACGGCG
CATGGCAGGCATTTTGGTGTTGTCGCTGTACGAAAATTTGGATCAGACCCTGCTAAATTT
CAGCCAAGACTCGACCTCGCTTTCAGGATTAAGCGGTCTAGATCCCGATCGCCATTTTCC
CCGTTGTCCCACTGGGAACACCTACAGTAGGTACCAGACCACGCACTGAATACGGTTAAG
GCGAGCCCTTCTCCTACATCATTTATTCCTGGTCATACATTCATCTCAAGGAGTGATTGG
TACGTCCATGCTGATTTAATCACACGGTTAGCTCATATATGAAGCAAGAGTGTCATGTAT
ATGTTAGTTAACGACAAAGCTAAGCCCGGGGGGCAACTGGATAGTCACTCTGCTGGGGCC
TTACCGCAGCGGACTCCGTTCAAACGTATAATTTAAATTTATCCATTTGTGTAATGGAAG
ACCGCTATTGTCATCCGATAAGCTGGTAGAACAATATAAGTCGCCATGGGTAGTTCTTTC
GTATGCGTAGGATCGCGTCGGCTTTTCCGATAACCCCGCATTCGACCAAGTTGTCGTCGA
CTGCCAGTAGTAATTACTTTTGGGTATGCGGAGTCGATACTCTTTGAAACCAGAGAGTTT
GAGGGCAAGCCTGCTCCATTGACACCTTGAAAAGTATGAGCTCCCTAGAA
>Rosalind_4523
TCGCGTTTTATCCAGGCTGAGATAAGGGGCCTGTCTTGCGCAAATGATTCCCGCATGAAA
TGAACCCGCCGTAAGCTTCAGCTTTCGATAACATACTGTGCGTTCGGTACAAGGATAACT
TAAAACCTCTCGAGCTAGAAACGTAGAATGTCCTTAGCCAGGGTTCTCCAAGTACAGTCT
AGGCGGTGTAGTGTGATACAGCCGGTGGCATCTCTCCTTTGACTACTCTTAGGTGCCCTC
GCTCGACGCATGGAAGAGCCGGATAAAACAGAGTGGAGTACACTCGCTGAAAACCACCTA
TTCAGGCCTACCGCAAAGGCATGCAACGTAACGTACGGAGTTGCATATTAAAAGGCACAC
TGACGCGAACCGAAAGCCGGGTCGGTGATCGGCGTCATCGTATATCACGCATTGCAGTGG
CAGCGTATTACTCTGGTAACCGAACGACCTTGGTCCACTACAACCCTGGCCCCAGCTATT
TTTATATAGTTCCATTTCGGGTGCTGCGTCTCGCACGCAGCAGTTTTGAGATAGGCGCCG
TTCAGGCGCCTGCTGACGTCAAAATTGCTACAGTGGCCAGAAATCTCGATCGTCGAGTAA
ATAGCCAGATACCTCGCCAAATACCTGTAACCGTCTGTCTACTGTTTTTATGGGTATCAT
CTTCAATCGTACACCTCTAGTAACATCACATGGGGGGTGAATCATGGGCATAACGGGTTT
TGGAACCGTGACCTTAAATCGGTATGTGTGTTTGGTCGTAAATGTGCGTTCACTTCGGGT
CGCCAAACGGCCGTATCGACGCTTTGTTAGGGATTTAACGGCCGCGTATGCCGGTGGCCC
TGGATACAGTGTTGGTAAAGCTCTACCAACAATGTCAAAATACTCACATCATCTTACTAA
AGAGCCCCAACGTCGAGTCGGGGGACTCGGCGATGAATAAAGTTCTTGTG
>Rosalind_9863
TTCCAACGGGCCTGAACATCTTGCCGTACCACAGACGGCGGTCAGACTGTTATGACAGAT
GTGCACATCGTCAGGTCACGGCTTGACGAGGGGGCTATTTACATATGGGGTTCCGGACTT
GATGTAACCGTGATCTAACTCATGATGAGCGCCGTTAGGGTTGGGCACCGGGCCGCGGCC
ACTCGGAGACTTCAAGATTAAGATCCTGATTATCTCCTACCCAGGGGGGAAACAATTGCA
GTCATGAGGGGCTATAATGCTTCGGCTGTGCTATCTTTGTGGGGCCTTCTTTAACACAAT
TCAACTCCGTTAAAGCTTAAAGCATTGGACGAGATAAATTTGTCAGTAGACTATACGGTC
ATCTCGGTTCCCGGCGCTGGCCAGTACCATATCGACCACAGTGTTTCCTAAAAATTCCAT
GTATAGGCGTCATGGGTCGAACCCCACGTACGCAGTCCTGAGTATGCACACACATCAGCG
ACAAAGTGACCTTATAGGTGGGCTACCTCGCTCGATCGGCCCATGAAGAAGTGTCTGCCA
TCTTCGGGGTTCCCTGGTACTTGGGTGGATGTTCCGGGAACTCTGCATCTAGATCTCTGA
TGCGGCTTGTACTCGGGTTGTCTCAAGGGGGGTTGTATGGAGGCATCTTTTGGATGATCA
CGCCTTTTCATTAATCCGCGCGCTTAGTTATCCACTTCAACCCACAACTAGTTATCCGGC
TATACGGAACCAAGTTAAGCGTAATGCGGTAGCAGACTCGCCACCACTTATTGCGTTACT
GCGATAGCGAAACTGGATTTGCTCCGAACAACCGAAAAGTAATCGGATGTGGATGATGCG
GGCCGCTTTGCCTGAGTTGGGAGTACATCTGGTGATCTGTTCTGGTGGTCATTCCACGAG
AGTACTCGAGGGCGTAGCAGATACAAGAGAAGGGCGCCGCTAGGACTAAA
>Rosalind_0174
GATATAAACGTGTGTCCCGCTACTAGGGGCCACATGTAATCAAGACTTTGTTTATATGAC
AACTTCAGGCCTTACCGATCTGGTGCCAACATGTCAATTTTCCCCTGTTCCAGTATCTAG
CCTTCATCGCTGCAGGCTTTCCGAGACAAGCAACCGCTCTTAACTACAGGCAAGACCGGG
AATACCTGTCTTAATGACGCTATGACCGGATGCGGAGTACCGCATCGTGATGCAACAACT
GTGGACAGTTAGTGTGCAGGGTCATGGAAAGGAGCAGGCGCTTACGTTTTTCGTATACAC
AACCACGAGGGGTTAACTTGTGAACAATAAGGTCCGTTAGTAGCACCTCCCCAGGGACAG
CACGGGCTCAAGGTCTTCTTCGGATGGGTTGAAACCTCTGGTCGGCGGGCGGGCACTTAG
AAAGTCGAAATCCCCACTACGATCAAGCATTCACCTTATCGGCTCGATTGGATCGTCGGA
TGGAAGGTCTACCAACCGGCTGGTCAGATTCGCTTTCTTCGATGTACATGCCGGAGTTCT
ACATGCACCAAAATTAGCTAGGGTTCCCATGGCCAAGACAACTCATCCTCACTGTGGGAA
AGAGTCTTTTGTGATCCAGTTTAGCTGGCGTCACCCCGAATGGCACACATTACATGGTCG
GACGCTGGACAGTGAGTGTTCCGCTACAACGCATCGGGCGACCCGTAAACATGTGTTACC
CGTCATGATCCACCTAACCAGAAATCAAAGAAGTACTACTTTCCGGCCATGCAACAGGAG
CGCGTCATCCTAGTGCGCTAGCCGGGCCATCCTCTAGTAGATCAGGCGTAACGCGATTCC
TTCGTAGGCATCGCGCTAATGTAGCAATAGAGAAGCACAAGCCTTCAGGGATAACCCAGT
GATTATGCACCTATCTGTTCGAAAAGGGCAAGACGGCACGGCCTCCGCGT
>Rosalind_4563
ATGACCCACTAGAAATATTTGCTGCAGCAATAAAGACGCGGTCGTTATTAAAGGACCCCA
TCAAGCACGTACACGAGTACGCGTTCACTCCCTAGGCCCGTTCAGCTGTAATGCTCTCCT
TACGCGCTAGGGGTACGCAGAGTTTCTATTTCCCGCCTCCAATTATCGTATTTGCCCGCG
GCCTTCCGGGCGTCGCTTTATTTCGCCAATACTCGCATCGCGCTCGCACCGCCGTCTGGG
GCAGGTTGATACCTGGCACATGTCTCACCCCTTCTATTTTGACGAAGCTCGTAGCGCCCG
ACGCGATATAGGGTCGGCGGTATTCGATCGCCTAGTCACCGAGTTCCATGGTGCGATAGG
TCGAACTGGTTCGTGTCCCTGGTCAGGAAACTATTCCTCACAGATGATGCTTATTCCTGT
TTGTTAGTCTACCCACATGTCCATCTTCCTGCTAATCCATGCCTTTCGGTTAACACTGAC
ATAGTAACTAATTCGGCTGCTCCTTCCGGCATATATTTGGCGCCTTCGGCGTGGCGGCCC
GGCGCAAGTCCCCCAATGGGGCTGCCCACACTCAGCGGCCCTTCATACGTATGTTTGGAG
CACGTTTTAGGTGTAACACGCCCTACCGCGGCGAGGATAACTAACTAATCCGCATACATC
TAACCATTCTGCATGGCAGCCTCGTAGCAGCATCCGTTCTACCCGTAACTCGACAAGTCT
TTCAAACTAGCAGCGCCCCACCGAAGAGATACGGAGTACCGCAGCCGTAGTAAGACTCAG
TTTAACAGGAGAATCTCTGATGGGAATCCCATGAAGGATACAAGAACAAATCGCCTGAGT
TGCGATAGGGTGACCGCATTAAGGCTGCTCAGTCATGGCCTCGTATTTCCTTGTGCCATA
ATGTTCCGCTGCGGTACGGTGGTTTGGTGGTTAAAGAAGGACTCCAAAGA
>Rosalind_8396
GGCGTTCACAAGTTAAGCTGGCTGAAGACTTGTAAAACTCCGGAGCAACACAAGTAACCA
TTCGTTGTCGGTCCGGTGCAGGCCAGGGGGTTAGAGAGATCTACATACTAGACCCTTTCA
CCTCTTAGGATTAATGTCCAGCCACCGAAACTGCGCGACATCGTGACACTTGCGCCCATA
TGCAGTAATGTAATAACCAGCTCTAATATTTCTATCCGACGTCAGCACTACGGTTAAGTC
AGGTCCTTAGTGGAATGTAGAGAATCGAGCTGTGAGTAGTAGGGGGGGCTAGCACCCCAT
TTAAGCAACGCTACTCACTTAGATCGGGCATAAGAACCTAAAGTGTAAGTAGAGATGATG
TAGCCTCCGGTAAACCAGAATTTCCCCGGCCTCTATACCGGGCCTTAACAGTCGTGAGGC
GCATCCCTCAGTTCAGTCCGGGACAAACCATAGGTAATAATAAATGGTATTGTTTCAAGC
TCATCCCAATTTTGCAATCGGGAACACCACGCTTTATTAACGTCGCATTGCCGTCGTATA
AGTCTCGGAGGAAAGGCCACTGTGATAGTTAATGCATAGCTCCAATCTGACGAGCGCGAC
CATGTAAACCATTCCTCCGGCACGTTGTAAACGATTGGCTGCCTTATCACTCCACCCCTG
CATCGTGGATAATGGACTTGAGCGTAAGTCAATACTCGGTGTTGGCCCTTTTCTCCGTGC
ATATTATGCGTTTGAAATCGAGCGACGTTCGGTAATTTCCTTGGTCCGGTTCTTGTCAAT
CGACAACATAGGCGTGCTTACGCTTCTTCACGACGAACCATAGATAGCGCCCCTAAGCGC
AACGTGTGAGAGCATGATGACGAAACTGGGTGTGCCACTAGGTGTTTTCTACTGTTACGC
CAACCTTCCTATTCCACAGTACCTGCGGCTGAACCTTAGTACCTTCTCTA
""".strip()
    # Parse, then build and print the profile + consensus.
    parsed_data = parse_input(data)
    make_profile(parsed_data)


if __name__ == '__main__':
    main()
| 11,178 | 5,992 |
import rethinkdb as r
from pbkdf2 import crypt
import uuid
class User(object):
    """A user record destined for RethinkDB.

    The email address doubles as the primary id; timestamps use r.now()
    (presumably resolved server-side by RethinkDB -- verify)."""

    def __init__(self, name, email, password):
        # Identity.
        self.name = name
        self.fullname = name
        self.email = email
        self.id = email
        self.password = password
        self.apikey = uuid.uuid1().hex
        # Timestamps.
        self.birthtime = r.now()
        self.mtime = self.birthtime
        self.last_login = r.now()
        # Profile fields, initially blank.
        self.avatar = ""
        self.description = ""
        self.affiliation = ""
        self.homepage = ""
        # Flags and bookkeeping.
        self.demo_installed = False
        self.notes = []
        self.admin = False
        self.beta_user = False
        self.fake_user = True
        self.preferences = {"tags": [], "templates": []}
def make_password_hash(password):
    """Hash ``password`` with PBKDF2 (4000 iterations) and a fresh
    uuid1-derived salt."""
    fresh_salt = uuid.uuid1().hex
    return crypt(password, fresh_salt, iterations=4000)
def make_fake_user(user_name, user_id, user_password, apikey):
    """Create a beta-flagged User with a preset API key (for testing).

    :param user_name: Display name.
    :param user_id: Email address (used as id).
    :param user_password: Plaintext password; stored hashed.
    :param apikey: API key to assign, overriding the generated one.
    :returns: The configured User instance.
    """
    hashed = make_password_hash(user_password)
    fake = User(user_name, user_id, hashed)
    fake.beta_user = True
    fake.apikey = apikey
    return fake
# Slixmpp: The Slick XMPP Library
# Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from datetime import datetime, timezone
from typing import Optional
from slixmpp import JID
from slixmpp.stanza import Presence
from slixmpp.plugins import BasePlugin
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.plugins.xep_0319 import stanza
def get_local_timezone():
    """Return the host machine's local timezone as a tzinfo object."""
    aware_now = datetime.now(timezone.utc).astimezone()
    return aware_now.tzinfo
class XEP_0319(BasePlugin):
    """XEP-0319: Last User Interaction in Presence.

    Stamps outgoing presence with an <idle/> element when an idle time has
    been set, and raises ``presence_idle`` events for incoming ones.
    """
    name = 'xep_0319'
    description = 'XEP-0319: Last User Interaction in Presence'
    dependencies = {'xep_0012'}
    stanza = stanza

    def plugin_init(self):
        # Per-JID map of idle-since datetimes; backing store for the default
        # get_idle/set_idle API handlers below.
        self._idle_stamps = {}
        register_stanza_plugin(Presence, stanza.Idle)
        self.api.register(self._set_idle, 'set_idle', default=True)
        self.api.register(self._get_idle, 'get_idle', default=True)
        # Dispatch incoming presence carrying an <idle/> payload.
        self.xmpp.register_handler(Callback(
            'Idle Presence',
            StanzaPath('presence/idle'),
            self._idle_presence
        ))
        # Stamp our own outgoing presence on the way out.
        self.xmpp.add_filter('out', self._stamp_idle_presence)

    def session_bind(self, jid):
        # Advertise support via service discovery.
        self.xmpp['xep_0030'].add_feature('urn:xmpp:idle:1')

    def plugin_end(self):
        # Undo everything plugin_init()/session_bind() registered.
        self.xmpp['xep_0030'].del_feature(feature='urn:xmpp:idle:1')
        self.xmpp.del_filter('out', self._stamp_idle_presence)
        self.xmpp.remove_handler('Idle Presence')

    async def idle(self, jid: Optional[JID] = None,
                   since: Optional[datetime] = None):
        """Set an idle duration for a JID
        .. versionchanged:: 1.8.0
            This function is now a coroutine.
        """
        seconds = None
        # NOTE: this local deliberately shadows the imported ``timezone``.
        timezone = get_local_timezone()
        if since is None:
            since = datetime.now(timezone)
        else:
            # NOTE(review): this value is a timedelta, not a number of
            # seconds -- confirm xep_0012.set_last_activity accepts it.
            seconds = datetime.now(timezone) - since
        await self.api['set_idle'](jid, None, None, since)
        await self.xmpp['xep_0012'].set_last_activity(jid=jid, seconds=seconds)

    async def active(self, jid: Optional[JID] = None):
        """Reset the idle timer.
        .. versionchanged:: 1.8.0
            This function is now a coroutine.
        """
        await self.api['set_idle'](jid, None, None, None)
        await self.xmpp['xep_0012'].del_last_activity(jid)

    def _set_idle(self, jid, node, ifrom, data):
        # Default handler: record the idle-since datetime for this JID.
        self._idle_stamps[jid] = data

    def _get_idle(self, jid, node, ifrom, data):
        # Default handler: look up the stored idle-since datetime, if any.
        return self._idle_stamps.get(jid, None)

    def _idle_presence(self, pres):
        # Surface incoming idle presence to application code.
        self.xmpp.event('presence_idle', pres)

    async def _stamp_idle_presence(self, stanza):
        # Outgoing filter: attach the stored idle-since timestamp, if set.
        if isinstance(stanza, Presence):
            since = await self.api['get_idle'](stanza['from'] or self.xmpp.boundjid)
            if since:
                stanza['idle']['since'] = since
        return stanza
| 2,980 | 1,045 |
from shlex import split as command_split
from subprocess import Popen, PIPE
from modules.Debug import log
class ImageMagickInterface:
"""
This class describes an interface to ImageMagick. If initialized with a
valid docker container (name or ID), then all given ImageMagick commands
will be run through that docker container.
Note: This class does not validate the provided container corresponds to
a valid ImageMagick container. Commands are passed to docker so long as any
container is fiben.
The command I use for launching an ImageMagick container is:
>>> docker run --name="ImageMagick" --entrypoint="/bin/bash" \
-dit -v "/mnt/user/":"/mnt/user/" 'dpokidov/imagemagick'
"""
    def __init__(self, container: str=None,
                 use_magick_prefix: bool=False) -> None:
        """
        Constructs a new instance. If docker_id is None/0/False, then commands
        will not use a docker container.

        :param container: The container for sending requests to
            ImageMagick, can be a name or container ID.
        :param use_magick_prefix: Whether to prefix commands with 'magick'.
        """
        # Definitions of this interface, i.e. whether to use docker and how.
        # Any truthy container value routes commands through docker.
        self.container = container
        self.use_docker = bool(container)
        # Whether to prefix commands with "magick" or not
        self.prefix = 'magick ' if use_magick_prefix else ''
        # Command history for debug purposes: (command, stdout, stderr)
        # tuples appended by run().
        self.__history = []
@staticmethod
def escape_chars(string: str) -> str:
"""
Escape the necessary characters within the given string so that they
can be sent to ImageMagick.
:param string: The string to escape.
:returns: Input string with all necessary characters escaped. This
assumes that text will be wrapped in "", and so only escapes
" and ` characters.
"""
# Handle possible None strings
if string is None:
return None
return string.replace('"', r'\"').replace('`', r'\`')
def run(self, command: str) -> (bytes, bytes):
"""
Wrapper for running a given command. This uses either the host machine
(i.e. direct calls); or through the provided docker container (if
preferences has been set; i.e. wrapped through "docker exec -t {id}
{command}").
:param command: The command (as string) to execute.
:returns: Tuple of the STDOUT and STDERR of the executed command.
"""
# If a docker image ID is specified, execute the command in that container
# otherwise, execute on the host machine (no docker wrapper)
if self.use_docker:
command = f'docker exec -t {self.container} {self.prefix}{command}'
else:
command = f'{self.prefix}{command}'
# Split command into list of strings for Popen
cmd = command_split(command)
# Execute, capturing stdout and stderr
stdout, stderr = b'', b''
try:
stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
# Add command to history
self.__history.append((command, stdout, stderr))
return stdout, stderr
except FileNotFoundError as e:
if 'docker' in str(e):
log.critical(f'ImageMagick docker container not found')
exit(1)
else:
log.error(f'Command error "{e}"')
return b'', b''
def run_get_output(self, command: str) -> str:
"""
Wrapper for run(), but return the byte-decoded stdout.
:param command: The command (as string) being executed.
:returns: The decoded stdout output of the executed command.
"""
return b''.join(self.run(command)).decode()
def delete_intermediate_images(self, *paths: tuple) -> None:
"""
Delete all the provided intermediate files.
:param paths: Any number of files to delete. Must be Path objects.
"""
# Delete (unlink) each image, don't raise FileNotFoundError if DNE
for image in paths:
image.unlink(missing_ok=True)
def print_command_history(self) -> None:
"""
Prints the command history of this Interface.
"""
for entry in self.__history:
command, stdout, stderr = entry
sep = '-' * 60
log.debug(f'Command: {command}\n\nstdout: {stdout}\n\nstderr: '
f'{stderr}\n{sep}')
| 4,687 | 1,231 |
import random
from math import sqrt
import numpy as np
from torch.utils.data import ConcatDataset, Dataset
from torchvision import transforms
class DatasetAll_FDA(Dataset):
    """
    Combine Separated Datasets

    Wraps several datasets in a ConcatDataset; __getitem__ returns both the
    plainly augmented image and an FDA (Fourier Domain Adaptation) mix of
    the image with a randomly drawn style image from the same data.
    """
    def __init__(self, data_list, alpha=1.0):
        # alpha is the upper bound of the per-sample amplitude-mixing weight
        self.data = ConcatDataset(data_list)
        # PIL-level augmentations; the final lambda converts to an ndarray
        # so the FFT mix can operate on [H, W, C] pixel data
        self.pre_transform = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(), lambda x: np.asarray(x)
        ])
        # ImageNet normalization, applied after the spectrum mixing
        self.post_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.alpha = alpha

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img, label = self.data[idx]
        # randomly sample an item from the dataset
        img_s, _ = self._sample_item()
        # do pre_transform before FDA
        img = self.pre_transform(img)
        img_s = self.pre_transform(img_s)
        # FDA
        img_mix = self._colorful_spectrum_mix(img, img_s, self.alpha)
        # do post_transform after FDA
        img = self.post_transform(img)
        img_mix = self.post_transform(img_mix)
        # Return the original/mixed pair with duplicated labels
        img = [img, img_mix]
        label = [label, label]
        return img, label

    def _colorful_spectrum_mix(self, img1, img2, alpha, ratio=1.0):
        """Input image size: ndarray of [H, W, C]

        Blend the FFT amplitudes of img1 and img2 with weight lam drawn from
        U(0, alpha), keep img1's phase, and return the reconstructed uint8
        image. With the default ratio=1.0 the whole spectrum is mixed.
        """
        lam = np.random.uniform(0, alpha)

        assert img1.shape == img2.shape
        h, w, c = img1.shape
        # Centered crop of the (shifted) spectrum to mix; full-size by default
        h_crop = int(h * sqrt(ratio))
        w_crop = int(w * sqrt(ratio))
        h_start = h // 2 - h_crop // 2
        w_start = w // 2 - w_crop // 2

        img1_fft = np.fft.fft2(img1, axes=(0, 1))
        img2_fft = np.fft.fft2(img2, axes=(0, 1))
        img1_abs, img1_pha = np.abs(img1_fft), np.angle(img1_fft)
        img2_abs, img2_pha = np.abs(img2_fft), np.angle(img2_fft)

        # Shift so low frequencies sit in the center of the array
        img1_abs = np.fft.fftshift(img1_abs, axes=(0, 1))
        img2_abs = np.fft.fftshift(img2_abs, axes=(0, 1))

        img1_abs_ = np.copy(img1_abs)
        img2_abs_ = np.copy(img2_abs)
        # Mix amplitudes inside the centered crop
        img1_abs[h_start:h_start + h_crop, w_start:w_start + w_crop] = \
            lam * img2_abs_[h_start:h_start + h_crop, w_start:w_start + w_crop] + (1 - lam) * img1_abs_[
                h_start:h_start + h_crop,
                w_start:w_start + w_crop]

        img1_abs = np.fft.ifftshift(img1_abs, axes=(0, 1))
        # NOTE(review): img2_abs below and img2_pha are never used after this
        # point - dead computation kept for parity with the original.
        img2_abs = np.fft.ifftshift(img2_abs, axes=(0, 1))

        # Recombine mixed amplitude with img1's phase and invert the FFT
        img21 = img1_abs * (np.e**(1j * img1_pha))
        img21 = np.real(np.fft.ifft2(img21, axes=(0, 1)))
        img21 = np.uint8(np.clip(img21, 0, 255))
        return img21

    def _sample_item(self):
        # Uniformly pick one (image, label) pair from the concatenated data
        idxs = list(range(len(self.data)))
        selected_idx = random.sample(idxs, 1)[0]
        return self.data[selected_idx]
class DatasetAll(Dataset):
    """Concatenate several datasets into one, with standard augmentation
    pipelines available but no FDA mixing; items pass through unchanged."""

    def __init__(self, data_list):
        self.data = ConcatDataset(data_list)
        # Spatial/color augmentations applied before tensor conversion.
        augmentations = [
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
        ]
        self.pre_transform = transforms.Compose(augmentations)
        # Tensor conversion plus ImageNet normalization.
        normalization = [
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]
        self.post_transform = transforms.Compose(normalization)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
| 3,939 | 1,464 |
import math
import time
from scipy import interpolate
from threading import Lock
from abc import abstractmethod
from library.pid import PID
from stack import ActionStack
class BaseAction:
    """Abstract base class for undoable robot actions."""

    @abstractmethod
    def undo(self):
        # Reverse the effect of this action.
        pass

    @staticmethod
    def is_checkpoint():
        # Only Checkpoint overrides this to return True.
        return False
class Checkpoint(BaseAction):
    """Marker action with no effect; lets the action stack undo up to here."""

    @staticmethod
    def is_checkpoint():
        return True

    def undo(self):
        # A checkpoint has no effect, so there is nothing to undo.
        pass
class SyncAction(BaseAction):
    """Blocking action: exec() runs to completion before returning."""

    @abstractmethod
    def exec(self):
        pass
class AsyncAction(BaseAction):
    """Non-blocking action: begin() starts it, end() stops it later."""

    @abstractmethod
    def begin(self):
        pass

    @abstractmethod
    def end(self):
        pass
class DriveSpeedAction(AsyncAction):
    """Drive the chassis at constant (x, y, z) speeds until end() is called;
    undo() replays the inverse motion for the recorded duration."""

    def __init__(self, robot, x=0.0, y=0.0, z=0.0):
        self.robot = robot
        self.speeds = [x, y, z]
        self.start_time = 0
        self.duration = 0

    def begin(self):
        # Remember when driving started so end() can measure the duration.
        self.start_time = time.time()
        x_speed, y_speed, z_speed = self.speeds
        self.robot.chassis.drive_speed(x_speed, y_speed, z_speed)

    def end(self):
        # Record how long the action ran, then stop all wheels.
        self.duration = time.time() - self.start_time
        self.robot.chassis.drive_wheels(0, 0, 0, 0)

    def undo(self):
        # Drive with inverted speeds for the same duration, then stop.
        inverted = [-speed for speed in self.speeds]
        self.robot.chassis.drive_speed(inverted[0], inverted[1], inverted[2])
        time.sleep(self.duration)
        self.robot.chassis.drive_wheels(0, 0, 0, 0)
class MoveDistanceSyncAction(SyncAction):
    """Blocking move over a fixed (x, y, z) offset at the given speeds."""

    def __init__(self, robot, x=0.0, y=0.0, z=0.0, xy_speed=0.0, z_speed=0.0):
        self.robot = robot
        self.coords = [x, y, z]
        self.speeds = [xy_speed, z_speed]

    def undo(self):
        # NOTE(review): undo() moves by +coords while exec() moves by -coords;
        # this looks inverted relative to the usual exec/undo convention -
        # confirm against callers before changing.
        self.robot.chassis.move(self.coords[0], self.coords[1], self.coords[2], self.speeds[0], self.speeds[1])\
            .wait_for_completed()

    def exec(self):
        self.robot.chassis.move(-self.coords[0], -self.coords[1], -self.coords[2], self.speeds[0], self.speeds[1])\
            .wait_for_completed()
class FollowLine(AsyncAction):
    """Line-following controller.

    Subscribes to the robot's vision line-detection stream and converts each
    detected line position into a chassis drive command via a PID controller.
    Every issued drive command is pushed onto an ActionStack so the whole run
    can be undone afterwards.
    """

    def __init__(self, robot):
        self.lock = Lock()
        self.active = False
        self.stack = ActionStack()
        self.robot = robot
        self.last_action = None
        self.last_vision = None
        self.last_pid = 0
        # Hand-tuned gains; setpoint 0.5 keeps the line centered in the image.
        self.pid = PID(115, 0, 12, setpoint=0.5, sample_time=0.1)  # <- TODO

    def begin(self):
        self.active = True
        if self.robot is None:
            # Test mode without robot control
            print("FollowLine: Robot nicht definiert")
        else:
            self.robot.vision.sub_detect_info(name="line", color="blue", callback=self.vision_update)

    def get_last_data(self):
        # Most recent raw vision payload (for inspection/debugging).
        return self.last_vision

    def end(self):
        self.active = False
        self.robot.vision.unsub_detect_info("line")
        # Stop the last command and push it onto the stack
        if self.last_action is not None:
            self.last_action.end()
            self.stack.push(self.last_action)

    def vision_update(self, vision_data):
        """Vision callback: derive a steering speed from the detected line."""
        # Ignore if the critical section is still locked or if we were stopped
        if not self.active or self.lock.locked():
            print("FollowLine: Übersprungen!")
            return
        # Lock the critical section
        self.lock.acquire()
        self.last_vision = vision_data
        next_x = 0.5
        points = 0
        i = 0
        # Take the first three points from vision_data (if present)
        # Note: first element of vision_data is line_type (int), must be skipped
        for d in vision_data[1:4]:  # To use the last 3 points: "for d in vision_data[-3:]:"
            x, y, theta, c = d
            # Pick the x coordinate of the second point
            # TODO choose a different point?
            if i == 1:
                next_x = x
            points += 1
            i += 1
        # Run the PID controller
        output = -1 * self.pid(next_x)  # TODO invert output?
        if output == self.last_pid:
            # Cooldown: unchanged output means no new command is needed
            self.lock.release()
            return
        else:
            self.last_pid = output
        y_spd = 0
        x_spd = 0.5
        # Speed limit on the rotational component
        z_spd = max(-90, min(90, output))
        print(f"X: {str(round(next_x, 2))}; \t"
              f"PID: {str(round(output, 2))}°/s \t"
              f"=> Z (limit): {str(round(z_spd, 2))}°/s\t"
              f"||\tY: {str(round(y_spd, 2))}m/s\t"
              f"||\tX: {str(round(x_spd, 2))}m/s")
        if self.robot is None:
            # Test mode without robot control
            self.lock.release()
            return
        if self.last_action is not None:
            # Stop the last command and push it onto the stack
            self.last_action.end()
            self.stack.push(self.last_action)
            self.last_action = None
        # Start the new drive command
        action = DriveSpeedAction(self.robot, x_spd, y_spd, z_spd)
        action.begin()
        self.last_action = action
        # Unlock the critical section
        self.lock.release()

    def undo(self):
        self.stack.undo_all()
| 5,108 | 1,751 |
from Canvas import Canvas
from Detector import Detector
from GUI import GUI
from Tracker import Tracker
from Function import *
from Video import Video
from Pen import Pens
from Key import Key
from Image import ImageManager
import tkinter
import tkinter.messagebox
import tkinter.font
import tkinter.simpledialog
import time
import cv2
import os
class Touchable:
def __init__(self):
    """Bootstrap the application: prepare the data folders, construct all
    subsystems and start the (blocking) GUI main loop."""
    # Run relative to this file so the ./data paths resolve consistently.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # Make sure every data directory exists before any component uses it.
    to_dir = [r'./data/', r'./data/pen_data/', r'./data/image_save/', r'./data/source/']
    for dir_ in to_dir:
        if not os.path.isdir(dir_):
            os.mkdir(dir_)
    self.pen = Pens(r'./data/pen_data/')
    self.video = Video()
    self.detector = Detector()
    self.tracker = Tracker()
    self.image_manager = ImageManager(self, r'./data/source/')
    self.function = None  # name of the currently running mode, if any
    self.var = None       # per-mode shared state dict
    self.stop = None
    self.canvas = Canvas()
    self.gui = GUI(self)
    self.key = Key(self, self.canvas)
    self.gui.start_gui()  # blocks until the GUI exits
def show_camera(self):
    """Open a Toplevel window mirroring the camera feed until the video
    stops. Returns False immediately when no video is available."""
    if not self.video.is_working():
        return False
    top_level = tkinter.Toplevel(self.gui.window)
    top_level.title('Touchable - Camera')
    top_level.geometry('320x180')
    canvas = tkinter.Canvas(top_level, bg='black')
    canvas.place(x=0, y=0, relwidth=1, relheight=1)
    top_level.update()
    canvas.update()
    try:
        while True:
            if self.video.is_working():
                img = self.video.get_frame()
                if img is not None:
                    # Scale the 1280x720 frame to fit the current canvas size.
                    width, height = canvas.winfo_width(), canvas.winfo_height()
                    scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
                    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    img_resize = cv2.resize(img_rgb, dsize=(int(1280 * scale), int(720 * scale)), interpolation=cv2.INTER_AREA)
                    # NOTE(review): `photo` must stay referenced while shown,
                    # or Tk may drop the image - the loop keeps it alive.
                    photo = pil_to_tkinter(img_resize)
                    canvas.create_image(width // 2, height // 2, image=photo, anchor=tkinter.CENTER)
                    canvas.update()
            else:
                # Camera stopped: close the preview window and leave the loop.
                top_level.destroy()
                break
    except Exception as e:
        print(f'Error in show_camera; {e}')
        raise e
def set_detect(self):
    """Interactively pick the pen colour.

    Shows the camera with a fixed centre ROI, detects a circular marker in
    it, previews the sampled colour, and loops until the user confirms a new
    pen (``self.pen.make_pen``). Returns the result of ``self.exit``.
    """
    if not self.video.is_working():
        success = self.video.set_camera('on')
        if not success:
            print('Video is not working; cannot enter set_detect')
            return False
    self.var = {'run': True, 'hsv': (0, 0, 0), 'pick_hsv': (0, 0, 255), 'roi': None, 'pick_roi': None, 'clicked': False}
    self.enter('set_detect')
    ret_counter = 0
    while True:
        while self.var['run']:  # determine detect color
            try:
                img = self.video.get_frame()  # get image from camera; type(img) = numpy.nd array
                if img is None:
                    # Tolerate up to 20 consecutive missed frames.
                    ret_counter += 1
                    if ret_counter == 20:
                        return self.exit('set_detect')
                    time.sleep(0.1)
                    continue
                else:
                    ret_counter = 0
            except AttributeError as e:
                print('AttributeError; set_detect', e)
                return self.exit('set_detect')
            self.detector.bg_subtract(img)
            # Draw the current frame scaled to the GUI canvas.
            width, height = self.gui.widget['canvas'].winfo_width(), self.gui.widget['canvas'].winfo_height()
            scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
            img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_resize = cv2.resize(img_rgb, dsize=(int(1280 * scale), int(720 * scale)),
                                    interpolation=cv2.INTER_AREA)
            photo = pil_to_tkinter(img_resize)
            self.gui.widget['canvas'].create_image(width // 2, height // 2, image=photo, anchor=tkinter.CENTER)
            # Fixed 300x300 region in the middle of the 1280x720 frame.
            roi_size = [150, 150]
            roi = img[720 // 2 - roi_size[0]:720 // 2 + roi_size[0], 1280 // 2 - roi_size[1]:1280 // 2 + roi_size[1]]
            circles = self.detector.find_circle(roi, set_detect=True, roi=roi_size)
            d, u = convert_pos(scale, width_margin, height_margin, x=720 // 2 - roi_size[0],
                               y=720 // 2 + roi_size[1])
            l, r = convert_pos(scale, width_margin, height_margin, x=1280 // 2 - roi_size[0],
                               y=1280 // 2 + roi_size[1])
            self.gui.widget['canvas'].create_rectangle(l, d, r, u, width=2, outline='red')
            if circles is None:
                # No marker in the ROI: prompt the user to adjust distance.
                w, h = convert_pos(scale, width_margin, height_margin, relx=0.5, rely=0.9)
                self.gui.widget['canvas'].create_rectangle(w - 100, h - 20, w + 100, h + 20, fill='red',
                                                           outline='red')
                self.gui.widget['canvas'].create_text((w, h), font=tkinter.font.Font(size=15), fill='white',
                                                      text='Adjust the distance')
            else:
                # Keep the largest detected circle as the marker candidate.
                x, y, max_rad = 0, 0, 0
                for circle in circles:  # for every circle
                    if circle[2] > max_rad:  # circle[2] == radius
                        x, y, max_rad = circle[0], circle[1], circle[2]  # circle center coordinates
                self.var['roi'] = (img, (x, y), max_rad)
                self.var['clicked'] = True
                # Sample the HSV colour at the circle center.
                img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                hsv = center_color(img_hsv, x, y, int(max_rad * 0.5))
                self.var['hsv'] = hsv
                x, y = convert_pos(scale, width_margin, height_margin, x=x, y=y)
                max_rad = int(max_rad * scale)
                # Crosshair plus two rings (sampled colour + white) as preview.
                self.gui.widget['canvas'].create_line(x - 5, y, x + 5, y, fill='white')
                self.gui.widget['canvas'].create_line(x, y - 5, x, y + 5, fill='white')
                self.gui.widget['canvas'].create_oval(x - max_rad - 3, y - max_rad - 3, x + max_rad + 3,
                                                      y + max_rad + 3, outline=color_type(hsv, 'hsv', 'hex'),
                                                      width=6)
                self.gui.widget['canvas'].create_oval(x - max_rad, y - max_rad, x + max_rad, y + max_rad,
                                                      outline='white', width=3)
            self.gui.widget['palette'].delete('all')
            self.gui.widget['palette'].create_rectangle(0, 0, self.gui.widget['palette'].winfo_width(),
                                                        self.gui.widget['palette'].winfo_height(),
                                                        fill=color_type(self.var['pick_hsv'], 'hsv', 'hex'))
            self.gui.widget['canvas'].update()
            self.gui.widget['palette'].update()
            self.gui.widget['canvas'].delete('all')
        if self.pen.make_pen(self):
            break
        else:
            # User rejected the pen: resume the preview loop.
            self.var['run'] = True
        # TODO
        # self.detector.set_backprojection(image=self.var['pick_roi'][0], pos=self.var['pick_roi'][1]
        time.sleep(0.05)
    self.detector.set_backprojection(image=self.var['pick_roi'][0], pos=self.var['pick_roi'][1],
                                     rad=self.var['pick_roi'][2])
    return self.exit('set_detect', True)
def detect(self, pen=None, color_reflect=0.01, back_image=None):
    """Main tracking loop: locate the pen tip in each camera frame and feed
    its position to the key handler until stopped.

    Detection pipeline per frame: (0) background subtraction and colour
    backprojection, (1) contour match, (2) tracker fallback, (3) circle
    detection ranked by HSV distance to the pen colour.

    :param pen: pen to track; defaults to the currently selected pen.
    :param color_reflect: blending weight with which the freshly detected
        colour is folded back into the pen's reference HSV each frame.
    :param back_image: optional BGR image drawn behind the canvas content.
    """
    # Make sure a pen exists; set_detect() creates one interactively.
    if self.pen.is_empty():
        new_pen = self.set_detect()
        if not new_pen:
            print('No new pen; cannot enter detect')
            return False
    if not self.video.is_working():
        print('Video is not working; cannot enter detect')
        return False
    if pen is None:
        pen = self.pen.get_pen()
    self.var = {'run': True, 'pen': pen, 'pos': None, 'target': None, 'mark': None, 'event': None, 'scale': 1}
    self.enter('detect')
    # Original pen colour, restored after too many circle-free frames.
    backup_pen_hsv = pen.access_hsv()
    no_circle = 0
    ret_counter = 0
    self.gui.widget['canvas'].configure(bg='white')
    self.detector.reset_bg_subtract()
    last_result = None
    tracked = False
    tracker_roi = None
    tracker_result = None
    roi_size = 2
    self.stop = False
    while self.var['run']:  # determine detect color # TODO turn off
        try:
            img = self.video.get_frame()  # get image from camera; type(img) = numpy.nd array
            if img is None:
                ret_counter += 1
                if ret_counter == 20:
                    print('Cannot get frame for long time; leave detect')
                    return self.exit('detect')
                time.sleep(0.1)
                continue
            else:
                ret_counter = 0
        except AttributeError as e:
            print('AttributeError; detect', e)
            return self.exit('detect')
        if no_circle > 20:  # hard-coding / 20 can be change / for initialize color
            print('No circle; reset color')
            no_circle = 0
            pen.access_hsv(backup_pen_hsv)
            self.gui.widget['palette'].create_rectangle(0, 0, self.gui.widget['palette'].winfo_width(),
                                                        self.gui.widget['palette'].winfo_height(),
                                                        fill=color_type(pen.access_color(), 'hsv', 'rgb'))
        width, height = self.gui.widget['canvas'].winfo_width(), self.gui.widget['canvas'].winfo_height()
        if back_image is not None:  # TODO
            # Draw the background image scaled to the canvas.
            height_, width_, _ = back_image.shape
            scale_, width_margin_, height_margin_ = fit_resize(width_, height_, width, height)
            img_cvt = cv2.cvtColor(back_image, cv2.COLOR_BGR2RGB)
            img_res = cv2.resize(img_cvt, dsize=(int(width_ * scale_), int(height_ * scale_)), interpolation=cv2.INTER_AREA)
            photo = pil_to_tkinter(img_res)
            self.gui.widget['canvas'].create_image(width // 2, height // 2, image=photo, anchor=tkinter.CENTER)
        scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
        self.canvas.draw(scale, width_margin, height_margin)
        result = None
        # 0. Preprocessing
        img_subtract = self.detector.bg_subtract(img)
        '''
        if self.stop:
            time.sleep(0.01)
            continue
        '''
        img_color = self.detector.backprojection(img_subtract)
        img_color = cv2.bilateralFilter(img_color, 9, 75, 75)
        img_color = self.detector.morph(img_color)
        # 1. Contour
        contours = self.detector.contour(img_color)
        answer = self.detector.contour_process(contours)
        if answer is not None:
            contour, x, y, rad = answer
            contour_color = self.detector.contour_color(img, contour)
            # Accept the contour only if its colour is close to the pen's.
            if hsv_square_distance(pen.access_hsv(), contour_color, only_h=True) < 0.6 and rad > 10:
                result = [[x, y], int(0.7*rad)]  # calibration
                cv2.circle(img, (x, y), rad, (255, 0, 0))
        if result is None:
            # 2. Tracker
            if tracked:
                # Track inside a ROI around the last known position.
                pos, rad = tracker_roi
                r1 = int(max(pos[1]-roi_size*rad, 0))
                r2 = int(min(pos[1]+roi_size*rad, int(img.shape[0])))
                r3 = int(max(pos[0]-roi_size*rad, 0))
                r4 = int(min(pos[0]+roi_size*rad, int(img.shape[1])))
                roi = img[r1:r2, r3:r4].copy()
                rect = self.tracker.track(roi)
                if rect is None:
                    tracked = False
                    tracker_result = None
                else:
                    # Translate ROI-local rect back to image coordinates.
                    rect = [int(rect[0]+r3), int(rect[1]+r1), int(rect[2]+r3), int(rect[3]+r1)]
                    pos_ = [int((rect[0]+rect[2])/2), int((rect[1]+rect[3])/2)]
                    rad_ = min(int((-rect[0]+rect[2])/2), int((-rect[1]+rect[3])/2))
                    tracker_result = [pos_, rad_]
                    cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0, 255), 3)
            # 3. Detector
            circles = self.detector.find_circle(img_color, blob=True)  # TODO ROI
            if circles is None:
                no_circle += 1
                tracked = False
                self.tracker.reset()
            if circles is not None:
                no_circle = 0
                temp_pos, temp_rad = [0, 0], 0
                priority_ = 2  # small is good
                if tracked:
                    # Prefer circles that fall inside the tracker rectangle.
                    for circle in circles:  # for every circle
                        x, y, rad = circle
                        if rad < 10:
                            continue
                        in_rect = -int(rect[0] <= x <= rect[2] and rect[1] <= y <= rect[3])
                        center_hsv = center_color(img, x, y, int(rad*0.9))
                        hsv_distance = hsv_square_distance(center_hsv, pen.access_hsv(), only_h=True)
                        priority = hsv_distance-in_rect
                        if priority > 0.3:
                            continue
                        elif priority < priority_:
                            temp_pos, temp_rad, priority_ = [x, y], rad, priority
                else:
                    for circle in circles:  # for every circle
                        x, y, rad = circle
                        if rad < 10:
                            continue
                        center_hsv = center_color(img, x, y, int(rad * 0.9))
                        priority = hsv_square_distance(center_hsv, pen.access_hsv(), only_h=True)
                        if priority > 0.3:
                            continue
                        elif priority < priority_:
                            temp_pos, temp_rad, priority_ = [x, y], rad, priority
                if priority_ != 2:
                    result = [temp_pos, int(temp_rad*0.7)]  # calibration
                    cv2.circle(img, tuple(result[0]), result[1], (0, 0, 255))
        if result is None:
            # TODO - not needed
            # NOTE(review): `outside` is assigned below but never read.
            if tracker_result is not None:
                if (not (0 < tracker_result[0][0] < 1280)) or (not(0 < tracker_result[0][1] < 720)):
                    outside = True
            elif last_result is not None:
                if (not (0 < last_result[0][0] < 1280)) or (not(0 < last_result[0][1] < 720)):
                    outside = True
            tracked = False
        else:
            pos, rad = result
            # Only accept small jumps as continuous motion.
            if last_result is None or square_distance(last_result[0], result[0], root=True) < 50:
                last_result = result
            tracked = True
            self.tracker.reset()
            if tracker_result is not None:
                track_rad = max(rad, tracker_result[1], 50)
            else:
                track_rad = max(rad, 50)
            tracker_roi = [pos, track_rad]
            # Re-seed the tracker with a clamped ROI around the new position.
            y1 = int(max(pos[1]-roi_size*track_rad, 0))
            y2 = int(min(pos[1]+roi_size*track_rad, int(img.shape[0])))
            x1 = int(max(pos[0]-roi_size*track_rad, 0))
            x2 = int(min(pos[0]+roi_size*track_rad, int(img.shape[1])))
            self.tracker.set(img, (x1, y1, x2-x1, y2-y1))
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255))
            # self.detector.set_backprojection(image=img, pos=pos, rad=int(rad * 0.7 * 0.3)) # MIGHT ERROR - calibration
            self.key.access_pos(pos)
            # Slowly adapt the pen's reference colour toward the observed one.
            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            temp_hsv = center_color(img_hsv, pos[0], pos[1], int(rad * 0.3))
            pen.access_hsv([int(pen.access_hsv()[i_] * (1 - color_reflect) + temp_hsv[i_] * color_reflect) for i_ in range(3)])
            width, height = self.gui.widget['canvas'].winfo_width(), self.gui.widget['canvas'].winfo_height()
            scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
            x_, y_ = convert_pos(scale, width_margin, height_margin, x=pos[0], y=pos[1])
            if self.key.access_event() is not None and self.key.access_event()[0] == '_':
                cross_color = 'red'
            else:
                cross_color = 'black'
            self.gui.widget['canvas'].create_line(x_ - 5, y_, x_ + 5, y_, fill=cross_color, width=1)
            self.gui.widget['canvas'].create_line(x_, y_ - 5, x_, y_ + 5, fill=cross_color, width=1)
        cv2.imshow('ori', img)
        self.gui.widget['palette'].delete('all')
        w, h = self.gui.widget['palette'].winfo_width(), self.gui.widget['palette'].winfo_height()
        self.gui.widget['palette'].create_rectangle(0, 0, w, h, fill=color_type(pen.access_color(), 'hsv', 'hex'))
        self.gui.widget['canvas'].update()
        self.gui.widget['palette'].update()
        self.gui.widget['canvas'].delete('all')
    return self.exit('detect')
def stop_detect(self, reset_drawing=True):
if self.function == 'detect':
self.var['run'] = False
if reset_drawing:
self.canvas.clear()
def enter(self, command):
    """Enter mode *command*: record it and activate its key bindings."""
    self.function = command
    print(f'Enter {command}')
    self.key.key_map(command)
def exit(self, command="all", success=False):
    """Leave mode *command* and return the *success* flag.

    On failure the widgets are cleared; for 'all' the whole application is
    torn down (video closed, windows destroyed, process exited).
    """
    self.function = None
    print(f'Leave {command}')
    if not success:
        if command == 'set_detect':
            self.gui.widget['canvas'].delete('all')
            self.gui.widget['palette'].delete('all')
        elif command == 'detect':
            self.gui.widget['canvas'].delete('all')
            self.gui.widget['palette'].delete('all')
            self.gui.widget['canvas'].configure(bg='black')
        elif command == 'all':
            # Full shutdown path: never returns.
            self.video.close()
            cv2.destroyAllWindows()
            self.gui.window.destroy()
            exit()
        self.gui.widget['canvas'].update()
        self.gui.widget['palette'].update()
        return False
    else:
        return True
# Instantiate (and thereby run) the application as soon as this module loads.
main = Touchable()
| 19,001 | 5,756 |
"""Main entry point of the application"""
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from pymongo import MongoClient
from app.core.config import get_settings
from app.internal import create_api_internal
from app.routers import create_api_router
# Module-level cache for the site-wide Mongo client (set by create_app()).
mongo_client = None
def get_client(uri: str):
    """
    Return the site-wide Mongo client, creating it on first use.

    The client is cached in the module-level ``mongo_client`` so repeated
    calls share a single connection pool (the original checked the cache but
    never populated it, so every call before create_app() built a new client).

    :param uri: MongoDB connection URI.
    :return: the shared ``MongoClient`` instance
    """
    global mongo_client
    # PyMongo discourages truth-testing its objects; compare to None instead.
    if mongo_client is None:
        mongo_client = MongoClient(uri)
    return mongo_client
def create_app() -> FastAPI:
    """ Complete creation of the app.

    Builds the FastAPI application: loads settings, connects the shared
    Mongo client, configures CORS, mounts static files, and registers all
    routers plus two convenience routes.
    """
    global mongo_client  # TODO: to remove; too messy
    # Instanciate settings
    settings = get_settings()
    # Instanciate database
    mongo_client = get_client(settings.MONGO_DATABASE_URI)
    # Instanciate app
    app = FastAPI(
        title=settings.PROJECT_NAME,
        openapi_url=settings.OPENAPI_URL,
        debug=settings.DEBUG,
    )
    # C.O.R.S - only enabled when origins are configured
    if settings.CORS_ORIGINS:
        app.add_middleware(
            CORSMiddleware,
            allow_origins=settings.CORS_ORIGINS,
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )
    # Add static folder
    app.mount(settings.STATIC_FOLDER, StaticFiles(directory="static"), name="static")
    # Include all routers
    app.include_router(create_api_router(settings), prefix=settings.API_VERSION_URL)
    # Include all internals
    app.include_router(create_api_internal(settings), prefix=settings.API_VERSION_URL)
    # HELLO WORLD ROUTE
    @app.get('/hello-world')
    def test_route():
        return {'message': 'Hello World'}
    # ROOT ROUTE
    @app.get("/", include_in_schema=False)
    def redirect_to_docs() -> RedirectResponse:
        return RedirectResponse("/docs")
    # NOTE(review): disabled startup/shutdown handlers kept as a string
    # literal; they reference an undefined _get_database() - delete or implement.
    """@app.on_event("startup")
    async def connect_to_database() -> None:
        database = _get_database()
        if not database.is_connected:
            await database.connect()
    @app.on_event("shutdown")
    async def shutdown() -> None:
        database = _get_database()
        if database.is_connected:
            await database.disconnect()"""
    return app
# ASGI entry point used by uvicorn/gunicorn (e.g. "app.main:app").
app = create_app()
| 2,292 | 700 |
## @ingroup Methods-Geometry-Two_Dimensional-Cross_Section-Planform
# wing_fuel_volume.py
#
# Created: Apr 2014, T. Orra
# Modified: Sep 2016, E. Botero
# ----------------------------------------------------------------------
# Correlation-based methods for wing fuel capacity estimation
# ----------------------------------------------------------------------
## @ingroup Methods-Geometry-Two_Dimensional-Cross_Section-Planform
def wing_fuel_volume(wing):
    """Calculates the available fuel volume in a wing.

    Assumptions:
    None

    Source:
    Torenbeek, E., "Advanced Aircraft Design", 2013 (equation 10.30)

    Inputs:
    wing.
      areas.reference     [m^2]
      aspect_ratio        [-]
      thickness_to_chord  [-]

    Outputs:
    wing.fuel_volume      [m^3]

    Properties Used:
    N/A
    """
    # Unpack
    sref = wing.areas.reference
    ar   = wing.aspect_ratio
    tc   = wing.thickness_to_chord

    # Torenbeek eq. 10.30: 0.90 * (t/c) * S^1.5 / sqrt(AR), scaled by a
    # 0.55 usable-volume factor.
    volume = 0.90 * tc * sref ** 1.5 * ar ** -0.5 * 0.55

    # Pack (docstring previously claimed `wing.volume`; the attribute actually
    # written is fuel_volume)
    wing.fuel_volume = volume
"""Find zero-crossings for individual cycles."""
from operator import gt, lt
import numpy as np
###################################################################################################
###################################################################################################
def find_zerox(sig, peaks, troughs):
    """Find zero-crossings within each cycle, from identified peaks and troughs.

    Parameters
    ----------
    sig : 1d array
        Time series.
    peaks : 1d array
        Samples of oscillatory peaks.
    troughs : 1d array
        Samples of oscillatory troughs.

    Returns
    -------
    rises : 1d array
        Samples at which oscillatory rising zero-crossings occur.
    decays : 1d array
        Samples at which oscillatory decaying zero-crossings occur.

    Notes
    -----
    A zero-crossing is where the voltage passes midway between one extremum
    and the next (a 'rise' is halfway from trough to peak). When the halfway
    voltage is crossed several times, the temporal median is used. If a
    supposed peak lies below its neighbouring trough, the crossing is placed
    halfway between the two extrema. Restrict estimation to detected bursts
    for reliable results.
    """
    count_rises = len(peaks)
    count_decays = len(troughs)

    # The pairing offset depends on whether the series starts on a peak
    # or on a trough.
    if peaks[0] < troughs[0]:
        count_rises -= 1
        idx_bias = 0
    else:
        count_decays -= 1
        idx_bias = 1

    rises = _find_flank_midpoints(sig, 'rise', count_rises, troughs, peaks, idx_bias)
    decays = _find_flank_midpoints(sig, 'decay', count_decays, peaks, troughs, idx_bias)

    return rises, decays
def find_flank_zerox(sig, flank):
    """Find zero-crossings on rising or decaying flanks of a filtered signal.

    Parameters
    ----------
    sig : 1d array
        Time series to detect zero-crossings in.
    flank : {'rise', 'decay'}
        Which flank, rise or decay, to use to get zero crossings.

    Returns
    -------
    zero_xs : 1d array
        Samples of the zero crossings.
    """
    assert flank in ['rise', 'decay']

    # Mark the samples on the "before" side of the requested crossing.
    if flank == 'rise':
        marked = sig <= 0
    else:
        marked = sig > 0

    # A crossing is a marked sample immediately followed by an unmarked one.
    zero_xs = np.nonzero(marked[:-1] & ~marked[1:])[0]

    # No crossing found (peak and trough at the same voltage): fall back to
    # the midpoint of the signal as a dummy value.
    if len(zero_xs) == 0:
        zero_xs = [int(len(sig) / 2)]

    return zero_xs
def _find_flank_midpoints(sig, flank, n_flanks, extrema_start, extrema_end, idx_bias):
    """Helper function for find_zerox.

    Locate the midpoint crossing between each pair of consecutive extrema.
    """
    assert flank in ['rise', 'decay']

    # Rising flanks pair trough -> peak, so the end-extrema index shifts.
    end_bias = 1 - idx_bias if flank == 'rise' else idx_bias
    comparator = gt if flank == 'rise' else lt

    midpoints = np.zeros(n_flanks, dtype=int)
    for flank_num in range(n_flanks):
        start = extrema_start[flank_num]
        segment = np.copy(sig[start:extrema_end[flank_num + end_bias] + 1])
        # Center the segment so the flank midpoint sits at zero.
        segment -= (segment[0] + segment[-1]) / 2.

        if np.sum(np.abs(segment)) == 0:
            # Flat segment: place the crossing halfway through it.
            midpoints[flank_num] = start + int(len(segment) / 2.)
        elif comparator(segment[0], segment[-1]):
            # The "flank" is actually an extremum: also place it halfway.
            midpoints[flank_num] = start + int(len(segment) / 2.)
        else:
            midpoints[flank_num] = start + int(np.median(find_flank_zerox(segment, flank)))

    return midpoints
| 4,559 | 1,531 |
# coding: utf-8
import ctypes
# from objc_util import *
#import headers.gl_c
#import headers.glext_c
from OpenGLES.GLES.headers.gl_c import *
from OpenGLES.GLES.headers.glext_c import *
#reload(headers.gl_c)
# reload(headers.glext_c)
# ObjCClass("NSBundle").bundleWithPath_("/System/Library/Frameworks/OpenGLES.framework").load()
| 334 | 127 |
import os
# Extension version, reported to Sphinx via setup().
__version__ = '1.0.1'
def get_path():
    """Return the absolute path of the directory containing this module."""
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(module_dir)
def setup(app):
    """Sphinx extension entry point: declare version and parallel-read safety."""
    metadata = {
        'version': __version__,
        'parallel_read_safe': True,
    }
    return metadata
| 209 | 79 |
import matplotlib.pyplot as plt

# Grey window background with a lighter grey plotting area.
fig = plt.figure(facecolor="#979899")
ax = plt.gca()
ax.set_facecolor("#d1d1d1")
# One tick per day (Tue 11/13 .. Sat 11/17); y axis labelled as percentages.
plt.xticks([1,2,3,4,5],["11/13\nTue","11/14\nWed","11/15\nThu","11/16\nFri","11/17\nSat"])
plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5],["0 %","0.1 %","0.2 %","0.3 %","0.4 %","0.5 %"])
x = [1,2,3,4,5]
y = [0.31,0.22,0.22,0.22,0.21]
# Annotate each data point with its value, slightly offset from the marker.
for i,item in enumerate(y):
    xP = x[i]
    yP = y[i]
    # plt.text(xP-0.1,yP+0.01,str(item) + "%",fontsize=11)
    plt.text(xP-0.1,yP+0.01,str(item),fontsize=11)
# Markers plus a connecting line, then show the window.
plt.scatter(x,y)
plt.plot(x,y)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
def read_drifter(filename):
    """Parse a DRIFTER.OUT particle-track file.

    File layout: line 4 holds NPD (the number of particles); from line 5 on,
    timestamp lines alternate with coordinate lines, each coordinate line
    holding consecutive (x, y, z) triples for all NPD particles.

    :param filename: path to the drifter output file.
    :returns: tuple (drift_x, drift_y, drift_z) of float arrays with shape
        (n_times, NPD); zero x/y positions are masked to NaN.
    """
    with open(filename) as f:
        lines = f.readlines()

    # NPD, number of particles, specified on line 4
    npd = int(float(lines[3].split()[0]))
    times_list = lines[4::2]
    drifter_list = lines[5::2]

    n_times = len(times_list)
    times_np = np.zeros([n_times])
    drift_x = np.zeros([n_times, npd])
    drift_y = np.zeros([n_times, npd])
    drift_z = np.zeros([n_times, npd])

    # Each particle contributes three consecutive columns (x, y, z).
    # (The original per-timestep if/else produced identical step/offset values
    # in both branches, so that dead branch is removed here.)
    step = 3
    for t in range(n_times):
        times_np[t] = float(times_list[t].split()[0])
        # Split each coordinate line once, not once per particle.
        fields = drifter_list[t].split()
        for d in range(npd):
            drift_x[t, d] = float(fields[d * step])
            drift_y[t, d] = float(fields[1 + d * step])
            drift_z[t, d] = float(fields[2 + d * step])

    # Zero coordinates mark inactive particles; mask them out of the plot.
    drift_x[drift_x == 0] = np.nan
    drift_y[drift_y == 0] = np.nan

    return drift_x, drift_y, drift_z
def main():
    """Load the local DRIFTER.OUT file and plot all particle tracks."""
    drifter_filename = 'DRIFTER.OUT'
    drift_x, drift_y, drift_z = read_drifter(drifter_filename)
    plt.plot(drift_x , drift_y, '.')
    # Reference line at y=260 (presumably a domain boundary - verify).
    plt.plot([0,105], [260,260])

if __name__ == "__main__":
    main()
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import json
import psycopg2
from typing import Dict, List, Tuple, Union
from abc import abstractmethod
import src.helpers
import src.util
from src.base_with_database_logger import BaseWithDatabaseAndLogger
from src.client.custom_sdk_client import CustomClient
from src.helpers import DBMode
import src.sql_queries
class TradingAlgorithm(BaseWithDatabaseAndLogger):
    """Base class for database-configured trading algorithms.

    On construction the algorithm id is resolved from the database, the
    configuration is loaded (falling back to ``example_config.json`` when the
    database is unavailable), the exchange fee/threshold maps are filled and
    the initial fund balances are fetched. Subclasses implement
    :meth:`trade_algorithm`.
    """

    def __init__(
        self,
        algo_name,
        mode,
        logger_wrapper: src.util.LoggerWrapper,
        open_db_connection=False,
        client=None
    ):
        """
        :param algo_name: unique algorithm name used to look up id and config
        :param mode: run mode; DBMode.DEV/TEST enable simulation
        :param logger_wrapper: shared logger wrapper
        :param open_db_connection: forwarded to the database base class
        :param client: optional pre-built API client; a CustomClient is
            created from the API_KEY_BLOCKSIZE env var when omitted
        :raises Exception: when the algo id lookup does not return exactly
            one row
        """
        super().__init__(mode, logger_wrapper, open_db_connection)
        self.__name: str = algo_name
        query_id = src.sql_queries.query_algo_id(self.name)
        raw_result = self.db_connector.execute_dql(query_id)
        if len(raw_result) == 1:
            self.__algo_id = raw_result[0][0]
        else:
            # Zero rows and multiple rows are both configuration errors; the
            # old message claimed "Too many results" even for zero rows.
            raise Exception(
                f"Expected exactly one algo id for '{self.name}', "
                f"got {len(raw_result)} result rows"
            )
        self.__current_order_id = None
        self.logger_wrapper.order_id = self.__current_order_id
        # DEV/TEST environments never place real orders.
        if self.mode in (DBMode.DEV, DBMode.TEST):
            self.__simulation = True
        else:
            self.__simulation = False
        self.__configuration = self.load_config()
        if client:
            self.__client = client
        else:
            self.__client = CustomClient(
                os.getenv('API_KEY_BLOCKSIZE'),
                logger=self.logger_wrapper.logger
            )
        try:
            self.exchanges = None
            exchange_configs = self.configuration["EXCHANGES"]
            # TODO: remove BASE and QUOTE because they are replaced with
            self.base = self.configuration["BASE"]
            self.quote = self.configuration["QUOTE"]
            # NOTE(review): "PRESCISION" looks like a typo for "PRECISION",
            # but it must match the key stored in the configuration source —
            # confirm both sides before renaming.
            self.precision = self.configuration["PRESCISION"]
            self.lot_size = float(self.configuration["LOT_SIZE"])
            self.min_lot_size = float(self.configuration["MIN_LOT_SIZE"])
            self.fund_update_lock_period = self.configuration["FUND_UPDATE_LOCK_PERIOD"]
            self.slippage_buffer_bps = self.configuration["SLIPPAGE_BUFFER_BPS"]
            self.fund_buffer = float(self.configuration["FUND_BUFFER"])
            # Map each configured currency-pair symbol to the exchanges that
            # actually trade it, and collect the set of involved currencies.
            currencies = set()
            self.currency_pair_exchange_association = {}
            for currency_pair in self.configuration["CURRENCY_PAIRS"]:
                currencies.add(currency_pair['code_base'])
                currencies.add(currency_pair['code_quote'])
                self.currency_pair_exchange_association[currency_pair['symbol']] = []
                for exchange_key, exchange in self.configuration["EXCHANGES"].items():
                    for exchange_currency_pairs in exchange['CURRENCY PAIRS']:
                        if exchange_currency_pairs['symbol'] == currency_pair['symbol']:
                            self.currency_pair_exchange_association[currency_pair['symbol']].append(exchange_key)
                            break
            self.currencies = list(currencies)
            self.set_exchange_data(exchange_configs)
            self._init_fund_map()
            self.update_funds()
        except Exception:
            # Deliberate best-effort: a mis-configured trader is logged with
            # traceback rather than aborting construction.
            self.logger_wrapper.logger.error(
                "Error during configuration of the trader", exc_info=True
            )

    @abstractmethod
    def trade_algorithm(self):
        """Concrete trading strategy; must be implemented by subclasses."""
        pass

    @property
    def client(self):
        """API client used to query funds and place orders."""
        # NOTE: the original defined this identical property twice; the
        # duplicate (which silently shadowed this one) has been removed.
        return self.__client

    @property
    def algo_id(self):
        """Database id of this algorithm."""
        return self.__algo_id

    @property
    def current_order_id(self):
        """Id of the order currently being worked on (None when idle)."""
        return self.__current_order_id

    @property
    def name(self):
        """Algorithm name as stored in the database."""
        return self.__name

    @property
    def simulation(self):
        """True when running in DEV/TEST mode (no real orders)."""
        return self.__simulation

    @property
    def configuration(self):
        """Configuration dict produced by :meth:`load_config`."""
        return self.__configuration

    @name.setter
    def name(self, name):
        self.__name = name

    @current_order_id.setter
    def current_order_id(self, order_id):
        self.__current_order_id = order_id

    def set_exchange_data(self, exchanges_config: Dict[str, Dict[str, Union[float, Dict]]]):
        """Populate ``self.exchanges``, ``self.fee_map`` and
        ``self.threshold_map`` from the EXCHANGES configuration section.

        NOTE(review): ``fee_map`` and ``threshold_map`` are never initialised
        in this class — presumably the base class provides them; verify.
        """
        self.exchanges = list(exchanges_config.keys())
        for main_exchange, exchange_settings in exchanges_config.items():
            self.fee_map[main_exchange] = exchange_settings["FEE"]
            for ask_exchange in self.exchanges:
                if ask_exchange == main_exchange:
                    continue
                if main_exchange not in self.threshold_map.keys():
                    self.threshold_map[main_exchange] = {}
                # Prefer the pair-specific threshold; fall back to DEFAULT.
                if ask_exchange in exchange_settings["THRESHOLDS"].keys():
                    self.threshold_map[main_exchange][ask_exchange] = exchange_settings["THRESHOLDS"][ask_exchange]
                else:
                    self.threshold_map[main_exchange][ask_exchange] = exchange_settings["THRESHOLDS"]["DEFAULT"]

    def update_funds(self):
        """Refresh ``self.funds`` with balances reported by the client.

        Only exchanges/currencies relevant to this algorithm are stored.
        """
        balances_raw_resp = self.client.query_funds()
        balances_all = balances_raw_resp.get('funds')
        for item in balances_all:
            exchange = item.get('name')
            if exchange not in self.exchanges:
                continue
            balance = item.get('balances')
            # if exchange should have data and it doesn't stop balance collection and return None
            # reason: with incomplete balance statements we end up with wrong portfolio values
            if balance is None:
                self.logger_wrapper.logger.debug(
                    f"exchange data was missing, exchange: {exchange}"
                )
                # Todo implement multiple retries
                # NOTE(review): this recursion is unbounded — if an exchange
                # keeps returning no balances it never terminates; cap it.
                self.update_funds()
                return None
            for balance_item in balance:
                currency = balance_item.get('currency')
                if currency not in self.currencies:
                    continue
                self.funds[exchange][currency] = float(balance_item.get("amount"))

    # Fund Management
    #
    def _init_fund_map(self):
        """Zero-initialise ``self.funds[exchange][currency]`` for the base
        and quote currency of every configured exchange."""
        self.funds = {}
        for exchange in self.exchanges:
            self.funds[exchange]: Dict[str, float] = {}
            for currency in [self.base, self.quote]:
                self.funds[exchange][currency] = 0.0

    def load_config(self):
        """Load the algorithm configuration from the database.

        Falls back to ``example_config.json`` when the database query fails.
        Returns a dict of plain key/value settings plus 'CURRENCY_PAIRS' and
        'EXCHANGES' entries.
        """
        try:
            with self.db_connector.connection as conn:
                with conn.cursor() as cursor:
                    # query of standard configuration for trading algorithm
                    algo_config_query = src.sql_queries.query_algo_configuration(self.name)
                    cursor.execute(algo_config_query)
                    result_algo_configuration = cursor.fetchall()
                    # NOTE(review): built but never executed/used — dead?
                    query_currency_pairs_with_symbols = src.sql_queries.query_currency_pairs()
                    # query of currencies associated to algorithm
                    currency_pairs_query = src.sql_queries.query_algo_specific_currency_pairs(self.name)
                    cursor.execute(currency_pairs_query)
                    result_currency_pairs = cursor.fetchall()
                    currency_pairs = [{"code_base": item[2], "code_quote": item[4], "symbol": item[5]} for item in result_currency_pairs]
                    # query for exchanges
                    cursor.execute(src.sql_queries.query_algo_exchange_association(self.name))
                    result_exchanges = cursor.fetchall()
                    exchanges = {exchange[1]: {'EXCHANGE_NAME': exchange[1], "ID": exchange[0]} for exchange in result_exchanges}
                    # currency pairs available at exchanges
                    for key, exchange in exchanges.items():
                        cursor.execute(src.sql_queries.query_exchange_currency_pairs(self.name, exchange['ID']))
                        result_currency_pair_exchange = cursor.fetchall()
                        exchanges[key]['CURRENCY PAIRS'] = [{"code_base": item[1], "code_quote": item[2], "symbol": item[3]} for item in result_currency_pair_exchange]
                    # TODO: fees — hard-coded to zero until stored in the DB
                    for key, exchange in exchanges.items():
                        exchanges[key]['FEE'] = {"BUY": 0, "SELL": 0, "LIMIT_BUY": 0, "LIMIT_SELL": 0}
                    # TODO: thresholds — hard-coded until stored in the DB
                    for key, exchange in exchanges.items():
                        exchanges[key]['THRESHOLDS'] = {'DEFAULT': -25}
                    configuration = {item[1]: item[2] for item in result_algo_configuration}
                    configuration['CURRENCY_PAIRS'] = currency_pairs
                    configuration['EXCHANGES'] = exchanges
                    return configuration
        except (Exception, psycopg2.Error):
            self.logger_wrapper.logger.error("Unable to fetch configuration from database", exc_info=True)
            with open("example_config.json") as config_file:
                configuration = json.load(config_file)
            return configuration
| 9,013 | 2,447 |
import numpy as np

# ---------------------------------------------------------------------------
# Synthetic population: distribute `population_size` people over nested
# administrative levels (admin1 regions -> admin2 districts).
# ---------------------------------------------------------------------------
population_size = 35000000

# Admin level 1: 10 regions with fixed population shares.
admin1_nb = 10
admin1_share = np.array([0.005, 0.020, 0.045, 0.075, 0.095, 0.105, 0.125, 0.130, 0.150, 0.250])
admin1_size = admin1_share * population_size
# Exact float equality (`sum(...) != 1.000`) is fragile for a sum of floats;
# compare with a tolerance instead.
if not np.isclose(admin1_share.sum(), 1.0):
    raise AssertionError("The admin level 1 shares must sum to 1")
# admin1 = np.random.choice(
#     a=np.linspace(1, admin1_nb, admin1_nb, dtype="int8"), size=population_size, p=admin1_share
# )

# Admin level 2: 45 districts nested inside the 10 admin-1 regions.
admin2_nb = 45
admin2_nb_by_admnin1 = np.array([1, 1, 2, 3, 5, 6, 5, 7, 10, 5])
# Each admin2 share vector is a within-region split scaled by the region's
# admin1 share, so the concatenation sums (up to rounding) to 1.
admin2_share_1 = np.array([1]) * admin1_share[0]
admin2_share_2 = np.array([1]) * admin1_share[1]
admin2_share_3 = np.array([0.3, 0.7]) * admin1_share[2]
admin2_share_4 = np.array([0.4, 0.4, 0.2]) * admin1_share[3]
admin2_share_5 = (np.ones(5) / 5) * admin1_share[4]
admin2_share_6 = (np.ones(6) / 6) * admin1_share[5]
admin2_share_7 = np.linspace(1, 10, 5) / sum(np.linspace(1, 10, 5)) * admin1_share[6]
admin2_share_8 = np.linspace(1, 10, 7) / sum(np.linspace(1, 10, 7)) * admin1_share[7]
admin2_share_9 = np.linspace(1, 10, 10) / sum(np.linspace(1, 10, 10)) * admin1_share[8]
admin2_share_10 = np.linspace(1, 10, 5) / sum(np.linspace(1, 10, 5)) * admin1_share[9]
admin2_share = np.concatenate(
    (
        admin2_share_1,
        admin2_share_2,
        admin2_share_3,
        admin2_share_4,
        admin2_share_5,
        admin2_share_6,
        admin2_share_7,
        admin2_share_8,
        admin2_share_9,
        admin2_share_10,
    )
)
# Renormalise so np.random.choice gets probabilities that sum exactly to 1.
admin2_share = admin2_share / sum(admin2_share)
admin2 = np.random.choice(
    a=np.linspace(1, admin2_nb, admin2_nb, dtype="int8"), size=population_size, p=admin2_share,
)
_, size2 = np.unique(admin2, return_counts=True)
# print(size2 / population_size)
# print(admin2)

# Lower admin levels are only sized here, not sampled.
number_admin3 = 120  # equivalent to health district for this use case
number_admin4 = 550
number_admin5 = 1250
# proportion_female = 0.55

# Age pyramids: percent of the total population per sex and 5-year bracket.
# NOTE(review): the urban and rural tables below are byte-identical —
# presumably placeholders; confirm the intended rural figures.
female_age_distribution_urban = {
    "0-4": 7.3,
    "5-9": 6.6,
    "10-14": 5.8,
    "15-19": 5.1,
    "20-24": 4.4,
    "25-29": 3.9,
    "30-34": 3.5,
    "35-39": 3.0,
    "40-44": 2.4,
    "45-49": 1.9,
    "50-54": 1.6,
    "55-59": 1.3,
    "60-64": 1.1,
    "65-69": 0.8,
    "70-74": 0.6,
    "75-79": 0.3,
    "80-84": 0.2,
    "85-89": 0.06,
    "90-94": 0.03,
    "95-99": 0.01,
    "100+": 0.0,
}
male_age_distribution_urban = {
    "0-4": 7.5,
    "5-9": 6.8,
    "10-14": 6.0,
    "15-19": 5.2,
    "20-24": 4.5,
    "25-29": 3.9,
    "30-34": 3.5,
    "35-39": 2.9,
    "40-44": 2.4,
    "45-49": 1.9,
    "50-54": 1.5,
    "55-59": 1.2,
    "60-64": 1.0,
    "65-69": 0.7,
    "70-74": 0.5,
    "75-79": 0.3,
    "80-84": 0.1,
    "85-89": 0.1,
    "90-94": 0.05,
    "95-99": 0.03,
    "100+": 0.02,
}
female_age_distribution_rural = {
    "0-4": 7.3,
    "5-9": 6.6,
    "10-14": 5.8,
    "15-19": 5.1,
    "20-24": 4.4,
    "25-29": 3.9,
    "30-34": 3.5,
    "35-39": 3.0,
    "40-44": 2.4,
    "45-49": 1.9,
    "50-54": 1.6,
    "55-59": 1.3,
    "60-64": 1.1,
    "65-69": 0.8,
    "70-74": 0.6,
    "75-79": 0.3,
    "80-84": 0.2,
    "85-89": 0.06,
    "90-94": 0.03,
    "95-99": 0.01,
    "100+": 0.0,
}
male_age_distribution_rural = {
    "0-4": 7.5,
    "5-9": 6.8,
    "10-14": 6.0,
    "15-19": 5.2,
    "20-24": 4.5,
    "25-29": 3.9,
    "30-34": 3.5,
    "35-39": 2.9,
    "40-44": 2.4,
    "45-49": 1.9,
    "50-54": 1.5,
    "55-59": 1.2,
    "60-64": 1.0,
    "65-69": 0.7,
    "70-74": 0.5,
    "75-79": 0.3,
    "80-84": 0.1,
    "85-89": 0.1,
    "90-94": 0.05,
    "95-99": 0.03,
    "100+": 0.02,
}
# Sanity-check snippets kept from the original author:
# print(sum(list(female_age_distribution.values())))
# print(sum(list(male_age_distribution.values())))
# print(np.random.choice((1, 2, 3), size=150000, p=(0.1, 0.2, 0.7)))
| 3,705 | 2,212 |
import sys
import yaml
def persona(old, new, overwrite_language):
    """Copy the `overwrite_language` translations present in `new` over the
    matching keys of `old` (mutated in place)."""
    target = old['translations']
    source = new['translations']
    for key, entry in target.items():
        if key in source and overwrite_language in source[key]:
            entry[overwrite_language] = source[key][overwrite_language]
def questions(old, new, overwrite_language):
    """Overwrite the `overwrite_language` texts of each question in `old`
    with the corresponding texts from `new` (paired by position), in place."""
    for target, source in zip(old, new):
        # Question-level translatable fields.
        for field in ('text', 'explanation', 'explanationmore'):
            if overwrite_language in source[field]:
                target[field][overwrite_language] = source[field][overwrite_language]
        # Option details only exist for multiple-choice questions.
        if target['type'] == 'multiple_choice':
            for t_opt, s_opt in zip(target['options'], source['options']):
                if 'details' in t_opt and overwrite_language in s_opt['details']:
                    t_opt['details'][overwrite_language] = s_opt['details'][overwrite_language]
def main(mode, base_file, new_file=None, overwrite_language=None):
    """Load `base_file`, optionally merge one language's translations from
    `new_file`, and dump the result as YAML to stdout.

    :param mode: merge strategy, 'persona' or 'questions'
    :param base_file: path to the YAML document to update
    :param new_file: path to the YAML document providing new translations
    :param overwrite_language: two-letter language code to copy over
    """
    # `file()` is a Python 2 builtin (NameError on Python 3); use open() via
    # a context manager. safe_load avoids executing arbitrary YAML tags.
    with open(base_file) as fh:
        old = yaml.safe_load(fh)
    if new_file is not None and overwrite_language is not None:
        with open(new_file) as fh:
            new = yaml.safe_load(fh)
        assert len(overwrite_language) == 2  # expect an ISO 639-1 code
        if mode == 'persona':
            persona(old, new, overwrite_language)
        elif mode == 'questions':
            questions(old, new, overwrite_language)
    # No `encoding=` argument: with it, safe_dump returns bytes, which
    # sys.stdout.write rejects on Python 3.
    sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, width=10000))


if __name__ == '__main__':
    main(*sys.argv[1:])
| 1,656 | 524 |
from django.views.generic import TemplateView, CreateView
from django.urls import reverse_lazy
from .models import Contact
class HomeView(TemplateView):
    """Render the static home page."""
    template_name = 'page/home.html'
class AboutView(TemplateView):
    """Render the static about page."""
    template_name = 'page/about.html'
class ContactView(CreateView):
    """Display and process the contact form; redirect to '/' on success."""
    template_name = 'page/contact.html'
    success_url = '/'
    model = Contact
    # NOTE(review): 'infromation' looks like a typo for 'information', but it
    # must match the Contact model field name — confirm against models.py
    # before renaming either side.
    fields = ('firstname', 'lastname', 'phone', 'email', 'infromation')
| 449 | 134 |