text stringlengths 1 1.05M |
|---|
from typing import List
def calculate_coverage_percentage(coverage_info: List[str]) -> float:
    """Compute the percentage of covered lines in a coverage report.

    A line counts as covered when its stripped text is non-empty and is not
    the ``#####`` marker (the sentinel gcov uses for unexecuted lines —
    presumably this input is gcov-style output; confirm against the caller).

    :param coverage_info: one entry per source line of the coverage report
    :return: ``covered / total * 100``, or ``0.0`` for an empty report
        (the original raised ``ZeroDivisionError`` in that case)
    """
    total_lines = len(coverage_info)
    if total_lines == 0:
        # Guard against division by zero on an empty report.
        return 0.0
    covered_lines = 0
    for line in coverage_info:
        stripped = line.strip()  # strip once, test twice
        if stripped and stripped != "#####":
            covered_lines += 1
    return (covered_lines / total_lines) * 100
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Ads
module GoogleAds
module V8
module Errors
# Describes how a GoogleAds API call failed. It's returned inside
# google.rpc.Status.details when a call fails.
# @!attribute [rw] errors
# @return [::Array<::Google::Ads::GoogleAds::V8::Errors::GoogleAdsError>]
# The list of errors that occurred.
# @!attribute [rw] request_id
# @return [::String]
# The unique id of the request that is used for debugging purposes.
class GoogleAdsFailure
# Generated protobuf message class: the errors/request_id accessors
# documented above are supplied by the proto descriptor at load time,
# not defined here.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# GoogleAds-specific error.
# @!attribute [rw] error_code
# @return [::Google::Ads::GoogleAds::V8::Errors::ErrorCode]
# An enum value that indicates which error occurred.
# @!attribute [rw] message
# @return [::String]
# A human-readable description of the error.
# @!attribute [rw] trigger
# @return [::Google::Ads::GoogleAds::V8::Common::Value]
# The value that triggered the error.
# @!attribute [rw] location
# @return [::Google::Ads::GoogleAds::V8::Errors::ErrorLocation]
# Describes the part of the request proto that caused the error.
# @!attribute [rw] details
# @return [::Google::Ads::GoogleAds::V8::Errors::ErrorDetails]
# Additional error details, which are returned by certain error codes. Most
# error codes do not include details.
class GoogleAdsError
# Generated protobuf message class: the error_code/message/trigger/
# location/details accessors documented above come from the proto
# descriptor at load time.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The error reason represented by type and enum.
# @!attribute [rw] request_error
# @return [::Google::Ads::GoogleAds::V8::Errors::RequestErrorEnum::RequestError]
# An error caused by the request
# @!attribute [rw] bidding_strategy_error
# @return [::Google::Ads::GoogleAds::V8::Errors::BiddingStrategyErrorEnum::BiddingStrategyError]
# An error with a Bidding Strategy mutate.
# @!attribute [rw] url_field_error
# @return [::Google::Ads::GoogleAds::V8::Errors::UrlFieldErrorEnum::UrlFieldError]
# An error with a URL field mutate.
# @!attribute [rw] list_operation_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ListOperationErrorEnum::ListOperationError]
# An error with a list operation.
# @!attribute [rw] query_error
# @return [::Google::Ads::GoogleAds::V8::Errors::QueryErrorEnum::QueryError]
# An error with an AWQL query
# @!attribute [rw] mutate_error
# @return [::Google::Ads::GoogleAds::V8::Errors::MutateErrorEnum::MutateError]
# An error with a mutate
# @!attribute [rw] field_mask_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FieldMaskErrorEnum::FieldMaskError]
# An error with a field mask
# @!attribute [rw] authorization_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AuthorizationErrorEnum::AuthorizationError]
# An error encountered when trying to authorize a user.
# @!attribute [rw] internal_error
# @return [::Google::Ads::GoogleAds::V8::Errors::InternalErrorEnum::InternalError]
# An unexpected server-side error.
# @!attribute [rw] quota_error
# @return [::Google::Ads::GoogleAds::V8::Errors::QuotaErrorEnum::QuotaError]
# An error with the amount of quota remaining.
# @!attribute [rw] ad_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdErrorEnum::AdError]
# An error with an Ad Group Ad mutate.
# @!attribute [rw] ad_group_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdGroupErrorEnum::AdGroupError]
# An error with an Ad Group mutate.
# @!attribute [rw] campaign_budget_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignBudgetErrorEnum::CampaignBudgetError]
# An error with a Campaign Budget mutate.
# @!attribute [rw] campaign_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignErrorEnum::CampaignError]
# An error with a Campaign mutate.
# @!attribute [rw] authentication_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AuthenticationErrorEnum::AuthenticationError]
# Indicates failure to properly authenticate user.
# @!attribute [rw] ad_group_criterion_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdGroupCriterionErrorEnum::AdGroupCriterionError]
# The reasons for the ad group criterion error.
# @!attribute [rw] ad_customizer_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdCustomizerErrorEnum::AdCustomizerError]
# The reasons for the ad customizer error
# @!attribute [rw] ad_group_ad_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdGroupAdErrorEnum::AdGroupAdError]
# The reasons for the ad group ad error
# @!attribute [rw] ad_sharing_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdSharingErrorEnum::AdSharingError]
# The reasons for the ad sharing error
# @!attribute [rw] adx_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdxErrorEnum::AdxError]
# The reasons for the adx error
# @!attribute [rw] asset_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AssetErrorEnum::AssetError]
# The reasons for the asset error
# @!attribute [rw] bidding_error
# @return [::Google::Ads::GoogleAds::V8::Errors::BiddingErrorEnum::BiddingError]
# The reasons for the bidding errors
# @!attribute [rw] campaign_criterion_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignCriterionErrorEnum::CampaignCriterionError]
# The reasons for the campaign criterion error
# @!attribute [rw] collection_size_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CollectionSizeErrorEnum::CollectionSizeError]
# The reasons for the collection size error
# @!attribute [rw] country_code_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CountryCodeErrorEnum::CountryCodeError]
# The reasons for the country code error
# @!attribute [rw] criterion_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CriterionErrorEnum::CriterionError]
# The reasons for the criterion error
# @!attribute [rw] customer_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomerErrorEnum::CustomerError]
# The reasons for the customer error
# @!attribute [rw] date_error
# @return [::Google::Ads::GoogleAds::V8::Errors::DateErrorEnum::DateError]
# The reasons for the date error
# @!attribute [rw] date_range_error
# @return [::Google::Ads::GoogleAds::V8::Errors::DateRangeErrorEnum::DateRangeError]
# The reasons for the date range error
# @!attribute [rw] distinct_error
# @return [::Google::Ads::GoogleAds::V8::Errors::DistinctErrorEnum::DistinctError]
# The reasons for the distinct error
# @!attribute [rw] feed_attribute_reference_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedAttributeReferenceErrorEnum::FeedAttributeReferenceError]
# The reasons for the feed attribute reference error
# @!attribute [rw] function_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FunctionErrorEnum::FunctionError]
# The reasons for the function error
# @!attribute [rw] function_parsing_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FunctionParsingErrorEnum::FunctionParsingError]
# The reasons for the function parsing error
# @!attribute [rw] id_error
# @return [::Google::Ads::GoogleAds::V8::Errors::IdErrorEnum::IdError]
# The reasons for the id error
# @!attribute [rw] image_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ImageErrorEnum::ImageError]
# The reasons for the image error
# @!attribute [rw] language_code_error
# @return [::Google::Ads::GoogleAds::V8::Errors::LanguageCodeErrorEnum::LanguageCodeError]
# The reasons for the language code error
# @!attribute [rw] media_bundle_error
# @return [::Google::Ads::GoogleAds::V8::Errors::MediaBundleErrorEnum::MediaBundleError]
# The reasons for the media bundle error
# @!attribute [rw] media_upload_error
# @return [::Google::Ads::GoogleAds::V8::Errors::MediaUploadErrorEnum::MediaUploadError]
# The reasons for media uploading errors.
# @!attribute [rw] media_file_error
# @return [::Google::Ads::GoogleAds::V8::Errors::MediaFileErrorEnum::MediaFileError]
# The reasons for the media file error
# @!attribute [rw] multiplier_error
# @return [::Google::Ads::GoogleAds::V8::Errors::MultiplierErrorEnum::MultiplierError]
# The reasons for the multiplier error
# @!attribute [rw] new_resource_creation_error
# @return [::Google::Ads::GoogleAds::V8::Errors::NewResourceCreationErrorEnum::NewResourceCreationError]
# The reasons for the new resource creation error
# @!attribute [rw] not_empty_error
# @return [::Google::Ads::GoogleAds::V8::Errors::NotEmptyErrorEnum::NotEmptyError]
# The reasons for the not empty error
# @!attribute [rw] null_error
# @return [::Google::Ads::GoogleAds::V8::Errors::NullErrorEnum::NullError]
# The reasons for the null error
# @!attribute [rw] operator_error
# @return [::Google::Ads::GoogleAds::V8::Errors::OperatorErrorEnum::OperatorError]
# The reasons for the operator error
# @!attribute [rw] range_error
# @return [::Google::Ads::GoogleAds::V8::Errors::RangeErrorEnum::RangeError]
# The reasons for the range error
# @!attribute [rw] recommendation_error
# @return [::Google::Ads::GoogleAds::V8::Errors::RecommendationErrorEnum::RecommendationError]
# The reasons for error in applying a recommendation
# @!attribute [rw] region_code_error
# @return [::Google::Ads::GoogleAds::V8::Errors::RegionCodeErrorEnum::RegionCodeError]
# The reasons for the region code error
# @!attribute [rw] setting_error
# @return [::Google::Ads::GoogleAds::V8::Errors::SettingErrorEnum::SettingError]
# The reasons for the setting error
# @!attribute [rw] string_format_error
# @return [::Google::Ads::GoogleAds::V8::Errors::StringFormatErrorEnum::StringFormatError]
# The reasons for the string format error
# @!attribute [rw] string_length_error
# @return [::Google::Ads::GoogleAds::V8::Errors::StringLengthErrorEnum::StringLengthError]
# The reasons for the string length error
# @!attribute [rw] operation_access_denied_error
# @return [::Google::Ads::GoogleAds::V8::Errors::OperationAccessDeniedErrorEnum::OperationAccessDeniedError]
# The reasons for the operation access denied error
# @!attribute [rw] resource_access_denied_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ResourceAccessDeniedErrorEnum::ResourceAccessDeniedError]
# The reasons for the resource access denied error
# @!attribute [rw] resource_count_limit_exceeded_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ResourceCountLimitExceededErrorEnum::ResourceCountLimitExceededError]
# The reasons for the resource count limit exceeded error
# @!attribute [rw] youtube_video_registration_error
# @return [::Google::Ads::GoogleAds::V8::Errors::YoutubeVideoRegistrationErrorEnum::YoutubeVideoRegistrationError]
# The reasons for YouTube video registration errors.
# @!attribute [rw] ad_group_bid_modifier_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdGroupBidModifierErrorEnum::AdGroupBidModifierError]
# The reasons for the ad group bid modifier error
# @!attribute [rw] context_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ContextErrorEnum::ContextError]
# The reasons for the context error
# @!attribute [rw] field_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FieldErrorEnum::FieldError]
# The reasons for the field error
# @!attribute [rw] shared_set_error
# @return [::Google::Ads::GoogleAds::V8::Errors::SharedSetErrorEnum::SharedSetError]
# The reasons for the shared set error
# @!attribute [rw] shared_criterion_error
# @return [::Google::Ads::GoogleAds::V8::Errors::SharedCriterionErrorEnum::SharedCriterionError]
# The reasons for the shared criterion error
# @!attribute [rw] campaign_shared_set_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignSharedSetErrorEnum::CampaignSharedSetError]
# The reasons for the campaign shared set error
# @!attribute [rw] conversion_action_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ConversionActionErrorEnum::ConversionActionError]
# The reasons for the conversion action error
# @!attribute [rw] conversion_adjustment_upload_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ConversionAdjustmentUploadErrorEnum::ConversionAdjustmentUploadError]
# The reasons for the conversion adjustment upload error
# @!attribute [rw] conversion_custom_variable_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ConversionCustomVariableErrorEnum::ConversionCustomVariableError]
# The reasons for the conversion custom variable error
# @!attribute [rw] conversion_upload_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ConversionUploadErrorEnum::ConversionUploadError]
# The reasons for the conversion upload error
# @!attribute [rw] conversion_value_rule_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ConversionValueRuleErrorEnum::ConversionValueRuleError]
# The reasons for the conversion value rule error
# @!attribute [rw] conversion_value_rule_set_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ConversionValueRuleSetErrorEnum::ConversionValueRuleSetError]
# The reasons for the conversion value rule set error
# @!attribute [rw] header_error
# @return [::Google::Ads::GoogleAds::V8::Errors::HeaderErrorEnum::HeaderError]
# The reasons for the header error.
# @!attribute [rw] database_error
# @return [::Google::Ads::GoogleAds::V8::Errors::DatabaseErrorEnum::DatabaseError]
# The reasons for the database error.
# @!attribute [rw] policy_finding_error
# @return [::Google::Ads::GoogleAds::V8::Errors::PolicyFindingErrorEnum::PolicyFindingError]
# The reasons for the policy finding error.
# @!attribute [rw] enum_error
# @return [::Google::Ads::GoogleAds::V8::Errors::EnumErrorEnum::EnumError]
# The reason for enum error.
# @!attribute [rw] keyword_plan_error
# @return [::Google::Ads::GoogleAds::V8::Errors::KeywordPlanErrorEnum::KeywordPlanError]
# The reason for keyword plan error.
# @!attribute [rw] keyword_plan_campaign_error
# @return [::Google::Ads::GoogleAds::V8::Errors::KeywordPlanCampaignErrorEnum::KeywordPlanCampaignError]
# The reason for keyword plan campaign error.
# @!attribute [rw] keyword_plan_campaign_keyword_error
# @return [::Google::Ads::GoogleAds::V8::Errors::KeywordPlanCampaignKeywordErrorEnum::KeywordPlanCampaignKeywordError]
# The reason for keyword plan campaign keyword error.
# @!attribute [rw] keyword_plan_ad_group_error
# @return [::Google::Ads::GoogleAds::V8::Errors::KeywordPlanAdGroupErrorEnum::KeywordPlanAdGroupError]
# The reason for keyword plan ad group error.
# @!attribute [rw] keyword_plan_ad_group_keyword_error
# @return [::Google::Ads::GoogleAds::V8::Errors::KeywordPlanAdGroupKeywordErrorEnum::KeywordPlanAdGroupKeywordError]
# The reason for keyword plan ad group keyword error.
# @!attribute [rw] keyword_plan_idea_error
# @return [::Google::Ads::GoogleAds::V8::Errors::KeywordPlanIdeaErrorEnum::KeywordPlanIdeaError]
# The reason for keyword idea error.
# @!attribute [rw] account_budget_proposal_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AccountBudgetProposalErrorEnum::AccountBudgetProposalError]
# The reasons for account budget proposal errors.
# @!attribute [rw] user_list_error
# @return [::Google::Ads::GoogleAds::V8::Errors::UserListErrorEnum::UserListError]
# The reasons for the user list error
# @!attribute [rw] change_event_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ChangeEventErrorEnum::ChangeEventError]
# The reasons for the change event error
# @!attribute [rw] change_status_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ChangeStatusErrorEnum::ChangeStatusError]
# The reasons for the change status error
# @!attribute [rw] feed_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedErrorEnum::FeedError]
# The reasons for the feed error
# @!attribute [rw] geo_target_constant_suggestion_error
# @return [::Google::Ads::GoogleAds::V8::Errors::GeoTargetConstantSuggestionErrorEnum::GeoTargetConstantSuggestionError]
# The reasons for the geo target constant suggestion error.
# @!attribute [rw] campaign_draft_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignDraftErrorEnum::CampaignDraftError]
# The reasons for the campaign draft error
# @!attribute [rw] feed_item_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedItemErrorEnum::FeedItemError]
# The reasons for the feed item error
# @!attribute [rw] label_error
# @return [::Google::Ads::GoogleAds::V8::Errors::LabelErrorEnum::LabelError]
# The reason for the label error.
# @!attribute [rw] billing_setup_error
# @return [::Google::Ads::GoogleAds::V8::Errors::BillingSetupErrorEnum::BillingSetupError]
# The reasons for the billing setup error
# @!attribute [rw] customer_client_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomerClientLinkErrorEnum::CustomerClientLinkError]
# The reasons for the customer client link error
# @!attribute [rw] customer_manager_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomerManagerLinkErrorEnum::CustomerManagerLinkError]
# The reasons for the customer manager link error
# @!attribute [rw] feed_mapping_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedMappingErrorEnum::FeedMappingError]
# The reasons for the feed mapping error
# @!attribute [rw] customer_feed_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomerFeedErrorEnum::CustomerFeedError]
# The reasons for the customer feed error
# @!attribute [rw] ad_group_feed_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdGroupFeedErrorEnum::AdGroupFeedError]
# The reasons for the ad group feed error
# @!attribute [rw] campaign_feed_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignFeedErrorEnum::CampaignFeedError]
# The reasons for the campaign feed error
# @!attribute [rw] custom_interest_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomInterestErrorEnum::CustomInterestError]
# The reasons for the custom interest error
# @!attribute [rw] campaign_experiment_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CampaignExperimentErrorEnum::CampaignExperimentError]
# The reasons for the campaign experiment error
# @!attribute [rw] extension_feed_item_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ExtensionFeedItemErrorEnum::ExtensionFeedItemError]
# The reasons for the extension feed item error
# @!attribute [rw] ad_parameter_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AdParameterErrorEnum::AdParameterError]
# The reasons for the ad parameter error
# @!attribute [rw] feed_item_validation_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedItemValidationErrorEnum::FeedItemValidationError]
# The reasons for the feed item validation error
# @!attribute [rw] extension_setting_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ExtensionSettingErrorEnum::ExtensionSettingError]
# The reasons for the extension setting error
# @!attribute [rw] feed_item_set_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedItemSetErrorEnum::FeedItemSetError]
# The reasons for the feed item set error
# @!attribute [rw] feed_item_set_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedItemSetLinkErrorEnum::FeedItemSetLinkError]
# The reasons for the feed item set link error
# @!attribute [rw] feed_item_target_error
# @return [::Google::Ads::GoogleAds::V8::Errors::FeedItemTargetErrorEnum::FeedItemTargetError]
# The reasons for the feed item target error
# @!attribute [rw] policy_violation_error
# @return [::Google::Ads::GoogleAds::V8::Errors::PolicyViolationErrorEnum::PolicyViolationError]
# The reasons for the policy violation error
# @!attribute [rw] partial_failure_error
# @return [::Google::Ads::GoogleAds::V8::Errors::PartialFailureErrorEnum::PartialFailureError]
# The reasons for the mutate job error
# @!attribute [rw] policy_validation_parameter_error
# @return [::Google::Ads::GoogleAds::V8::Errors::PolicyValidationParameterErrorEnum::PolicyValidationParameterError]
# The reasons for the policy validation parameter error
# @!attribute [rw] size_limit_error
# @return [::Google::Ads::GoogleAds::V8::Errors::SizeLimitErrorEnum::SizeLimitError]
# The reasons for the size limit error
# @!attribute [rw] offline_user_data_job_error
# @return [::Google::Ads::GoogleAds::V8::Errors::OfflineUserDataJobErrorEnum::OfflineUserDataJobError]
# The reasons for the offline user data job error.
# @!attribute [rw] not_allowlisted_error
# @return [::Google::Ads::GoogleAds::V8::Errors::NotAllowlistedErrorEnum::NotAllowlistedError]
# The reasons for the not allowlisted error
# @!attribute [rw] manager_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ManagerLinkErrorEnum::ManagerLinkError]
# The reasons for the manager link error
# @!attribute [rw] currency_code_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CurrencyCodeErrorEnum::CurrencyCodeError]
# The reasons for the currency code error
# @!attribute [rw] access_invitation_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AccessInvitationErrorEnum::AccessInvitationError]
# The reasons for the access invitation error
# @!attribute [rw] reach_plan_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ReachPlanErrorEnum::ReachPlanError]
# The reasons for the reach plan error
# @!attribute [rw] invoice_error
# @return [::Google::Ads::GoogleAds::V8::Errors::InvoiceErrorEnum::InvoiceError]
# The reasons for the invoice error
# @!attribute [rw] payments_account_error
# @return [::Google::Ads::GoogleAds::V8::Errors::PaymentsAccountErrorEnum::PaymentsAccountError]
# The reasons for errors in payments accounts service
# @!attribute [rw] time_zone_error
# @return [::Google::Ads::GoogleAds::V8::Errors::TimeZoneErrorEnum::TimeZoneError]
# The reasons for the time zone error
# @!attribute [rw] asset_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AssetLinkErrorEnum::AssetLinkError]
# The reasons for the asset link error
# @!attribute [rw] user_data_error
# @return [::Google::Ads::GoogleAds::V8::Errors::UserDataErrorEnum::UserDataError]
# The reasons for the user data error.
# @!attribute [rw] batch_job_error
# @return [::Google::Ads::GoogleAds::V8::Errors::BatchJobErrorEnum::BatchJobError]
# The reasons for the batch job error
# @!attribute [rw] account_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::AccountLinkErrorEnum::AccountLinkError]
# The reasons for the account link status change error
# @!attribute [rw] third_party_app_analytics_link_error
# @return [::Google::Ads::GoogleAds::V8::Errors::ThirdPartyAppAnalyticsLinkErrorEnum::ThirdPartyAppAnalyticsLinkError]
# The reasons for the third party app analytics link mutate error
# @!attribute [rw] customer_user_access_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomerUserAccessErrorEnum::CustomerUserAccessError]
# The reasons for the customer user access mutate error
# @!attribute [rw] custom_audience_error
# @return [::Google::Ads::GoogleAds::V8::Errors::CustomAudienceErrorEnum::CustomAudienceError]
# The reasons for the custom audience error
class ErrorCode
# Generated protobuf message class. The many *_error accessors documented
# above are supplied by the proto descriptor; presumably they form a proto
# oneof (only one set per error) -- confirm against the .proto definition.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Describes the part of the request proto that caused the error.
# @!attribute [rw] field_path_elements
# @return [::Array<::Google::Ads::GoogleAds::V8::Errors::ErrorLocation::FieldPathElement>]
# A field path that indicates which field was invalid in the request.
class ErrorLocation
# Generated protobuf message class; field_path_elements accessor is
# supplied by the proto descriptor.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# A part of a field path.
# @!attribute [rw] field_name
# @return [::String]
# The name of a field or a oneof
# @!attribute [rw] index
# @return [::Integer]
# If field_name is a repeated field, this is the element that failed
class FieldPathElement
# Nested generated message describing one segment of the invalid
# field's path (name plus optional repeated-field index).
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Additional error details.
# @!attribute [rw] unpublished_error_code
# @return [::String]
# The error code that should have been returned, but wasn't. This is used
# when the error code is not published in the client specified version.
# @!attribute [rw] policy_violation_details
# @return [::Google::Ads::GoogleAds::V8::Errors::PolicyViolationDetails]
# Describes an ad policy violation.
# @!attribute [rw] policy_finding_details
# @return [::Google::Ads::GoogleAds::V8::Errors::PolicyFindingDetails]
# Describes policy violation findings.
# @!attribute [rw] quota_error_details
# @return [::Google::Ads::GoogleAds::V8::Errors::QuotaErrorDetails]
# Details on the quota error, including the scope (account or developer), the
# rate bucket name and the retry delay.
# @!attribute [rw] resource_count_details
# @return [::Google::Ads::GoogleAds::V8::Errors::ResourceCountDetails]
# Details for a resource count limit exceeded error.
class ErrorDetails
# Generated protobuf message class carrying the optional detail payloads
# documented above (policy, quota, resource-count details).
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Error returned as part of a mutate response.
# This error indicates single policy violation by some text
# in one of the fields.
# @!attribute [rw] external_policy_description
# @return [::String]
# Human readable description of policy violation.
# @!attribute [rw] key
# @return [::Google::Ads::GoogleAds::V8::Common::PolicyViolationKey]
# Unique identifier for this violation.
# If policy is exemptible, this key may be used to request exemption.
# @!attribute [rw] external_policy_name
# @return [::String]
# Human readable name of the policy.
# @!attribute [rw] is_exemptible
# @return [::Boolean]
# Whether user can file an exemption request for this violation.
class PolicyViolationDetails
# Generated protobuf message class describing a single policy violation,
# including the exemption key and whether an exemption may be requested.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Error returned as part of a mutate response.
# This error indicates one or more policy findings in the fields of a
# resource.
# @!attribute [rw] policy_topic_entries
# @return [::Array<::Google::Ads::GoogleAds::V8::Common::PolicyTopicEntry>]
# The list of policy topics for the resource. Contains the PROHIBITED or
# FULLY_LIMITED policy topic entries that prevented the resource from being
# saved (among any other entries the resource may also have).
class PolicyFindingDetails
# Generated protobuf message class listing the policy topic entries that
# blocked the resource from being saved.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Additional quota error details when there is QuotaError.
# @!attribute [rw] rate_scope
# @return [::Google::Ads::GoogleAds::V8::Errors::QuotaErrorDetails::QuotaRateScope]
# The rate scope of the quota limit.
# @!attribute [rw] rate_name
# @return [::String]
# The high level description of the quota bucket.
# Examples are "Get requests for standard access" or "Requests per account".
# @!attribute [rw] retry_delay
# @return [::Google::Protobuf::Duration]
# Backoff period that customers should wait before sending next request.
class QuotaErrorDetails
# Generated protobuf message class; rate_scope/rate_name/retry_delay
# accessors are supplied by the proto descriptor.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# Enum of possible scopes that quota buckets belong to.
module QuotaRateScope
# Unspecified enum
UNSPECIFIED = 0
# Used for return value only. Represents value unknown in this version.
UNKNOWN = 1
# Per customer account quota
ACCOUNT = 2
# Per project or DevToken quota
DEVELOPER = 3
end
end
# Error details returned when a resource count limit was exceeded.
# @!attribute [rw] enclosing_id
# @return [::String]
# The ID of the resource whose limit was exceeded.
# External customer ID if the limit is for a customer.
# @!attribute [rw] enclosing_resource
# @return [::String]
# The name of the resource (Customer, Campaign etc.) whose limit was
# exceeded.
# @!attribute [rw] limit
# @return [::Integer]
# The limit which was exceeded.
# @!attribute [rw] limit_type
# @return [::Google::Ads::GoogleAds::V8::Enums::ResourceLimitTypeEnum::ResourceLimitType]
# The resource limit type which was exceeded.
# @!attribute [rw] existing_count
# @return [::Integer]
# The count of existing entities.
class ResourceCountDetails
# Generated protobuf message class describing which resource's count
# limit was exceeded, the limit value, and the existing count.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
end
end
end
end
|
<filename>local/in/dhis-mobile/dhis-service-mobile/src/main/java/org/hisp/dhis/mobile/api/DefaultMobileImportService.java
/*
* Copyright (c) 2004-2007, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hisp.dhis.mobile.api;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.amplecode.quick.BatchHandlerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hisp.dhis.dataelement.DataElement;
import org.hisp.dhis.dataelement.DataElementCategoryOptionCombo;
import org.hisp.dhis.dataelement.DataElementCategoryService;
import org.hisp.dhis.dataelement.DataElementService;
import org.hisp.dhis.datavalue.DataValue;
import org.hisp.dhis.datavalue.DataValueService;
import org.hisp.dhis.external.location.LocationManager;
import org.hisp.dhis.mobile.SmsService;
import org.hisp.dhis.organisationunit.OrganisationUnit;
import org.hisp.dhis.organisationunit.OrganisationUnitService;
import org.hisp.dhis.period.DailyPeriodType;
import org.hisp.dhis.period.MonthlyPeriodType;
import org.hisp.dhis.period.Period;
import org.hisp.dhis.period.PeriodService;
import org.hisp.dhis.period.PeriodType;
import org.hisp.dhis.period.WeeklyPeriodType;
import org.hisp.dhis.period.YearlyPeriodType;
import org.hisp.dhis.user.User;
import org.hisp.dhis.user.UserCredentials;
import org.hisp.dhis.user.UserStore;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.support.rowset.SqlRowSet;
import org.springframework.transaction.annotation.Transactional;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
/**
 * Default implementation of {@link MobileImportService}: imports data values,
 * ANM registrations and ANM queries received as XML files over SMS into DHIS2.
 * Collaborators are injected via the setters below (Spring-style setter injection).
 */
public class DefaultMobileImportService
implements MobileImportService
{
// Class-wide logger; prefer this over System.out for all diagnostics.
private static final Log LOG = LogFactory.getLog( DefaultMobileImportService.class );
// -------------------------------------------------------------------------
// Dependencies
// -------------------------------------------------------------------------
// Sends status SMS messages back to reporters.
private SendSMSService sendSMSService;
public void setSendSMSService( SendSMSService sendSMSService )
{
this.sendSMSService = sendSMSService;
}
// Stores incoming ANM query messages for later processing.
private ReceiveSMSService receiveSMSService;
public void setReceiveSMSService( ReceiveSMSService receiveSMSService )
{
this.receiveSMSService = receiveSMSService;
}
// Reads raw messages from the SMS gateway.
// NOTE(review): package-private, unlike the other fields — presumably meant to be private; confirm.
SmsService smsService;
public void setSmsService( SmsService smsService )
{
this.smsService = smsService;
}
// Used for direct SQL access to the datavalue/period tables.
private JdbcTemplate jdbcTemplate;
public void setJdbcTemplate( JdbcTemplate jdbcTemplate )
{
this.jdbcTemplate = jdbcTemplate;
}
// Resolves import file locations under the DHIS2 home directory.
private LocationManager locationManager;
public void setLocationManager( LocationManager locationManager )
{
this.locationManager = locationManager;
}
// Looks up users and credentials by phone number / id.
private UserStore userStore;
public void setUserStore( UserStore userStore )
{
this.userStore = userStore;
}
// Period lookup and creation.
private PeriodService periodService;
public void setPeriodService( PeriodService periodService )
{
this.periodService = periodService;
}
// Data element lookup by id.
private DataElementService dataElementService;
public void setDataElementService( DataElementService dataElementService )
{
this.dataElementService = dataElementService;
}
// Existing data value lookup (used to decide INSERT vs UPDATE).
private DataValueService dataValueService;
public void setDataValueService( DataValueService dataValueService )
{
this.dataValueService = dataValueService;
}
// Category option combo lookup by id.
private DataElementCategoryService dataElementCategoryService;
public void setDataElementCategoryService( DataElementCategoryService dataElementCategoryService )
{
this.dataElementCategoryService = dataElementCategoryService;
}
// Organisation unit (facility) lookup and update.
private OrganisationUnitService organisationUnitService;
public void setOrganisationUnitService( OrganisationUnitService organisationUnitService )
{
this.organisationUnitService = organisationUnitService;
}
// NOTE(review): injected but never used in the visible methods; kept for API compatibility.
private BatchHandlerFactory batchHandlerFactory;
public void setBatchHandlerFactory( BatchHandlerFactory batchHandlerFactory )
{
this.batchHandlerFactory = batchHandlerFactory;
}
// -------------------------------------------------------------------------
// Parameters
// -------------------------------------------------------------------------
// Username (or "[unknown]-<phone>") recorded on stored data values.
// NOTE(review): mutable instance state shared across imports — not thread-safe; confirm single-threaded use.
private String storedBy;
// -------------------------------------------------------------------------
// Services
// -------------------------------------------------------------------------
@Override
public void readAllMessages()
{
    // Delegate to the SMS service, which drains every pending inbound message.
    smsService.readAllMessages();
    // Fixed: was System.out.println — use the class logger like the rest of the service.
    LOG.info( "Message reading done" );
}
@Override
public User getUserInfo( String mobileNumber )
{
    // Look up every user registered under this phone number; the store may
    // return null or an empty collection when the number is unknown.
    Collection<User> candidates = userStore.getUsersByPhoneNumber( mobileNumber );
    if ( candidates == null || candidates.isEmpty() )
    {
        return null;
    }
    // Several users can share a number; keep the store's first match.
    return candidates.iterator().next();
}
/**
 * Resolves the {@link Period} matching the given start date and numeric period
 * type code ("1" daily, "2" weekly, "3" monthly, "6" yearly). Returns an
 * existing period when one starts on {@code startDate}, otherwise creates,
 * persists and returns a new one.
 *
 * @param startDate  period start date in yyyy-MM-dd form
 * @param periodType numeric period type code from the import file
 * @throws Exception on an unsupported period type code or an unparsable date
 */
@Override
public Period getPeriodInfo( String startDate, String periodType ) throws Exception
{
    SimpleDateFormat dateFormat = new SimpleDateFormat( "yyyy-MM-dd" );
    PeriodType pt;
    if ( "1".equals( periodType ) )
    {
        pt = new DailyPeriodType();
    }
    else if ( "2".equals( periodType ) )
    {
        pt = new WeeklyPeriodType();
    }
    else if ( "3".equals( periodType ) )
    {
        pt = new MonthlyPeriodType();
    }
    else if ( "6".equals( periodType ) )
    {
        pt = new YearlyPeriodType();
    }
    else
    {
        // Fixed: the original left pt/periods null here and crashed with an
        // unhelpful NullPointerException; fail with an explicit message instead.
        throw new Exception( "Unsupported period type code: " + periodType );
    }
    // Reuse an existing period that starts on the requested date, if any.
    for ( Period period : periodService.getPeriodsByPeriodType( pt ) )
    {
        if ( dateFormat.format( period.getStartDate() ).equalsIgnoreCase( startDate ) )
        {
            return period;
        }
    }
    // No match: create and persist a new period for that start date.
    // Fixed: the original called periodService.addPeriod() again after
    // reloadPeriodForceAdd(), which already persists the period when missing.
    Period period = pt.createPeriod( dateFormat.parse( startDate ) );
    return reloadPeriodForceAdd( period );
}
// Re-fetches the persistent Period matching this transient period's
// (start date, end date, type) key, or null when none is stored.
private final Period reloadPeriod( Period period )
{
    Date start = period.getStartDate();
    Date end = period.getEndDate();
    PeriodType type = period.getPeriodType();
    return periodService.getPeriod( start, end, type );
}
// Returns the stored counterpart of the given period, persisting the given
// instance first when no stored counterpart exists yet.
private final Period reloadPeriodForceAdd( Period period )
{
    Period storedPeriod = reloadPeriod( period );
    if ( storedPeriod != null )
    {
        return storedPeriod;
    }
    periodService.addPeriod( period );
    return period;
}
/**
 * Parses a pending mobile-import XML file into a {@link MobileImportParameters}
 * bean. Reads the sender phone number, period start date, timestamp, period
 * type and form type; then, depending on form type, the data values, ANM name
 * or ANM query. Parse failures are logged and a (possibly partially populated)
 * bean is still returned; a null document yields null.
 *
 * @param fileName name of the file under &lt;home&gt;/mi/pending
 */
@Override
public MobileImportParameters getParametersFromXML( String fileName )
    throws Exception
{
    File importFile = locationManager.getFileForReading( fileName, "mi", "pending" );
    MobileImportParameters mobileImportParameters = new MobileImportParameters();
    try
    {
        DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
        DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder();
        Document doc = docBuilder.parse( importFile );
        if ( doc == null )
        {
            return null;
        }
        // Each simple field is the text of the first element with that tag.
        mobileImportParameters.setMobileNumber( getFirstTagText( doc, "source" ) );
        mobileImportParameters.setStartDate( getFirstTagText( doc, "period" ) );
        mobileImportParameters.setSmsTime( getFirstTagText( doc, "timeStamp" ) );
        mobileImportParameters.setPeriodType( getFirstTagText( doc, "periodType" ) );
        String formType = getFirstTagText( doc, "formtype" );
        mobileImportParameters.setFormType( formType );
        if ( formType.equalsIgnoreCase( MobileImportParameters.FORM_TYPE_DATAFORM ) )
        {
            mobileImportParameters.setDataValues( readDataValues( doc ) );
        }
        else if ( formType.equalsIgnoreCase( MobileImportParameters.FORM_TYPE_ANMREGFORM ) )
        {
            mobileImportParameters.setAnmName( getFirstTagText( doc, "anmname" ) );
        }
        else if ( formType.equalsIgnoreCase( MobileImportParameters.FORM_TYPE_ANMQUERYFORM ) )
        {
            mobileImportParameters.setAnmQuery( getFirstTagText( doc, "anmquery" ) );
        }
    }
    catch ( SAXParseException err )
    {
        // Fixed: was System.out.println — route parser diagnostics through the class logger.
        LOG.error( "** Parsing error" + ", line " + err.getLineNumber() + ", uri " + err.getSystemId() );
        LOG.error( " " + err.getMessage() );
    }
    catch ( SAXException e )
    {
        Exception x = e.getException();
        LOG.error( "XML parse failure for " + fileName, ( x == null ) ? e : x );
    }
    catch ( Throwable t )
    {
        LOG.error( "Unexpected failure parsing " + fileName, t );
    }
    return mobileImportParameters;
}

// Returns the trimmed text of the first child text node of the first element
// named tagName. Intentionally not null-safe: a missing tag throws NPE, which
// the caller's catch block treats as a malformed file (original behavior).
private static String getFirstTagText( Document doc, String tagName )
{
    NodeList nodes = doc.getElementsByTagName( tagName );
    Element element = (Element) nodes.item( 0 );
    return element.getChildNodes().item( 0 ).getNodeValue().trim();
}

// Same as getFirstTagText but scoped to an element, and without trimming —
// data values were not trimmed in the original code either.
private static String getFirstChildText( Element parent, String tagName )
{
    NodeList nodes = parent.getElementsByTagName( tagName );
    Element element = (Element) nodes.item( 0 );
    return element.getChildNodes().item( 0 ).getNodeValue();
}

// Collects <dataValue><dataElement>id</dataElement><value>v</value></dataValue>
// entries into a map of dataElement id -> raw value.
private static Map<String, String> readDataValues( Document doc )
{
    Map<String, String> dataValues = new HashMap<String, String>();
    NodeList listOfDataValues = doc.getElementsByTagName( "dataValue" );
    for ( int i = 0; i < listOfDataValues.getLength(); i++ )
    {
        Node dataValueNode = listOfDataValues.item( i );
        if ( dataValueNode.getNodeType() != Node.ELEMENT_NODE )
        {
            continue;
        }
        Element dataValueElement = (Element) dataValueNode;
        String dataElementId = getFirstChildText( dataValueElement, "dataElement" ).trim();
        String value = getFirstChildText( dataValueElement, "value" );
        dataValues.put( dataElementId, value );
    }
    return dataValues;
}
/**
 * Lists the file names waiting in the pending-import directory
 * ({@code $DHIS2_HOME/mi/pending}, falling back to {@code ~/dhis/mi/pending}).
 * Returns an empty list when the directory is missing or unreadable.
 */
@Override
public List<String> getImportFiles()
{
    List<String> fileNames = new ArrayList<String>();
    try
    {
        String importFolderPath = System.getProperty( "user.home" ) + File.separator + "dhis" + File.separator
            + "mi" + File.separator + "pending";
        String newpath = System.getenv( "DHIS2_HOME" );
        if ( newpath != null )
        {
            importFolderPath = newpath + File.separator + "mi" + File.separator + "pending";
        }
        // Fixed: File.list() returns null for a non-existent/unreadable directory;
        // the original passed that null to Arrays.asList() and threw NPE.
        String[] files = new File( importFolderPath ).list();
        if ( files != null )
        {
            fileNames = Arrays.asList( files );
        }
    }
    catch ( Exception e )
    {
        // Fixed: was System.out.println — use the class logger.
        LOG.error( e.getMessage(), e );
    }
    return fileNames;
}
/**
 * Copies {@code source} to {@code dest} (creating {@code dest} if needed).
 *
 * @return 1 on success, -1 on any copy failure
 * @throws IOException if closing a stream fails
 */
public int moveFile( File source, File dest )
    throws IOException
{
    if ( !dest.exists() )
    {
        dest.createNewFile();
    }
    InputStream in = null;
    OutputStream out = null;
    try
    {
        in = new FileInputStream( source );
        out = new FileOutputStream( dest );
        byte[] buf = new byte[1024];
        int len;
        while ( ( len = in.read( buf ) ) > 0 )
        {
            out.write( buf, 0, len );
        }
    }
    catch ( Exception e )
    {
        return -1;
    }
    finally
    {
        // Fixed: if new FileInputStream(source) threw, `in` was null and the
        // original finally block crashed with NullPointerException; also make
        // sure out.close() still runs when in.close() fails.
        try
        {
            if ( in != null )
            {
                in.close();
            }
        }
        finally
        {
            if ( out != null )
            {
                out.close();
            }
        }
    }
    return 1;
}
/**
 * Moves a successfully imported file from {@code mi/pending} to
 * {@code mi/completed} under $DHIS2_HOME (or ~/dhis when the variable is unset).
 * The source file is deleted only after a successful copy.
 */
@Override
public void moveImportedFile( String fileName )
{
    try
    {
        // Base directory: $DHIS2_HOME if set, else ~/dhis (same resolution as the original).
        String basePath = System.getenv( "DHIS2_HOME" );
        if ( basePath == null )
        {
            basePath = System.getProperty( "user.home" ) + File.separator + "dhis";
        }
        String miPath = basePath + File.separator + "mi" + File.separator;
        File sourceFile = new File( miPath + "pending" + File.separator + fileName );
        File destFile = new File( miPath + "completed" + File.separator + fileName );
        if ( moveFile( sourceFile, destFile ) == 1 )
        {
            sourceFile.delete();
        }
    }
    catch ( Exception e )
    {
        // Fixed: was System.out.println — use the class logger.
        LOG.error( e.getMessage(), e );
    }
}
/**
 * Moves a file that failed to import from {@code mi/pending} to
 * {@code mi/bounced} under $DHIS2_HOME (or ~/dhis when the variable is unset).
 * The source file is deleted only after a successful copy.
 */
@Override
public void moveFailedFile( String fileName )
{
    try
    {
        // Base directory: $DHIS2_HOME if set, else ~/dhis (same resolution as the original).
        String basePath = System.getenv( "DHIS2_HOME" );
        if ( basePath == null )
        {
            basePath = System.getProperty( "user.home" ) + File.separator + "dhis";
        }
        String miPath = basePath + File.separator + "mi" + File.separator;
        File sourceFile = new File( miPath + "pending" + File.separator + fileName );
        File destFile = new File( miPath + "bounced" + File.separator + fileName );
        if ( moveFile( sourceFile, destFile ) == 1 )
        {
            sourceFile.delete();
        }
    }
    catch ( Exception e )
    {
        // Fixed: was System.out.println — use the class logger.
        LOG.error( e.getMessage(), e );
    }
}
/**
 * Imports every file currently in the pending directory and records (or
 * updates) an outgoing status SMS for each sender. The sender id is the file
 * name without its .xml extension.
 */
@Override
@Transactional
public void importPendingFiles()
{
    try
    {
        List<String> fileNames = new ArrayList<String>( getImportFiles() );
        for ( String importFile : fileNames )
        {
            String statusMsg = importXMLFile( importFile );
            String senderInfo = importFile.replace( ".xml", "" );
            SendSMS sendSMS = sendSMSService.getSendSMS( senderInfo );
            if ( sendSMS == null )
            {
                // First status for this sender: create a new outgoing record.
                sendSMS = new SendSMS( senderInfo, statusMsg );
                sendSMSService.addSendSMS( sendSMS );
            }
            else
            {
                // Sender already has a pending status: overwrite its message.
                sendSMS.setSendingMessage( statusMsg );
                sendSMSService.updateSendSMS( sendSMS );
            }
        }
    }
    catch ( Exception e )
    {
        // Fixed: was System.out.println — use the class logger so failures are not lost.
        LOG.error( e.getMessage(), e );
    }
}
/**
 * Handles an ANM (nurse) registration form: stores the ANM's name as the
 * comment on the sender's first organisation unit. Invalid input is moved to
 * the bounced folder and an explanatory status message is returned for SMS.
 *
 * @return user-facing status text to send back to the reporter
 */
@Transactional
public String importANMRegData( String importFile, MobileImportParameters mobImportParameters )
{
    try
    {
        User curUser = getUserInfo( mobImportParameters.getMobileNumber() );
        if ( curUser == null )
        {
            LOG.error( importFile + " Phone number not found... Sending to Bounced" );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        // Fixed: the original used an empty then-branch ( if (ok) {} else {fail} );
        // express the guard directly.
        UserCredentials userCredentials = userStore.getUserCredentials( curUser );
        if ( userCredentials == null
            || !mobImportParameters.getMobileNumber().equals( curUser.getPhoneNumber() ) )
        {
            LOG.error( " Import File Contains Unrecognised Phone Numbers : "
                + mobImportParameters.getMobileNumber() );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        List<OrganisationUnit> sources = new ArrayList<OrganisationUnit>( curUser.getOrganisationUnits() );
        if ( sources.isEmpty() )
        {
            LOG.error( " No User Exists with corresponding Phone Numbers : "
                + mobImportParameters.getMobileNumber() );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        OrganisationUnit source = sources.get( 0 );
        String anmName = mobImportParameters.getAnmName();
        if ( source == null || anmName == null || anmName.trim().equalsIgnoreCase( "" ) )
        {
            LOG.error( importFile + " Import File is not Properly Formated" );
            moveFailedFile( importFile );
            return "Data not Received Properly, Please send again";
        }
        // The ANM name is persisted in the org unit's comment field.
        source.setComment( anmName );
        organisationUnitService.updateOrganisationUnit( source );
        moveImportedFile( importFile );
        // NOTE(review): "REGISTERD" misspelling kept byte-for-byte — it is a
        // user-facing SMS string other components may match on.
        return "YOUR NAME IS REGISTERD SUCCESSFULLY";
    }
    catch ( Exception e )
    {
        // Fixed: printStackTrace + empty finally removed; log with the cause attached.
        LOG.error( e.getMessage(), e );
        LOG.error( "Exception caused in importing... Moving to Bounced" );
        moveFailedFile( importFile );
        return "Data not Received Properly, Please send again";
    }
}
/**
 * Handles an ANM query form: validates the sender and stores the query text
 * as a {@link ReceiveSMS} record for later processing. Invalid input is moved
 * to the bounced folder.
 *
 * @return user-facing status text to send back to the reporter
 */
@Transactional
public String importANMQueryData( String importFile, MobileImportParameters mobImportParameters )
{
    try
    {
        User curUser = getUserInfo( mobImportParameters.getMobileNumber() );
        if ( curUser == null )
        {
            LOG.error( importFile + " Phone number not found... Sending to Bounced" );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        // Fixed: the original used an empty then-branch ( if (ok) {} else {fail} );
        // express the guard directly.
        UserCredentials userCredentials = userStore.getUserCredentials( curUser );
        if ( userCredentials == null
            || !mobImportParameters.getMobileNumber().equals( curUser.getPhoneNumber() ) )
        {
            LOG.error( " Import File Contains Unrecognised Phone Numbers : "
                + mobImportParameters.getMobileNumber() );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        List<OrganisationUnit> sources = new ArrayList<OrganisationUnit>( curUser.getOrganisationUnits() );
        if ( sources.isEmpty() )
        {
            LOG.error( " No User Exists with corresponding Phone Numbers : "
                + mobImportParameters.getMobileNumber() );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        String anmQuery = mobImportParameters.getAnmQuery();
        if ( anmQuery == null || anmQuery.trim().equalsIgnoreCase( "" ) )
        {
            LOG.error( importFile + " Import File is not Properly Formated" );
            moveFailedFile( importFile );
            return "Data not Received Properly, Please send again";
        }
        ReceiveSMS receiveSMS = new ReceiveSMS( importFile, anmQuery );
        receiveSMSService.addReceiveSMS( receiveSMS );
        moveImportedFile( importFile );
        // NOTE(review): "REGISTERD" misspelling kept byte-for-byte — user-facing SMS string.
        return "YOUR Query IS REGISTERD SUCCESSFULLY";
    }
    catch ( Exception e )
    {
        // Fixed: printStackTrace + empty finally removed; log with the cause attached.
        LOG.error( e.getMessage(), e );
        LOG.error( "Exception caused in importing... Moving to Bounced" );
        moveFailedFile( importFile );
        return "Data not Received Properly, Please send again";
    }
}
/**
 * Looks up the organisation unit whose registered phone number matches
 * {@code phoneNumber}, or null when there is no match or the query fails.
 */
@Transactional
public OrganisationUnit getOrganisationUnitByPhone( String phoneNumber )
{
    try
    {
        // Fixed: the phone number came straight from an SMS and was concatenated
        // into the SQL string — an injection vector. Use a bind parameter instead.
        String query = "SELECT organisationunitid FROM organisationunit WHERE phoneNumber LIKE ?";
        SqlRowSet sqlResultSet = jdbcTemplate.queryForRowSet( query, phoneNumber );
        if ( sqlResultSet != null && sqlResultSet.next() )
        {
            Integer orgUnitId = sqlResultSet.getInt( 1 );
            return organisationUnitService.getOrganisationUnit( orgUnitId );
        }
        return null;
    }
    catch ( Exception e )
    {
        // Fixed: was System.out.println — use the class logger.
        LOG.error( "Exception occurred while getting OrganisationUnit by phone number", e );
        return null;
    }
}
/**
 * Returns the first user assigned to the given organisation unit, or null
 * when no membership row exists or the query fails.
 */
@Transactional
private User getUserbyOrgUnit( int orgUnitId )
{
    try
    {
        // Fixed: build the query with a bind parameter rather than string
        // concatenation, matching the rest of the class's JDBC usage.
        String query = "SELECT userinfoid FROM usermembership WHERE organisationunitid = ?";
        SqlRowSet sqlResultSet = jdbcTemplate.queryForRowSet( query, orgUnitId );
        if ( sqlResultSet != null && sqlResultSet.next() )
        {
            Integer userId = sqlResultSet.getInt( 1 );
            return userStore.getUser( userId );
        }
        return null;
    }
    catch ( Exception e )
    {
        // Fixed: was System.out.println — use the class logger.
        LOG.error( "Exception occurred while getting User by orgunit id", e );
        return null;
    }
}
/**
 * Imports a single pending XML file. ANM registration and query forms are
 * dispatched to their dedicated handlers; data forms have each reported value
 * inserted into (or updated in) the datavalue table. The file is moved to
 * completed/bounced accordingly and a user-facing status string is returned.
 *
 * Map keys are "&lt;dataElementId&gt;.&lt;categoryOptionComboId&gt;".
 *
 * @return status text to send back to the reporting phone
 */
@Override
@Transactional
public String importXMLFile( String importFile )
{
    String importStatus = "";
    try
    {
        MobileImportParameters mobImportParameters = getParametersFromXML( importFile );
        if ( mobImportParameters == null )
        {
            LOG.error( importFile + " Import File is not Properly Formatted" );
            moveFailedFile( importFile );
            return "Data not Received Properly, Please send again";
        }
        // Non-data forms are handled by their dedicated importers.
        String formType = mobImportParameters.getFormType();
        if ( formType.equalsIgnoreCase( MobileImportParameters.FORM_TYPE_ANMREGFORM ) )
        {
            return importANMRegData( importFile, mobImportParameters );
        }
        if ( formType.equalsIgnoreCase( MobileImportParameters.FORM_TYPE_ANMQUERYFORM ) )
        {
            return importANMQueryData( importFile, mobImportParameters );
        }
        OrganisationUnit source = getOrganisationUnitByPhone( mobImportParameters.getMobileNumber() );
        if ( source == null )
        {
            LOG.error( " No Faciliy Exists with corresponding Phone Number : " + mobImportParameters.getMobileNumber() );
            moveFailedFile( importFile );
            return "Phone number is not registered to any facility. Please contact admin";
        }
        // Attribute stored values to the facility's user when one exists.
        User curUser = getUserbyOrgUnit( source.getId() );
        if ( curUser == null )
        {
            LOG.error( " No User Exists with corresponding Facility : " + mobImportParameters.getMobileNumber() );
            storedBy = "[unknown]-" + mobImportParameters.getMobileNumber();
        }
        else
        {
            UserCredentials userCredentials = userStore.getUserCredentials( curUser );
            storedBy = userCredentials.getUsername();
        }
        Period period = getPeriodInfo( mobImportParameters.getStartDate(), mobImportParameters.getPeriodType() );
        SimpleDateFormat dateFormat = new SimpleDateFormat( "yyyy-MM-dd" );
        SimpleDateFormat monthFormat = new SimpleDateFormat( "MMM-yy" );
        Date timeStamp = dateFormat.parse( mobImportParameters.getSmsTime() );
        long t = ( timeStamp == null ) ? new Date().getTime() : timeStamp.getTime();
        java.sql.Date lastUpdatedDate = new java.sql.Date( t );
        // Fixed: the original passed a possibly-null map straight into the
        // HashMap copy constructor and relied on the resulting NPE being caught.
        Map<String, String> dataValueMap = ( mobImportParameters.getDataValues() == null ) ? null
            : new HashMap<String, String>( mobImportParameters.getDataValues() );
        if ( dataValueMap == null || dataValueMap.size() <= 0 )
        {
            LOG.error( "dataValue map is null" );
        }
        if ( period == null || timeStamp == null || dataValueMap == null || dataValueMap.size() <= 0 )
        {
            LOG.error( importFile + " Import File is not Properly Formated" );
            moveFailedFile( importFile );
            return "Data not Received Properly, Please send again";
        }
        for ( String key : dataValueMap.keySet() )
        {
            String[] parts = key.split( "\\." );
            DataElement dataElement = dataElementService.getDataElement( Integer.valueOf( parts[0] ) );
            DataElementCategoryOptionCombo optionCombo =
                dataElementCategoryService.getDataElementCategoryOptionCombo( Integer.valueOf( parts[1] ) );
            String value = String.valueOf( dataValueMap.get( key ) );
            // Blank values mean "clear"; normalize to null.
            if ( value.trim().equalsIgnoreCase( "" ) )
            {
                value = null;
            }
            DataValue dataValue = dataValueService.getDataValue( source, dataElement, period, optionCombo );
            if ( dataValue == null )
            {
                if ( value != null )
                {
                    // Fixed: values were concatenated into a multi-row INSERT
                    // string (SQL injection via the SMS payload). Use bind
                    // parameters; one statement per row has the same DB effect.
                    jdbcTemplate.update(
                        "INSERT INTO datavalue (dataelementid, periodid, sourceid, categoryoptioncomboid, value, storedby, lastupdated ) VALUES (?, ?, ?, ?, ?, ?, ?)",
                        dataElement.getId(), period.getId(), source.getId(), optionCombo.getId(),
                        value, storedBy, lastUpdatedDate );
                }
            }
            else
            {
                // Fixed: parameterized; also, a null value previously became the
                // literal string 'null' through string concatenation — it is now
                // stored as SQL NULL.
                jdbcTemplate.update(
                    "UPDATE datavalue SET value = ?, storedby = ?, lastupdated = ? WHERE dataelementid = ? AND periodid = ? AND sourceid = ? AND categoryoptioncomboid = ?",
                    value, storedBy, lastUpdatedDate,
                    dataElement.getId(), period.getId(), source.getId(), optionCombo.getId() );
            }
        }
        moveImportedFile( importFile );
        // Acknowledge with wording that matches the reporting frequency.
        String periodTypeName = period.getPeriodType().getName();
        if ( periodTypeName.equalsIgnoreCase( "monthly" ) )
        {
            importStatus = "THANK YOU FOR SENDING MONTHLY REPORT FOR " + monthFormat.format( period.getStartDate() );
        }
        else if ( periodTypeName.equalsIgnoreCase( "daily" ) )
        {
            importStatus = "THANK YOU FOR SENDING DAILY REPORT FOR " + dateFormat.format( period.getStartDate() );
        }
        else
        {
            importStatus = "THANK YOU FOR SENDING REPORT FOR " + dateFormat.format( period.getStartDate() ) + " : "
                + dateFormat.format( period.getEndDate() );
        }
    }
    catch ( Exception e )
    {
        // Fixed: printStackTrace + empty finally removed; log with the cause attached.
        LOG.error( e.getMessage(), e );
        LOG.error( "Exception caused in importing... Moving to Bounced" );
        importStatus = "Data not Received Properly, Please send again";
        moveFailedFile( importFile );
    }
    return importStatus;
}
/**
 * Processes an interactive SMS of the form
 * {@code ...#dataElementId#periodId#comboId#value} from {@code sender}, inserting
 * or updating the corresponding data value. The stored timestamp is the
 * date-only part of {@code sendTime} (same truncation as the original code,
 * which parsed the formatted string back with a yyyy-MM-dd pattern).
 */
public void importInteractionMessage( String smsText, String sender, Date sendTime )
{
    try
    {
        String[] smstext = smsText.split( "#" );
        System.out.println( "original text: " + smsText );
        String dataelementid = smstext[1];
        String periodid = smstext[2];
        String comboid = smstext[3];
        String value = smstext[4];
        OrganisationUnit source = getOrganisationUnitByPhone( sender );
        System.out.println( "-----------------source--------------" + source );
        User curUser = getUserbyOrgUnit( source.getId() );
        if ( curUser == null )
        {
            LOG.error( " No User Exists with corresponding Facility : " + sender );
            storedBy = "[unknown]-" + sender;
        }
        else
        {
            UserCredentials userCredentials = userStore.getUserCredentials( curUser );
            storedBy = userCredentials.getUsername();
        }
        DataElement dataElement = dataElementService.getDataElement( dataelementid );
        DataElementCategoryOptionCombo optionCombo =
            dataElementCategoryService.getDataElementCategoryOptionCombo( comboid );
        Period period = periodService.getPeriod( Integer.parseInt( periodid ) );
        DataValue dataValue = dataValueService.getDataValue( source, dataElement, period, optionCombo );
        // Fixed: ~40 lines of manual zero-padding with deprecated Date getters
        // (getDate()/getMonth()/...) replaced by a format pattern producing the
        // identical "yyyy-MM-dd_HH-mm-ss" string.
        String sendtime = new SimpleDateFormat( "yyyy-MM-dd_HH-mm-ss" ).format( sendTime );
        System.out.println( "Time: " + sendtime );
        // Lenient parse with a yyyy-MM-dd pattern keeps only the date part —
        // preserved from the original behavior.
        SimpleDateFormat dateFormat = new SimpleDateFormat( "yyyy-MM-dd" );
        Date timeStamp = dateFormat.parse( sendtime );
        long t = ( timeStamp == null ) ? new Date().getTime() : timeStamp.getTime();
        java.sql.Date lastUpdatedDate = new java.sql.Date( t );
        System.out.println( "( " + Integer.parseInt( dataelementid ) + ", " + period.getId() + ", "
            + source.getId() + ", " + Integer.parseInt( comboid ) + ", '" + value + "', '" + storedBy + "', '"
            + lastUpdatedDate + "' ) " );
        if ( dataValue == null )
        {
            if ( value != null )
            {
                // Fixed: the value came straight from the SMS and was concatenated
                // into the INSERT string (SQL injection). Use bind parameters.
                jdbcTemplate.update(
                    "INSERT INTO datavalue (dataelementid, periodid, sourceid, categoryoptioncomboid, value, storedby, lastupdated ) VALUES (?, ?, ?, ?, ?, ?, ?)",
                    Integer.parseInt( dataelementid ), period.getId(), source.getId(),
                    Integer.parseInt( comboid ), value, storedBy, lastUpdatedDate );
            }
        }
        else
        {
            dataValue.setValue( value );
            dataValue.setTimestamp( timeStamp );
            dataValue.setStoredBy( storedBy );
            dataValueService.updateDataValue( dataValue );
        }
    }
    catch ( Exception e )
    {
        // Fixed: printStackTrace removed; log with the cause attached.
        LOG.error( e.getMessage(), e );
        LOG.error( "Interactive message not processed" );
    }
}
/**
 * For an external client submission: updates the data value in place when a
 * row already exists (returning "update"), otherwise returns a
 * "( v1, v2, ... ), " VALUES fragment for the caller to splice into a batch
 * INSERT via {@link #insertQueryForExternalClient}. Returns "error" when the
 * value is blank and no row exists.
 *
 * @param timestamp yyyy-MM-dd date string; "now" is used if it cannot be parsed
 */
public String updateInsertBuildQueryForExternalClient( int dataelementid, int comboid, int periodId, int sourceId, String dataValue, String phoneNumber, String timestamp )
{
    SimpleDateFormat dateFormat = new SimpleDateFormat( "yyyy-MM-dd" );
    Date timeStamp = null;
    try
    {
        timeStamp = dateFormat.parse( timestamp );
    }
    catch ( ParseException ex )
    {
        // Fixed: was printStackTrace + a second java.util.logging logger; use the
        // class's commons-logging LOG consistently.
        LOG.error( "Could not parse timestamp '" + timestamp + "'", ex );
    }
    // Fall back to the current time when the timestamp is unusable.
    long t = ( timeStamp == null ) ? new Date().getTime() : timeStamp.getTime();
    java.sql.Date lastUpdatedDate = new java.sql.Date( t );
    User curUser = getUserbyOrgUnit( sourceId );
    if ( curUser == null )
    {
        LOG.error( " No User Exists with corresponding Facility : " + phoneNumber );
        storedBy = "[unknown]-" + phoneNumber;
    }
    else
    {
        UserCredentials userCredentials = userStore.getUserCredentials( curUser );
        storedBy = userCredentials.getUsername();
    }
    // Fixed: SELECT and UPDATE were built by string concatenation (SQL
    // injection via dataValue); both now use bind parameters.
    String query = "SELECT value FROM datavalue WHERE dataelementid = ? AND categoryoptioncomboid = ? AND periodid = ? AND sourceid = ?";
    SqlRowSet sqlResultSet1 = jdbcTemplate.queryForRowSet( query, dataelementid, comboid, periodId, sourceId );
    if ( sqlResultSet1 != null && sqlResultSet1.next() )
    {
        jdbcTemplate.update(
            "UPDATE datavalue SET value = ?, storedby = ?, lastupdated = ? WHERE dataelementid = ? AND periodid = ? AND sourceid = ? AND categoryoptioncomboid = ?",
            dataValue, storedBy, lastUpdatedDate, dataelementid, periodId, sourceId, comboid );
        return "update";
    }
    if ( dataValue != null && !dataValue.trim().equalsIgnoreCase( "" ) )
    {
        // NOTE(review): this fragment is still concatenated because the method's
        // contract is to return a raw VALUES snippet (trailing ", " is stripped by
        // insertQueryForExternalClient). dataValue is embedded unescaped here —
        // flagging as a residual injection risk; fixing it requires an interface change.
        return "( " + dataelementid + ", " + periodId + ", " + sourceId + ", " + comboid + ", '" + dataValue + "', '" + storedBy + "', '" + lastUpdatedDate + "' ), ";
    }
    return "error";
}
// Executes a batch INSERT whose VALUES fragments were accumulated by
// updateInsertBuildQueryForExternalClient. The incoming string ends with a
// trailing ", " separator, which is stripped before execution.
public void insertQueryForExternalClient( String insertQuery )
{
    System.out.append( insertQuery );
    String trimmedQuery = insertQuery.substring( 0, insertQuery.length() - 2 );
    jdbcTemplate.update( trimmedQuery );
}
/**
 * Returns the id of the period with the given period type and start date,
 * or -1 when no such period exists.
 *
 * @param date         period start date (yyyy-MM-dd, as stored in the period table)
 * @param periodTypeId period type id as stored in the period table
 */
public int queryForPeriod( String date, String periodTypeId )
{
    // Fixed: the original overwrote the injected jdbcTemplate with
    // `new JdbcTemplate()` (no DataSource), so every query failed; it also
    // concatenated the arguments into the SQL and left a junk debug println.
    String query = "select periodid from period where periodtypeid = ? and startdate = ?";
    SqlRowSet sqlResultSet = jdbcTemplate.queryForRowSet( query, periodTypeId, date );
    if ( sqlResultSet != null && sqlResultSet.next() )
    {
        return sqlResultSet.getInt( 1 );
    }
    return -1;
}
}
|
module.exports = (sequelize, DataTypes) => {
const accountCode = sequelize.define('accountCode', {
accountCodeId: { type: DataTypes.INTEGER, primaryKey: true, autoIncrement: true },
schemeCodeId: DataTypes.INTEGER,
lineDescription: DataTypes.STRING,
accountCodeAP: DataTypes.STRING,
accountCodeAR: DataTypes.STRING
},
{
tableName: 'accountCodes',
freezeTableName: true,
timestamps: false
})
accountCode.associate = function (models) {
accountCode.belongsTo(models.schemeCode, {
foreignKey: 'schemeCodeId',
as: 'schemeCode'
})
}
return accountCode
}
|
<reponame>Polidea/SiriusObfuscator
//===-- CompilerDecl.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef liblldb_CompilerDecl_h_
#define liblldb_CompilerDecl_h_
#include "lldb/Core/ConstString.h"
#include "lldb/Symbol/CompilerType.h"
#include "lldb/lldb-private.h"
namespace lldb_private {
/// A lightweight value type pairing a TypeSystem with an opaque declaration
/// pointer owned by that type system. Invalid (default-constructed or
/// cleared) instances have both members null.
class CompilerDecl {
public:
//----------------------------------------------------------------------
// Constructors and Destructors
//----------------------------------------------------------------------
/// Constructs an invalid decl (both members null).
CompilerDecl() : m_type_system(nullptr), m_opaque_decl(nullptr) {}
/// Wraps an opaque decl pointer belonging to \p type_system.
CompilerDecl(TypeSystem *type_system, void *decl)
: m_type_system(type_system), m_opaque_decl(decl) {}
~CompilerDecl() {}
//----------------------------------------------------------------------
// Tests
//----------------------------------------------------------------------
/// True when this decl is valid (see IsValid()).
explicit operator bool() const { return IsValid(); }
/// Strict weak ordering: compares type systems first, then decl pointers,
/// so CompilerDecl can be used as a key in ordered containers.
bool operator<(const CompilerDecl &rhs) const {
if (m_type_system == rhs.m_type_system)
return m_opaque_decl < rhs.m_opaque_decl;
return m_type_system < rhs.m_type_system;
}
/// Valid only when both the type system and the decl pointer are set.
bool IsValid() const {
return m_type_system != nullptr && m_opaque_decl != nullptr;
}
/// True when the underlying type system is the Clang AST (defined out of line).
bool IsClang() const;
//----------------------------------------------------------------------
// Accessors
//----------------------------------------------------------------------
TypeSystem *GetTypeSystem() const { return m_type_system; }
void *GetOpaqueDecl() const { return m_opaque_decl; }
/// Rebinds this object to a (type system, decl) pair; no ownership is taken.
void SetDecl(TypeSystem *type_system, void *decl) {
m_type_system = type_system;
m_opaque_decl = decl;
}
/// Resets to the invalid state.
void Clear() {
m_type_system = nullptr;
m_opaque_decl = nullptr;
}
ConstString GetName() const;
ConstString GetMangledName() const;
CompilerDeclContext GetDeclContext() const;
// If this decl represents a function, return the return type
CompilerType GetFunctionReturnType() const;
// If this decl represents a function, return the number of arguments for the
// function
size_t GetNumFunctionArguments() const;
// If this decl represents a function, return the argument type given a zero
// based argument index
CompilerType GetFunctionArgumentType(size_t arg_idx) const;
private:
// Non-owning; the pointed-to objects outlive this value type — presumably
// owned by the module/AST context; confirm before relying on lifetime.
TypeSystem *m_type_system;
void *m_opaque_decl;
};
// Equality compares both the type system and the opaque decl pointer
// (defined out of line).
bool operator==(const CompilerDecl &lhs, const CompilerDecl &rhs);
bool operator!=(const CompilerDecl &lhs, const CompilerDecl &rhs);
} // namespace lldb_private
#endif // #ifndef liblldb_CompilerDecl_h_
|
<reponame>gdnwxf/netty_stu
package org.xtwy.oldthriftrpc;
import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TSimpleServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.xtwy.thriftrpc.HelloServiceImpl;
import com.hzins.thrift.demo.HelloWorldService;
import com.hzins.thrift.demo.HelloWorldService.Processor;
/**
 * Thrift RPC server backed by a thread pool: blocking socket transport,
 * binary protocol, one worker thread per client connection.
 */
public class ThreadThriftServer {

    /**
     * Starts serving HelloWorldService requests on the given port.
     * This call blocks forever (TServer.serve() does not return).
     */
    public static void startServer(int port) throws Exception {
        TServerSocket serverSocket = new TServerSocket(port);
        TProcessor helloProcessor =
                new Processor<HelloWorldService.Iface>(new HelloServiceImpl());

        TThreadPoolServer.Args serverArgs = new TThreadPoolServer.Args(serverSocket);
        serverArgs.processor(helloProcessor);
        serverArgs.protocolFactory(new TBinaryProtocol.Factory());

        new TThreadPoolServer(serverArgs).serve(); // blocks
    }

    public static void main(String[] args) throws Exception {
        startServer(8080);
    }
}
|
<filename>internal/testdata/lang_ru_decimal_float64.go
package testdata
//nolint:gochecknoglobals
// Test cases for the feminine grammatical gender (Russian decimal wording).
var TestCaseLangRUDecimalFloat64GenderFemale = map[float64]string{
	0.001: "ноль целых одна тысячная",
	2.2: "две целых две десятых",
	3.002: "три целых две тысячных",
	2.54: "две целых пятьдесят четыре сотых",
	0.1: "ноль целых одна десятая",
	2.232: "две целых двести тридцать две тысячных",
	1231: "Одна тысяча двести тридцать одна", // agrees with a feminine noun, e.g. "... подушка" (pillow)
	32.32: "Тридцать две целых тридцать две сотых",
	31.31: "Тридцать одна целая тридцать одна сотая",
	1234567.12345: "Один миллион двести тридцать четыре тысячи пятьсот шестьдесят семь целых двенадцать тысяч триста сорок пять стотысячных",
	3.2: "три целых две десятых",
	1.1: "Одна целая одна десятая",
	1.01: "Одна целая одна сотая",
	1.001: "Одна целая одна тысячная",
	1.0001: "Одна целая одна десятитысячная",
	1.00001: "Одна целая одна стотысячная",
	1.000001: "Одна целая одна миллионная",
	1.0000001: "Одна целая одна десятимиллионная",
	1.00000001: "Одна целая одна стомиллионная",
	1.2: "Одна целая две десятых",
	1.02: "Одна целая две сотых",
	1.002: "Одна целая две тысячных",
	1.0002: "Одна целая две десятитысячных",
	2.1: "Две целых одна десятая",
	1.000345: "Одна целая триста сорок пять миллионных",
}

//nolint:gochecknoglobals
// Test cases for the masculine grammatical gender.
var TestCaseLangRUDecimalFloat64GenderMale = map[float64]string{
	32.32: "Тридцать два целых тридцать две сотых", // agrees with a masculine noun, e.g. "... квадратных метра" (square meters)
	1231: "Одна тысяча двести тридцать один", // agrees with a masculine noun, e.g. "... карандаш" (pencil)
	31.31: "Тридцать один целых тридцать одна сотая",
	3.2: "три целых две десятых", // e.g. "... карандаша" (of a pencil)
	0.1: "Ноль целых одна десятая",
}

//nolint:gochecknoglobals
// Test cases for the neuter grammatical gender.
var TestCaseLangRUDecimalFloat64GenderNeuter = map[float64]string{
	1231: "Одна тысяча двести тридцать одно", // agrees with a neuter noun, e.g. "... сообщение" (message)
	31.31: "Тридцать одно целое тридцать одна сотая",
	3.2: "три целых две десятых", // e.g. "... сообщения" (of a message)
	0.1: "Ноль целых одна десятая",
}
|
// Request payload for updating a user's seat preference.
export class UpdateUserSeatPreference
{
    // Identifier of the user's current session.
    sessionid : string;
    // Selected seat code(s) — presumably a delimited list; TODO confirm format with the API.
    seatcodes : string;
}
<reponame>rrinat/CustomizableCalendar<gh_stars>100-1000
package com.molo17.customizablecalendar.library.presenter.interfeaces;
import com.molo17.customizablecalendar.library.interactors.ViewInjector;
import com.molo17.customizablecalendar.library.view.CustomizableCalendarView;
import java.util.List;
/**
 * Presenter contract for {@link CustomizableCalendarView}: extends the basic
 * presenter lifecycle and view-injection capabilities.
 *
 * Created by francescofurlan on 23/06/17.
 */
public interface CustomizableCalendarPresenter extends BasePresenter<CustomizableCalendarView>, ViewInjector {
    // Returns the labels for the week-day header row.
    // NOTE(review): despite the "setup" name this is a provider method — confirm
    // ordering/locale expectations with the implementations.
    List<String> setupWeekDays();
}
|
<reponame>tuckerbeauchamp/whatToWatch
// Action type constants for the favorites feature.
export const ADD_FAVORITE = "ADD_FAVORITE";
export const REMOVE_FAVORITE = "REMOVE_FAVORITE";
|
package io.github.vampirestudios.obsidian.addon_modules;
import io.github.vampirestudios.obsidian.Obsidian;
import io.github.vampirestudios.obsidian.api.obsidian.AddonModule;
import io.github.vampirestudios.obsidian.api.obsidian.EntityModel;
import io.github.vampirestudios.obsidian.configPack.ObsidianAddon;
import io.github.vampirestudios.obsidian.utils.ModIdAndAddonPath;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import static io.github.vampirestudios.obsidian.configPack.ObsidianAddonLoader.*;
/**
 * Addon module that loads entity model definitions ("entities/models") from
 * an addon's JSON files and registers them.
 */
public class EntityModels implements AddonModule {

    /**
     * Parses {@code file} as an {@link EntityModel} and registers it in the
     * "entity_model" registry. Registration failures are reported through
     * {@code failedRegistering} rather than propagated; JSON parse errors
     * (unchecked) propagate to the caller, as before.
     *
     * @throws FileNotFoundException if {@code file} does not exist
     */
    @Override
    public void init(ObsidianAddon addon, File file, ModIdAndAddonPath id) throws FileNotFoundException {
        EntityModel entityModel;
        FileReader reader = new FileReader(file);
        try {
            entityModel = Obsidian.GSON.fromJson(reader, EntityModel.class);
        } finally {
            // FIX: the original leaked the FileReader. Close it whether or not
            // parsing succeeds; close failures are ignored because the data has
            // already been read (or parsing already failed).
            try {
                reader.close();
            } catch (IOException ignored) {
            }
        }
        if (entityModel == null) return;
        try {
            register(ENTITY_MODELS, "entity_model", entityModel.name, entityModel);
        } catch (Exception e) {
            failedRegistering("entity_model", entityModel.name.toString(), e);
        }
    }

    @Override
    public String getType() {
        return "entities/models";
    }
}
|
#!/usr/bin/env bash
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2020-12-04 17:10:48 +0000 (Fri, 04 Dec 2020)
#
# https://github.com/HariSekhon/bash-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/HariSekhon
#
# https://www.jetbrains.com/help/teamcity/rest-api-reference.html#Build+Requests
set -euo pipefail
[ -n "${DEBUG:-}" ] && set -x
srcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1090
. "$srcdir/lib/utils.sh"
# shellcheck disable=SC2034,SC2154
usage_description="
Lists the Teamcity BuildTypes (pipelines) via the Teamcity API
Output format:
<BuildType_ID> <Project> <BuildType_Name>
Specify \$NO_HEADER to omit the header line
See adjacent teamcity_api.sh for authentication details
"
# used by usage() in lib/utils.sh
# shellcheck disable=SC2034
usage_args="[<curl_options>]"
help_usage "$@"
# Emit the optional header and one TSV row per buildType, then align into columns.
{
if [ -z "${NO_HEADER:-}" ]; then
    printf 'BuildType_ID\tProject\tBuildType_Name\n'
fi
"$srcdir/teamcity_api.sh" /buildTypes |
jq -r '.buildType[] | [.id, .projectId, .name] | @tsv'
} |
# the $'' quoting evaluates the tab \t properly - has to be single not double quotes
column -t -s $'\t'
# POSIX, but above works just fine in bash
#column -t -s "$(printf '\t')"
|
<filename>ProxySever/src/main/java/com/efei/proxy/ProxyTransmitServer.java
package com.efei.proxy;
import com.Server;
import com.efei.proxy.channelHandler.HeartBeatServerHandler;
import com.efei.proxy.channelHandler.LoginChannelHandler;
import com.efei.proxy.channelHandler.ProxyReponseDataHandler;
import com.efei.proxy.common.codec.ProxyTcpProtocolDecoder;
import com.efei.proxy.common.codec.ProxyTcpProtocolEncoder;
import com.efei.proxy.config.ProxyTransmitServerConfig;
import com.efei.proxy.config.ServerConfig;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.timeout.IdleStateHandler;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
 * Proxy forwarding server — accepts proxied connections and forwards the
 * traffic on to clients.
 */
@Component
@Slf4j
public class ProxyTransmitServer extends Server{
    @Autowired
    private ProxyTransmitServerConfig proxyTransmitServerConfig;

    @Autowired
    private HeartBeatServerHandler heartBeatServerHandler;

    /**
     * Builds the channel pipeline: idle detection + heartbeat, protocol decode,
     * login handling, response forwarding, then protocol encode on the outbound
     * path. Handler order is significant.
     */
    public ChannelInitializer<SocketChannel> getChannelInitializer() {
        return new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                ChannelPipeline pip = ch.pipeline();
                // Fire an idle event after 10s without reads; the heartbeat handler reacts.
                pip.addLast(new IdleStateHandler(10,0,0));
                pip.addLast(heartBeatServerHandler);
                pip.addLast(ProxyTcpProtocolDecoder.getSelf());
                pip.addLast(new LoginChannelHandler());
                pip.addLast(new ProxyReponseDataHandler());
                // outbound data encoding
                //pip.addLast(new HttpRequestTransmitEncoder());
                pip.addLast(new ProxyTcpProtocolEncoder());
                // pip.addLast(new ChannelOutboundHandlerAdapter(){
                // @Override
                // public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
                // //super.write(ctx, msg, promise);
                // if (msg instanceof ByteBuf) {
                // ByteBuf in = (ByteBuf) msg;
                // byte[] content = new byte[in.readableBytes()];
                // in.readBytes(content);
                // String key = ctx.attr(userchannelkey).get();
                // ProxyTcpProtocolBean b = new ProxyTcpProtocolBean((byte)1,(byte)1,key,content.length,content);
                // logger.debug(b.toStr());
                // } else {
                // super.write(ctx, msg, promise);
                // }
                // }
                // });
                // pip.addLast(new HttpResponseDecoder());
                // pip.addLast(new HttpObjectAggregator(2*1024));
                // pip.addLast(new MessageToMessageDecoder<HttpObject>(){
                //
                // protected void decode(ChannelHandlerContext ctx, HttpObject msg, List<Object> out) throws Exception {
                // FullHttpResponse msg2 = (FullHttpResponse)msg;
                // String head = msg2.headers().toString();
                // System.out.println(String.format("head:%s",head));
                // String sb = msg2.content().toString(CharsetUtil.UTF_8);
                // System.out.println(String.format("body:%s",sb));
                // }
                // });
            }
        };
    }

    @Override
    public ServerConfig getServerConfig() {
        return proxyTransmitServerConfig;
    }

    // Binds the server on the configured port (blocking per Server.start semantics —
    // NOTE(review): confirm against the Server base class, which is not visible here).
    public void start() throws InterruptedException {
        start(proxyTransmitServerConfig.getPort());
    }
}
|
/*
Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
package com.mysql.cluster.crund;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import com.mysql.cluster.crund.CrundDriver.XMode;
/**
 * JDBC-based implementation of the CRUND "S"-table load: insert, lookup,
 * update and delete operations driven in individual (indy), transactional
 * (each) or batched (bulk) mode.
 *
 * Fix over the original: the dynamic-SQL bulk lookup leaked its
 * java.sql.Statement; it is now closed in a finally block.
 */
class JdbcS extends CrundSLoad {
    // JDBC settings
    protected String jdbcDriver;   // driver class name (property jdbc.driver)
    protected String url;          // connection URL (property jdbc.url)
    protected String username;
    protected String password;

    // JDBC resources
    protected Class jdbcDriverClass;
    protected Connection connection;
    protected String sqlIns0;
    protected String sqlSel0;
    protected String sqlUpd0;
    protected String sqlDel0;
    protected String sqlDelAll;
    protected PreparedStatement ins0;
    protected PreparedStatement sel0;
    protected PreparedStatement upd0;
    protected PreparedStatement del0;
    protected PreparedStatement delAll;

    public JdbcS(CrundDriver driver) {
        super(driver);
    }

    // ----------------------------------------------------------------------
    // JDBC intializers/finalizers
    // ----------------------------------------------------------------------

    /** Reads the jdbc.* properties and verifies that the driver loads. */
    protected void initProperties() {
        out.println();
        out.print("setting jdbc properties ...");

        final StringBuilder msg = new StringBuilder();
        final String eol = System.getProperty("line.separator");

        // load the JDBC driver class
        jdbcDriver = driver.props.getProperty("jdbc.driver");
        if (jdbcDriver == null) {
            throw new RuntimeException("Missing property: jdbc.driver");
        }
        try {
            Class.forName(jdbcDriver);
        } catch (ClassNotFoundException e) {
            out.println("Cannot load JDBC driver '" + jdbcDriver
                        + "' from classpath '"
                        + System.getProperty("java.class.path") + "'");
            throw new RuntimeException(e);
        }

        url = driver.props.getProperty("jdbc.url");
        if (url == null) {
            throw new RuntimeException("Missing property: jdbc.url");
        }
        username = driver.props.getProperty("jdbc.user");
        password = driver.props.getProperty("jdbc.password");

        if (msg.length() == 0) {
            out.println(" [ok]");
        } else {
            driver.hasIgnoredSettings = true;
            out.println();
            out.print(msg.toString());
        }

        name = url.substring(0, 10); // shortcut will do
    }

    protected void printProperties() {
        out.println("jdbc.driver: " + jdbcDriver);
        out.println("jdbc.url: " + url);
        out.println("jdbc.user: \"" + username + "\"");
        out.println("jdbc.password: \"" + password + "\"");
    }

    /** Loads (and caches) the JDBC driver class. */
    public void init() throws Exception {
        super.init();
        assert (jdbcDriverClass == null);

        // load the JDBC driver class
        out.print("loading jdbc driver ...");
        out.flush();
        try {
            jdbcDriverClass = Class.forName(jdbcDriver);
        } catch (ClassNotFoundException e) {
            out.println("Cannot load JDBC driver '" + jdbcDriver
                        + "' from classpath '"
                        + System.getProperty("java.class.path") + "'");
            throw new RuntimeException(e);
        }
        out.println(" [ok: " + jdbcDriverClass.getName() + "]");
    }

    public void close() throws Exception {
        assert (jdbcDriverClass != null);

        //out.println();
        jdbcDriverClass = null;
        super.close();
    }

    // ----------------------------------------------------------------------
    // JDBC datastore operations
    // ----------------------------------------------------------------------

    /** Opens the connection, sets the isolation level, prepares statements. */
    public void initConnection() throws SQLException {
        assert (jdbcDriverClass != null);
        assert (connection == null);

        out.println();
        out.println("initializing jdbc resources ...");

        // create a connection to the database
        out.print("starting jdbc connection ...");
        out.flush();
        try {
            connection = DriverManager.getConnection(url, username, password);
        } catch (SQLException e) {
            out.println("Cannot connect to database '" + url + "'");
            throw new RuntimeException(e);
        }
        out.println(" [ok: " + url + "]");

        out.print("setting isolation level ...");
        out.flush();
        // ndb storage engine only supports READ_COMMITTED
        final int il = Connection.TRANSACTION_READ_COMMITTED;
        connection.setTransactionIsolation(il);
        out.print(" [ok: ");
        switch (connection.getTransactionIsolation()) {
        case Connection.TRANSACTION_READ_UNCOMMITTED:
            out.print("READ_UNCOMMITTED");
            break;
        case Connection.TRANSACTION_READ_COMMITTED:
            out.print("READ_COMMITTED");
            break;
        case Connection.TRANSACTION_REPEATABLE_READ:
            out.print("REPEATABLE_READ");
            break;
        case Connection.TRANSACTION_SERIALIZABLE:
            out.print("SERIALIZABLE");
            break;
        default:
            assert false;
        }
        out.println("]");

        initPreparedStatements();
    }

    public void closeConnection() throws SQLException {
        assert (connection != null);

        out.println();
        out.println("releasing jdbc resources ...");
        closePreparedStatements();
        out.print("closing jdbc connection ...");
        out.flush();
        connection.close();
        connection = null;
        out.println(" [ok]");
    }

    /** Deletes all rows from S in a single transaction. */
    public void clearData() throws SQLException {
        connection.setAutoCommit(false);
        out.print("deleting all rows ...");
        out.flush();
        final int d = delAll.executeUpdate();
        connection.commit();
        out.println(" [S: " + d + "]");
    }

    public void initPreparedStatements() throws SQLException {
        assert (connection != null);
        assert (ins0 == null);
        assert (sel0 == null);
        assert (upd0 == null);
        assert (del0 == null);

        out.print("using lock mode for reads ...");
        out.flush();
        final String lm;
        switch (driver.lockMode) {
        case none:
            lm = "";
            break;
        case shared:
            lm = " LOCK IN share mode";
            break;
        case exclusive:
            lm = " FOR UPDATE";
            break;
        default:
            lm = "";
            assert false;
        }
        out.println(" [ok: " + "SELECT" + lm + ";]");

        out.print("compiling jdbc statements ...");
        out.flush();
        sqlIns0 = "INSERT INTO S (c0, c1, c2, c3, c5, c6, c7, c8) VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
        sqlSel0 = "SELECT * FROM S WHERE c0=?" + lm;
        sqlUpd0 = "UPDATE S SET c1 = ?, c2 = ?, c3 = ?, c5 = ?, c6 = ?, c7 = ?, c8 = ? WHERE c0=?";
        sqlDel0 = "DELETE FROM S WHERE c0=?";
        sqlDelAll = "DELETE FROM S";
        ins0 = connection.prepareStatement(sqlIns0);
        sel0 = connection.prepareStatement(sqlSel0);
        upd0 = connection.prepareStatement(sqlUpd0);
        del0 = connection.prepareStatement(sqlDel0);
        delAll = connection.prepareStatement(sqlDelAll);
        out.println(" [ok]");
    }

    protected void closePreparedStatements() throws SQLException {
        assert (ins0 != null);
        assert (sel0 != null);
        assert (upd0 != null);
        assert (del0 != null);
        assert (delAll != null);

        out.print("closing jdbc statements ...");
        out.flush();
        ins0.close();
        ins0 = null;
        sel0.close();
        sel0 = null;
        upd0.close();
        upd0 = null;
        del0.close();
        del0 = null;
        delAll.close();
        delAll = null;
        out.println(" [ok]");
    }

    // ----------------------------------------------------------------------

    protected void runInsert(XMode mode, int[] id) throws SQLException {
        final String name = "S_insAttr," + mode;
        final int n = id.length;
        driver.beginOp(name);
        connection.setAutoCommit(mode == XMode.indy);
        for(int i = 0; i < n; i++)
            insert(mode, id[i]);
        if (mode == XMode.bulk)
            ins0.executeBatch();
        if (mode != XMode.indy)
            connection.commit();
        driver.finishOp(name, n);
    }

    protected void insert(XMode mode, int id) throws SQLException {
        final int i = id;
        final String str = Integer.toString(i);
        ins0.setString(1, str); // key
        ins0.setString(2, str);
        ins0.setInt(3, i);
        ins0.setInt(4, i);
        ins0.setString(5, str);
        ins0.setString(6, str);
        ins0.setString(7, str);
        ins0.setString(8, str);
        if (mode == XMode.bulk) {
            ins0.addBatch();
        } else {
            int cnt = ins0.executeUpdate();
            assert (cnt == 1);
        }
    }

    // ----------------------------------------------------------------------

    protected void runLookup(XMode mode, int[] id) throws SQLException {
        final String name = "S_getAttr," + mode;
        final int n = id.length;
        driver.beginOp(name);
        connection.setAutoCommit(mode == XMode.indy);
        if (mode != XMode.bulk) {
            for(int i = 0; i < n; i++)
                lookup(id[i]);
            if (mode != XMode.indy)
                connection.commit();
        } else {
            lookup(id);
            connection.commit();
        }
        driver.finishOp(name, n);
    }

    /** Bulk lookup via a single multi-statement query. */
    protected void lookup(int[] id) throws SQLException {
        final int n = id.length;

        // use dynamic SQL for generic bulk queries
        // The mysql jdbc driver requires property allowMultiQueries=true
        // passed to DriverManager.getConnection() or in URL
        // jdbc:mysql://localhost/crunddb?allowMultiQueries=true
        final StringBuilder sb = new StringBuilder();
        for (int i = 0; i < n; i++)
            sb.append(sqlSel0.replace("?", "'" + id[i] + "'")).append(";");
        final String q = sb.toString();

        final Statement s = connection.createStatement();
        try {
            // allow for multi/single result sets with single/multi rows
            boolean hasRS = s.execute(q);
            int i = 0;
            while (hasRS) {
                final ResultSet rs = s.getResultSet();
                while (rs.next())
                    check(id[i++], rs);
                hasRS = s.getMoreResults(); // also closes the previous ResultSet
            }
            verify(n, i);
        } finally {
            // FIX: the original never closed this Statement, leaking it (and
            // its open ResultSet) on every bulk lookup.
            s.close();
        }
    }

    protected void lookup(int id) throws SQLException {
        sel0.setString(1, Integer.toString(id)); // key
        final ResultSet rs = sel0.executeQuery();
        int i = 0;
        while (rs.next()) {
            check(id, rs);
            i++;
        }
        verify(1, i);
        rs.close();
    }

    protected void check(int id, ResultSet rs) throws SQLException {
        // XXX not verifying at this time
        String ac0 = rs.getString(1);
        String c1 = rs.getString(2);
        int c2 = rs.getInt(3);
        int c3 = rs.getInt(4);
        int c4 = rs.getInt(5);
        String c5 = rs.getString(6);
        String c6 = rs.getString(7);
        String c7 = rs.getString(8);
        String c8 = rs.getString(9);
        String c9 = rs.getString(10);
        String c10 = rs.getString(11);
        String c11 = rs.getString(12);
        String c12 = rs.getString(13);
        String c13 = rs.getString(14);
        String c14 = rs.getString(15);
    }

    // ----------------------------------------------------------------------

    protected void runUpdate(XMode mode, int[] id) throws SQLException {
        final String name = "S_setAttr," + mode;
        final int n = id.length;
        driver.beginOp(name);
        connection.setAutoCommit(mode == XMode.indy);
        for(int i = 0; i < n; i++)
            update(mode, id[i]);
        if (mode == XMode.bulk)
            upd0.executeBatch();
        if (mode != XMode.indy)
            connection.commit();
        driver.finishOp(name, n);
    }

    protected void update(XMode mode, int id) throws SQLException {
        final String str0 = Integer.toString(id);
        final int r = -id;
        final String str1 = Integer.toString(r);
        upd0.setString(1, str1);
        upd0.setInt(2, r);
        upd0.setInt(3, r);
        upd0.setString(4, str1);
        upd0.setString(5, str1);
        upd0.setString(6, str1);
        upd0.setString(7, str1);
        upd0.setString(8, str0); // key
        if (mode == XMode.bulk) {
            upd0.addBatch();
        } else {
            int cnt = upd0.executeUpdate();
            assert (cnt == 1);
        }
    }

    // ----------------------------------------------------------------------

    protected void runDelete(XMode mode, int[] id) throws SQLException {
        final String name = "S_del," + mode;
        final int n = id.length;
        driver.beginOp(name);
        connection.setAutoCommit(mode == XMode.indy);
        for(int i = 0; i < n; i++)
            delete(mode, id[i]);
        if (mode == XMode.bulk)
            del0.executeBatch();
        if (mode != XMode.indy)
            connection.commit();
        driver.finishOp(name, n);
    }

    protected void delete(XMode mode, int id) throws SQLException {
        final String str = Integer.toString(id);
        del0.setString(1, str);
        if (mode == XMode.bulk) {
            del0.addBatch();
        } else {
            int cnt = del0.executeUpdate();
            assert (cnt == 1);
        }
    }

    // ----------------------------------------------------------------------

    protected void clearPersistenceContext() {
        // nothing to do as we're not caching beyond Tx scope
    }
}
|
<filename>libgraph/auxiliary.hh
#pragma once
#include <iostream>
#include <vector>
#include <list>
#include <set>
#include <map>
#include <sstream>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
using namespace std;
typedef int node_t;
typedef std::vector< std::vector<node_t> > neighbours_t;
typedef std::vector< bool > edges_t;

// Directed edge is an ordered pair of nodes
typedef std::pair<node_t,node_t> dedge_t;

// Undirected edge: an unordered pair of nodes, normalized so first <= second.
struct edge_t : public std::pair<node_t,node_t> {
  edge_t() {}
  edge_t(const std::pair<node_t,node_t>& p)
    : std::pair<node_t,node_t>(std::min(p.first,p.second),std::max(p.first,p.second)) {}
  edge_t(const node_t u, const node_t v)
    : std::pair<node_t,node_t>(std::min(u,v),std::max(u,v)) {}

  // Reconstruct an edge from its linear index (inverse of index()).
  // BUGFIX: the original assigned the larger node to `first` and the smaller
  // to `second`, violating the first<=second invariant of the other
  // constructors and breaking the round-trip edge_t(e.index()) == e.
  edge_t(const int index) {
    node_t u=0;
    for(;u*(u-1)/2<=index;u++) ;  // find largest u with u*(u-1)/2 <= index
    u--;
    second = u;
    first  = index - u*(u-1)/2;   // always < u, so first < second holds
  }

  // Linear index of the edge: second*(second-1)/2 + first.
  inline size_t index() const {
    const node_t v = first, u = second;
    return u*(u-1)/2 + v;
  }
};
#define insert_unique(v,x) if(std::find(v.begin(),v.end(),x) == v.end()) v.push_back(x);
// Print a pair as "{first,second}".
template <typename S, typename T>
std::ostream& operator<<(std::ostream& s, const std::pair<S,T>& p)
{
  return s << "{" << p.first << "," << p.second << "}";
}
// Generates an ostream inserter that prints any container<T> as "{a,b,c}"
// (comma-separated, brace-delimited), matching the pair inserter above.
#define container_output(container) \
template <typename T> ostream& operator<<(ostream& s, const container<T>& v) \
{ \
s << "{"; \
for(typename container<T>::const_iterator x(v.begin());x!=v.end();){ \
s << *x; \
if(++x!=v.end()) s << ","; \
} \
s << "}"; \
return s; \
}
// Instantiate the inserter for the container types used by this library.
container_output(vector);
container_output(list);
container_output(set);
// TODO: Macro instead of repeating
// Collect the keys of an ordered map, in iteration (sorted) order.
template<typename K, typename V> std::vector<K> get_keys(const std::map<K,V>& m)
{
  std::vector<K> keys;
  keys.reserve(m.size());
  for(const auto &kv: m) keys.push_back(kv.first);
  return keys;
}

// Collect the keys of an unordered map, in its (unspecified) iteration order.
template<typename K, typename V> std::vector<K> get_keys(const std::unordered_map<K,V>& m)
{
  std::vector<K> keys;
  keys.reserve(m.size());
  for(const auto &kv: m) keys.push_back(kv.first);
  return keys;
}

// Collect the first components of a vector of pairs, preserving order.
template<typename K, typename V> std::vector<K> get_keys(const std::vector<std::pair<K,V> >& m)
{
  std::vector<K> keys;
  keys.reserve(m.size());
  for(const auto &kv: m) keys.push_back(kv.first);
  return keys;
}
// Collect the values of an ordered map, in key-sorted iteration order.
template<typename K, typename V> std::vector<V> get_values(const std::map<K,V>& m)
{
  std::vector<V> values;
  values.reserve(m.size());
  for(const auto &kv: m) values.push_back(kv.second);
  return values;
}

// Collect the values of an unordered map, in its (unspecified) iteration order.
template<typename K, typename V> std::vector<V> get_values(const std::unordered_map<K,V>& m)
{
  std::vector<V> values;
  values.reserve(m.size());
  for(const auto &kv: m) values.push_back(kv.second);
  return values;
}

// Collect the second components of a vector of pairs, preserving order.
template<typename K, typename V> std::vector<V> get_values(const std::vector<std::pair<K,V> >& m)
{
  std::vector<V> values;
  values.reserve(m.size());
  for(const auto &kv: m) values.push_back(kv.second);
  return values;
}
// Element-wise conversion of a vector<from> into a vector<to>, relying on
// the implicit from -> to conversion.
template<typename from, typename to> std::vector<to> convert_vector(const std::vector<from>& x)
{
  std::vector<to> y(x.begin(), x.end());
  return y;
}

// Accessors for pair components, usable as plain functions.
template<typename K, typename V> K getFirst(const std::pair<K,V>& x) { return x.first; }
template<typename K, typename V> V getSecond(const std::pair<K,V>& x) { return x.second; }

// Swap the components of a pair.
template<typename S, typename T> std::pair<T,S> reverse(const std::pair<S,T>& p)
{
  return std::pair<T,S>(p.second, p.first);
}

// Sign of a value: -1, 0 or +1.
template <typename T> int sgn(const T& val)
{
  return (T(0) < val) - (val < T(0));
}
// Format any streamable value as a string via an ostringstream.
template <typename T> string to_string(const T& x)
{
  std::ostringstream out;
  out << x;
  return out.str();
}

// Parse a value of type T from a string via a stringstream.
// On extraction failure the result is whatever operator>> leaves behind.
template <typename T> T from_string(const string& s)
{
  std::stringstream in(s);
  T result;
  in >> result;
  return result;
}
// Pads `s` with `padchar` to at least `length` characters — presumably on the
// left, given the '0' default; confirm in the implementation (defined elsewhere).
string pad_string(const string& s, int length, char padchar = '0');
// Returns the extension of `filename` (defined elsewhere).
string filename_extension(const string& filename);
// Greatest common divisor of a and b (defined elsewhere).
int gcd(int a, int b);
template <typename T> vector<T> operator*(const vector<T>& xs, const T& x)
{
vector<T> ys(xs.size());
for(int i=0;i<xs.size();i++) ys[i] = xs[i] * x;
return ys;
}
template <typename T> vector<T> operator+(const vector<T>& xs, const T& x)
{
vector<T> ys(xs.size());
for(int i=0;i<xs.size();i++) ys[i] = xs[i] + x;
return ys;
}
template <typename T> vector< vector<T> > operator+(const vector< vector<T> >& xs, const T& x)
{
vector< vector<T> > ys(xs.size());
for(int i=0;i<xs.size();i++) ys[i] = xs[i] + x;
return ys;
}
// Fold `key` into `seed`, boost::hash_combine style; 0x9e3779b9 is the 32-bit
// golden-ratio constant used to spread bits.
template<typename T> void hash_combine(size_t &seed, T const &key) {
  hash<T> hasher;
  seed ^= hasher(key) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
// NOTE(review): specializing std::hash for specializations made up purely of
// std types (pair, vector) is technically not sanctioned by the standard; it
// works on common implementations but could collide with future library
// additions.
namespace std {
  // Hash a pair by combining the hashes of its components.
  template<typename T1, typename T2> struct hash<pair<T1, T2>> {
    size_t operator()(const pair<T1, T2> &p) const {
      size_t seed(0);
      hash_combine(seed, p.first);
      hash_combine(seed, p.second);
      return seed;
    }
  };
  // Vectors of integers small enough to fit in a char32_t are hashed by
  // reinterpreting them as a u32string and using its hasher.
  template<typename IntType> struct hash<vector<IntType>> {
    size_t operator()(const vector<IntType> &v) const {
      return std::hash<u32string>()(u32string(v.begin(),v.end()));
    }
  };
}
// Bidirectional mapping between values of type T and dense integer ids.
// The inherited unordered_map holds the forward direction (value -> id);
// `reverse` holds the inverse (indexed by id; only correct for start == 0).
template <typename T> class IDCounter: public std::unordered_map<T,int> {
public:
  int nextid;             // next id to be handed out
  std::vector<T> reverse; // id -> value

  IDCounter(int start=0) : nextid(start) {}

  // Return the id of x, assigning the next free id if x is new.
  int insert(const T& x){
    std::pair<typename std::unordered_map<T,int>::iterator,bool> res =
      std::unordered_map<T,int>::insert(std::make_pair(x, nextid));
    if(!res.second) return res.first->second; // already known
    reverse.push_back(x);
    return nextid++;
  }

  // Map an id back to its value; only valid for ids this counter issued.
  const T& invert(int idx) const {
    assert(idx>=0 && idx<nextid);
    return reverse[idx];
  }

  // Lookup without inserting: the id of x, or -1 if x is unknown.
  int operator()(const T& x) const {
    typename std::unordered_map<T,int>::const_iterator it = this->find(x);
    return it != this->end() ? it->second : -1;
  }
};
// C++-style getline with posix files: reads one line from `file` into `str` —
// presumably returning false at EOF; confirm in the implementation (defined elsewhere).
bool getline(FILE *file, string& str);
|
def generateLinkedList(n):
    """Build a singly linked list 0 -> 1 -> ... -> n and return its head Node."""
    head = Node(0)
    tail = head
    for value in range(1, n + 1):
        tail.next = Node(value)
        tail = tail.next
    return head
<reponame>michaelsabo/DeviceAgent.iOS<filename>Server/PrivateHeaders/XCTAutomationSupport/XCTElementSortingTransformer.h<gh_stars>0
// class-dump results processed by bin/class-dump/dump.rb
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jul 30 2018 09:07:48).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by <NAME>.
//
#import <Foundation/Foundation.h>
#import <CoreGraphics/CoreGraphics.h>
#import <XCTest/XCUIElementTypes.h>
#import "CDStructures.h"
@protocol OS_dispatch_queue;
@protocol OS_xpc_object;
#import "XCTElementSetCodableTransformer.h"
@class NSArray;
// Element-set transformer configured with an array of sort descriptors.
// NOTE(review): this is a class-dump reconstruction — no implementation is
// visible here, so method semantics beyond the signatures cannot be stated.
@interface XCTElementSortingTransformer : XCTElementSetCodableTransformer
{
    NSArray *_sortDescriptors; // backing ivar for the sortDescriptors property
}

@property(readonly, copy) NSArray *sortDescriptors;
- (id)initWithSortDescriptors:(id)arg1;
- (id)iteratorForInput:(id)arg1;
- (id)requiredKeyPathsOrError:(id *)arg1;
- (BOOL)supportsAttributeKeyPathAnalysis;
- (BOOL)supportsRemoteEvaluation;
- (id)transform:(id)arg1 relatedElements:(id *)arg2;

@end
|
#!/bin/bash
# Restrict the kernel's ephemeral (local) port range to 34555-36888.
# Persist the setting in /etc/sysctl.conf — update the line if it already
# exists, append it otherwise — then apply it immediately with sysctl -w.
if grep -q "^net.ipv4.ip_local_port_range" /etc/sysctl.conf; then
    sed -i "s/^net.ipv4.ip_local_port_range.*/net.ipv4.ip_local_port_range = 34555 36888/" /etc/sysctl.conf
else
    echo "net.ipv4.ip_local_port_range = 34555 36888" >> /etc/sysctl.conf
fi
sysctl -w net.ipv4.ip_local_port_range="34555 36888"
|
<filename>main.py
# Smoke check: import the pyfbx package and print its public attributes.
if __name__ == "__main__":
    import pyfbx
    print(dir(pyfbx))
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
/*
 * Sign-extend the low `width` bits of `data` to a full long.
 * `width` must be in 1..64 (not validated; see sign_extend_safe).
 */
long sign_extend(long data, int width){
    int shift = (int)(sizeof(long) * 8) - width;
    /* FIX: do the left shift in unsigned arithmetic — left-shifting a signed
       value out of range is undefined behavior in C. The arithmetic right
       shift then replicates the sign bit (implementation-defined, but
       universal on two's-complement platforms, as the original relied on). */
    return (long)((unsigned long)data << shift) >> shift;
}
/*
 * Sign-extend the low `width` bits of `data` to a full long, terminating the
 * process on an invalid width. Bits above `width` in the input are ignored.
 */
long sign_extend_safe(long data, int width){
    /* FIX: reject width == 0 as well — the original only rejected negative
       widths, so width 0 reached `data >> -1`, which is undefined behavior. */
    if(width > 64 || width <= 0){
        exit(-1);
    }
    if(width == 64){
        return data;
    }
    if((data >> (width-1)) & 0x1){
        /* negative: set every bit above the sign bit */
        return data | ~((1UL << width) - 1);
    }else{
        /* positive: clear every bit above the value */
        return data & ((1UL << width) - 1);
    }
}
/* Prints sign_extend vs sign_extend_safe results side by side for manual
   comparison: two fixed cases, then the sign bit at every width 1..64. */
int test_sign_extend(){
    printf("0x%lx\n", sign_extend(0x8, 4));
    printf("0x%lx\n", sign_extend(0x8, 5));
    printf("0x%lx\n", sign_extend_safe(0x8, 4));
    printf("0x%lx\n", sign_extend_safe(0x8, 5));
    for(int i = 0; i<64; i++){
        printf("0x%lx, ", sign_extend(0x1UL << i, i + 1));
        printf("0x%lx\n", sign_extend_safe(0x1UL << i, i + 1));
    }
    return 0; /* FIX: was missing — falling off a non-void function */
}
/* Demonstrates that subtracting an offset equals adding its two's-complement
   (sign-extended) representation. */
int test_sub(){
    long base = 0xffff000000000010;
    long off = 0x10;
    long unsigned_off = 0xfffffffffffffff0; /* two's complement of 0x10 */
    printf("off: %lx\n", off);
    printf("off: %lx\n", -off);
    printf("unsigned off: %lx\n", unsigned_off);
    printf("base - off: %lx\n", base - off);
    printf("base + unsigned off: %lx\n", base + unsigned_off);
    printf("base + sign_extend(0x10): %lx\n", base + sign_extend(0x10, 5));
    return 0; /* FIX: was missing — falling off a non-void function */
}
/* Runs both demo suites; output is inspected manually. */
int main(){
    test_sign_extend();
    test_sub();
}
|
import java.util.Random;
// Impose and apply a global ordering of picking up forks: N-1, ..., 2, 1, 0 to ensure that none of the
// philosophers will try to grab the same forks with the same hands
// Entry point: creates N forks and N philosopher threads arranged in a ring.
public class DiningPhilFixed1 {
    private static int N = 5; // number of philosophers (and forks)

    public static void main(String[] args) throws Exception {
        Philosopher1[] phils = new Philosopher1[N];
        Fork1[] forks = new Fork1[N];

        for (int i = 0; i < N; i++) {
            forks[i] = new Fork1(i);
        }

        for (int i = 0; i < N; i++) {
            // Philosopher i's left fork is forks[i]; the right fork is the
            // previous fork in the ring, forks[(i + N - 1) % N].
            phils[i] = new Philosopher1(i, forks[i], forks[(i + N - 1) % N]);
            phils[i].start();
        }
    }
}
/**
 * A philosopher thread that alternates thinking and eating forever.
 * Deadlock is avoided by having philosopher 0 acquire its forks in the
 * opposite order (right first) from everyone else, breaking the circular
 * wait; see the class comment at the top of the file.
 */
class Philosopher1 extends Thread {
    private final int index;
    private final Fork1 left;
    private final Fork1 right;

    public Philosopher1(int index, Fork1 left, Fork1 right) {
        this.index = index;
        this.left = left;
        this.right = right;
    }

    public void run() {
        Random randomGenerator = new Random();
        try {
            while (true) {
                Thread.sleep(randomGenerator.nextInt(100)); // not sleeping but thinking
                System.out.println("Phil " + index + " finishes thinking.");
                if (index == 0) {
                    // First philosopher grabs right then left
                    dine(randomGenerator, right, "right", left, "left");
                } else {
                    // Other philosophers follow the same old rule of grabbing left first then right
                    dine(randomGenerator, left, "left", right, "right");
                }
            }
        } catch (InterruptedException e) {
            System.out.println("Don't disturb me while I am sleeping, well, thinking.");
        }
    }

    // One eating cycle: pick up `first` then `second`, eat, put them down in
    // the same order. (Extracted from the two duplicated branches of the
    // original run() — output is byte-identical.)
    private void dine(Random rng, Fork1 first, String firstName,
                      Fork1 second, String secondName) throws InterruptedException {
        first.pickup();
        System.out.println("Phil " + index + " picks up " + firstName + " fork.");
        second.pickup();
        System.out.println("Phil " + index + " picks up " + secondName + " fork.");
        Thread.sleep(rng.nextInt(100)); // eating
        System.out.println("Phil " + index + " finishes eating.");
        first.putdown();
        System.out.println("Phil " + index + " puts down " + firstName + " fork.");
        second.putdown();
        System.out.println("Phil " + index + " puts down " + secondName + " fork.");
    }
}
class Fork1 {
    private final int index;
    // Guarded by this object's monitor: true while no philosopher holds
    // the fork.
    private boolean available = true;

    public Fork1(int index) {
        this.index = index;
    }

    // Block until the fork is free, then mark it taken.
    public synchronized void pickup() throws InterruptedException {
        while (!available) {
            wait();
        }
        available = false;
        notifyAll();
    }

    // Release the fork. Mirrors pickup(): the wait loop only triggers if
    // the fork is already marked available, which never happens for the
    // legitimate holder.
    public synchronized void putdown() throws InterruptedException {
        while (available) {
            wait();
        }
        available = true;
        notifyAll();
    }

    public String toString() {
        return available
            ? "Fork1 " + index + " is available."
            : "Fork1 " + index + " is NOT available.";
    }
}
|
<reponame>tenebrousedge/ruby-packer
require File.expand_path('../../../../spec_helper', __FILE__)
require File.expand_path('../../fixtures/common', __FILE__)
require File.expand_path('../closed', __FILE__)
# Shared examples for Dir#path / Dir#to_path (@method selects which one):
# both must report the directory name the Dir was opened with.
describe :dir_path, shared: true do
  it "returns the path that was supplied to .new or .open" do
    dir = Dir.open DirSpecs.mock_dir
    begin
      dir.send(@method).should == DirSpecs.mock_dir
    ensure
      # Ignore any error raised while closing; only the path check matters.
      dir.close rescue nil
    end
  end
  it "returns the path even when called on a closed Dir instance" do
    dir = Dir.open DirSpecs.mock_dir
    dir.close
    dir.send(@method).should == DirSpecs.mock_dir
  end
  with_feature :encoding do
    it "returns a String with the same encoding as the argument to .open" do
      path = DirSpecs.mock_dir.force_encoding Encoding::IBM866
      dir = Dir.open path
      begin
        dir.send(@method).encoding.should equal(Encoding::IBM866)
      ensure
        dir.close
      end
    end
  end
end
|
#!/bin/bash
#
# Candy Machine CLI - Automated Test
#
# To suppress prompts, you will need to set/export the following variables:
#
# ENV_URL="mainnet-beta"
# RPC="https://ssc-dao.genesysgo.net/"
# STORAGE="arweave-sol"
#
# ENV_URL="devnet"
# RPC="https://psytrbhymqlkfrhudd.dev.genesysgo.net:8899/"
# STORAGE="arweave"
#
# ITEMS=10
# MULTIPLE=0
#
# RESET="Y"
# EXT="png"
# CLOSE="Y"
# CHANGE="Y"
# TEST_IMAGE="Y"
#
# ARWEAVE_JWK="null"
# INFURA_ID="null"
# INFURA_SECRET="null"
# AWS_BUCKET="null"
#
# The custom RPC server option can be specified either by the flag -r <url>
# Resolve script-relative paths so the test works regardless of the
# directory it is invoked from.
CURRENT_DIR=$(pwd)
SCRIPT_DIR=$(cd -- $(dirname -- "${BASH_SOURCE[0]}") &>/dev/null && pwd)
PARENT_DIR="$(dirname "$SCRIPT_DIR")"
ASSETS_DIR=$SCRIPT_DIR/assets
CACHE_DIR=$SCRIPT_DIR/.cache
SRC_DIR=$PARENT_DIR/src
# The CLI under test, run via ts-node.
CMD_CMV2="ts-node ${SRC_DIR}/candy-machine-v2-cli.ts"
# Remote files to test the upload
PNG="https://arweave.net/izpWaFnueKtbRg4TY-CkUYQtwSzPNit3ZvQPY5hOK7E/?ext=png"
GIF="https://arweave.net/3I50hy1dHhRwyxtKPL60WIl4kV0rqjnl7t_DcZPAp2o/?ext=gif"
JPG="https://arweave.net/-KqqzJLtD8Pug-aCjbV6RWbGhfB74MBT71afqGFKYHA/?ext=jpg"
MP4="https://arweave.net/kM6fxv3Qj_Gcn8tcq9dU8wpZAXHNEWvEfVoIpRJzg8c/?ext=mp4"
# Metadata URL for large (max) collection tests
METADATA_URL="https://arweave.net/kM6fxv3Qj_Gcn8tcq9dU8wpZAXHNEWvEfVoIpRJzg8c"
# output colours: bold ANSI wrappers around their first argument
RED() { echo $'\e[1;31m'$1$'\e[0m'; }
GRN() { echo $'\e[1;32m'$1$'\e[0m'; }
BLU() { echo $'\e[1;34m'$1$'\e[0m'; }
MAG() { echo $'\e[1;35m'$1$'\e[0m'; }
CYN() { echo $'\e[1;36m'$1$'\e[0m'; }
# default test templates
# Non-interactive defaults for a standard (small, 10-item) test run.
function default_settings {
    MANUAL_CACHE="n"   # build the cache via a real upload, not by hand
    ITEMS=10
    MULTIPLE=0
    RESET="Y"
    EXT="png"
    CLOSE="Y"
    CHANGE="Y"
    TEST_IMAGE="Y"
    ARWEAVE_JWK="null"
    INFURA_ID="null"
    INFURA_SECRET="null"
    AWS_BUCKET="null"
}
# large-collection (40k item) test template
# Settings for the maximum-size collection test: the cache is written by
# hand (MANUAL_CACHE) and reupload/image-replacement checks are skipped.
function max_settings {
    MANUAL_CACHE="Y"
    ITEMS=40000
    MULTIPLE=39999
    RESET="Y"
    EXT="png"
    CLOSE="Y"
    CHANGE="n"
    TEST_IMAGE="n"
    ARWEAVE_JWK="null"
    INFURA_ID="null"
    INFURA_SECRET="null"
    AWS_BUCKET="null"
}
# Target mainnet-beta through the GenesysGo RPC with arweave-sol storage.
function mainnet_env {
    ENV_URL="mainnet-beta"
    RPC="https://ssc-dao.genesysgo.net/"
    STORAGE="arweave-sol"
}
# Target devnet through the GenesysGo devnet RPC with plain arweave storage.
function devnet_env {
    ENV_URL="devnet"
    RPC="https://psytrbhymqlkfrhudd.dev.genesysgo.net:8899/"
    STORAGE="arweave"
}
#-----------------------------------------------------------------------------#
# SETUP #
#-----------------------------------------------------------------------------#
echo ""
CYN "Candy Machine v2 - CLI Automated Tests"
CYN "--------------------------------------"
echo ""
CYN "Test template:"
echo "1. interactive"
echo "2. devnet standard (default)"
echo "3. mainnet-beta standard"
echo "4. devnet (40k)"
echo -n "$(CYN "Select test template [1-4]") (default 'devnet standard'): "
read Template
case "$Template" in
1)
echo ""
echo "[$(date "+%T")] Starting interactive test"
;;
3)
mainnet_env
default_settings
;;
4)
devnet_env
max_settings
;;
*)
devnet_env
default_settings
;;
esac
# Environment
if [ -z ${ENV_URL+x} ]; then
ENV_URL="devnet"
echo ""
CYN "Environment:"
echo "1. devnet (default)"
echo "2. mainnet-beta"
echo -n "$(CYN "Select the environment [1-2]") (default 'devnet'): "
read Input
case "$Input" in
1) ENV_URL="devnet" ;;
2) ENV_URL="mainnet-beta" ;;
esac
fi
# RPC server can be specified from the command-line with the flag "-r"
# Otherwise the default public one will be used
if [ -z ${RPC+x} ]; then
RPC="https://api.${ENV_URL}.solana.com"
fi
while getopts r: flag; do
case "${flag}" in
r) RPC=${OPTARG} ;;
esac
done
# Storage
if [ -z ${STORAGE+x} ]; then
STORAGE="arweave"
echo ""
CYN "Storage type:"
echo "1. arweave-bundle"
echo "2. arweave-sol"
echo "3. arweave (default)"
echo "4. ipfs"
echo "5. aws"
echo -n "$(CYN "Select the storage type [1-5]") (default 3): "
read Input
case "$Input" in
1) STORAGE="arweave-bundle" ;;
2) STORAGE="arweave-sol" ;;
3) STORAGE="arweave" ;;
4) STORAGE="ipfs" ;;
5) STORAGE="aws" ;;
esac
fi
if [ -z ${ARWEAVE_JWK+x} ]; then
ARWEAVE_JWK="null"
if [ "$STORAGE" = "arweave-bundle" ]; then
echo -n $(CYN "Arweave JWK wallet file: ")
read ARWEAVE_JWK
fi
fi
if [ -z ${INFURA_ID+x} ]; then
INFURA_ID="null"
INFURA_SECRET="null"
if [ "$STORAGE" = "ipfs" ]; then
echo -n $(CYN "Infura Project ID: ")
read INFURA_ID
echo -n $(CYN "Infura Secret: ")
read INFURA_SECRET
fi
fi
if [ -z ${AWS_BUCKET+x} ]; then
AWS_BUCKET="null"
if [ "$STORAGE" = "aws" ]; then
echo -n $(CYN "AWS bucket name: ")
read AWS_BUCKET
fi
fi
# Asset type
ANIMATION=0
if [ -z ${EXT+x} ]; then
IMAGE=$PNG
EXT="png"
echo ""
CYN "Asset type:"
echo "1. PNG (default)"
echo "2. JPG"
echo "3. GIF"
echo "4. MP4"
echo -n "$(CYN "Select the file type [1-4]") (default 1): "
read Input
case "$Input" in
1)
IMAGE=$PNG
EXT="png"
;;
2)
IMAGE=$JPG
EXT="jpg"
;;
3)
IMAGE=$GIF
EXT="gif"
;;
4)
IMAGE=$PNG
EXT="png"
ANIMATION=1
;;
esac
else
case "$EXT" in
png)
IMAGE=$PNG
;;
jpg)
IMAGE=$JPG
;;
gif)
IMAGE=$GIF
;;
mp4)
IMAGE=$PNG
EXT="png"
ANIMATION=1
;;
*)
RED "[$(date "+%T")] Aborting: invalid asset type ${EXT}"
exit 1
;;
esac
fi
# Collection size
if [ -z ${ITEMS+x} ]; then
echo ""
echo -n "$(CYN "Number of items") (default 10): "
read Number
if [ -z "$Number" ]; then
ITEMS=10
else
# make sure we are dealing with a number
ITEMS=$(($Number + 0))
fi
fi
# Test image.extension instead of index
if [ -z ${TEST_IMAGE+x} ]; then
echo ""
echo -n "$(CYN "Test image.ext replacement [Y/n]") (default 'Y'): "
read TEST_IMAGE
if [ -z "$TEST_IMAGE" ]; then
TEST_IMAGE="Y"
fi
fi
# Test reupload
if [ -z ${CHANGE+x} ]; then
echo ""
echo -n "$(CYN "Test reupload [Y/n]") (default 'Y'): "
read CHANGE
if [ -z "$CHANGE" ]; then
CHANGE="Y"
fi
fi
# Mint multiple tokens
if [ -z ${MULTIPLE+x} ]; then
echo ""
echo -n "$(CYN "Number of multiple tokens to mint") (default 0): "
read Number
if [ -z "$Number" ]; then
MULTIPLE=0
else
# make sure we are dealing with a number
MULTIPLE=$(($Number + 0))
fi
fi
# Clean up
if [ -z ${RESET+x} ]; then
echo ""
echo -n "$(CYN "Remove previous cache and assets [Y/n]") (default 'Y'): "
read RESET
if [ -z "$RESET" ]; then
RESET="Y"
fi
fi
if [ -z ${CLOSE+x} ]; then
echo ""
echo -n "$(CYN "Close candy machine and withdraw funds at the end [Y/n]") (default 'Y'): "
read CLOSE
if [ -z "$CLOSE" ]; then
CLOSE="Y"
fi
fi
echo ""
#-----------------------------------------------------------------------------#
# SETTING UP #
#-----------------------------------------------------------------------------#
# removes temporary files
# Remove the temporary artefacts of a test run: the generated config
# file, the assets directory, and the cache directory (created relative
# to the invocation directory -- see CACHE_FILE).
function clean_up {
    # -f: clean_up is first called before CONFIG_FILE is assigned, so the
    # variable may be empty -- without -f that produced a (suppressed)
    # error from rm. Paths are quoted against whitespace.
    rm -f "$CONFIG_FILE" 2>/dev/null
    rm -rf "$ASSETS_DIR" 2>/dev/null
    rm -rf .cache 2>/dev/null
}
if [ "${RESET}" = "Y" ]; then
echo "[$(date "+%T")] Removing previous cache and assets"
clean_up
fi
# Wallet keypair file
WALLET_KEY="$(solana config get keypair | cut -d : -f 2)"
CACHE_NAME="test"
CACHE_FILE="${CURRENT_DIR}/.cache/${ENV_URL}-${CACHE_NAME}.json"
LAST_INDEX=$((ITEMS - 1))
TIMESTAMP=`date "+%d/%m/%y %T"`
# preparing the assets metadata
read -r -d '' METADATA <<-EOM
{
"name": "[$TIMESTAMP] Test #%s",
"symbol": "TEST",
"description": "Candy Machine CLI Test #%s",
"seller_fee_basis_points": 500,
"image": "%s.%s", %b
"attributes": [{"trait_type": "Background", "value": "True"}],
"properties": {
"creators": [
{
"address": "$(solana address)",
"share": 100
}],
"files": []
}
}
EOM
# Creation of the collection. This will generate ITEMS x (json, image)
# files in the ASSETS_DIR
if [ ! -d $ASSETS_DIR ]; then
mkdir $ASSETS_DIR
# loads the animation asset
if [ "$ANIMATION" -eq 1 ]; then
curl -L -s $MP4 >"$ASSETS_DIR/template_animation.mp4"
SIZE=$(wc -c "$ASSETS_DIR/template_animation.mp4" | grep -oE '[0-9]+' | head -n 1)
if [ $SIZE -eq 0 ]; then
RED "[$(date "+%T")] Aborting: could not download sample mp4"
exit 1
fi
fi
curl -L -s $IMAGE >"$ASSETS_DIR/template_image.$EXT"
SIZE=$(wc -c "$ASSETS_DIR/template_image.$EXT" | grep -oE '[0-9]+' | head -n 1)
if [ $SIZE -eq 0 ]; then
RED "[$(date "+%T")] Aborting: could not download sample image"
exit 1
fi
# initialises the assets - this will be multiple copies of the same
# image/json pair with a new index
INDEX="image"
for ((i = 0; i < $ITEMS; i++)); do
if [ ! "$TEST_IMAGE" = "Y" ]; then
INDEX=$i
fi
NAME=$(($i + 1))
cp "$ASSETS_DIR/template_image.$EXT" "$ASSETS_DIR/$i.$EXT"
if [ "$ANIMATION" = 1 ]; then
cp "$ASSETS_DIR/template_animation.mp4" "$ASSETS_DIR/$i.mp4"
printf "$METADATA" $NAME $NAME $INDEX $EXT "\n\t\"animation_url\": \"$i.mp4\"," >"$ASSETS_DIR/$i.json"
else
printf "$METADATA" $NAME $NAME $INDEX $EXT "" >"$ASSETS_DIR/$i.json"
fi
done
rm "$ASSETS_DIR/template_image.$EXT"
# quietly removes the animation template (it might not exist)
rm -f "$ASSETS_DIR/template_animation.mp4"
fi
if [ "$MANUAL_CACHE" = "Y" ]; then
if [ ! -d $CACHE_DIR ]; then
mkdir $CACHE_DIR
echo -n "{\"program\":{\"uuid\":\"\", \"candyMachine\":\"\"}, \"items\":{" >> $CACHE_FILE
for ((i = 0; i < $ITEMS; i++)); do
if [ "$i" -gt "0" ]; then
echo -n "," >> $CACHE_FILE
fi
NAME=$(($i + 1))
echo -n "\"$i\":{\"link\":\"$METADATA_URL\",\"name\":\"[$TIMESTAMP] Test #$NAME\",\"onChain\":false}" >> $CACHE_FILE
done
echo -n "},\"env\":\"$ENV_URL\", \"cacheName\": \"$CACHE_NAME\"}" >> $CACHE_FILE
fi
fi
# Candy Machine configuration
CONFIG_FILE="config.json"
cat >$CONFIG_FILE <<-EOM
{
"price": 0.1,
"number": $ITEMS,
"gatekeeper": null,
"solTreasuryAccount": "$(solana address)",
"splTokenAccount": null,
"splToken": null,
"goLiveDate": "$(date "+%d %b %Y %T %Z")",
"endSettings": null,
"whitelistMintSettings": null,
"hiddenSettings": null,
"storage": "${STORAGE}",
"arweaveJwk": "${ARWEAVE_JWK}",
"ipfsInfuraProjectId": "${INFURA_ID}",
"ipfsInfuraSecret": "${INFURA_SECRET}",
"awsS3Bucket": "${AWS_BUCKET}",
"noRetainAuthority": false,
"noMutable": false
}
EOM
# edit cache file for reupload
# Simulate a partial/failed upload so the reupload path can be exercised:
# mark item 0 as not on-chain with a changed name, and delete the last
# item from the cache entirely; then verify the edit took effect.
function change_cache {
    cat $CACHE_FILE | jq -c ".items.\"0\".onChain=false|.items.\"0\".name=\"Changed #0\"|del(.items.\""$LAST_INDEX"\")" \
        >$CACHE_FILE.tmp && mv $CACHE_FILE.tmp $CACHE_FILE
    if [[ $(cat $CACHE_FILE | grep "Changed #0") ]]; then
        GRN "Success: cache file changed"
    else
        RED "Failure: cache file was not changed"
    fi
}
# run the verify upload command
# Run the CLI verify_upload command; abort the whole test run on failure.
function verify_upload {
    $CMD_CMV2 verify_upload --keypair $WALLET_KEY --env $ENV_URL -c $CACHE_NAME -r $RPC
    EXIT_CODE=$?
    if [ ! $EXIT_CODE -eq 0 ]; then
        MAG "<<<"
        RED "[$(date "+%T")] Aborting: verify upload failed"
        exit 1
    fi
}
# run the upload command
# Run the CLI upload command against ASSETS_DIR; abort the test on failure.
function upload {
    $CMD_CMV2 upload -cp ${CONFIG_FILE} --keypair $WALLET_KEY --env $ENV_URL -c $CACHE_NAME -r $RPC $ASSETS_DIR
    EXIT_CODE=$?
    if [ ! $EXIT_CODE -eq 0 ]; then
        MAG "<<<"
        RED "[$(date "+%T")] Aborting: upload failed"
        exit 1
    fi
}
#-----------------------------------------------------------------------------#
# COMMAND EXECUTION #
#-----------------------------------------------------------------------------#
if [ "${CHANGE}" = "Y" ] && [ "$(command -v jq)" = "" ]; then
RED "[$(date "+%T")] Required 'jq' command could not be found, skipping reupload"
CHANGE="n"
fi
echo "[$(date "+%T")] Environment: ${ENV_URL}"
echo "[$(date "+%T")] RPC URL: ${RPC}"
echo "[$(date "+%T")] Testing started using ${STORAGE} storage"
echo ""
CYN "1. Uploading assets and creating the candy machine"
echo ""
MAG ">>>"
upload
MAG "<<<"
echo ""
CYN "2. Verifying upload"
echo ""
MAG ">>>"
verify_upload
MAG "<<<"
echo ""
if [ "${CHANGE}" = "Y" ]; then
CYN "3. Editing cache and testing reupload"
echo ""
MAG ">>>"
change_cache
upload
verify_upload
MAG "<<<"
else
CYN "3. Editing cache and testing reupload (Skipped)"
fi
echo ""
CYN "4. Minting"
echo ""
echo "mint_one_token $(MAG ">>>")"
$CMD_CMV2 mint_one_token --keypair $WALLET_KEY --env $ENV_URL -c $CACHE_NAME -r $RPC
EXIT_CODE=$?
MAG "<<<"
if [ ! $EXIT_CODE -eq 0 ]; then
RED "[$(date "+%T")] Aborting: mint failed"
exit 1
fi
if [ "${MULTIPLE}" -gt 0 ]; then
echo ""
echo "mint_multiple_tokens $(MAG ">>>")"
$CMD_CMV2 mint_multiple_tokens --keypair $WALLET_KEY --env $ENV_URL -c $CACHE_NAME -r $RPC -n $MULTIPLE
EXIT_CODE=$?
MAG "<<<"
if [ ! $EXIT_CODE -eq 0 ]; then
RED "[$(date "+%T")] Aborting: mint multiple tokens failed"
exit 1
fi
fi
if [ "${CLOSE}" = "Y" ]; then
echo ""
CYN "5. Withdrawing CM funds and clean up"
echo ""
MAG ">>>"
$CMD_CMV2 withdraw_all -cp ${CONFIG_FILE} --keypair $WALLET_KEY --env $ENV_URL -c $CACHE_NAME -r $RPC
EXIT_CODE=$?
MAG "<<<"
if [ ! $EXIT_CODE -eq 0 ]; then
RED "[$(date "+%T")] Aborting: withdraw failed"
exit 1
fi
clean_up
fi
echo ""
echo "[$(date "+%T")] Test completed" |
package hackernews
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"time"
)
const (
baseURI = "https://hacker-news.firebaseio.com"
apiVersion = "v0"
defaultTimeout = 15 * time.Second
)
// New creates a new hackernews client using the given http client.
// If nil is passed, a plain http client with a 15 second timeout is used.
func New(c *http.Client) Hackernews {
	client := c
	if client == nil {
		client = &http.Client{Timeout: defaultTimeout}
	}
	return &hackernews{client: client}
}
// Hackernews is an interface for interacting with the hackernews public API
type Hackernews interface {
	// GetItem fetches a single item by its numeric id.
	GetItem(id int) (*Item, error)
	// GetUser fetches a user profile by username.
	GetUser(id string) (*User, error)
	// MaxItemID returns the largest item id currently assigned.
	MaxItemID() (int, error)
	// Each *Stories method returns the item ids of the corresponding feed.
	TopStories() ([]int, error)
	NewStories() ([]int, error)
	AskStories() ([]int, error)
	ShowStories() ([]int, error)
	JobStories() ([]int, error)
}
// hackernews is the concrete Hackernews implementation; client is the
// HTTP client injected via New (or the default 15s-timeout client).
type hackernews struct {
	client *http.Client
}
// GetItem fetches the item with the given id.
//
// Fixed to use the configured hn.client (the original called the
// package-level http.Get, bypassing the timeout set in New) and to close
// the response body, which otherwise leaks the connection.
func (hn *hackernews) GetItem(id int) (*Item, error) {
	var story Item
	url := fmt.Sprintf("%s/%s/item/%d.json", baseURI, apiVersion, id)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if err := json.NewDecoder(res.Body).Decode(&story); err != nil {
		return nil, err
	}
	return &story, nil
}
// GetUser will get user by id.
//
// Fixed to use the configured hn.client (the original called the
// package-level http.Get, bypassing the timeout set in New) and to close
// the response body, which otherwise leaks the connection.
func (hn *hackernews) GetUser(id string) (*User, error) {
	var user User
	url := fmt.Sprintf("%s/%s/user/%s.json", baseURI, apiVersion, id)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if err := json.NewDecoder(res.Body).Decode(&user); err != nil {
		return nil, err
	}
	return &user, nil
}
// MaxItemID returns the current largest item id.
//
// Fixes over the original:
//   - res.Body is now closed (connection leak otherwise);
//   - the non-OK error message used string(res.StatusCode), which in Go
//     converts the integer to a single rune (404 -> "Ƥ"), not "404";
//     strconv.Itoa produces the intended decimal text.
func (hn *hackernews) MaxItemID() (int, error) {
	res, err := hn.client.Get(fmt.Sprintf("%s/%s/maxitem.json", baseURI, apiVersion))
	if err != nil {
		return 0, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case http.StatusOK:
		resBytes, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return 0, err
		}
		maxItemCount, err := strconv.Atoi(string(resBytes))
		if err != nil {
			return 0, err
		}
		return maxItemCount, nil
	default:
		return 0, errors.New("client returned code " + strconv.Itoa(res.StatusCode) + ":" + res.Status)
	}
}
// TopStories returns the ids of the current top stories.
// Fixed to use the configured hn.client (not package-level http.Get)
// and to close the response body.
func (hn *hackernews) TopStories() ([]int, error) {
	url := fmt.Sprintf("%s/%v/topstories.json", baseURI, apiVersion)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var storyIDs []int
	if err := json.NewDecoder(res.Body).Decode(&storyIDs); err != nil {
		return nil, err
	}
	return storyIDs, nil
}
// NewStories returns the ids of the newest stories.
// Fixed to use the configured hn.client (not package-level http.Get)
// and to close the response body.
func (hn *hackernews) NewStories() ([]int, error) {
	url := fmt.Sprintf("%s/%v/newstories.json", baseURI, apiVersion)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var storyIDs []int
	if err := json.NewDecoder(res.Body).Decode(&storyIDs); err != nil {
		return nil, err
	}
	return storyIDs, nil
}
// AskStories returns the ids of the current Ask HN stories.
// Fixed to use the configured hn.client (not package-level http.Get)
// and to close the response body.
func (hn *hackernews) AskStories() ([]int, error) {
	url := fmt.Sprintf("%s/%v/askstories.json", baseURI, apiVersion)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var storyIDs []int
	if err := json.NewDecoder(res.Body).Decode(&storyIDs); err != nil {
		return nil, err
	}
	return storyIDs, nil
}
// ShowStories returns the ids of the current Show HN stories.
// Fixed to use the configured hn.client (not package-level http.Get)
// and to close the response body.
func (hn *hackernews) ShowStories() ([]int, error) {
	url := fmt.Sprintf("%s/%v/showstories.json", baseURI, apiVersion)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var storyIDs []int
	if err := json.NewDecoder(res.Body).Decode(&storyIDs); err != nil {
		return nil, err
	}
	return storyIDs, nil
}
// JobStories returns the ids of the current job postings.
// Fixed to use the configured hn.client (not package-level http.Get)
// and to close the response body.
func (hn *hackernews) JobStories() ([]int, error) {
	url := fmt.Sprintf("%s/%v/jobstories.json", baseURI, apiVersion)
	res, err := hn.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var storyIDs []int
	if err := json.NewDecoder(res.Body).Decode(&storyIDs); err != nil {
		return nil, err
	}
	return storyIDs, nil
}
|
<filename>src/types.cpp
#include <string>
#include <math.h>
#include <iostream>
using namespace std;
#include "../include/node.h"
#include "../include/variables.hpp"
#include "../include/types.h"
#include "../include/usefull.h"
#include "../include/conversion.h"
#include "../include/types_check.h"
// True when the token is a literal whose type can be read directly from
// its spelling (string, int, bool, float, none or list).
bool explicit_variable(string v)
{
    return is_string(v) || is_int(v) || is_bool(v) || is_float(v) || is_none(v) || is_list(v);
}
// Strip the surrounding quote characters from a string literal token.
// NOTE(review): assumes get_from_string(s, a, b) yields s[a, b) -- confirm
// against its definition in usefull.h.
string get_string_content(string word)
{
    return get_from_string(word, 1, int(word.size()) - 1);
}
// Map a literal token to the name of its runtime type; the order of the
// checks (int, float, bool, string, list) is preserved from the original.
// Anything that is not an explicit literal is reported as "none".
string get_type(string expr)
{
    if (!explicit_variable(expr))
        return "none";
    if (is_int(expr))
        return "int";
    if (is_float(expr))
        return "float";
    if (is_bool(expr))
        return "bool";
    if (is_string(expr))
        return "string";
    if (is_list(expr))
        return "list";
    return "none";
}
// Convert the value held by `start` into a freshly-allocated
// Mtmc_variable of type `result_type`.
// Supported conversions: int->string/float, float->string/int,
// bool->string, list->string; anything else reports an execution error.
// NOTE(review): every branch heap-allocates and nothing here frees
// `full_result` or the result buffers -- presumably Error() aborts and
// the caller owns successful results; confirm there is no leak.
Mtmc_variable *convert_to_spec(Mtmc_variable *start, string result_type)
{
    Mtmc_variable *full_result = new_mtmc_variable_from_string("none");
    string current_type = start->type;
    if (current_type == "int")
    {
        // Content sits behind a void*; reinterpret per the declared type.
        int64_t content = *(int64_t *)start->content;
        if (result_type == "string")
        {
            // NOTE(review): to_string is applied twice here (the outer call
            // receives a string) -- verify this is an intentional project
            // overload from conversion.h and not a typo.
            string *result = new string(to_string(to_string(content)));
            full_result->type = result_type;
            full_result->content = (void *)result;
            return full_result;
        }
        if (result_type == "float")
        {
            // cast the value content to a float
            float *result = new float(content);
            full_result->type = result_type;
            full_result->content = (void *)result;
            return full_result;
        }
        string err = "cannot convert type '" + current_type + "' to type '" + result_type + "'";
        Error("execution", err);
    }
    if (current_type == "float")
    {
        float content = *(float *)start->content;
        if (result_type == "string")
        {
            string *result = new string(to_string(content));
            full_result->type = result_type;
            full_result->content = (void *)result;
            return full_result;
        }
        if (result_type == "int")
        {
            // Truncating float -> int64 conversion.
            int64_t *result = new int64_t(content);
            full_result->type = result_type;
            full_result->content = (void *)result;
            return full_result;
        }
        string err = "cannot convert type '" + current_type + "' to type '" + result_type + "'";
        Error("execution", err);
    }
    if (current_type == "bool")
    {
        // Booleans are stored as their string spelling (cast target is string*).
        string content = *(string *)start->content;
        if (result_type == "string")
        {
            string *result = new string(to_string(content));
            full_result->type = result_type;
            full_result->content = (void *)result;
            return full_result;
        }
        string err = "cannot convert type '" + current_type + "' to type '" + result_type + "'";
        Error("execution", err);
    }
    if (current_type == "list")
    {
        Mtmc_listed *content = (Mtmc_listed *)start->content;
        if (result_type == "string")
        {
            string *result = new string(to_string(Mtmc_listed_string(content)));
            full_result->type = result_type;
            full_result->content = (void *)result;
            return full_result;
        }
        string err = "cannot convert type '" + current_type + "' to type '" + result_type + "'";
        Error("execution", err);
    }
    // We must raise an error here, otherwise 'none'-typed variables would
    // just end up wandering around everywhere.
    string err = "cannot convert type '" + current_type + "' to type '" + result_type + "'";
    Error("execution", err);
    return full_result;
};
<reponame>ibelem/wasm
#include<stdio.h>
/* Return the sum of two ints (no overflow protection). */
int add(int a, int b){
    int sum = a;
    sum += b;
    return sum;
}
/* Demo entry point: prints add(1, 2). */
int main()
{
    int result = add(1, 2);
    printf("%d", result);
    return 0;
}
#include <iostream>
// Define a sample class for testing the SmartPointer
class Sample {
public:
    // Prints a fixed marker line; used to confirm that pointer
    // dereferences reach a live object.
    void display() {
        std::cout << "Sample class display function" << std::endl;
    }
};
// Exercises SmartPointer<T>'s operator* and operator->.
// NOTE(review): SmartPointer is not declared or included in this file --
// confirm the header is pulled in elsewhere or this will not compile.
int main() {
    // Create a raw pointer to a Sample object
    Sample* rawPtr = new Sample();
    // Create a SmartPointer object using the raw pointer.
    // NOTE(review): assumes SmartPointer takes ownership and deletes
    // rawPtr on destruction -- confirm, otherwise this leaks.
    SmartPointer<Sample> sp(rawPtr);
    // Access the value of the pointed object using the dereference operator
    (*sp).display();
    // Access a member of the pointed object using the member access operator
    sp->display();
    return 0;
}
/// A typed handle to a single configuration entry; `KEY` is the entry's
/// lookup key in the configuration store.
pub trait ConfigSetting {
    /// The configuration key this setting is stored under.
    const KEY: &'static str;
}
/// Marker type for the temporary-directory setting (`"tmp.path"`).
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct TmpPathDefaultSetting;
impl ConfigSetting for TmpPathDefaultSetting {
    const KEY: &'static str = "tmp.path";
}
#!/bin/bash
# Build the sdist + wheel and upload them to PyPI with twine.
# -e aborts on the first failing command so a broken build is never
# uploaded; -u makes unset variables an error.
set -eu
python setup.py sdist bdist_wheel
twine upload dist/*
|
<reponame>schinmayee/nimbus
//#####################################################################
// Copyright 2002-2006, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
// Class HEXAHEDRALIZED_VOLUME
//#####################################################################
#include <PhysBAM_Tools/Arrays/INDIRECT_ARRAY.h>
#include <PhysBAM_Tools/Data_Structures/HASHTABLE.h>
#include <PhysBAM_Tools/Grids_Uniform/GRID.h>
#include <PhysBAM_Tools/Log/LOG.h>
#include <PhysBAM_Tools/Math_Tools/RANGE.h>
#include <PhysBAM_Geometry/Basic_Geometry/HEXAHEDRON.h>
#include <PhysBAM_Geometry/Spatial_Acceleration/BOX_HIERARCHY.h>
#include <PhysBAM_Geometry/Topology_Based_Geometry/HEXAHEDRALIZED_VOLUME.h>
#include <PhysBAM_Geometry/Topology_Based_Geometry/TETRAHEDRALIZED_VOLUME.h>
#include <PhysBAM_Geometry/Topology_Based_Geometry/TRIANGULATED_SURFACE.h>
#include <PhysBAM_Geometry/Topology_Based_Geometry_Computations/HEXAHEDRALIZED_VOLUME_REFRESH.h>
using namespace PhysBAM;
//#####################################################################
// Constructor
//#####################################################################
// Wraps an existing mesh + particle set; the derived structures
// (hexahedron list, tet volume, boundary surface, hierarchy) start out
// null and are built on demand by the Initialize_*/Update_* methods.
template<class T> HEXAHEDRALIZED_VOLUME<T>::
HEXAHEDRALIZED_VOLUME(HEXAHEDRON_MESH& mesh_input,GEOMETRY_PARTICLES<TV>& particles_input)
    :MESH_OBJECT<TV,HEXAHEDRON_MESH>(mesh_input,particles_input),hexahedron_list(0),tetrahedralized_volume(0),triangulated_surface(0),hierarchy(0)
{}
//#####################################################################
// Destructor
//#####################################################################
// Destructor: releases all derived structures via Clean_Memory.
template<class T> HEXAHEDRALIZED_VOLUME<T>::
~HEXAHEDRALIZED_VOLUME()
{
    Clean_Memory();
}
//#####################################################################
// Function Clean_Memory
//#####################################################################
// Deletes all lazily-built derived structures and resets their pointers
// to null, so the call is safe to repeat; also forwards to the base
// class's Clean_Memory.
template<class T> void HEXAHEDRALIZED_VOLUME<T>::
Clean_Memory()
{
    MESH_OBJECT<TV,HEXAHEDRON_MESH>::Clean_Memory();
    delete hexahedron_list;hexahedron_list=0;
    delete tetrahedralized_volume;tetrahedralized_volume=0;
    delete triangulated_surface;triangulated_surface=0;
    delete hierarchy;hierarchy=0;
}
//#####################################################################
// Function Update_Hexahedron_List
//#####################################################################
// Delegates to TOPOLOGY_BASED_GEOMETRY_COMPUTATIONS::Update_Hexahedron_List.
template<class T> void HEXAHEDRALIZED_VOLUME<T>::
Update_Hexahedron_List()
{
    TOPOLOGY_BASED_GEOMETRY_COMPUTATIONS::Update_Hexahedron_List(*this);
}
//#####################################################################
// Function Initialize_Tetrahedralized_Volume
//#####################################################################
// Delegates to TOPOLOGY_BASED_GEOMETRY_COMPUTATIONS::Initialize_Tetrahedralized_Volume.
template<class T> void HEXAHEDRALIZED_VOLUME<T>::
Initialize_Tetrahedralized_Volume()
{
    TOPOLOGY_BASED_GEOMETRY_COMPUTATIONS::Initialize_Tetrahedralized_Volume(*this);
}
//#####################################################################
// Function Initialize_Cube_Mesh_And_Particles
//#####################################################################
// Delegates to TOPOLOGY_BASED_GEOMETRY_COMPUTATIONS::Initialize_Cube_Mesh_And_Particles.
template<class T> void HEXAHEDRALIZED_VOLUME<T>::
Initialize_Cube_Mesh_And_Particles(const GRID<TV>& grid)
{
    TOPOLOGY_BASED_GEOMETRY_COMPUTATIONS::Initialize_Cube_Mesh_And_Particles(*this,grid);
}
//#####################################################################
// Function Total_Volume
//#####################################################################
// Sum of signed hexahedron volumes over all mesh elements (the loop is
// 1-based, matching mesh.elements indexing); inverted elements
// contribute negatively since Signed_Volume is not clamped.
template<class T> T HEXAHEDRALIZED_VOLUME<T>::
Total_Volume() const
{
    T volume=0;
    for(int h=1;h<=mesh.elements.m;h++){int p1,p2,p3,p4,p5,p6,p7,p8;mesh.elements(h).Get(p1,p2,p3,p4,p5,p6,p7,p8);
        volume+=HEXAHEDRON<T>::Signed_Volume(particles.X(p1),particles.X(p2),particles.X(p3),particles.X(p4),particles.X(p5),particles.X(p6),particles.X(p7),particles.X(p8));}
    return volume;
}
//#####################################################################
// Function Initialize_Triangulated_Surface
//#####################################################################
// Builds the boundary mesh and wraps it (with the shared particles) in a
// new TRIANGULATED_SURFACE stored in this->triangulated_surface.
// NOTE(review): any previously-assigned triangulated_surface pointer is
// overwritten without being deleted here -- confirm callers rely on
// Clean_Memory for that.
template<class T> void HEXAHEDRALIZED_VOLUME<T>::
Initialize_Triangulated_Surface()
{
    mesh.Initialize_Boundary_Mesh();
    triangulated_surface=new TRIANGULATED_SURFACE<T>(*mesh.boundary_mesh,particles);
}
//#####################################################################
template class HEXAHEDRALIZED_VOLUME<float>;
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
template class HEXAHEDRALIZED_VOLUME<double>;
#endif
|
package main
import (
"fmt"
)
// isPalindrome reports whether n reads the same forwards and backwards
// in base 10. Negative numbers are never palindromes; the original
// returned true for e.g. -121 because every entry of its digit slice was
// negated identically, so the mirror comparison still matched.
func isPalindrome(n int) bool {
	if n < 0 {
		return false
	}
	// Reverse the digits and compare with the original value.
	reversed, m := 0, n
	for m != 0 {
		reversed = reversed*10 + m%10
		m /= 10
	}
	return reversed == n
}
// biggestN returns the largest n-digit number, e.g. n=3 -> 999
// (and 0 for n=0, since 10^0 - 1 = 0).
func biggestN(n int) int {
	result := 1
	for i := n; i > 0; i-- {
		result *= 10
	}
	return result - 1
}
// smallestN returns the smallest n-digit number, e.g. n=3 -> 100;
// for n <= 1 it returns 0 so single-digit products start from zero.
func smallestN(n int) int {
	if n <= 1 {
		return 0
	}
	result := 1
	for i := 1; i < n; i++ {
		result *= 10
	}
	return result
}
func largestPalindrome(n int) int {
for i := biggestN(n); i >= smallestN(n); i-- {
for j := i; j >= smallestN(n); j-- {
product := i * j
if isPalindrome(product) {
return product % 1337
}
}
}
return 9
}
// Demo driver: prints a greeting plus largestPalindrome(3) and a
// palindrome check for 131.
func main() {
	fmt.Println("hello world!")
	fmt.Println(largestPalindrome(3))
	fmt.Println(isPalindrome(131))
}
|
#!/bin/bash
# SSR (ShadowsocksR) multi-user installer -- global configuration.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
sh_ver="1.0.26"
# Directory this script lives in, and that path with the script name stripped.
filepath=$(cd "$(dirname "$0")"; pwd)
file=$(echo -e "${filepath}"|awk -F "$0" '{print $1}')
# Install layout: everything lives under /usr/local/shadowsocksr.
ssr_folder="/usr/local/shadowsocksr"
config_file="${ssr_folder}/config.json"
config_user_file="${ssr_folder}/user-config.json"
config_user_api_file="${ssr_folder}/userapiconfig.py"
config_user_mudb_file="${ssr_folder}/mudb.json"
ssr_log_file="${ssr_folder}/ssserver.log"
Libsodiumr_file="/usr/local/lib/libsodium.so"
Libsodiumr_ver_backup="1.0.17"
jq_file="${ssr_folder}/jq"
# Distro detection from os-release; drives the yum-vs-apt branches below.
source /etc/os-release
OS=$ID
ver=$VERSION_ID
# ANSI colour prefixes and the message tags built from them.
Green_font_prefix="\033[32m" && Red_font_prefix="\033[31m" && Green_background_prefix="\033[42;37m" && Red_background_prefix="\033[41;37m" && Font_color_suffix="\033[0m"
Info="${Green_font_prefix}[information]${Font_color_suffix}"
Error="${Red_font_prefix}[error]${Font_color_suffix}"
Tip="${Green_font_prefix}[note]${Font_color_suffix}"
Separator_1="——————————————————————————————"
# Store the PID(s) of the running SSR server in $PID by matching
# "server.py" in the process list (grep -v grep excludes the pipeline's
# own grep process).
check_pid(){
    PID=`ps -ef |grep -v grep | grep server.py |awk '{print $2}'`
}
# Open the SSR port range 1443-1543 for new TCP and UDP connections.
Add_iptables(){
    iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport 1443:1543 -j ACCEPT
    iptables -I INPUT -m state --state NEW -m udp -p udp --dport 1443:1543 -j ACCEPT
}
# Persist the current firewall rules using the distro's mechanism.
Save_iptables(){
    if [[ ${OS} == "centos" ]]; then
        service iptables save
        service ip6tables save
    else
        iptables-save > /etc/iptables.up.rules
    fi
}
# Persist rules AND arrange for them to be restored at boot.
Set_iptables(){
    if [[ ${OS} == "centos" ]]; then
        service iptables save
        service ip6tables save
        chkconfig --level 2345 iptables on
        chkconfig --level 2345 ip6tables on
    else
        iptables-save > /etc/iptables.up.rules
        # NOTE(review): the boot hook also restores /etc/ip6tables.up.rules,
        # but nothing in this script ever writes that file -- confirm.
        echo -e '#!/bin/bash\n/sbin/iptables-restore < /etc/iptables.up.rules\n/sbin/ip6tables-restore < /etc/ip6tables.up.rules' > /etc/network/if-pre-up.d/iptables
        chmod +x /etc/network/if-pre-up.d/iptables
    fi
}
# Look up this server's public IPv4 address via icanhazip.
Set_user_api_server_pub_addr(){
    ip=$(wget -qO- ipv4.icanhazip.com);
    ssr_server_pub_addr="${ip}"
}
# Rewrite SERVER_PUB_ADDR in the API config file; relies on the caller
# having set $server_pub_addr to the value currently present in the file.
Modify_user_api_server_pub_addr(){
    sed -i "s/SERVER_PUB_ADDR = '${server_pub_addr}'/SERVER_PUB_ADDR = '${ssr_server_pub_addr}'/" ${config_user_api_file}
}
# Ensure a Python interpreter is installed. CentOS 8 ships without
# /usr/bin/python, so python2 is installed and registered there via
# `alternatives`; other distros get the default python package.
Check_python(){
    if [[ ${OS} == "centos" ]]; then
        if [[ $ver == '7' ]]; then
            yum -y install python
        elif [[ $ver == '8' ]]; then
            yum install -y python2
            alternatives --set python /usr/bin/python2
        fi
    else
        apt-get install -y python
    fi
}
# CentOS dependency install; CentOS 7 additionally needs net-tools.
Centos_yum(){
    yum update
    cat /etc/redhat-release |grep 7\..*|grep -i centos>/dev/null
    if [[ $? = 0 ]]; then
        yum install -y vim unzip crond net-tools git
    else
        yum install -y vim unzip crond git
    fi
}
# Debian/Ubuntu dependency install.
Debian_apt(){
    apt-get update
    apt-get install -y vim unzip cron git net-tools
}
# Clone the SSR server (akkariiin/master branch) into /usr/local and seed
# the user-editable config files from the shipped templates.
Download_SSR(){
    cd "/usr/local"
    git clone -b akkariiin/master https://github.com/shadowsocksrr/shadowsocksr.git
    cd "shadowsocksr"
    cp "${ssr_folder}/config.json" "${config_user_file}"
    cp "${ssr_folder}/mysql.json" "${ssr_folder}/usermysql.json"
    cp "${ssr_folder}/apiconfig.py" "${config_user_api_file}"
    # Switch the API backend to the local mudb json store.
    sed -i "s/API_INTERFACE = 'sspanelv2'/API_INTERFACE = 'mudbjson'/" ${config_user_api_file}
    # Replace the template's 127.0.0.1 placeholder with the public IP
    # resolved earlier by Set_user_api_server_pub_addr.
    server_pub_addr="127.0.0.1"
    Modify_user_api_server_pub_addr
    sed -i 's/ \/\/ only works under multi-user mode//g' "${config_user_file}"
}
# Install the distro-appropriate init script and enable start-on-boot.
# NOTE(review): fetched over HTTPS from a personal GitHub repo with no
# checksum verification -- confirm the source is trusted.
Service_SSR(){
    if [[ ${OS} = "centos" ]]; then
        wget --no-check-certificate https://raw.githubusercontent.com/hybtoy/ssrrmu/master/ssrmu_centos -O /etc/init.d/ssrmu
        chmod +x /etc/init.d/ssrmu
        chkconfig --add ssrmu
        chkconfig ssrmu on
    else
        wget --no-check-certificate https://raw.githubusercontent.com/hybtoy/ssrrmu/master/ssrmu_debian -O /etc/init.d/ssrmu
        chmod +x /etc/init.d/ssrmu
        update-rc.d -f ssrmu defaults
    fi
}
# Download a static jq 1.5 binary into the SSR folder (used to edit the
# mudb json user database).
JQ_install(){
    cd "${ssr_folder}"
    wget --no-check-certificate "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" -O ${jq_file}
    chmod +x ${jq_file}
}
# Install base packages and (re)start the cron daemon, per distro.
Installation_dependency(){
    if [[ ${OS} == "centos" ]]; then
        Centos_yum
        service crond restart
    else
        Debian_apt
        /etc/init.d/cron restart
    fi
}
# Start the SSR service.
# NOTE(review): check_pid's result ($PID) is never consulted here, and the
# init script installed by Service_SSR is silently overwritten with one
# from a *different* GitHub repo -- confirm both are intentional.
Start_SSR(){
    check_pid
    wget -O /etc/init.d/ssrmu "https://raw.githubusercontent.com/Dork96/Final/main/ssrmu"
    /etc/init.d/ssrmu start
}
# Full installation pipeline, in dependency order: resolve the public IP,
# ensure python, install packages, fetch SSR, install the service, fetch
# jq, configure and persist the firewall, then start the server.
Install_SSR(){
    Set_user_api_server_pub_addr
    Check_python
    Installation_dependency
    Download_SSR
    Service_SSR
    JQ_install
    Set_iptables
    Add_iptables
    Save_iptables
    Start_SSR
}
# Kick off the full install, then fetch the management helper commands.
Install_SSR
# NOTE(review): helpers come from a personal GitHub repo with no
# checksum/signature verification -- confirm the source is trusted.
wget -O /usr/bin/ssr https://raw.githubusercontent.com/Dork96/Final/main/ssrmu.sh && chmod +x /usr/bin/ssr
wget -O /usr/bin/add-ssr https://raw.githubusercontent.com/Dork96/Final/main/add-ssr.sh && chmod +x /usr/bin/add-ssr
wget -O /usr/bin/del-ssr https://raw.githubusercontent.com/Dork96/Final/main/del-ssr.sh && chmod +x /usr/bin/del-ssr
wget -O /usr/bin/renew-ssr https://raw.githubusercontent.com/Dork96/Final/main/renew-ssr.sh && chmod +x /usr/bin/renew-ssr
# Create the (empty) account list and remove the bootstrap script.
touch /usr/local/shadowsocksr/akun.conf
rm -f /root/ssr.sh
echo -e "Instaled SSR Succes..."  # (sic) runtime string intentionally left unchanged
|
const graphql = require(`graphql`);
const { User, Topic, UserGroup, UserEvent, UserTopic } = require(`../db/models/index`);
const {
GraphQLObjectType, GraphQLSchema, GraphQLString, GraphQLID, GraphQLInt, GraphQLList
} = graphql;
const { UserType, UserTopicType, TopicType, GroupType, EventType } = require(`./types.js`)
const Mutation = require('./mutations')
// Root query type: read-only lookups for users, topics, groups, userTopics
// and events, backed by the Sequelize models imported above.
const RootQuery = new GraphQLObjectType({
  name: `RootQueryType`,
  fields: {
    user: { // this names the query for frontend usage
      type: UserType,
      args: { id: { type: GraphQLID } },
      resolve(root, args) {
        return User.findById(args.id)
      }
    },
    topic: { // this names the query for frontend usage
      type: TopicType,
      args: { id: { type: GraphQLID } }, // what you'll use to look up individual topics
      resolve(root, args) {
        return Topic.findById(args.id);
      },
    },
    group: {
      type: GroupType,
      args: { id: { type: GraphQLID } },
      resolve(root, args) {
        return UserGroup.findById(args.id);
      },
    },
    userTopic: {
      type: UserTopicType,
      args: { id: { type: GraphQLID } },
      resolve(root, args) {
        // Bug fix: only `id` is declared in args, so the original
        // `args.userId` was always undefined and the lookup could never
        // succeed. If a lookup by userId was intended instead, the args
        // declaration should be changed to `userId` — confirm with frontend.
        return UserTopic.findById(args.id);
      },
    },
    event: {
      type: EventType,
      args: { id: { type: GraphQLID } },
      resolve(root, args) {
        return UserEvent.findById(args.id);
      },
    },
    events: {
      // All events for a user, sorted by date ascending.
      type: new GraphQLList(EventType),
      args: { userId: { type: GraphQLID } },
      async resolve (root, args) {
        const events = await UserEvent.findAll({where: {userId: args.userId}});
        const sortedEvents = events.sort((a,b) => new Date(a.date) - new Date(b.date))
        return sortedEvents
      },
    },
  },
});
// Assemble the executable schema from the root query and the mutations
// defined in ./mutations.
module.exports = new GraphQLSchema({
  query: RootQuery,
  mutation: Mutation
});
|
<script>
// Set the dimensions of the canvas / graph
var margin = {top: 30, right: 20, bottom: 30, left: 50},
width = 600 - margin.left - margin.right,
height = 270 - margin.top - margin.bottom;
// Parse the data
var data = d3.csv.parse(`
Category,Value
X,200
Y,100
Z,50
`);
// Set the ranges
var x = d3.scale.ordinal().rangeRoundBands([0, width], 0.05);
var y = d3.scale.linear().range([height, 0]);
// Define the axis
var xAxis = d3.svg.axis()
.scale(x)
.orient('bottom')
var yAxis = d3.svg.axis()
.scale(y)
.orient('left')
.ticks(10);
// Add the SVG canvas
var svg = d3.select('body').append('svg')
.attr('width', width + margin.left + margin.right)
.attr('height', height + margin.top + margin.bottom)
.append('g')
.attr('transform',
'translate(' + margin.left + ',' + margin.top + ')');
// Scale the range of the data
x.domain(data.map(function(d) {return d.Category; }));
y.domain([0, d3.max(data, function(d) { return d.Value; })]);
// Add the bars
svg.selectAll('bar')
.data(data)
.enter().append('rect')
.attr('class', 'bar')
.attr('x', function(d) { return x(d.Category); })
.attr('width', x.rangeBand())
.attr('y', function(d) { return y(d.Value); })
.attr('height', function(d) { return height - y(d.Value); });
// Add the x-axis
svg.append('g')
.attr('class', 'x axis')
.attr('transform', 'translate(0,' + height + ')')
.call(xAxis);
// Add the y-axis
svg.append('g')
.attr('class', 'y axis')
.call(yAxis);
</script> |
package serenitylabs.tutorials.trains.search;
import java.time.LocalDate;
// Relative departure day for train searches (offset in days from today).
// NOTE(review): enum constants are JVM-wide singletons, yet plus() mutates
// daysFromToday in place — calling today.plus(1) permanently changes what
// `today` means everywhere in the process. Confirm this mutation is
// intended; a safer API would return a new value type.
public enum DepartureDay {
    today(0), tomorrow(1);
    // Offset in days from the current date; mutable via plus().
    private int daysFromToday;
    DepartureDay(int daysFromToday) {
        this.daysFromToday = daysFromToday;
    }
    // Current offset from today, in days.
    public int daysFromToday() {
        return daysFromToday;
    }
    // Adds daysAfter to this constant's offset and returns the same,
    // now-mutated constant (see the class-level note about shared state).
    public DepartureDay plus(int daysAfter) {
        this.daysFromToday = this.daysFromToday + daysAfter;
        return this;
    }
    // Day-of-month (as a string) of the date daysFromToday days from now.
    public String getDepartureDay() {
        return String.valueOf(
                LocalDate.now().plusDays(daysFromToday).getDayOfMonth()
        );
    }
}
package com.bjdvt.platform.mapper;
import com.bjdvt.platform.model.PageGroup;
import com.bjdvt.platform.model.PageGroupExample;
import java.util.List;
import org.apache.ibatis.annotations.Param;
/**
 * MyBatis mapper for the page_group table. Standard generator-style CRUD:
 * "ByExample" methods filter on PageGroupExample criteria, "Selective"
 * methods update/insert only non-null fields, and the primary key is a
 * String id.
 */
public interface PageGroupMapper {
    int countByExample(PageGroupExample example);
    int deleteByExample(PageGroupExample example);
    int deleteByPrimaryKey(String id);
    int insert(PageGroup record);
    int insertSelective(PageGroup record);
    List<PageGroup> selectByExample(PageGroupExample example);
    PageGroup selectByPrimaryKey(String id);
    int updateByExampleSelective(@Param("record") PageGroup record, @Param("example") PageGroupExample example);
    int updateByExample(@Param("record") PageGroup record, @Param("example") PageGroupExample example);
    int updateByPrimaryKeySelective(PageGroup record);
    int updateByPrimaryKey(PageGroup record);
}
package collections
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestValidateNameSearchInput exercises ValidateNameSearchInput boundaries:
// empty and short names are accepted, while an over-long input yields
// ErrNameSearchTooLong (the rejected fixture below is 70 characters).
func TestValidateNameSearchInput(t *testing.T) {
	Convey("ValidateNameSearchInput returns nil error for valid values", t, func() {
		So(ValidateNameSearchInput(""), ShouldBeNil)
		So(ValidateNameSearchInput("collection123"), ShouldBeNil)
	})
	Convey("ValidateNameSearchInput returns an error for a value over 64 characters", t, func() {
		tooLongInput := "1234567890123456789012345678901234567890123456789012345678901234567890"
		So(ValidateNameSearchInput(tooLongInput), ShouldEqual, ErrNameSearchTooLong)
	})
}
|
from collections import deque
# Create weight matrix
W = [[0, 3, 2, 5, 0],
[3, 0, 2, 1, 2],
[2, 2, 0, 5, 2],
[5, 1, 5, 0, 4],
[0, 2, 2, 4, 0],
]
# start point
start = (2, 3)
# end point
destination = (5, 1)
# Number of Rows and Columns in W
rows = len(W)
cols = len(W[0])
# Create reached matrix to keep track of visited points
reached = [[False for j in range(cols)] for i in range(rows)]
# Parent list to store the cell position from where
# we reached the current cell
parents = [[None for j in range(cols)] for i in range(rows)]
# Directions of movement of Machine/Bot/junk
# here L -> Left, R -> Right, U -> Up,
# and D -> Down
L = 0
R = 1
U = 2
D = 3
# Reachable Neighborhood for each current point
# Here N and S represent North and South
# and E and W represent East and West Direction
Neighbors = [[-1, 0], [1, 0], [0, 1], [0, -1]]
# Function to return the minimum cost path
# from source to destination
# Function to return the minimum cost path
# from source to destination
def shortest_path():
    """BFS from the module-level `start` to `destination` over grid W.

    Mutates the module-level `reached` and `parents` matrices. Returns the
    list of (row, col) cells from start to destination inclusive, or []
    when the destination was never reached.
    NOTE(review): plain BFS ignores edge weights, so this yields a
    fewest-steps path, not a minimum-cost one as the comment above claims.
    """
    # Create a q to perform BFS
    q = deque()
    found = False
    # Setting the source Node
    x, y = start
    q.append((x, y))
    reached[x][y] = True
    while len(q) > 0:
        x, y = q.popleft()
        if (x == destination[0]
            and y == destination[1]):
            found = True
            break
        # Let us check all neighbors of present cell
        for i in range(len(Neighbors)):
            x1 = x + Neighbors[i][0]
            y1 = y + Neighbors[i][1]
            if x1 >= 0 and x1 < rows and y1 >=0 and y1 < cols:
                # NOTE(review): only unvisited cells with non-positive
                # weight are enqueued — confirm `<= 0` was intended, since
                # every off-diagonal weight in W is positive.
                if reached[x1][y1] == False and W[x1][y1] <= 0:
                    q.append((x1, y1))
                    reached[x1][y1] = True
                    parents[x1][y1] = (x, y)
    path = []
    if found:
        # Walk the parents chain back from destination to start, then
        # reverse so the path reads start -> destination.
        x, y = destination[0], destination[1]
        path.append(destination)
        while not (x == start[0] and y == start[1]):
            x, y = parents[x][y]
            path.append((x, y))
        path = path[::-1]
    return path
# Driver code
# Bug fix: the original bound the result to the same name as the function
# (`shortest_path = shortest_path()`), destroying the function after one
# call; use a distinct name so it stays callable.
result_path = shortest_path()
print(result_path)
# NOTE(review): with the module-level destination (5, 1) out of bounds for
# the 5x5 grid, this prints [] — the previously claimed output
# [(2, 3), (3, 3), (4, 2), (5, 1)] is not what the code produces.
(function () {
  'use strict';
  // Register the 'applicants' module with the application-wide module
  // registry (ApplicationConfiguration is a global defined elsewhere).
  ApplicationConfiguration.registerModule('applicants'); // jshint ignore:line
})();
|
#!/bin/sh
# Container entrypoint: create a user matching the bind-mounted source
# directory's uid/gid, then run make as that user so build artifacts on the
# host keep sane ownership.
set -eu
SRC=$GOPATH/src/github.com/weaveworks/go-checkpoint
# Mount the checkpoint repo:
# -v $(pwd):/go/src/github.com/weaveworks/checkpoint
# If we run make directly, any files created on the bind mount
# will have awkward ownership. So we switch to a user with the
# same user and group IDs as source directory. We have to set a
# few things up so that sudo works without complaining later on.
uid=$(stat --format="%u" $SRC)
gid=$(stat --format="%g" $SRC)
echo "weave:x:$uid:$gid::$SRC:/bin/sh" >>/etc/passwd
echo "weave:*:::::::" >>/etc/shadow
echo "weave ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
# Let the unprivileged build user create package directories under GOPATH.
chmod o+rw $GOPATH/src
chmod o+rw $GOPATH/src/github.com
su weave -c "PATH=$PATH make -C $SRC BUILD_IN_CONTAINER=false $*"
|
A suitable algorithm for searching for a pattern in a large text is the Boyer–Moore string search algorithm. It pre-processes the pattern to build skip tables, then scans the text efficiently: whenever a mismatch occurs, the tables tell it how many positions of the text can safely be skipped, so many alignments are eliminated without ever being examined — often giving sublinear average-case running time.
import React, { Component } from 'react'
import {InstantSearch,SearchBox,Hits} from 'react-instantsearch-dom';
import algoliasearch from 'algoliasearch';
// Algolia client (app ID, API key).
// NOTE(review): this key is committed in source — acceptable only if it is
// a search-only (public) key; confirm it is not an admin key.
const searchClient = algoliasearch(
    'RWLA5PBM7X',
    'fdd07da28a21136346512bb234e09fd9'
);
const hit = (props) => {
const {hit} = props;
console.log(props);
return <h1>{hit.name}</h1>;
}
// Minimal Algolia InstantSearch UI over the "packages" index: a search box
// plus a list of hits rendered with the `hit` component above.
class Algolia extends Component {
    render() {
        return (
            <InstantSearch
                indexName="packages"
                searchClient={searchClient}
            >
                <header>
                    <SearchBox translations={{placeholder:'Search For Products'}} />
                </header>
                <Hits hitComponent={hit}/>
            </InstantSearch>
        )
    }
}
export default Algolia;
<filename>ods-main/src/main/java/cn/stylefeng/guns/onlineaccess/modular/entity/ProjectUser.java
package cn.stylefeng.guns.onlineaccess.modular.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Generated;
/**
 * Entity mapped to the project_user table: associates a user with a
 * project, with a type and a status flag.
 */
@TableName("project_user")
public class ProjectUser {
    /**
     * Primary key id
     */
    @TableId(type = IdType.ASSIGN_ID)
    @Generated
    private Long id;
    /**
     * User id
     */
    private Long userId;
    /**
     * Project id
     */
    private Long projectId;
    /**
     * Type
     */
    private int type;
    /**
     * Status
     */
    private int status;
    public Long getId() {
        return id;
    }
    public void setId(Long id) {
        this.id = id;
    }
    public Long getUserId() {
        return userId;
    }
    public void setUserId(Long userId) {
        this.userId = userId;
    }
    public Long getProjectId() {
        return projectId;
    }
    public void setProjectId(Long projectId) {
        this.projectId = projectId;
    }
    public int getType() {
        return type;
    }
    public void setType(int type) {
        this.type = type;
    }
    public int getStatus() {
        return status;
    }
    public void setStatus(int status) {
        this.status = status;
    }
}
|
#!/bin/bash
# $1 = test_name
# $2 = filename pattern for tested source files
# Build and run one unit test with coverage instrumentation, then fold its
# lcov trace into test-coverage/coverage_all.
#   $1     - test name (bjam target)
#   $2..$4 - lcov --extract pattern(s) selecting the source files this test
#            is meant to cover
# The expensive build+run step is skipped when a cached coverage_$1_full
# trace already exists.
function run_test {
	set -e
	if [[ ! -f test-coverage/coverage_$1_full ]]; then
		cd test
		B2_ARGS='sanitize=off asserts=off invariant-checks=off link=static deprecated-functions=off debug-iterators=off test-coverage=on picker-debugging=off'
		bjam $B2_ARGS $1 testing.execute=off
		EXE_PATH=$(ls -d bin/$1.test/*/debug/debug-iterators-off/deprecated-functions-off/export-extra-on/link-static/test-coverage-on/threading-multi)
		# force running the test
		rm -f $EXE_PATH/$1.output
		rm -f $EXE_PATH/$1.run
		rm -f $EXE_PATH/$1.test
		cd ..
		# expand the pattern to find the path to the object files
		OBJECT_PATH=$(ls -d bin/*/debug/debug-iterators-off/deprecated-functions-off/export-extra-on/link-static/test-coverage-on/threading-multi)
		# clear counters from last run
		rm -f $OBJECT_PATH/src/*.gcda
		# Bug fix: this previously said "e25519", which matched nothing, so
		# stale ed25519 counters were never cleared between runs (the lcov
		# call below and the test_ed25519 invocation both use "ed25519").
		rm -f $OBJECT_PATH/ed25519/src/*.gcda
		rm -f test/$EXE_PATH/*.gcda
		cd test
		# now run the test
		bjam $B2_ARGS $1 -l250
		cd ..
		lcov --base-directory test -d test/$EXE_PATH -d $OBJECT_PATH/src -d $OBJECT_PATH/ed25519/src -c -o test-coverage/coverage_$1_full --exclude "/usr/*" --exclude "/Applications/Xcode.app/*" --exclude "*/boost/*"
	fi
	# Extract this test's target files and merge into the aggregate trace.
	lcov --extract test-coverage/coverage_$1_full "$2" -o test-coverage/coverage_$1
	if [ ! -f test-coverage/coverage_all ]; then
		cp test-coverage/coverage_$1 test-coverage/coverage_all
	else
		lcov --add-tracefile test-coverage/coverage_$1 --add-tracefile test-coverage/coverage_all -o test-coverage/coverage_all
	fi
	if [[ $# > 2 ]]; then
		lcov --extract test-coverage/coverage_$1_full "$3" -o test-coverage/coverage_$1
		lcov --add-tracefile test-coverage/coverage_$1 --add-tracefile test-coverage/coverage_all -o test-coverage/coverage_all
	fi
	if [[ $# > 3 ]]; then
		lcov --extract test-coverage/coverage_$1_full "$4" -o test-coverage/coverage_$1
		lcov --add-tracefile test-coverage/coverage_$1 --add-tracefile test-coverage/coverage_all -o test-coverage/coverage_all
	fi
	set +e
}
# Build the aggregate coverage report: run every unit test, extracting the
# coverage of the source files each test targets, then render HTML.
mkdir -p test-coverage
rm -f test-coverage/coverage_all
run_test test_create_torrent "*/create_torrent.*"
run_test test_bandwidth_limiter "*/bandwidth_*.*"
run_test test_alloca "*/alloca.hpp"
run_test test_generate_peer_id "*/generate_peer_id.*"
run_test test_file_progress "*/file_progress.*"
run_test test_stack_allocator "*/stack_allocator.*"
run_test test_linked_list "*/linked_list.*"
run_test test_enum_net "*/enum_net.*"
run_test test_stat_cache "*/stat_cache.*"
run_test test_dos_blocker "*/dos_blocker.*"
run_test test_fence "*/disk_job_fence.*"
run_test test_settings_pack "*/settings_pack.*"
run_test test_timestamp_history "*/timestamp_history.*"
run_test test_merkle "*/merkle.*"
run_test test_resolve_links "*/resolve_links.*"
run_test test_heterogeneous_queue "*/heterogeneous_queue.*"
run_test test_socket_io "*/socket_io.*"
run_test test_peer_priority "*/torrent_peer.*"
run_test test_tailqueue "*/tailqueue.*"
run_test test_bencoding "*/entry.*" "*/bencode.*" "*/bdecode.*"
run_test test_bdecode "*/bdecode.*"
run_test test_io "*/io.hpp"
run_test test_block_cache "*/block_cache.*"
run_test test_peer_classes "*/peer_class*.*"
run_test test_bloom_filter "*/bloom_filter.*"
run_test test_sha1_hash "*/sha1_hash.*"
run_test test_identify_client "*/identify_client.*"
run_test test_packet_buffer "*/packet_buffer.*"
run_test test_ip_voter "*/ip_voter.*"
run_test test_bitfield "*/bitfield.*"
run_test test_alert_manager "*/alert_manager.*"
run_test test_alert_types "*/alert_types.*"
run_test test_dht "*/kademlia/*"
run_test test_piece_picker "*/piece_picker.*"
run_test test_torrent_info "*/torrent_info.*"
run_test test_part_file "*/part_file.*"
run_test test_http_parser "*/http_parser.*"
run_test test_ip_filter "*/ip_filter.*"
run_test test_utp "*/utp_stream.*"
run_test test_peer_list "*/peer_list.*"
run_test test_gzip "*/gzip.cpp"
run_test test_file_storage "*/file_storage.*"
run_test test_storage "*/storage.*"
run_test test_xml "*/xml_parse.*"
run_test test_sliding_average "*/sliding_average.*"
run_test test_string "*/escape_string.*" "*/string_util.*"
run_test test_utf8 "*/ConvertUTF.*"
run_test test_hasher "*/hasher.*"
run_test test_hasher512 "*/hasher512.*"
run_test test_span "*/span.hpp"
run_test test_crc32 "*/crc32c.*"
run_test test_ffs "*/ffs.cpp"
run_test test_ed25519 "*/ed25519/src/*"
run_test test_receive_buffer "*/receive_buffer.*"
run_test test_magnet "*/magnet_uri.*"
run_test test_session "*/session_impl.*" "*/session.*"
run_test test_remove_torrent "*/session_impl.*"
run_test test_read_piece "*/torrent.*"
run_test test_session_params "*/session.*"
run_test test_buffer "*/buffer.*"
run_test test_file "*/file.*"
run_test test_read_resume "*/read_resume_data.*" "*/write_resume_data.*"
run_test test_resume "*/torrent.*"
# NOTE(review): test_checking is invoked again below with an extra pattern;
# this first invocation appears redundant — confirm before removing.
run_test test_checking "*/torrent.*"
run_test test_pe_crypto "*/pe_crypto.*"
run_test test_remap_files "*/file_storage.*" "*/torrent.*"
run_test test_time_critical "*/torrent.*" "*/peer_connection.*" "*/bt_peer_connection.*"
run_test test_pex "*/ut_pex.*"
run_test test_checking "*/torrent.*" "*/disk_io_thread.*"
run_test test_url_seed "*/web_peer_connection.*"
run_test test_web_seed "*/web_peer_connection.*"
run_test test_web_seed_redirect "*/web_peer_connection.*"
run_test test_web_seed_socks4 "*/web_peer_connection.*"
run_test test_web_seed_socks5 "*/web_peer_connection.*"
run_test test_web_seed_socks5_pw "*/web_peer_connection.*"
run_test test_web_seed_http "*/web_peer_connection.*"
run_test test_web_seed_http_pw "*/web_peer_connection.*"
run_test test_web_seed_chunked "*/web_peer_connection.*"
run_test test_web_seed_ban "*/web_peer_connection.*"
run_test test_torrent "*/torrent.*"
run_test test_auto_unchoke "*/session_impl.*"
# Render the merged trace as an HTML report.
genhtml -o test-coverage/ -t libtorrent-unit-tests --num-spaces=4 test-coverage/coverage_all
|
-- Return the five highest-rated books (title and rating, best first).
SELECT Title, Rating
FROM Books
ORDER BY Rating DESC
LIMIT 5;
#!/usr/bin/env bash
# Launch the dev server with IBM Cloud Postgres connection settings.
# NOTE(review): database credentials and the CA cert are committed in
# source — rotate the password and move these to an untracked .env file.
export PGCERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIVENDQWdXZ0F3SUJBZ0lVTDVnNnQxSG1OT3Q4T09xVVd5dVFaZVdzclVBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RTVUpOSUVOc2IzVmtJRVJoZEdGaVlYTmxjekFlRncweE9ERXhNakV4TVRRMwpNamRhRncweU9ERXhNVGd4TVRRM01qZGFNQjR4SERBYUJnTlZCQU1NRTBsQ1RTQkRiRzkxWkNCRVlYUmhZbUZ6ClpYTXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDM2h4Zm9zWWVYZG1yUVJIbFUKdk1nOTFJV1NDdUdaMTZSYkhiWjJwNjJTRVVmQkplbXI0TWtUcjQ4WVpUZmlUQ1pqY2tXV0dsQmx2VVlyMGNZWQpLTTNPUHRraGszbG1rQVFsSmNNQ1BmaUYyK21CeEFVcXJJaWJDZ21RaWp5bFJKVzRzRUZRZ1Niclhld0R5d0VkCjhKVWdtKzgyRHdMWXk2dm5rS0pqemNVWms1T0tTMVV2N3cwcEhVdFVNdE5MYVN5S2tzRGRFOWQ5c2o2bFdURkoKVzlDeVhDcDVpZmZKOTdvZXdJaDQ3bklGWVU1RDVNcHRDRFlyNndpZk9XdmhpZHVZRDZXTVI3d0RnTE12L3JhOQpadDRudDdNWWdsUm1Lbk83N0RaSTVZOWxUNGd3NEZpVTBrNjVhV25YcmsrMURaNGpxRmRXazNWcG9YaEEwSXVYClpNYlBBZ01CQUFHalV6QlJNQjBHQTFVZERnUVdCQlFZV1RYNGRzVUwzL0xuUUFYUUxMemg0NnlEQVRBZkJnTlYKSFNNRUdEQVdnQlFZV1RYNGRzVUwzL0xuUUFYUUxMemg0NnlEQVRBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUEwRwpDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVB3WXJPSVFNRXJlTnVIWS82OGxGSmltQjZHQ2luR3NKdzhZSUJCUXdBCnRkSk1PWFU4YlhHckVIRXVaNlFhaG83UTVkVjJiQjZHVktUTXFoU0VkcnZ1eXhLbmlROTBqZGtRU2syVkhoRGUKSCs2aTA0aEE5VGtLVDZvb0x3TVBjMUxZWXpxRGxqRWtmS2xMSVBXQ2tPQW96RDNjeWMyNnBWLzM1bkc3V3pBRgp4dzdTM2pBeUIzV2NKRGxXbFNXR1RuNTh3M0VIeHpWWHZLVDZZOWVBZEtwNFNqVUh5VkZzTDV4dFN5akg4enBGCnBaS0s4d1dOVXdnV1E2Nk1OaDhDa3E3MzJKWitzbzZSQWZiNEJiTmo0NUkzczlmdVpTWWx2amtjNS8rZGEzQ2sKUnA2YW5YNU42eUlyemhWbUFnZWZqUWRCenRZemRmUGhzSkJrUy9URG5SbWsKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
export PGURI=postgres://ibm_cloud_75ad7565_16da_48a1_a0c2_ea2a7fcc0123:c3df27bdcfc0fd8dc8f805e4212d517613a6d2be7340c453f69bdf8dc3714152@cfe2da9e-5483-49e7-8827-a3d2101ef8be.brjdfmfw09op3teml03g.databases.appdomain.cloud:30402/ibmclouddb?sslmode=verify-full
# NOTE(review): disabling Node TLS certificate validation defeats the
# sslmode=verify-full above and exposes the connection to MITM — confirm
# why this is needed and prefer passing PGCERT to the client instead.
export NODE_TLS_REJECT_UNAUTHORIZED='0'
nodemon server/server.js --inspect=0.0.0.0:9229
require 'hyrax/preservation/service_environment'
namespace :services do
task :start, [:env] do |t, args|
env = args[:env] || 'development'
Hyrax::Preservation::ServiceEnvironment.new(env).start
end
end
|
require File.dirname(__FILE__) + '/../spec_helper'
# Build an Eye::Checker :fsize instance watching $logger_path every 5s;
# +cfg+ entries override/extend the defaults.
def chfsize(cfg = {})
  Eye::Checker.create(nil, {:type => :fsize, :every => 5.seconds,
      :file => $logger_path, :times => 1}.merge(cfg))
end
describe "Eye::Checker::FileSize" do
  describe "" do
    subject{ chfsize }
    it "get_value" do
      subject.get_value.should be_within(10).of(File.size($logger_path))
    end
    # With no :below/:above bounds the checker only requires the size to
    # change between samples.
    it "not good if size equal prevous" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1001}
      subject.check.should == false
    end
    it "good when little different with previous" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1002}
      subject.check.should == true
    end
  end
  # :below caps the allowed growth between two samples.
  describe "below" do
    subject{ chfsize(:below => 10) }
    it "good" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1005}
      subject.check.should == true
    end
    it "bad" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1015}
      subject.check.should == false
    end
  end
  # :above requires the size to grow by at least that much.
  describe "above" do
    subject{ chfsize(:above => 10) }
    it "good" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1005}
      subject.check.should == false
    end
    it "bad" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1015}
      subject.check.should == true
    end
  end
  # Both bounds: growth must fall inside (above, below).
  # NOTE(review): two examples in this group share the name "bad" —
  # legal in RSpec but confusing in reports; consider renaming.
  describe "above and below" do
    subject{ chfsize(:above => 10, :below => 30) }
    it "bad" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1005}
      subject.check.should == false
    end
    it "good" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1021}
      subject.check.should == true
    end
    it "bad" do
      stub(subject).get_value{1001}
      subject.check.should == true
      stub(subject).get_value{1045}
      subject.check.should == false
    end
  end
end
<reponame>Divlo/programming-challenges
import readline from 'node:readline'
// Numbers read from stdin, one per line; sorted and printed on close.
const numbers = []
const readlineInterface = readline.createInterface({
  input: process.stdin,
  output: process.stdout
})
readlineInterface.on('line', (value) => {
  numbers.push(Number(value))
})
readlineInterface.on('close', solution)
function solution() {
  // Sort everything after the first input line (the first value is dropped
  // via slice(1) — presumably a count header; confirm the challenge
  // format) and print one number per line.
  const sortedNumbers = bubbleSort(numbers.slice(1))
  sortedNumbers.forEach((number) => {
    console.log(number)
  })
}
// Classic bubble sort: returns an ascending-sorted copy; the input array
// is left untouched.
function bubbleSort (numbersInput) {
  const result = [...numbersInput]
  // After each outer pass the largest remaining value has bubbled to
  // position `end`, so the inner scan shrinks by one each time.
  for (let end = result.length - 1; end > 0; end--) {
    for (let i = 0; i < end; i++) {
      if (result[i] > result[i + 1]) {
        const swap = result[i]
        result[i] = result[i + 1]
        result[i + 1] = swap
      }
    }
  }
  return result
}
|
#!/bin/bash
# Build script: stamps version info, then builds pushd and/or connd.
# Usage: build.sh [pushd|connd]; with no argument both are built.
#generate the version info from git
scripts/version.sh
#generate the grpc protobuffer file
#(cd grpc && make clean && make)
#(cd pushd && make clean && make)
#run go test first
#go test ./...
#build the pushd & connd
if [ "$1" = "pushd" ] || [ $# = 0 ]; then
    echo building pushd/pushd
    (cd push/bin/pushd && go build .)
fi
if [ "$1" = "connd" ] || [ $# = 0 ]; then
    echo building connd/connd
    (cd conn/bin/connd && go build .)
fi
|
<reponame>effie-ms/eeflows
import React from 'react';
import PropTypes from 'prop-types';
// Legend glyph for a time-series entry: a small path stroked/filled with
// the series colours, styled to sit inline next to its label.
export const SVGElementTimeSeriesType = ({ stroke, fill }) => (
    <svg
        className="recharts-surface"
        width={20}
        height={20}
        viewBox="0 0 40 40"
        version="1.1"
        style={{
            display: 'inline-block',
            verticalAlign: 'middle',
            marginRight: '4px',
        }}
    >
        <path
            strokeWidth={4}
            fill={fill}
            stroke={stroke}
            d="M0,16h10.666666666666666
            A5.333333333333333,5.333333333333333,0,1,1,21.333333333333332,16
            H32M21.333333333333332,16
            A5.333333333333333,5.333333333333333,0,1,1,10.666666666666666,16"
            className="recharts-legend-icon"
        />
    </svg>
);
SVGElementTimeSeriesType.propTypes = {
    stroke: PropTypes.string.isRequired,
    fill: PropTypes.string.isRequired,
};
// Legend glyph for a threshold line: a dashed horizontal stroke.
export const SVGElementThresholdType = ({ stroke }) => (
    <svg
        className="recharts-surface"
        width={40}
        height={20}
        viewBox="0 0 20 20"
        version="1.1"
        style={{
            display: 'inline-block',
            verticalAlign: 'middle',
            marginRight: '4px',
        }}
    >
        <line
            x1="0"
            y1="10"
            x2="40"
            y2="10"
            style={{
                fill: 'none',
                strokeWidth: '3px',
                stroke,
                strokeDasharray: '10 5',
            }}
        />
    </svg>
);
SVGElementThresholdType.propTypes = {
    stroke: PropTypes.string.isRequired,
};
// Legend glyph for a plain series line: a solid horizontal stroke.
export const SVGElementLine = ({ stroke }) => (
    <svg
        className="recharts-surface"
        width={40}
        height={20}
        viewBox="0 0 20 20"
        version="1.1"
        style={{
            display: 'inline-block',
            verticalAlign: 'middle',
            marginRight: '4px',
        }}
    >
        <line
            x1="0"
            y1="10"
            x2="40"
            y2="10"
            style={{ fill: 'none', strokeWidth: '3px', stroke }}
        />
    </svg>
);
SVGElementLine.propTypes = {
    stroke: PropTypes.string.isRequired,
};
// Legend glyph: a filled square swatch.
export const SVGElementSquare = ({ fill }) => (
    <svg
        className="recharts-surface"
        width={10}
        height={10}
        viewBox="0 0 32 32"
        version="1.1"
        style={{
            display: 'inline-block',
            verticalAlign: 'middle',
            marginRight: '4px',
        }}
    >
        <path
            fill={fill}
            className="recharts-symbols"
            transform="translate(16, 16)"
            d="M-16,-16h32v32h-32Z"
        />
    </svg>
);
SVGElementSquare.propTypes = {
    fill: PropTypes.string.isRequired,
};
// Legend glyph: a filled circular swatch.
export const SVGElementCircle = ({ fill }) => (
    <svg
        className="recharts-surface"
        width={10}
        height={10}
        viewBox="0 0 32 32"
        version="1.1"
        style={{
            display: 'inline-block',
            verticalAlign: 'middle',
            marginRight: '4px',
        }}
    >
        <path
            fill={fill}
            className="recharts-symbols"
            transform="translate(16, 16)"
            d="M16,0A16,16,0,1,1,-16,0A16,16,0,1,1,16,0"
        />
    </svg>
);
SVGElementCircle.propTypes = {
    fill: PropTypes.string.isRequired,
};
|
package com.example.batchforscience.aspect;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
/**
 * Centralised pointcut definitions for job-lifecycle advice. Each pointcut
 * matches the named JobCompletionListener method with exactly one argument
 * of any type.
 */
@Aspect
public class PointCutDeclarations {
    @Pointcut("execution(* com.example.batchforscience.listener.JobCompletionListener.beforeJob(*))")
    public void beforeJob() {
    }
    @Pointcut("execution(* com.example.batchforscience.listener.JobCompletionListener.afterJob(*))")
    public void afterJob() {
    }
}
|
// Admin command: set a channel's topic. Targets the first mentioned
// channel, or the current channel when none is mentioned.
module.exports = {
  name: "ctopic",
  description: "Update the channel topic",
  category: "admin",
  botPermissions: ["MANAGE_CHANNELS"],
  memberPermissions: ["MANAGE_CHANNELS"],
  async execute(bot, message, args) {
    // Localised strings for this guild.
    const lang = await bot.getGuildLang(message.guild.id);
    let channel = message.mentions.channels.first();
    let topic;
    if (!channel) {
      channel = message.channel;
      topic = args.join(" ");
    } else {
      // NOTE(review): assumes the channel mention is the FIRST argument; a
      // mention placed elsewhere would leave the mention token inside the
      // topic text — confirm against the command parser.
      topic = args.slice(1).join(" ").trim();
    }
    if (!topic) return message.reply(lang.ADMIN.C_TOPIC_PROVIDE_TOPIC);
    await channel.setTopic(topic);
    await message.channel.send(lang.ADMIN.C_TOPIC_ADDED.replace("{topic}", topic));
  },
};
|
<gh_stars>0
// Compiled output of directeur-layout.routing.ts (see the sourceMappingURL
// footer) — prefer editing the TypeScript source. Route table for the
// DIRECTEUR layout: personnel list plus partial/total assignment views.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var AffectationsPartiellesDirecteur_component_1 = require("./AffectationsPartiellesDirecteur/AffectationsPartiellesDirecteur.component");
var List_PersonnelsDirecteur_component_1 = require("./List-PersonnelsDirecteur/List-PersonnelsDirecteur.component");
var affectations_totales_component_1 = require("./affectations-totales/affectations-totales.component");
exports.DIRECTEURLayoutRoutes = [
    { path: "List_Personnels", component: List_PersonnelsDirecteur_component_1.ListPersonnelsComponent },
    {
        path: "Affectations_partielles",
        component: AffectationsPartiellesDirecteur_component_1.AffectationsPartiellesDirecteurComponent
    },
    {
        path: "Affectations_Totales",
        component: affectations_totales_component_1.AffectationsTotalesComponent
    }
];
//# sourceMappingURL=directeur-layout.routing.js.map
<filename>common/types_test.go
// Copyright 2020 Condensat Tech. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
package common
import (
"testing"
)
// TestIssuanceInfo_IsValid_Mode verifies that IssuanceRequest.IsValid
// rejects an empty or unknown Mode and accepts each known issuance mode
// when the fields that mode requires are populated.
func TestIssuanceInfo_IsValid_Mode(t *testing.T) {
	t.Parallel()
	type fields struct {
		Mode               AssetIssuanceMode
		AssetPublicAddress string
		AssetIssuedAmount  float64
		TokenPublicAddress string
		TokenIssuedAmount  float64
		ContractHash       string
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{"default", fields{}, false},
		{"invalid", fields{Mode: "foobar"}, false},
		{"issueAsset", fields{Mode: AssetIssuanceModeWithAsset, AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1}, true},
		{"issueAssetWithToken", fields{Mode: AssetIssuanceModeWithToken, AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1}, true},
		{"issueAssetWithContract", fields{Mode: AssetIssuanceModeWithContract, AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, ContractHash: "contract"}, true},
		{"issueAssetWithTokenWithContract", fields{Mode: AssetIssuanceModeWithTokenWithContract, AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1, ContractHash: "contract"}, true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			p := &IssuanceRequest{
				Mode:               tt.fields.Mode,
				AssetPublicAddress: tt.fields.AssetPublicAddress,
				AssetIssuedAmount:  tt.fields.AssetIssuedAmount,
				TokenPublicAddress: tt.fields.TokenPublicAddress,
				TokenIssuedAmount:  tt.fields.TokenIssuedAmount,
				ContractHash:       tt.fields.ContractHash,
			}
			if got := p.IsValid(); got != tt.want {
				t.Errorf("IssuanceInfo.IsValid() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestIssuanceInfo_IsValid_WithAsset verifies IsValid under
// AssetIssuanceModeWithAsset: address and amount are required, and a
// contract hash is not allowed in this mode.
func TestIssuanceInfo_IsValid_WithAsset(t *testing.T) {
	t.Parallel()
	type fields struct {
		AssetPublicAddress string
		AssetIssuedAmount  float64
		TokenPublicAddress string
		TokenIssuedAmount  float64
		ContractHash       string
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{"default", fields{}, false},
		{"invalidAddress", fields{AssetIssuedAmount: 42.1}, false},
		{"invalidAmount", fields{AssetPublicAddress: "foobar"}, false},
		{"invalidContract", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, ContractHash: "invalid"}, false},
		{"valid", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1}, true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			p := &IssuanceRequest{
				Mode:               AssetIssuanceModeWithAsset,
				AssetPublicAddress: tt.fields.AssetPublicAddress,
				AssetIssuedAmount:  tt.fields.AssetIssuedAmount,
				ContractHash:       tt.fields.ContractHash,
			}
			if got := p.IsValid(); got != tt.want {
				t.Errorf("IssuanceInfo.IsValid() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestIssuanceInfo_IsValid_WithToken verifies IsValid under
// AssetIssuanceModeWithToken: asset and token address/amount must all be
// set, and a contract hash is not allowed in this mode.
func TestIssuanceInfo_IsValid_WithToken(t *testing.T) {
	t.Parallel()
	type fields struct {
		Mode               AssetIssuanceMode
		AssetPublicAddress string
		AssetIssuedAmount  float64
		TokenPublicAddress string
		TokenIssuedAmount  float64
		ContractHash       string
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{"default", fields{}, false},
		{"invalidAssetAddress", fields{AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1}, false},
		{"invalidAssetAmount", fields{AssetPublicAddress: "foobar", TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1}, false},
		{"invalidTokenAddress", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenIssuedAmount: 42.1}, false},
		{"invalidTokenAmount", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar"}, false},
		{"invalidContractHash", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1, ContractHash: "contract"}, false},
		{"valid", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1}, true},
	}
	for _, tt := range tests {
		// Consistency fix: this was the only table loop in the file missing
		// the range-variable capture used by the sibling tests above.
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			p := &IssuanceRequest{
				Mode:               AssetIssuanceModeWithToken,
				AssetPublicAddress: tt.fields.AssetPublicAddress,
				AssetIssuedAmount:  tt.fields.AssetIssuedAmount,
				TokenPublicAddress: tt.fields.TokenPublicAddress,
				TokenIssuedAmount:  tt.fields.TokenIssuedAmount,
				ContractHash:       tt.fields.ContractHash,
			}
			if got := p.IsValid(); got != tt.want {
				t.Errorf("IssuanceInfo.IsValid() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestIssuanceInfo_IsValid_WithContract verifies IsValid under
// AssetIssuanceModeWithContract: asset address/amount and contract hash are
// required; token fields are not allowed in this mode.
func TestIssuanceInfo_IsValid_WithContract(t *testing.T) {
	// Consistency fix: run in parallel like the sibling tests above.
	t.Parallel()
	type fields struct {
		Mode               AssetIssuanceMode
		AssetPublicAddress string
		AssetIssuedAmount  float64
		TokenPublicAddress string
		TokenIssuedAmount  float64
		ContractHash       string
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{"default", fields{}, false},
		{"invalidAssetAddress", fields{AssetIssuedAmount: 42.1, ContractHash: "contract"}, false},
		{"invalidAssetAmount", fields{AssetPublicAddress: "foobar", ContractHash: "contract"}, false},
		{"invalidContractHash", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1}, false},
		{"invalidTokenAddress", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenIssuedAmount: 42.1, ContractHash: "contract"}, false},
		{"invalidTokenAmount", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", ContractHash: "contract"}, false},
		{"valid", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, ContractHash: "contract"}, true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			p := &IssuanceRequest{
				Mode:               AssetIssuanceModeWithContract,
				AssetPublicAddress: tt.fields.AssetPublicAddress,
				AssetIssuedAmount:  tt.fields.AssetIssuedAmount,
				TokenPublicAddress: tt.fields.TokenPublicAddress,
				TokenIssuedAmount:  tt.fields.TokenIssuedAmount,
				ContractHash:       tt.fields.ContractHash,
			}
			if got := p.IsValid(); got != tt.want {
				t.Errorf("IssuanceInfo.IsValid() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestIssuanceInfo_IsValid_WithTokenWithContract verifies IsValid under
// AssetIssuanceModeWithTokenWithContract: asset fields, token fields and
// the contract hash must all be populated.
func TestIssuanceInfo_IsValid_WithTokenWithContract(t *testing.T) {
	// Consistency fix: run in parallel like the sibling tests above.
	t.Parallel()
	type fields struct {
		Mode               AssetIssuanceMode
		AssetPublicAddress string
		AssetIssuedAmount  float64
		TokenPublicAddress string
		TokenIssuedAmount  float64
		ContractHash       string
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{"default", fields{}, false},
		{"invalidAssetAddress", fields{AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1, ContractHash: "contract"}, false},
		{"invalidAssetAmount", fields{AssetPublicAddress: "foobar", TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1, ContractHash: "contract"}, false},
		{"invalidTokenAddress", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenIssuedAmount: 42.1, ContractHash: "contract"}, false},
		{"invalidTokenAmount", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", ContractHash: "contract"}, false},
		{"invalidContractHash", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1}, false},
		{"valid", fields{AssetPublicAddress: "foobar", AssetIssuedAmount: 42.1, TokenPublicAddress: "foobar", TokenIssuedAmount: 42.1, ContractHash: "contract"}, true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			p := &IssuanceRequest{
				Mode:               AssetIssuanceModeWithTokenWithContract,
				AssetPublicAddress: tt.fields.AssetPublicAddress,
				AssetIssuedAmount:  tt.fields.AssetIssuedAmount,
				TokenPublicAddress: tt.fields.TokenPublicAddress,
				TokenIssuedAmount:  tt.fields.TokenIssuedAmount,
				ContractHash:       tt.fields.ContractHash,
			}
			if got := p.IsValid(); got != tt.want {
				t.Errorf("IssuanceInfo.IsValid() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
<reponame>Prajwal-ctrl/30DaysOfJavaScript
// Canned-response chat bot for the 30 Days of JavaScript demo page.
// Reads the user's text from #userBox, looks it up in the `know` table
// and renders the user's message followed by the bot's reply in #chatLog.
function talk(){
    var know = {
        "Who are you" : "Hello, I am special bot for 30 Days of JavaScript",
        "How are you" : "Good :)",
        "What can I do for you" : "Please star this repository",
        "How do I contribute" : "Please read Readme",
        "ok" : "Thank You So Much ",
        "Bye" : "Okay! Will meet soon.."
    };
    var user = document.getElementById('userBox').value;
    // SECURITY FIX: escape user-supplied text before inserting via innerHTML,
    // otherwise typed markup would be interpreted (XSS / broken layout).
    var safeUser = user
        .replace(/&/g, "&amp;")
        .replace(/</g, "&lt;")
        .replace(/>/g, "&gt;");
    // BUG FIX: the original wrote the user's message and then immediately
    // overwrote the chat log with the reply, so the user's line was never
    // visible. Build the whole exchange and assign it in one update.
    var reply = (user in know) ? know[user] : "Sorry,I didn't understand ";
    document.getElementById('chatLog').innerHTML = safeUser + "<br>" + reply + "<br>";
}
|
import ctypes as ct
def simulate_bulk_transfer(handle, endpoint_in, buffer, READ_CAPACITY_LENGTH):
    """Read up to READ_CAPACITY_LENGTH bytes from a USB bulk IN endpoint.

    @param handle: opaque device handle passed through to ``usb.bulk_transfer``
    @param endpoint_in: address of the IN endpoint to read from
    @param buffer: writable buffer the transfer fills
        (NOTE(review): ``ct.pointer`` requires a ctypes instance, e.g. one
        created with ``ct.create_string_buffer`` -- a plain ``bytearray``
        would raise TypeError; confirm with the caller)
    @param READ_CAPACITY_LENGTH: number of bytes requested
    @return: the number of bytes actually received on success, or the
        error string produced by ``err_exit`` on failure

    NOTE(review): relies on a module-level ``usb`` binding (libusb wrapper)
    that is not defined in this file -- confirm it is provided elsewhere.
    """
    def err_exit(error_code):
        # Placeholder for error handling logic
        return f"Error: {error_code}"

    size = ct.c_int()
    # 1000 ms timeout, as in the original example
    r = usb.bulk_transfer(handle, endpoint_in,
                          ct.cast(ct.pointer(buffer), ct.POINTER(ct.c_ubyte)),
                          READ_CAPACITY_LENGTH, ct.byref(size), 1000)
    if r < 0:
        return err_exit(r)
    size = size.value
    print(f"   received {size} bytes")
    # BUG FIX: the original fell off the end and returned None on success;
    # return the received byte count so callers can use it.
    return size
# Example usage
# BUG FIX: READ_CAPACITY_LENGTH was referenced on the line above its own
# definition (NameError); define the length before allocating the buffer.
READ_CAPACITY_LENGTH = 64  # Replace with actual data length
handle = 123               # Replace with actual USB device handle
endpoint_in = 1            # Replace with actual input endpoint
data_buffer = bytearray(b'\x00' * READ_CAPACITY_LENGTH)  # Transfer buffer
simulate_bulk_transfer(handle, endpoint_in, data_buffer, READ_CAPACITY_LENGTH)
<filename>models/tasks.py
# -*- coding: utf-8 -*-
# =============================================================================
# Tasks to be callable async
# =============================================================================
# Registry mapping task names to callables; handed to S3Task at the bottom
# of this file so the web2py scheduler can resolve tasks by name.
tasks = {}
# -----------------------------------------------------------------------------
def maintenance(period="daily"):
    """
    Run all maintenance tasks which should be done daily
    - the active template may provide a custom maintenance module;
      fall back to the default template's module if it does not

    @param period: maintenance period; only "daily" is implemented
    @return: the result of the template's Daily task, or the string
             "NotImplementedError" for any other period
    """
    mod = "applications.%s.private.templates.%s.maintenance as maintenance" % \
          (appname, settings.get_template())
    try:
        # Binds the template's maintenance module to the local name
        # "maintenance" (shadowing this function within this scope)
        exec("import %s" % mod)
    except ImportError:
        # FIX: "except ImportError, e" is Python-2-only syntax and the bound
        # exception was never used; the bare clause works on both 2.x and 3.x.
        # No custom maintenance available, use the default template's module.
        exec("import applications.%s.private.templates.default.maintenance as maintenance" % appname)
    if period == "daily":
        result = maintenance.Daily()()
    else:
        result = "NotImplementedError"
    db.commit()
    return result

tasks["maintenance"] = maintenance
# -----------------------------------------------------------------------------
def crop_image(path, x1, y1, x2, y2, width):
    """
    Crop Image - used by S3ImageCropWidget through IS_PROCESSED_IMAGE

    @param path: file path of the image; it is cropped in place
    @param x1, y1, x2, y2: crop box corners in display coordinates
    @param width: display width the coordinates refer to; used to scale
                  the box back to the image's real pixel width
    """
    from PIL import Image
    image = Image.open(path)
    # The widget reports coordinates relative to a scaled-down preview;
    # rescale them to the full-size image.
    scale_factor = image.size[0] / float(width)
    # FIX: materialise the crop box as a tuple of ints - on Python 3
    # ``map`` returns a lazy iterator which Image.crop cannot use as a box.
    points = tuple(int(v * scale_factor) for v in (x1, y1, x2, y2))
    image.crop(points).save(path)

tasks["crop_image"] = crop_image
# -----------------------------------------------------------------------------
if settings.has_module("doc"):
# -----------------------------------------------------------------------------
def document_create_index(document, user_id=None):
    """
    Add a doc_document record to the Solr full-text index.

    @param document: JSON string with the keys "id", "name" and
                     "filename" of the doc_document record
    @param user_id: calling request's auth.user.id or None (unused here;
                    kept for a uniform task signature)

    Extracts plain text from the uploaded file (pdf/doc/xls/rtf have
    dedicated extractors, anything else falls back to `strings`), adds
    the text to Solr and then flags the record as indexed.
    """
    import os
    from xlrd import open_workbook
    from pyth.plugins.rtf15.reader import Rtf15Reader
    from pyth.plugins.plaintext.writer import PlaintextWriter
    import sunburnt

    document = json.loads(document)
    table = s3db.doc_document
    id = document["id"]
    name = document["name"]
    filename = document["filename"]
    # Resolve the stored name to an absolute path inside this app's uploads/
    filename = "%s/%s/uploads/%s" % (os.path.abspath("applications"), \
                                     request.application, filename)

    si = sunburnt.SolrInterface(settings.get_base_solr_url())

    extension = os.path.splitext(filename)[1][1:]
    # SECURITY NOTE(review): the os.popen calls below interpolate the file
    # path into a shell command line; a crafted filename could inject shell
    # syntax. Consider subprocess with an argument list instead.
    if extension == "pdf":
        data = os.popen("pdf2txt.py " + filename).read()
    elif extension == "doc":
        data = os.popen("antiword " + filename).read()
    elif extension == "xls":
        # Concatenate every cell of every sheet as comma-joined lines
        wb = open_workbook(filename)
        data=" "
        for s in wb.sheets():
            for row in range(s.nrows):
                values = []
                for col in range(s.ncols):
                    values.append(str(s.cell(row, col).value))
                data = data + ",".join(values) + "\n"
    elif extension == "rtf":
        doct = Rtf15Reader.read(open(filename))
        data = PlaintextWriter.write(doct).getvalue()
    else:
        # Fallback: extract any printable strings from the binary
        data = os.popen("strings " + filename).read()

    # The text needs to be in unicode or ascii, with no control characters
    data = str(unicode(data, errors="ignore"))
    data = "".join(c if ord(c) >= 32 else " " for c in data)

    # Put the data according to the Multiple Fields
    # @ToDo: Also, would change this according to requirement of Eden
    document = {"id": str(id), # doc_document.id
                "name": data, # the data of the file
                "url": filename, # the encoded file name stored in uploads/
                "filename": name, # the filename actually uploaded by the user
                "filetype": extension # x.pdf -> pdf is the extension of the file
                }

    # Add and commit Indices
    si.add(document)
    si.commit()
    # After Indexing, set the value for has_been_indexed to True in the database
    db(table.id == id).update(has_been_indexed = True)
    db.commit()

tasks["document_create_index"] = document_create_index
# -----------------------------------------------------------------------------
def document_delete_index(document, user_id=None):
    """
    Remove a deleted document's entry from the Solr full-text index.

    @param document: JSON string with at least an "id" key
                     (the doc_document record id)
    @param user_id: calling request's auth.user.id or None (unused here;
                    kept for a uniform task signature)
    """
    import sunburnt

    document = json.loads(document)
    table = s3db.doc_document
    # Renamed from "id" to avoid shadowing the builtin; the unused
    # "filename" lookup present previously has been dropped.
    doc_id = document["id"]

    si = sunburnt.SolrInterface(settings.get_base_solr_url())

    # Delete and Commit the indices of the deleted document
    si.delete(doc_id)
    si.commit()

    # After removing the index, flag the record as no longer indexed
    db(table.id == doc_id).update(has_been_indexed = False)
    db.commit()

tasks["document_delete_index"] = document_delete_index
# -----------------------------------------------------------------------------
def gis_download_kml(record_id, filename, session_id_name, session_id,
                     user_id=None):
    """
    Fetch a KML layer and store it under the given filename
    - normally executed asynchronously when a scheduler worker is alive

    @param record_id: id of the record in db.gis_layer_kml
    @param filename: name to save the file as
    @param session_id_name: name of the session
    @param session_id: id of the session
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    download_result = gis.download_kml(record_id, filename,
                                       session_id_name, session_id)
    db.commit()
    return download_result

tasks["gis_download_kml"] = gis_download_kml
# -----------------------------------------------------------------------------
def gis_update_location_tree(feature, user_id=None):
    """
    Update the Location Tree for a single feature
    - normally executed asynchronously when a scheduler worker is alive

    @param feature: the feature, JSON-encoded
    @param user_id: calling request's auth.user.id or None
    @return: the feature's path in the location tree
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    parsed_feature = json.loads(feature)
    tree_path = gis.update_location_tree(parsed_feature)
    db.commit()
    return tree_path

tasks["gis_update_location_tree"] = gis_update_location_tree
# -----------------------------------------------------------------------------
def org_facility_geojson(user_id=None):
    """
    Export GeoJSON[P] of Facility data

    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    # The export writes its output itself; nothing to return
    s3db.org_facility_geojson()

tasks["org_facility_geojson"] = org_facility_geojson
# -----------------------------------------------------------------------------
if settings.has_module("msg"):
# -------------------------------------------------------------------------
def msg_process_outbox(contact_method, user_id=None):
    """
    Send pending messages in the Outbox via one contact method
    - normally executed asynchronously when a scheduler worker is alive

    @param contact_method: one from s3msg.MSG_CONTACT_OPTS
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    outbox_result = msg.process_outbox(contact_method)
    db.commit()
    return outbox_result

tasks["msg_process_outbox"] = msg_process_outbox
# -------------------------------------------------------------------------
def msg_twitter_search(search_id, user_id=None):
    """
    Run a stored Twitter search
    - normally executed asynchronously when a scheduler worker is alive

    @param search_id: one of s3db.msg_twitter_search.id
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    search_result = msg.twitter_search(search_id)
    db.commit()
    return search_result

tasks["msg_twitter_search"] = msg_twitter_search
# -------------------------------------------------------------------------
def msg_process_keygraph(search_id, user_id=None):
    """
    Process Twitter Search results with KeyGraph
    - normally executed asynchronously when a scheduler worker is alive

    @param search_id: one of s3db.msg_twitter_search.id
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    keygraph_result = msg.process_keygraph(search_id)
    db.commit()
    return keygraph_result

tasks["msg_process_keygraph"] = msg_process_keygraph
# -------------------------------------------------------------------------
def msg_poll(tablename, channel_id, user_id=None):
    """
    Poll an inbound channel for new messages

    @param tablename: the channel table name
    @param channel_id: the channel record id
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    poll_result = msg.poll(tablename, channel_id)
    db.commit()
    return poll_result

tasks["msg_poll"] = msg_poll
# -----------------------------------------------------------------------------
def msg_parse(channel_id, function_name, user_id=None):
    """
    Parse Messages coming in from a Source Channel

    @param channel_id: the source channel's record id
    @param function_name: name of the parser function to apply
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    parse_result = msg.parse(channel_id, function_name)
    db.commit()
    return parse_result

tasks["msg_parse"] = msg_parse
# --------------------------------------------------------------------------
def msg_search_subscription_notifications(frequency, user_id=None):
    """
    Search Subscriptions & send Notifications

    @param frequency: the subscription check frequency
    @param user_id: calling request's auth.user.id or None
    @ToDo: Deprecate
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    notification_result = s3db.msg_search_subscription_notifications(frequency=frequency)
    db.commit()
    return notification_result

tasks["msg_search_subscription_notifications"] = msg_search_subscription_notifications
# -------------------------------------------------------------------------
def notify_check_subscriptions(user_id=None):
    """
    Scheduled task: check all subscriptions for pending updates and
    create notify_notify tasks for those that have any.

    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    return s3base.S3Notifications().check_subscriptions()

tasks["notify_check_subscriptions"] = notify_check_subscriptions
# -------------------------------------------------------------------------
def notify_notify(resource_id, user_id=None):
    """
    Asynchronous task to notify a subscriber about resource
    updates. This task is created by notify_check_subscriptions.

    @param resource_id: id of the subscription resource to send
                        notifications for
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        auth.s3_impersonate(user_id)
    # Class reference, not an instance: notify() is invoked on the class
    notify = s3base.S3Notifications
    return notify.notify(resource_id)

tasks["notify_notify"] = notify_notify
# -----------------------------------------------------------------------------
if settings.has_module("req"):
def req_add_from_template(req_id, user_id=None):
    """
    Create a new Request as a copy of a template Request

    @param req_id: id of the template request
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    new_request = s3db.req_add_from_template(req_id)
    db.commit()
    return new_request

tasks["req_add_from_template"] = req_add_from_template
# -----------------------------------------------------------------------------
if settings.has_module("stats"):
def stats_demographic_update_aggregates(records=None, user_id=None):
    """
    Update the stats_demographic_aggregate table for the given
    stats_demographic_data record(s)

    @param records: JSON of Rows of stats_demographic_data records
                    to update aggregates for
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    update_result = s3db.stats_demographic_update_aggregates(records)
    db.commit()
    return update_result

tasks["stats_demographic_update_aggregates"] = stats_demographic_update_aggregates
def stats_demographic_update_location_aggregate(location_level,
                                                root_location_id,
                                                parameter_id,
                                                start_date,
                                                end_date,
                                                user_id=None):
    """
    Update the stats_demographic_aggregate table for one location/parameter
    - called from within stats_demographic_update_aggregates

    @param location_level: gis level at which the data needs to be accumulated
    @param root_location_id: id of the location
    @param parameter_id: parameter for which the stats are being updated
    @param start_date: start date of the period in question
    @param end_date: end date of the period in question
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    aggregate_result = s3db.stats_demographic_update_location_aggregate(
                            location_level,
                            root_location_id,
                            parameter_id,
                            start_date,
                            end_date,
                            )
    db.commit()
    return aggregate_result

tasks["stats_demographic_update_location_aggregate"] = stats_demographic_update_location_aggregate
if settings.has_module("vulnerability"):
def vulnerability_update_aggregates(records=None, user_id=None):
    """
    Update the vulnerability_aggregate table for the given
    vulnerability_data record(s)

    @param records: JSON of Rows of vulnerability_data records to
                    update aggregates for
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Run with the privileges of the calling user
        auth.s3_impersonate(user_id)
    update_result = s3db.vulnerability_update_aggregates(records)
    db.commit()
    return update_result

tasks["vulnerability_update_aggregates"] = vulnerability_update_aggregates
def vulnerability_update_location_aggregate(#location_level,
                                            root_location_id,
                                            parameter_id,
                                            start_date,
                                            end_date,
                                            user_id=None):
    """
    Update the vulnerability_aggregate table for the given location and parameter
    - called from within vulnerability_update_aggregates

    NOTE(review): the location_level parameter is commented out both here
    and in the underlying s3db call - presumably intentionally disabled;
    confirm before re-enabling.

    @param root_location_id: id of the location
    @param parameter_id: parameter for which the stats are being updated
    @param start_date: start date of the period in question
    @param end_date: end date of the period in question
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Authenticate
        auth.s3_impersonate(user_id)
    # Run the Task & return the result
    result = s3db.vulnerability_update_location_aggregate(#location_level,
                                                          root_location_id,
                                                          parameter_id,
                                                          start_date,
                                                          end_date,
                                                          )
    db.commit()
    return result

tasks["vulnerability_update_location_aggregate"] = vulnerability_update_location_aggregate
# -----------------------------------------------------------------------------
if settings.has_module("sync"):
# -----------------------------------------------------------------------------
def sync_synchronize(repository_id, user_id=None, manual=False):
    """
    Run all synchronization tasks for a repository, to be called from
    the scheduler.

    @param repository_id: id of the record in sync_repository
    @param user_id: calling request's auth.user.id or None
        NOTE(review): unlike the other tasks in this file, impersonation
        is done unconditionally (even for user_id=None) - confirm this
        is intended.
    @param manual: whether this run was triggered manually
    @return: S3SyncLog status code (ERROR when a sync is already running,
             otherwise SUCCESS)
    """
    auth.s3_impersonate(user_id)

    rtable = s3db.sync_repository
    # Look up the (non-deleted) repository record
    query = (rtable.deleted != True) & \
            (rtable.id == repository_id)
    repository = db(query).select(limitby=(0, 1)).first()
    if repository:
        sync = s3base.S3Sync()
        status = sync.get_status()
        if status.running:
            # Another run is already in progress - log and bail out
            message = "Synchronization already active - skipping run"
            sync.log.write(repository_id=repository.id,
                           resource_name=None,
                           transmission=None,
                           mode=None,
                           action="check",
                           remote=False,
                           result=sync.log.ERROR,
                           message=message)
            db.commit()
            return sync.log.ERROR
        # Mark the run as active; the finally-clause guarantees the flag
        # is cleared even if synchronize() raises
        sync.set_status(running=True, manual=manual)
        try:
            sync.synchronize(repository)
        finally:
            sync.set_status(running=False, manual=False)
    db.commit()
    return s3base.S3SyncLog.SUCCESS

tasks["sync_synchronize"] = sync_synchronize
# -----------------------------------------------------------------------------
# Instantiate Scheduler instance with the list of tasks
s3.tasks = tasks
s3task = s3base.S3Task()
current.s3task = s3task
# -----------------------------------------------------------------------------
# Reusable field for scheduler task links
scheduler_task_id = S3ReusableField("scheduler_task_id",
"reference %s" % s3base.S3Task.TASK_TABLENAME,
ondelete="CASCADE")
s3.scheduler_task_id = scheduler_task_id
# END =========================================================================
|
const pubsub = require('pubsub-js');

// Per registered module id: whether the module's bus traffic is currently
// muted. A module must be registered before it may publish or subscribe.
const muteStatus = new Map();

// Register a module with the bus; it starts unmuted.
function registerModule(brunchModule) {
  muteStatus.set(brunchModule.id, false);
}

// Forget every registered module (does not remove pubsub subscriptions).
function deregisterModules() {
  muteStatus.clear();
}

// True when the module is muted; undefined for unregistered modules.
function isMuted(brunchModule) {
  return muteStatus.get(brunchModule.id);
}

// Publish synchronously on behalf of a registered, unmuted module.
// Throws when the module was never registered.
function publish(brunchModule, topic, data) {
  if (!muteStatus.has(brunchModule.id)) {
    // FIX: removed the unreachable `return` that followed this throw.
    throw new Error('fateBus.js#publish: Module [' + brunchModule.id + '] is not defined');
  }
  if (!isMuted(brunchModule)) {
    pubsub.publishSync(topic, data);
  }
}

// Subscribe a callback on behalf of a registered module. Delivery is
// skipped while the module is muted or after it has been deregistered.
function subscribe(brunchModule, topic, callback) {
  if (!muteStatus.has(brunchModule.id)) {
    // FIX: removed the unreachable `return` that followed this throw.
    throw new Error('fateBus.js#subscribe: Module [' + brunchModule.id + '] is not defined');
  }
  pubsub.subscribe(topic, function (msg, data) {
    // Re-check at delivery time: the module may have been deregistered
    // or muted since subscribing.
    if (muteStatus.has(brunchModule.id) && !isMuted(brunchModule)) {
      callback(msg, data);
    }
  });
}

// Mute a single module by id.
function mute(moduleId) {
  muteStatus.set(moduleId, true);
}

// Mute every currently-registered module.
function muteAll() {
  Array.from(muteStatus.keys()).forEach(function (moduleId) {
    mute(moduleId);
  });
}

// Unmute a single module by id.
function unmute(moduleId) {
  muteStatus.set(moduleId, false);
}

// Remove the given callback from every topic it is subscribed to.
function unsubscribeFunctionFromAllTopics(fn) {
  pubsub.unsubscribe(fn);
}

exports.registerModule = registerModule;
exports.deregisterModules = deregisterModules;
exports.publish = publish;
exports.subscribe = subscribe;
exports.mute = mute;
exports.muteAll = muteAll;
exports.unmute = unmute;
exports.unsubscribeFunctionFromAllTopics = unsubscribeFunctionFromAllTopics;
|
<filename>code/iaas/model/src/main/java/io/cattle/platform/core/constants/InstanceLinkConstants.java
package io.cattle.platform.core.constants;
/**
 * String key names used for instance-link fields and data entries.
 * Pure constants holder - not meant to be instantiated.
 */
public class InstanceLinkConstants {

    /** Key "instanceId". */
    public static final String FIELD_INSTANCE_ID = "instanceId";

    /** Key "ports". */
    public static final String FIELD_PORTS = "ports";

    /** Key "instanceNames". */
    public static final String FIELD_INSTANCE_NAMES = "instanceNames";

    /** Key "linkWaitTime". */
    public static final String DATA_LINK_WAIT_TIME = "linkWaitTime";

    // FIX: prevent accidental instantiation of a constants-only class.
    private InstanceLinkConstants() {
    }
}
|
const Page = require("./page");
/**
 * Sub page containing specific selectors and methods for the Menu page.
 */
class Menu extends Page {
    /**
     * Selectors, defined as getters so they are re-resolved on each access.
     */
    get menu() {
        return $(".global-menu")
    }

    get authButton(){
        return $(".login")
    }

    /**
     * Assert that the global menu element exists and that the browser
     * is on the menu URL.
     */
    async assertMenu(){
        const menu = await this.menu
        // FIX: await the matcher so a failure is reported inside this test
        // instead of as an unhandled rejection (clickLogInButton below
        // already awaited its expectation).
        await expect(menu).toExist()
        const currentUrl = await browser.getUrl()
        // FIX: toContain takes a single argument; the extra message string
        // was silently ignored by the matcher.
        expect(currentUrl).toContain("menu")
    }

    /**
     * Click the login button (waits for it to exist first).
     */
    async clickLogInButton(){
        const button = await this.authButton
        await expect(button).toExist()
        await button.click()
    }

    /**
     * Open the menu page (path is appended to the configured base URL).
     */
    open() {
        return super.open("menu");
    }
}

module.exports = new Menu();
<reponame>pnkfb9/gem5_priority
/*
* Copyright (c) 2010-2012 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2001-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: <NAME>
*/
#ifndef __ARCH_ARM_TLB_HH__
#define __ARCH_ARM_TLB_HH__
#include <map>
#include "arch/arm/isa_traits.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/utility.hh"
#include "arch/arm/vtophys.hh"
#include "base/statistics.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/fault_fwd.hh"
#include "sim/tlb.hh"
class ThreadContext;
namespace ArmISA {
class TableWalker;
/**
 * ARM TLB model: caches virtual-to-physical translations (TlbEntry) and
 * holds a pointer to a TableWalker (presumably consulted on misses --
 * the walker implementation is outside this header).
 */
class TLB : public BaseTLB
{
  public:
    // Request flags: the low 5 bits encode the required alignment, the
    // higher bits modify how the access is checked.
    enum ArmFlags {
        AlignmentMask = 0x1f,

        AlignByte = 0x0,
        AlignHalfWord = 0x1,
        AlignWord = 0x3,
        AlignDoubleWord = 0x7,
        AlignQuadWord = 0xf,
        AlignOctWord = 0x1f,

        AllowUnaligned = 0x20,
        // Priv code operating as if it wasn't
        UserMode = 0x40,
        // Because zero otherwise looks like a valid setting and may be used
        // accidentally, this bit must be non-zero to show it was used on
        // purpose.
        MustBeOne = 0x80
    };
  protected:
    TlbEntry *table;    // the Page Table
    int size;           // TLB Size
    uint32_t _attr;     // Memory attributes for last accessed TLB entry

    TableWalker *tableWalker;

    /** Lookup an entry in the TLB
     * @param vpn virtual address
     * @param asn context id/address space id to use
     * @param functional if the lookup should modify state
     * @return pointer to TLB entry if it exists
     */
    TlbEntry *lookup(Addr vpn, uint8_t asn, bool functional = false);

    // Access Stats: hit/miss/flush/fault counters, split by access type
    mutable Stats::Scalar instHits;
    mutable Stats::Scalar instMisses;
    mutable Stats::Scalar readHits;
    mutable Stats::Scalar readMisses;
    mutable Stats::Scalar writeHits;
    mutable Stats::Scalar writeMisses;
    mutable Stats::Scalar inserts;
    mutable Stats::Scalar flushTlb;
    mutable Stats::Scalar flushTlbMva;
    mutable Stats::Scalar flushTlbMvaAsid;
    mutable Stats::Scalar flushTlbAsid;
    mutable Stats::Scalar flushedEntries;
    mutable Stats::Scalar alignFaults;
    mutable Stats::Scalar prefetchFaults;
    mutable Stats::Scalar domainFaults;
    mutable Stats::Scalar permsFaults;

    // Derived statistics computed from the scalar counters above
    Stats::Formula readAccesses;
    Stats::Formula writeAccesses;
    Stats::Formula instAccesses;
    Stats::Formula hits;
    Stats::Formula misses;
    Stats::Formula accesses;

    int rangeMRU; //On lookup, only move entries ahead when outside rangeMRU

    // Set to true via allCpusCaching() once all CPUs can cache
    bool bootUncacheability;

  public:
    typedef ArmTLBParams Params;
    TLB(const Params *p);

    virtual ~TLB();

    /** @return the number of entries in this TLB */
    int getsize() const { return size; }

    /** Insert a translation for vaddr into the TLB */
    void insert(Addr vaddr, TlbEntry &pte);

    /** Reset the entire TLB */
    void flushAll();

    /** Remove any entries that match both a va and asn
     * @param mva virtual address to flush
     * @param asn contextid/asn to flush on match
     */
    void flushMvaAsid(Addr mva, uint64_t asn);

    /** Remove any entries that match the asn
     * @param asn contextid/asn to flush on match
     */
    void flushAsid(uint64_t asn);

    /** Remove all entries that match the va regardless of asn
     * @param mva address to flush from cache
     */
    void flushMva(Addr mva);

    Fault trickBoxCheck(RequestPtr req, Mode mode, uint8_t domain, bool sNp);
    Fault walkTrickBoxCheck(Addr pa, Addr va, Addr sz, bool is_exec,
                            bool is_write, uint8_t domain, bool sNp);

    /** Dump the TLB contents (debugging aid) */
    void printTlb();

    void allCpusCaching() { bootUncacheability = true; }

    // Demap is implemented as a flush of all entries matching va+asn
    void demapPage(Addr vaddr, uint64_t asn)
    {
        flushMvaAsid(vaddr, asn);
    }

    static bool validVirtualAddress(Addr vaddr);

    /**
     * Do a functional lookup on the TLB (for debugging)
     * and don't modify any internal state
     * @param tc thread context to get the context id from
     * @param vaddr virtual address to translate
     * @param pa returned physical address
     * @return if the translation was successful
     */
    bool translateFunctional(ThreadContext *tc, Addr vaddr, Addr &paddr);

    /**
     * Do a functional lookup on the TLB (for checker cpu) that
     * behaves like a normal lookup without modifying any page table state.
     */
    Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);

    /** Accessor functions for memory attributes for last accessed TLB entry
     */
    void
    setAttr(uint32_t attr)
    {
        _attr = attr;
    }

    uint32_t
    getAttr() const
    {
        return _attr;
    }

    // Full-system / syscall-emulation translation variants
    Fault translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
            Translation *translation, bool &delay,
            bool timing, bool functional = false);
    Fault translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
            Translation *translation, bool &delay, bool timing);

    Fault translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode);
    Fault translateTiming(RequestPtr req, ThreadContext *tc,
            Translation *translation, Mode mode);

    void drainResume();

    // Checkpointing
    void serialize(std::ostream &os);
    void unserialize(Checkpoint *cp, const std::string &section);

    void regStats();

    /**
     * Get the table walker master port. This is used for migrating
     * port connections during a CPU takeOverFrom() call. For
     * architectures that do not have a table walker, NULL is
     * returned, hence the use of a pointer rather than a
     * reference. For ARM this method will always return a valid port
     * pointer.
     *
     * @return A pointer to the walker master port
     */
    virtual BaseMasterPort* getMasterPort();

    // Caching misc register values here.
    // Writing to misc registers needs to invalidate them.
    // translateFunctional/translateSe/translateFs checks if they are
    // invalid and call updateMiscReg if necessary.
  protected:
    SCTLR sctlr;
    bool isPriv;
    CONTEXTIDR contextId;
    PRRR prrr;
    NMRR nmrr;
    uint32_t dacr;
    bool miscRegValid;
    // Refresh the cached misc-register copies from the thread context
    void updateMiscReg(ThreadContext *tc)
    {
        sctlr = tc->readMiscReg(MISCREG_SCTLR);
        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
        isPriv = cpsr.mode != MODE_USER;
        contextId = tc->readMiscReg(MISCREG_CONTEXTIDR);
        prrr = tc->readMiscReg(MISCREG_PRRR);
        nmrr = tc->readMiscReg(MISCREG_NMRR);
        dacr = tc->readMiscReg(MISCREG_DACR);
        miscRegValid = true;
    }

  public:
    const Params *
    params() const
    {
        return dynamic_cast<const Params *>(_params);
    }
    inline void invalidateMiscReg() { miscRegValid = false; }
};
} // namespace ArmISA
#endif // __ARCH_ARM_TLB_HH__
|
<reponame>MccreeFei/jframe
/**
*
*/
package jframe.pushy;
/**
* @author dzh
* @date Aug 29, 2015 2:18:51 PM
* @since 1.0
*/
public interface Fields {

    // Configuration key names looked up by the pushy push plugin.
    public static final String KEY_IOS_AUTH = "ios.auth";

    // NOTE(review): "<PASSWORD>" is a placeholder left by credential
    // redaction in this dump - the real key name (likely "ios.password")
    // must be restored before use.
    public static final String KEY_IOS_PASSWORD = "<PASSWORD>";

    public static final String KEY_HOST = "host";

    public static final String KEY_HOST_PORT = "host.port";

    public static final String KEY_FEEDBACK = "feedback";

    public static final String KEY_FEEDBACK_PORT = "feedback.port";

    public static final String KEY_PUSH_CONN_COUNT = "push.conn.count";
}
|
#!/bin/bash
# Filename:- gp_bash_functions.sh
# Status:- Released
# Author:- G L Coombe (Greenplum)
# Contact:- gcoombe@greenplum.com
# Release date:- March 2006
# Release stat:- Greenplum Internal
# Copyright (c) Metapa 2005. All Rights Reserved.
# Copyright (c) Greenplum 2005. All Rights Reserved
# Brief descn:- Common functions used by various scripts
#***************************************************************
# Location Functions
#******************************************************************************
#Check that SHELL is /bin/bash
# FIX: quote $SHELL so the test does not error out (and silently skip the
# check) when the variable is unset or contains whitespace.
if [ "$SHELL" != /bin/bash ] && [ `ls -al /bin/sh|grep -c bash` -ne 1 ];then
    echo "[FATAL]:-Scripts must be run by a user account that has SHELL=/bin/bash"
    if [ -f /bin/bash ];then
        echo "[INFO]:-/bin/bash exists, please update user account shell"
    else
        echo "[WARN]:-/bin/bash does not exist, does bash need to be installed?"
    fi
    exit 2
fi
#CMDPATH is the list of locations to search for commands, in precedence order
declare -a CMDPATH
CMDPATH=(/usr/kerberos/bin /usr/sfw/bin /opt/sfw/bin /usr/local/bin /bin /usr/bin /sbin /usr/sbin /usr/ucb /sw/bin)
#GPPATH is the list of possible locations for the Greenplum Database binaries, in precedence order
declare -a GPPATH
GPPATH=( $GPHOME $MPPHOME $BIZHOME )
# Unset elements collapse out of the array, so an empty array means none of
# GPHOME/MPPHOME/BIZHOME was set.
if [ ${#GPPATH[@]} -eq 0 ];then
    echo "[FATAL]:-GPHOME environment variable is required to run GPDB but could not be found."
    echo "Please set it by sourcing the greenplum_path.sh in your GPDB installation directory."
    echo "Example: ''. /usr/local/gpdb/greenplum_path.sh''"
    exit 2
fi
#GP_UNIQUE_COMMAND is used to identify the binary directory
GP_UNIQUE_COMMAND=gpstart
# findCmdInPath <command>
# Echo the absolute path of <command>, searching the directories in the
# CMDPATH array in precedence order.  If the command cannot be located,
# echo the bare command name (so callers capturing stdout still get a
# usable token), report the problem on stderr and return non-zero.
findCmdInPath() {
cmdtofind=$1
# Solaris /usr/bin/awk lacks POSIX features; prefer the xpg4 awk there.
if [ "$cmdtofind" = 'awk' ] && [ "`uname`" = "SunOS" ]; then
if [ -f /usr/xpg4/bin/awk ]; then
CMD=/usr/xpg4/bin/awk
echo $CMD
return
else
echo $cmdtofind
# 'return' only accepts a numeric status; the previous code passed the
# message string itself, which bash rejects and the text was lost.
echo "Problem in gp_bash_functions, command '/usr/xpg4/bin/awk' not found. You will need to edit the script named gp_bash_functions.sh to properly locate the needed commands for your platform." >&2
return 1
fi
fi
for pathel in ${CMDPATH[@]}
do
CMD=$pathel/$cmdtofind
if [ -f "$CMD" ]; then
echo $CMD
return
fi
done
echo $cmdtofind
echo "Problem in gp_bash_functions, command '$cmdtofind' not found in COMMAND path. You will need to edit the script named gp_bash_functions.sh to properly locate the needed commands for your platform." >&2
return 1
}
# findMppPath
# Search each candidate Greenplum install directory in the GPPATH array
# for $GP_UNIQUE_COMMAND and echo the path of the first regular file
# found; echoes nothing when no candidate matches.
findMppPath() {
cmdtofind=$GP_UNIQUE_COMMAND
for pathel in ${GPPATH[@]}; do
CMD=`find $pathel -follow -name $cmdtofind | tail -1`
# A non-empty hit that is a regular file wins; stop at the first one.
[ x"$CMD" != x"" ] && [ -f $CMD ] && { echo $CMD; return; }
done
}
#******************************************************************************
# OS Command Variables
#******************************************************************************
# Every external utility the scripts rely on is resolved once, up front,
# via findCmdInPath (which searches CMDPATH in precedence order) so later
# code is independent of the caller's PATH.
AWK=`findCmdInPath awk`
BASENAME=`findCmdInPath basename`
CAT=`findCmdInPath cat`
CKSUM=`findCmdInPath cksum`
CUT=`findCmdInPath cut`
DATE=`findCmdInPath date`
DD=`findCmdInPath dd`
DIRNAME=`findCmdInPath dirname`
DF=`findCmdInPath df`
DU=`findCmdInPath du`
ECHO=`findCmdInPath echo`
FIND=`findCmdInPath find`
GREP=`findCmdInPath grep`
EGREP=`findCmdInPath egrep`
HEAD=`findCmdInPath head`
HOSTNAME=`findCmdInPath hostname`
IFCONFIG=`findCmdInPath ifconfig`
LESSCMD=`findCmdInPath less`
LOCALE=`findCmdInPath locale`
MV=`findCmdInPath mv`
MKDIR=`findCmdInPath mkdir`
NETSTAT=`findCmdInPath netstat`
PING=`findCmdInPath ping`
PS=`findCmdInPath ps`
# Python is deliberately pinned to the copy bundled under GPHOME, not
# resolved from CMDPATH — assumes GPHOME is set (checked above).
PYTHON=${GPHOME}/ext/python/bin/python
RM=`findCmdInPath rm`
SCP=`findCmdInPath scp`
SED=`findCmdInPath sed`
SLEEP=`findCmdInPath sleep`
SORT=`findCmdInPath sort`
SSH=`findCmdInPath ssh`
TAIL=`findCmdInPath tail`
TEE=`findCmdInPath tee`
TOUCH=`findCmdInPath touch`
TR=`findCmdInPath tr`
WC=`findCmdInPath wc`
WHICH=`findCmdInPath which`
ZCAT=`findCmdInPath zcat`
#******************************************************************************
# Script Specific Variables
#******************************************************************************
# By default set error logging level to verbose
VERBOSE=1
USER_NAME=`id|$AWK '{print $1}'|$CUT -d"(" -f2|$TR -d ')'`
PROG_NAME=`echo $0 | $TR -d '-'`
PROG_NAME=`$BASENAME $PROG_NAME`
# Use the resolved $AWK (not a bare awk) so the correct implementation —
# e.g. the Solaris xpg4 awk — is used, consistent with the rest of the file.
PROG_PIDNAME=`echo $$ $PROG_NAME | $AWK '{printf "%06d %s\n", $1, $2}'`
CALL_HOST=`$HOSTNAME|$CUT -d. -f1`
#******************************************************************************
# Locate the postgres routines from the Greenplum release
#******************************************************************************
PSQLBIN=`findMppPath`
if [ x"$PSQLBIN" = x"" ];then
echo "Problem in gp_bash_functions, command '$GP_UNIQUE_COMMAND' not found in Greenplum path."
echo "Try setting GPHOME to the location of your Greenplum distribution."
exit 99
fi
PSQLBIN=`$DIRNAME $PSQLBIN`
SCRIPTDIR="`$DIRNAME $PSQLBIN`/bin"
#******************************************************************************
# Greenplum Scripts
#******************************************************************************
# Management utilities shipped alongside the binaries located above.
GPINITSYSTEM=$SCRIPTDIR/gpinitsystem
GPCONFIG=$SCRIPTDIR/gpconfig
GPCRONDUMP=$SCRIPTDIR/gpcrondump
GPINITSTANDBY=$SCRIPTDIR/gpinitstandby
GPRECOVERSEG=$SCRIPTDIR/gprecoverseg
GPSTART=$SCRIPTDIR/gpstart
GPSTATE=$SCRIPTDIR/gpstate
GPSTOP=$SCRIPTDIR/gpstop
GPDOCDIR=${GPHOME}/docs/cli_help/
#******************************************************************************
# Greenplum Command Variables
#******************************************************************************
# PostgreSQL binaries from the located release directory.
INITDB=$PSQLBIN/initdb
PG_CTL=$PSQLBIN/pg_ctl
PG_DUMP=$PSQLBIN/pg_dump
PG_DUMPALL=$PSQLBIN/pg_dumpall
PG_RESTORE=$PSQLBIN/pg_restore
PSQL=$PSQLBIN/psql
# Canned query: list databases with owner and encoding.
GPLISTDATABASEQTY="SELECT d.datname as \"Name\",
r.rolname as \"Owner\",
pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\"
FROM pg_catalog.pg_database d
JOIN pg_catalog.pg_authid r ON d.datdba = r.oid
ORDER BY 1;"
#******************************************************************************
# Greenplum OS Settings
#******************************************************************************
OS_OPENFILES=65535
#******************************************************************************
# General Variables
#******************************************************************************
HOSTFILE=/etc/hosts
PG_PID=postmaster.pid
PG_OPT=postmaster.opts
PG_CONF=postgresql.conf
PG_HBA=pg_hba.conf
# Remote shell/copy default to ssh/scp but may be pre-set by the caller.
if [ x"$TRUSTED_SHELL" = x"" ]; then TRUSTED_SHELL="$SSH"; fi
if [ x"$TRUSTED_COPY" = x"" ]; then TRUSTED_COPY="$SCP"; fi
PG_CONF_ADD_FILE=$WORKDIR/postgresql_conf_gp_additions
SCHEMA_FILE=cdb_schema.sql
DEFAULTDB=template1
# Inline view of the segment configuration joined to its filespace entry.
CONFIG_TABLE="(SELECT dbid, content, role, preferred_role, mode, status,
hostname, address, port, fselocation as datadir,
replication_port
FROM gp_segment_configuration
JOIN pg_filespace_entry ON (dbid = fsedbid)
JOIN pg_filespace fs ON (fs.oid = fsefsoid)
WHERE fsname = 'pg_system')"
GP_PG_VIEW="(SELECT dbid, role = 'p' as isprimary, content, status = 'u' as valid,
preferred_role = 'p' as definedprimary FROM gp_segment_configuration)"
DEFAULT_CHK_PT_SEG=8
DEFAULT_QD_MAX_CONNECT=250
QE_CONNECT_FACTOR=3
# DEFAULT_BUFFERS sets the default shared_buffers unless overridden by '-b'.
# It applies to the master db and segment dbs. Specify either the number of
# buffers (without suffix) or the amount of memory to use for buffers (with
# case-insensitive suffix 'kB', 'MB' or 'GB').
DEFAULT_BUFFERS=128000kB
DEBUG_LEVEL=0
BATCH_DEFAULT=60
WAIT_LIMIT=1800
WARN_MARK="<<<<<"
#******************************************************************************
# Functions
#******************************************************************************
# IN_ARRAY <value> <list>
# Return 1 when <value> equals one of the whitespace-separated words in
# <list>, 0 otherwise.  NB: deliberately the reverse of the usual shell
# success/failure convention; callers test the status directly.
IN_ARRAY () {
for candidate in $2; do
[ x"$candidate" == x"$1" ] && return 1
done
return 0
}
#
# NOTE: this function is called a lot; try to keep it quick.
#
# LOG_MSG <message> [display]
# Append a timestamped <message> to $LOG_FILE; when a second argument is
# given, or DEBUG_LEVEL is 1, also echo it to the terminal (via tee).
# Side effect: bumps the global EXIT_STATUS to 1/2 for WARN/FATAL tags.
LOG_MSG () {
TIMESTAMP=`$DATE +%Y%m%d":"%H":"%M":"%S`
DISPLAY_TXT=0
# Check to see if we need to update value of EXIT_STATUS. Strip off
# everything in the message after the first ending bracket ']' and
# compare it to WARN/FATAL.
level=${1%%]*}
case "$level" in
*WARN*)
EXIT_STATUS=1
;;
*FATAL*)
EXIT_STATUS=2
;;
esac
# Guard against an unset DEBUG_LEVEL (the numeric test below needs one).
if [ x"" == x"$DEBUG_LEVEL" ];then
DEBUG_LEVEL=1
fi
if [ $# -eq 2 ];then
DISPLAY_TXT=1
fi
if [ $VERBOSE ]; then
if [ $DEBUG_LEVEL -eq 1 ] || [ $DISPLAY_TXT -eq 1 ];then
$ECHO "${TIMESTAMP}:${PROG_PIDNAME}:${CALL_HOST}:${USER_NAME}-$1" | $TEE -a $LOG_FILE
else
$ECHO "${TIMESTAMP}:${PROG_PIDNAME}:${CALL_HOST}:${USER_NAME}-$1" >> $LOG_FILE
fi
else
$ECHO "${TIMESTAMP}:${PROG_PIDNAME}:${CALL_HOST}:${USER_NAME}-$1" >> $LOG_FILE
fi
}
# POSTGRES_VERSION_CHK <host>
# Compare the local `postgres --gp-version` string with the one reported
# by <host> over the trusted shell.  Sets VERSION_MATCH to 1 on a match,
# 0 otherwise, and bumps EXIT_STATUS on failure/mismatch.
POSTGRES_VERSION_CHK() {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
HOST=$1;shift
CURRENT_VERSION=`$EXPORT_GPHOME; $EXPORT_LIB_PATH; $GPHOME/bin/postgres --gp-version`
VERSION_MATCH=0
VER=`$TRUSTED_SHELL $HOST "$EXPORT_GPHOME; $EXPORT_LIB_PATH; $GPHOME/bin/postgres --gp-version"`
if [ $? -ne 0 ] ; then
LOG_MSG "[WARN]:- Failed to obtain postgres version on $HOST" 1
EXIT_STATUS=1
VERSION_MATCH=0
fi
LOG_MSG "[INFO]:- Current postgres version = $CURRENT_VERSION"
LOG_MSG "[INFO]:- postgres version on $HOST = $VER"
# An empty $VER (remote failure above) also lands in the mismatch branch.
if [ x"$VER" != x"$CURRENT_VERSION" ] ; then
LOG_MSG "[WARN]:-Postgres version does not match. [$CURRENT_VERSION != $VER]" 1
VERSION_MATCH=0
EXIT_STATUS=1
else
VERSION_MATCH=1
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# ERROR_EXIT <message> <exit-code>
# Log <message> to both the log file and the terminal, append backout
# instructions when a non-empty $BACKOUT_FILE exists, then exit with
# <exit-code>.  Never returns.
ERROR_EXIT () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
TIME=`$DATE +%H":"%M":"%S`
CUR_DATE=`$DATE +%Y%m%d`
$ECHO "${CUR_DATE}:${TIME}:${PROG_NAME}:${CALL_HOST}:${USER_NAME}-$1 Script Exiting!" >> $LOG_FILE
$ECHO "${CUR_DATE}:${TIME}:${PROG_NAME}:${CALL_HOST}:${USER_NAME}-$1 Script Exiting!"
DEBUG_LEVEL=1
# Quoted so an unset BACKOUT_FILE is a clean "false", not a test error.
if [ "$BACKOUT_FILE" ]; then
if [ -s $BACKOUT_FILE ]; then
LOG_MSG "[WARN]:-Script has left Greenplum Database in an incomplete state"
LOG_MSG "[WARN]:-Run command /bin/bash $BACKOUT_FILE to remove these changes"
BACKOUT_COMMAND "if [ x$MASTER_HOSTNAME != x\`$HOSTNAME\` ];then $ECHO \"[FATAL]:-Not on original master host $MASTER_HOSTNAME, backout script exiting!\";exit 2;fi"
$ECHO "$RM -f $BACKOUT_FILE" >> $BACKOUT_FILE
fi
fi
# The previous version had an unreachable LOG_MSG after this exit.
exit $2
}
# ERROR_CHK <retval> <description> <action>
# Inspect <retval>: 0 logs a success line; non-zero either logs a warning
# and sets EXIT_STATUS=1 (<action>=1) or aborts via ERROR_EXIT (otherwise).
ERROR_CHK () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
if [ $# -ne 3 ];then
# Temporarily force terminal display for the bad-call warning.
INITIAL_LEVEL=$DEBUG_LEVEL
DEBUG_LEVEL=1
LOG_MSG "[WARN]:-Incorrect # parameters supplied to $FUNCNAME"
DEBUG_LEVEL=$INITIAL_LEVEL
return
fi
RETVAL=$1
MSG_TXT=$2
ACTION=$3 #1=issue warn, 2=fatal
if [ $RETVAL -eq 0 ];then
LOG_MSG "[INFO]:-Successfully completed $MSG_TXT"
LOG_MSG "[INFO]:-End Function $FUNCNAME"
return
fi
if [ $ACTION -eq 1 ];then
INITIAL_LEVEL=$DEBUG_LEVEL
DEBUG_LEVEL=1
LOG_MSG "[WARN]:-Issue with $MSG_TXT"
EXIT_STATUS=1
DEBUG_LEVEL=$INITIAL_LEVEL
LOG_MSG "[INFO]:-End Function $FUNCNAME"
else
LOG_MSG "[INFO]:-End Function $FUNCNAME"
ERROR_EXIT "[FATAL]:-Failed to complete $MSG_TXT " 2
fi
}
# RETRY
# ERR-trap handler used by SED_PG_CONF: if the command that just failed
# (saved in $CURRENT by a DEBUG trap) looks like an ssh invocation, re-run
# it up to three times with increasing back-off.  Sets RETVAL to 0 on a
# successful retry; otherwise RETVAL keeps the original failure status.
RETRY () {
RETVAL=$?
if [[ "$CURRENT" =~ "ssh" ]]; then
for i in 2 4 8; do
sleep $i
LOG_MSG "[WARN]:-Retrying command -- $CURRENT"
eval "$CURRENT"
if [ $? = 0 ]; then
RETVAL=0
# There seems to be no way of grabbing the return code of a
# trap other than saving it to a variable
return
fi
done
fi
}
# SED_PG_CONF <file> <search-text> <substitute-text> <keep-prev> [host]
# Set a configuration line in <file>, locally or (with [host]) remotely
# via the trusted shell.  If <search-text> is absent, or appears more
# than once, <substitute-text> is appended; otherwise the matching line
# is rewritten in place (keep-prev=0 keeps the old text as a trailing
# comment) and a previously commented-out line is un-commented.
SED_PG_CONF () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
SED_TMP_FILE=/tmp/sed_text.$$
APPEND=0
FILENAME=$1;shift
SEARCH_TXT=$1;shift
SUB_TXT="$1";shift
KEEP_PREV=$1;shift
SED_HOST=$1
if [ x"" == x"$SED_HOST" ]; then
if [ `$GREP -c "${SEARCH_TXT}[ ]*=" $FILENAME` -gt 1 ]; then
LOG_MSG "[INFO]:-Found more than 1 instance of $SEARCH_TXT in $FILENAME, will append" 1
APPEND=1
fi
if [ `$GREP -c "${SEARCH_TXT}[ ]*=" $FILENAME` -eq 0 ] || [ $APPEND -eq 1 ]; then
$ECHO $SUB_TXT >> $FILENAME
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
LOG_MSG "[WARN]:-Failed to append line $SUB_TXT to $FILENAME" 1
else
LOG_MSG "[INFO]:-Appended line $SUB_TXT to $FILENAME"
fi
else
if [ $KEEP_PREV -eq 0 ];then
$SED -i'.bak1' -e "s/${SEARCH_TXT}/${SUB_TXT} #${SEARCH_TXT}/" $FILENAME
else
$SED -i'.bak1' -e "s/${SEARCH_TXT}.*/${SUB_TXT}/" $FILENAME
fi
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
ERROR_EXIT "[FATAL]:-Failed to replace $SEARCH_TXT in $FILENAME" 2
else
LOG_MSG "[INFO]:-Replaced line in $FILENAME"
$RM -f ${FILENAME}.bak1
fi
$SED -i'.bak2' -e "s/^#${SEARCH_TXT}/${SEARCH_TXT}/" $FILENAME
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
ERROR_EXIT "[FATAL]:-Failed to replace #$SEARCH_TXT in $FILENAME" 2
else
LOG_MSG "[INFO]:-Replaced line in $FILENAME"
$RM -f ${FILENAME}.bak2
fi
fi
else
# trap DEBUG will always be called first, when other traps are triggered.
# We need to make sure that we save the current running command, so
# that the RETRY function re-runs the command
trap 'CURRENT=$BASH_COMMAND' DEBUG
# Call out retry for commands that fail
trap RETRY ERR
RETVAL=0 # RETVAL gets modified in RETRY function whenever the trap is called
if [ `$TRUSTED_SHELL $SED_HOST "$GREP -c \"${SEARCH_TXT}\" $FILENAME"` -gt 1 ]; then
LOG_MSG "[INFO]:-Found more than 1 instance of $SEARCH_TXT in $FILENAME on $SED_HOST, will append" 1
APPEND=1
fi
if [ `$TRUSTED_SHELL $SED_HOST "$GREP -c \"${SEARCH_TXT}\" $FILENAME"` -eq 0 ] || [ $APPEND -eq 1 ]; then
$TRUSTED_SHELL $SED_HOST "$ECHO \"$SUB_TXT\" >> $FILENAME"
# NOTE(review): RETVAL here comes from the ERR/RETRY trap, not from a
# direct $? check — confirm the trap fires for the remote command.
if [ $RETVAL -ne 0 ]; then
ERROR_EXIT "[FATAL]:-Failed to append line $SUB_TXT to $FILENAME on $SED_HOST" 2
else
LOG_MSG "[INFO]:-Appended line $SUB_TXT to $FILENAME on $SED_HOST" 1
fi
else
if [ $KEEP_PREV -eq 0 ];then
$ECHO "s/${SEARCH_TXT}/${SUB_TXT} #${SEARCH_TXT}/" > $SED_TMP_FILE
else
$ECHO "s/${SEARCH_TXT}.*/${SUB_TXT}/" > $SED_TMP_FILE
fi
# Copy the sed program to the remote host, then run it there.
$CAT $SED_TMP_FILE | $TRUSTED_SHELL ${SED_HOST} $DD of=$SED_TMP_FILE > /dev/null 2>&1
$TRUSTED_SHELL $SED_HOST "sed -i'.bak1' -f $SED_TMP_FILE $FILENAME" > /dev/null 2>&1
if [ $RETVAL -ne 0 ]; then
ERROR_EXIT "[FATAL]:-Failed to insert $SUB_TXT in $FILENAME on $SED_HOST" 2
else
LOG_MSG "[INFO]:-Replaced line in $FILENAME on $SED_HOST"
$TRUSTED_SHELL $SED_HOST "$RM -f ${FILENAME}.bak1" > /dev/null 2>&1
fi
$ECHO "s/^#${SEARCH_TXT}/${SEARCH_TXT}/" > $SED_TMP_FILE
$CAT $SED_TMP_FILE | $TRUSTED_SHELL ${SED_HOST} $DD of=$SED_TMP_FILE > /dev/null 2>&1
$TRUSTED_SHELL $SED_HOST "sed -i'.bak2' -f $SED_TMP_FILE $FILENAME" > /dev/null 2>&1
if [ $RETVAL -ne 0 ]; then
ERROR_EXIT "[FATAL]:-Failed to substitute #${SEARCH_TXT} in $FILENAME on $SED_HOST" 2
else
LOG_MSG "[INFO]:-Replaced line in $FILENAME on $SED_HOST"
$TRUSTED_SHELL $SED_HOST "$RM -f ${FILENAME}.bak2" > /dev/null 2>&1
fi
$TRUSTED_SHELL $SED_HOST "$RM -f $SED_TMP_FILE"
$RM -f $SED_TMP_FILE
fi
trap - ERR DEBUG # Disable trap
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CHK_EXTERNAL
# Query the master (utility psql) to see whether $SCHEMA_NAME.$TABLENAME
# is an external table.  Sets EXTERNAL to the number of matching rows
# (0 = not external, 1 = external).
CHK_EXTERNAL () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
EXTERNAL=`$EXPORT_LIB_PATH;$PSQL -A -t -q -p $MASTER_PORT -d "$QD_DBNAME" -c"select 1 from pg_exttable where reloid in (select oid from pg_class where relname='$TABLENAME' and relnamespace in (select oid from pg_namespace where nspname='$SCHEMA_NAME'));"|$WC -l`
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# POSTGRES_PORT_CHK <port> <host>
# Abort fatally when <host> already has an active database process on
# <port> (as detected by GET_PG_PID_ACTIVE setting PID non-zero).
POSTGRES_PORT_CHK () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
GET_PG_PID_ACTIVE $1 $2
[ $PID -eq 0 ] || ERROR_EXIT "[FATAL]:-Host $2 has an active database process on port = $1" 2
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CREATE_SPREAD_MIRROR_ARRAY
# Build QE_MIRROR_ARRAY from QE_PRIMARY_ARRAY using "spread" placement:
# the mirrors of one host's primaries are scattered across the following
# hosts/subnets (wrapping round), one mirror per destination.  Each entry
# is "host~port~datadir~dbid~content~replication_port".  Also verifies
# each chosen mirror port is free and advances the global DBID_COUNT.
CREATE_SPREAD_MIRROR_ARRAY () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
((MAX_ARRAY=${#QE_PRIMARY_ARRAY[@]}-1))
# Current host and subnet we are working on
CURRENT_HOST=0
CURRENT_SUBNET=0
# Destination host and subnet
DEST_HOST=0
DEST_SUBNET=0
if [ x"$NUM_MHOST_NODE" != x"" ] && [ $NUM_MHOST_NODE -gt 0 ] ; then
((DIRS_PER_SUBNET=$NUM_DATADIR/$NUM_MHOST_NODE))
else
DIRS_PER_SUBNET=$NUM_DATADIR
fi
((MAX_SUBNET=$NUM_DATADIR/$DIRS_PER_SUBNET))
((MAX_HOST=${#QE_PRIMARY_ARRAY[@]}/$NUM_DATADIR))
SEGS_PROCESSED=0
SEGS_PROCESSED_HOST=0
# The following is heavily dependent on sort order of primary array. This sort
# order will be affected by hostnames so something non-standard will cause
# strange behaviour. This isn't new (just recording this fact for future generations)
# and can be worked around with a mapping file to gpinitsystem (-I option).
# The right way to do this would require us to connect to remote hosts, determine
# what subnet we are on for that hostname and then build the array that way. We *will*
# do this once this is in python (or anything other than BASH)
LOG_MSG "[INFO]:-Building spread mirror array type $MULTI_TXT, please wait..." 1
for QE_LINE in ${QE_PRIMARY_ARRAY[@]}
do
# Progress dot per segment when running quietly on a terminal.
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
if [ $(($SEGS_PROCESSED%$NUM_DATADIR)) -eq 0 ] ; then
# A new host group is starting
if [ $SEGS_PROCESSED -ne 0 ] ; then ((CURRENT_HOST=$CURRENT_HOST+1)); fi
# Start the mirroring on the next host
((DEST_HOST=$CURRENT_HOST+1))
# Always subnet "0" to start
CURRENT_SUBNET=0
DEST_SUBNET=1
# Make sure we loop back when needed
if [ $DEST_HOST -ge $MAX_HOST ] ; then DEST_HOST=0; fi
SEGS_PROCESSED_HOST=0
else
# Continue with current host
# move dest host to the next one (This is spread mirroring)
((DEST_HOST=$DEST_HOST+1))
# Make sure we look back when needed
if [ $DEST_HOST -ge $MAX_HOST ] ; then DEST_HOST=0; fi
# Get what subnet we are on, we may have moved to next
((CURRENT_SUBNET=($SEGS_PROCESSED_HOST+1)/$DIRS_PER_SUBNET))
((DEST_SUBNET=$CURRENT_SUBNET+1))
# Handle looping over
if [ $DEST_SUBNET -ge $MAX_SUBNET ] ; then DEST_SUBNET=0; fi
# Increment the number of segments we've processed for this host
((SEGS_PROCESSED_HOST=$SEGS_PROCESSED_HOST+1))
fi
# Handle the case where it's a single hostname (thus a single subnet)
# This case will mainly be for QA testing
if [ $NUM_DATADIR -eq $DIRS_PER_SUBNET ] ; then DEST_SUBNET=0; fi
# Handle possible loop
if [ $DEST_SUBNET -ge $MAX_SUBNET ] ; then DEST_SUBNET=0; fi
# Calculate the index based on host and subnet number
((PRIM_SEG_INDEX=($DEST_HOST*$NUM_DATADIR)+($DEST_SUBNET*$DIRS_PER_SUBNET)))
# Primary-array fields are "~"-separated: 1=host 2=port 3=datadir 5=content 6=repl port.
QE_M_NAME=`$ECHO ${QE_PRIMARY_ARRAY[$PRIM_SEG_INDEX]}|$AWK -F"~" '{print $1}'`
GP_M_DIR=${MIRROR_DATA_DIRECTORY[$SEGS_PROCESSED%$NUM_DATADIR]}
P_PORT=`$ECHO $QE_LINE|$AWK -F"~" '{print $2}'`
P_REPL_PORT=`$ECHO $QE_LINE|$AWK -F"~" '{print $6}'`
((GP_M_PORT=$P_PORT+$MIRROR_OFFSET))
((M_REPL_PORT=$P_REPL_PORT+$MIRROR_REPLICATION_PORT_OFFSET))
M_CONTENT=`$ECHO $QE_LINE|$AWK -F"~" '{print $5}'`
M_SEG=`$ECHO $QE_LINE|$AWK -F"~" '{print $3}'|$AWK -F"/" '{print $NF}'`
QE_MIRROR_ARRAY=(${QE_MIRROR_ARRAY[@]} ${QE_M_NAME}~${GP_M_PORT}~${GP_M_DIR}/${M_SEG}~${DBID_COUNT}~${M_CONTENT}~${M_REPL_PORT})
POSTGRES_PORT_CHK $GP_M_PORT $QE_M_NAME
((DBID_COUNT=$DBID_COUNT+1))
((SEGS_PROCESSED=$SEGS_PROCESSED+1))
done
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CREATE_GROUP_MIRROR_ARRAY
# Build QE_MIRROR_ARRAY from QE_PRIMARY_ARRAY using "group" placement:
# all mirrors for one host's primaries land together on the next host
# (wrapping round), rotated across subnets.  Entry format and side
# effects match CREATE_SPREAD_MIRROR_ARRAY.
CREATE_GROUP_MIRROR_ARRAY () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
LOG_MSG "[INFO]:-Building group mirror array type $MULTI_TXT, please wait..." 1
PRI_HOST_COUNT=`$ECHO ${QE_PRIMARY_ARRAY[@]}|$TR ' ' '\n'|$AWK -F"~" '{print $1}'|$SORT -u|$WC -l`
if [ $MULTI_HOME -eq 1 ] && [ $REMOTE_HOST_COUNT -eq 1 ];then
PRI_HOST_COUNT=1
fi
if [ x"$NUM_MHOST_NODE" != x"" ] && [ $NUM_MHOST_NODE -gt 0 ] ; then
((DIRS_PER_SUBNET=$NUM_DATADIR/$NUM_MHOST_NODE))
else
DIRS_PER_SUBNET=$NUM_DATADIR
fi
((MAX_SUBNET=$NUM_DATADIR/$DIRS_PER_SUBNET))
((MAX_HOST=${#QE_PRIMARY_ARRAY[@]}/$NUM_DATADIR))
# Current host we are working on
CURRENT_HOST=0
# Destination host and subnet
DEST_HOST=0
DEST_SUBNET=0
PRIMARY_ARRAY_LENGTH=${#QE_PRIMARY_ARRAY[@]}
PRIMARY_INDEX=0
for QE_LINE in ${QE_PRIMARY_ARRAY[@]}
do
if [ $(($PRIMARY_INDEX%$NUM_DATADIR)) -eq 0 ] ; then
# New host group: mirrors for this host go to the next host.
if [ $PRIMARY_INDEX -ne 0 ] ; then ((CURRENT_HOST=$CURRENT_HOST+1)); fi
((DEST_HOST=$CURRENT_HOST+1))
if [ $DEST_HOST -ge $MAX_HOST ] ; then DEST_HOST=0; fi
DEST_SUBNET=1
else
if [ $(($PRIMARY_INDEX%$DIRS_PER_SUBNET)) -eq 0 ] ; then
((DEST_SUBNET=$DEST_SUBNET+1))
fi
fi
# Handle possible loop
if [ $DEST_SUBNET -ge $MAX_SUBNET ] ; then DEST_SUBNET=0; fi
((MIRROR_INDEX=($DEST_HOST*$NUM_DATADIR)+($DEST_SUBNET*$DIRS_PER_SUBNET)))
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
# Primary-array fields are "~"-separated: 1=host 2=port 3=datadir 5=content 6=repl port.
QE_M_NAME=`$ECHO ${QE_PRIMARY_ARRAY[$MIRROR_INDEX]}|$AWK -F"~" '{print $1}'`
GP_M_DIR=${MIRROR_DATA_DIRECTORY[$PRIMARY_INDEX%$NUM_DATADIR]}/`$ECHO $QE_LINE|$AWK -F"~" '{print $3}'|$AWK -F"/" '{print $NF}'`
M_CONTENT=`$ECHO $QE_LINE|$AWK -F"~" '{print $5}'`
P_PORT=`$ECHO $QE_LINE|$AWK -F"~" '{print $2}'`
P_REPL_PORT=`$ECHO $QE_LINE|$AWK -F"~" '{print $6}'`
GP_M_PORT=$(($P_PORT+$MIRROR_OFFSET))
M_REPL_PORT=$(($P_REPL_PORT+$MIRROR_REPLICATION_PORT_OFFSET))
QE_MIRROR_ARRAY=(${QE_MIRROR_ARRAY[@]} ${QE_M_NAME}~${GP_M_PORT}~${GP_M_DIR}~${DBID_COUNT}~${M_CONTENT}~${M_REPL_PORT})
POSTGRES_PORT_CHK $GP_M_PORT $QE_M_NAME
DBID_COUNT=$(($DBID_COUNT+1))
PRIMARY_INDEX=$((PRIMARY_INDEX+1))
done
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# GET_REPLY <prompt>
# Display <prompt>, read one line from stdin and continue only on an
# explicit Y/y; any other answer (including empty) aborts with exit 1.
GET_REPLY () {
$ECHO -e "\n$1 Yy|Nn (default=N):"
$ECHO -n "> "
read REPLY
# $REPLY must be quoted: unquoted, an empty or multi-word answer made the
# test expression malformed, and the malformed-test failure was treated
# as "not empty"/"matches Y", letting garbage input through.
if [ -z "$REPLY" ]; then
LOG_MSG "[WARN]:-User abort requested, Script Exits!" 1
exit 1
fi
if [ "$REPLY" != Y ] && [ "$REPLY" != y ]; then
LOG_MSG "[WARN]:-User abort requested, Script Exits!" 1
exit 1
fi
}
# CHK_MULTI_HOME [full]
# Decide whether the array is multi-homed by comparing the number of
# distinct configured segment host names with the number of distinct
# host names those hosts actually report.  With no argument only the
# first two segments are probed; any argument probes every segment.
# Sets MULTI_HOME (0|1) and MULTI_TXT.
CHK_MULTI_HOME () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
GET_QE_DETAILS
MULTI_ARRAY=()
J=0
if [ x"" == x"$1" ];then
#Select two hosts to test as we do not want to do the whole array
LOG_MSG "[INFO]:-Obtaining GPDB array type, [Brief], please wait..." 1
while [ $J -lt 2 ]
do
QE_HOST=`$ECHO ${QE_ARRAY[$J]}|$AWK -F"|" '{print $1}'`
REMOTE_HOSTNAME=`$TRUSTED_SHELL $QE_HOST "$HOSTNAME"`
MULTI_ARRAY=(${MULTI_ARRAY[@]} ${QE_HOST}:$REMOTE_HOSTNAME)
((J=$J+1))
done
else
LOG_MSG "[INFO]:-Obtaining GPDB array type, [Full], please wait..." 1
for QE_LINE in ${QE_ARRAY[@]}
do
QE_HOST=`$ECHO $QE_LINE|$AWK -F"|" '{print $1}'`
REMOTE_HOSTNAME=`$TRUSTED_SHELL $QE_HOST "$HOSTNAME"`
MULTI_ARRAY=(${MULTI_ARRAY[@]} ${QE_HOST}:$REMOTE_HOSTNAME)
done
fi
# MULTI_ARRAY entries are "<configured-host>:<reported-host>", so split on
# ":".  The previous code split on "~", which never matches: it counted
# whole pairs for SEG_HOST_COUNT and left REMOTE_HOST_COUNT stuck at 1,
# breaking multi-home detection.  Also use the resolved $WC.
SEG_HOST_COUNT=`$ECHO ${MULTI_ARRAY[@]}|$TR ' ' '\n'|$AWK -F":" '{print $1}'|$SORT -u|$WC -l`
REMOTE_HOST_COUNT=`$ECHO ${MULTI_ARRAY[@]}|$TR ' ' '\n'|$AWK -F":" '{print $2}'|$SORT -u|$WC -l`
if [ $SEG_HOST_COUNT -eq $REMOTE_HOST_COUNT ];then
LOG_MSG "[INFO]:-Non multi-home configuration"
MULTI_HOME=0
MULTI_TXT="Standard"
else
LOG_MSG "[INFO]:-Multi-home configuration"
MULTI_HOME=1
MULTI_TXT="Multi-home"
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CHK_FILE <file> [host]
# Set EXISTS=0 when <file> is a readable, non-empty file (locally, or on
# [host] via the trusted shell); EXISTS=1 otherwise, including when the
# remote check itself fails (which also bumps EXIT_STATUS).
CHK_FILE () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
FILENAME=$1
FILE_HOST=$2
if [ x"" == x"$FILE_HOST" ];then
LOG_MSG "[INFO]:-Checking file $FILENAME"
# Non-empty AND readable -> 0; anything else -> 1.
if [ -s $FILENAME ] && [ -r $FILENAME ];then
EXISTS=0
else
EXISTS=1
fi
else
EXISTS=`$TRUSTED_SHELL $FILE_HOST "if [ ! -s $FILENAME ] || [ ! -r $FILENAME ];then $ECHO 1;else $ECHO 0;fi"`
RETVAL=$?
if [ $RETVAL -ne 0 ];then
LOG_MSG "[WARN]:-Failed to obtain details of $FILENAME on $FILE_HOST"
EXIT_STATUS=1
EXISTS=1
fi
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CHK_DIR <dir> [host] [quiet]
# Set EXISTS=0 when <dir> exists (locally, or on [host] via the trusted
# shell); EXISTS=1 otherwise.  A non-empty third argument suppresses the
# start/end log lines.
CHK_DIR () {
# this function might be called very early, before logfiles are initialized
if [ x"" == x"$3" ];then
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
fi
DIR_NAME=$1
DIR_HOST=$2
if [ x"" == x"$DIR_HOST" ];then
# Local check: no subshell needed, assign directly.
if [ -d $DIR_NAME ];then
EXISTS=0
else
EXISTS=1
fi
else
EXISTS=`$TRUSTED_SHELL $DIR_HOST "if [ -d $DIR_NAME ];then $ECHO 0;else $ECHO 1;fi"`
RETVAL=$?
if [ $RETVAL -ne 0 ];then
LOG_MSG "[WARN]:-Failed to obtain details of $DIR_NAME on $DIR_HOST" 1
EXIT_STATUS=1
EXISTS=1
fi
fi
if [ x"" == x"$3" ];then
LOG_MSG "[INFO]:-End Function $FUNCNAME"
fi
}
# GET_MASTER_PORT <master-data-directory>
# Extract the last effective "port" setting from postgresql.conf in the
# given directory into MASTER_PORT, following include directives (via
# GET_MASTER_PORT_RECUR) when the top-level file has no port line.
# Aborts fatally when the directory/file is missing or no port is found.
GET_MASTER_PORT () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
MASTER_DATA_DIRECTORY=$1
if [ x"" == x"$MASTER_DATA_DIRECTORY" ];then
ERROR_EXIT "[FATAL]:-MASTER_DATA_DIRECTORY variable not set" 2;fi
if [ ! -d $MASTER_DATA_DIRECTORY ]; then
ERROR_EXIT "[FATAL]:-No $MASTER_DATA_DIRECTORY directory" 2
fi
if [ -r $MASTER_DATA_DIRECTORY/$PG_CONF ];then
# Strip trailing comments, split on '=', keep the last "port" value.
MASTER_PORT=`$AWK 'split($0,a,"#")>0 && split(a[1],b,"=")>1 {print b[1] " " b[2]}' $MASTER_DATA_DIRECTORY/$PG_CONF | $AWK '$1=="port" {print $2}' | $TAIL -1`
if [ x"" == x"$MASTER_PORT" ] ; then
#look for include files
for INC_FILE in `$AWK '/^[ ]*include /{print $2}' $MASTER_DATA_DIRECTORY/$PG_CONF | $TR -d "'\""` ; do
# Relative include paths are resolved against the data directory.
if [[ $INC_FILE == /* ]] ; then
GET_MASTER_PORT_RECUR "$INC_FILE" 1
else
GET_MASTER_PORT_RECUR "$MASTER_DATA_DIRECTORY/$INC_FILE" 1
fi
done
if [ x"" == x"$MASTER_PORT" ] ; then
ERROR_EXIT "[FATAL]:-Failed to obtain master port number from $MASTER_DATA_DIRECTORY/$PG_CONF" 2
fi
fi
else
ERROR_EXIT "[FATAL]:-Do not have read access to $MASTER_DATA_DIRECTORY/$PG_CONF" 2
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# GET_MASTER_PORT_RECUR <config-file> <depth>
# Helper for GET_MASTER_PORT: scan <config-file> for the last effective
# "port" setting and, failing that, recurse into any files it includes
# (relative paths resolved against MASTER_DATA_DIRECTORY, maximum
# nesting depth 10).  Sets MASTER_PORT on success.
GET_MASTER_PORT_RECUR () {
INCLUDED_FILE=$1
RECUR=$2
if [ $RECUR -le 10 ] ; then
MASTER_PORT=`$AWK 'split($0,a,"#")>0 && split(a[1],b,"=")>1 {print b[1] " " b[2]}' $INCLUDED_FILE | $AWK '$1=="port" {print $2}' | $TAIL -1`
if [ x"" == x"$MASTER_PORT" ] ; then
#look for include files
let CURR_DEPTH=$RECUR+1
# Scan $INCLUDED_FILE (the file passed in).  The previous code scanned
# $INC_FILE — the loop variable, unset at this point — so awk read
# stdin instead and include recursion never found anything.
for INC_FILE in `$AWK '/^[ ]*include /{print $2}' $INCLUDED_FILE | $TR -d "'\""` ; do
if [[ $INC_FILE == /* ]] ; then
GET_MASTER_PORT_RECUR "$INC_FILE" $CURR_DEPTH
else
GET_MASTER_PORT_RECUR "$MASTER_DATA_DIRECTORY/$INC_FILE" $CURR_DEPTH
fi
if [ x"" != x"$MASTER_PORT" ] ; then
break
fi
done
fi
else
# Pass the exit code ERROR_EXIT expects (was missing previously).
ERROR_EXIT "[FATAL]:-Could not open configuration file \"$INCLUDED_FILE\": maximum nesting depth exceeded" 2
fi
}
# MPP-15889
# GET_CIDRADDR <ip-address>
# Echo the address with a host-sized CIDR suffix: /128 when it contains
# a ':' (IPv6), /32 otherwise (IPv4).
GET_CIDRADDR () {
case "$1" in
*:*) echo $1/128 ;;
*) echo $1/32 ;;
esac
}
# BUILD_MASTER_PG_HBA_FILE <master-data-directory>
# Rewrite the master's pg_hba.conf: keep only comment lines, then grant
# the admin user local/loopback access plus trust entries for every
# master, standby and local IPv6 address, and add replication entries.
# Relies on TMP_PG_HBA and the *_IP_ADDRESS_ALL arrays being set by the
# caller.
BUILD_MASTER_PG_HBA_FILE () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
if [ $# -eq 0 ];then ERROR_EXIT "[FATAL]:-Passed zero parameters, expected at least 1" 2;fi
GP_DIR=$1
LOG_MSG "[INFO]:-Clearing values in Master $PG_HBA"
# Keep only the commented lines from the existing file.
$GREP "^#" ${GP_DIR}/$PG_HBA > $TMP_PG_HBA
$MV $TMP_PG_HBA ${GP_DIR}/$PG_HBA
LOG_MSG "[INFO]:-Setting local access"
$ECHO "local all $USER_NAME $PG_METHOD" >> ${GP_DIR}/$PG_HBA
#$ECHO "local all all $PG_METHOD" >> ${GP_DIR}/$PG_HBA
LOG_MSG "[INFO]:-Setting local host access"
$ECHO "host all $USER_NAME 127.0.0.1/28 trust" >> ${GP_DIR}/$PG_HBA
for ADDR in "${MASTER_IP_ADDRESS_ALL[@]}"
do
# MPP-15889
CIDRADDR=$(GET_CIDRADDR $ADDR)
$ECHO "host all $USER_NAME $CIDRADDR trust" >> ${GP_DIR}/$PG_HBA
done
for ADDR in "${STANDBY_IP_ADDRESS_ALL[@]}"
do
# MPP-15889
CIDRADDR=$(GET_CIDRADDR $ADDR)
$ECHO "host all $USER_NAME $CIDRADDR trust" >> ${GP_DIR}/$PG_HBA
done
# Add all local IPV6 addresses
for ADDR in "${MASTER_IPV6_LOCAL_ADDRESS_ALL[@]}"
do
# MPP-15889
CIDRADDR=$(GET_CIDRADDR $ADDR)
$ECHO "host all $USER_NAME $CIDRADDR trust" >> ${GP_DIR}/$PG_HBA
done
# Add replication config
$ECHO "local replication $USER_NAME $PG_METHOD" >> ${GP_DIR}/$PG_HBA
$ECHO "host replication $USER_NAME samenet trust" >> ${GP_DIR}/$PG_HBA
LOG_MSG "[INFO]:-Complete Master $PG_HBA configuration"
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# BUILD_GPSSH_CONF <directory>
# Append a default gpssh.conf (timing/retry tuning for gpssh) under the
# given directory.  The heredoc below is the file's literal content —
# do not edit it for style; those '#' lines are config comments, not
# shell comments.
BUILD_GPSSH_CONF () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
if [ $# -eq 0 ];then ERROR_EXIT "[FATAL]:-Passed zero parameters, expected at least 1" 2;fi
GP_DIR=$1
$CAT <<_EOF_ >> $GP_DIR/gpssh.conf
[gpssh]
# delaybeforesend specifies the time in seconds to wait at the
# beginning of an ssh interaction before doing anything.
# Increasing this value can have a big runtime impact at the
# beginning of gpssh.
delaybeforesend = 0.05
# prompt_validation_timeout specifies a timeout multiplier that
# will be used in validating the ssh prompt. Increasing this
# value will have a small runtime impact at the beginning of
# gpssh.
prompt_validation_timeout = 1.0
# sync_retries specifies how many times to try the pxssh
# connection verification.
# Setting this value to 1 means gpssh will immediately pass
# along pxssh's best effort.
# Increasing this value will allow for slow network connections,
# cpu load, or other slowness on the segment host, but will
# also delay feedback when a connection cannot be established
# for other reasons
sync_retries = 3
_EOF_
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# BUILD_PERFMON <directory>
# Create the gpperfmon conf/logs/data directory tree under <directory>
# and append a default gpperfmon.conf.  The heredoc is the file's
# literal content; its '#' lines are config comments, not shell comments.
BUILD_PERFMON() {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
GP_DIR=$1
$MKDIR -p $GP_DIR/gpperfmon/conf $GP_DIR/gpperfmon/logs $GP_DIR/gpperfmon/data
$CAT <<_EOF_ >> $GP_DIR/gpperfmon/conf/gpperfmon.conf
[GPMMON]
# quantum specifies the time in seconds between updates from
# performance monitor agents on all segments.  Valid values
# are 10, 15, 20, 30, or 60
quantum = 15
# min_query_time specifies the minimum query run time
# in seconds for statistics collection. The monitor logs all
# queries that run longer than this value in the queries_history
# table. For queries with shorter run times, no historical
# data is collected.
min_query_time = 20
# This should be a percentage between 0 and 100 and should be
# less than the error_disk_space_percentage.  If a filesystem’s
# disk space used percentage equals or exceeds this value a
# warning will be logged and a warning email/snmp trap may be
# sent.  If this configuration is set to 0 or not specified, no
# warnings are sent.
#warning_disk_space_percentage = 80
# This should be a percentage between 0 and 100 and should be
# greater than the warning_disk_space_percentage.  If a
# filesystem’s disk space used percentage equals or exceeds
# this value an error will be logged and a error email/snmp
# trap may be sent.  If this configuration is set to 0 or not
# specified, no errors are sent.
#error_disk_space_percentage = 90
#This is the interval in minutes that limits the number of
#error/warning messages that are sent.  The minimum value for
#this configuration is 1.  Setting this to 0 or not specifying
#this configuration results in it getting set to the minimum.
disk_space_interval = 60
#This is the maximum number of error/warning messages that
#will be sent in the disk_space_interval.  The maximum value
#for this configuration is 50.  The minimum value for this
#configuration is 1.  Setting this configuration to greater
#than 50 or not specifying this configuration results in it
#getting set to the maximum.
max_disk_space_messages_per_interval = 10
# The number of partitions for statistics data in month
# will be retained.  Older partitions will be dropped.
#partition_age = 6
log_location = $GP_DIR/gpperfmon/logs
_EOF_
}
# CHK_DB_RUNNING [check-dispatch]
# Verify the master instance is up: the data directory and postmaster.pid
# must exist and a utility-mode psql query must succeed (fatal otherwise).
# With any argument, additionally test normal (dispatch) access and set
# EXIT_STATUS=1 when only admin mode works, 0 when dispatch works.
CHK_DB_RUNNING () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
if [ $# -eq 1 ];then
CHK_DISPATCH_ACCESS=1
else
CHK_DISPATCH_ACCESS=0
fi
if [ ! -d $MASTER_DATA_DIRECTORY ]; then
ERROR_EXIT "[FATAL]:-No Master $MASTER_DATA_DIRECTORY directory" 2
fi
if [ ! -f $MASTER_DATA_DIRECTORY/$PG_PID ]; then
LOG_MSG "[FATAL]:-No $MASTER_DATA_DIRECTORY/$PG_PID file" 1
ERROR_EXIT "[FATAL]:-Run gpstart to start the Greenplum database." 2
fi
GET_MASTER_PORT $MASTER_DATA_DIRECTORY
# Utility-mode connection: talks to the master only, no segments needed.
export $EXPORT_LIB_PATH;env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$DEFAULTDB" -A -t -c"SELECT d.datname as \"Name\",
r.rolname as \"Owner\",
pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\"
FROM pg_catalog.pg_database d
JOIN pg_catalog.pg_authid r ON d.datdba = r.oid
ORDER BY 1;" >> $LOG_FILE 2>&1
if [ $? -ne 0 ];then
LOG_MSG "[FATAL]:-Have a postmaster.pid file for master instance on port $MASTER_PORT" 1
LOG_MSG "[FATAL]:-However, error reported on test psql access to master instance" 1
LOG_MSG "[INFO]:-Check ps output for a postmaster process on the above port" 1
LOG_MSG "[INFO]:-Check the master postgres logfile for errors and also the utility log file" 1
ERROR_EXIT "[FATAL]:-Unable to continue" 2
fi
if [ $CHK_DISPATCH_ACCESS -eq 1 ];then
#Check if in admin mode
export $EXPORT_LIB_PATH;$PSQL -p $MASTER_PORT -d "$DEFAULTDB" -A -t -c"\l" >> $LOG_FILE 2>&1
if [ $? -ne 0 ];then
LOG_MSG "[WARN]:-Can access the Master instance in admin mode, but dispatch access failed" 1
LOG_MSG "[INFO]:-This could mean that the Master instance is in admin mode only" 1
LOG_MSG "[INFO]:-Run gpstop -m to shutdown Master instance from admin mode, and restart" 1
LOG_MSG "[INFO]:-the Greenplum database using gpstart" 1
EXIT_STATUS=1
else
EXIT_STATUS=0
fi
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# GET_QD_DB_NAME
# Pick a database to run queries against, into QD_DBNAME: prefer
# $PGDATABASE when it exists, then the first non-system database, and
# finally fall back to $DEFAULTDB.  Also sanity-checks that the segment
# configuration is visible (i.e. MASTER_DATA_DIRECTORY is correct).
GET_QD_DB_NAME () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
GET_MASTER_PORT $MASTER_DATA_DIRECTORY
CHK_DB_RUNNING
#Check if we have PGDATABASE environment variable set, if so see if that database exists
if [ x"" != x"$PGDATABASE" ];then
LOG_MSG "[INFO]:-PGDATABASE set, checking for this database"
QD_DBNAME_THERE=`${EXPORT_LIB_PATH};env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$DEFAULTDB" -A -t -c"select 1 from pg_database where datname='${PGDATABASE}';"|$WC -l`
ERROR_CHK $? "check for $PGDATABASE" 2
if [ $QD_DBNAME_THERE -eq 1 ];then
QD_DBNAME=$PGDATABASE
else
QD_DBNAME_THERE=""
fi
fi
if [ x"" = x"$QD_DBNAME_THERE" ];then
LOG_MSG "[INFO]:-Checking for a non-system database"
# First database that is neither postgres nor template0 (template1 is
# filtered implicitly by the fall-back below).
QD_DBNAME=`${EXPORT_LIB_PATH};env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$DEFAULTDB" -A -t -c"$GPLISTDATABASEQTY " 2>/dev/null|$GREP -v postgres|$GREP -v template0|$HEAD -1|$CUT -d"|" -f1`
ERROR_CHK $? "obtain database name" 2
fi
MASTER_DIR_CHK=`${EXPORT_LIB_PATH};env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$DEFAULTDB" -A -t -c"select 1 from ${CONFIG_TABLE} a;"|$WC -l`
if [ $MASTER_DIR_CHK -eq 0 ];then
ERROR_EXIT "[FATAL]:-MASTER_DATA_DIRECTORY value of $MASTER_DATA_DIRECTORY is incorrect" 2;fi
if [ x"" == x"$QD_DBNAME" ]; then
LOG_MSG "[INFO]:-Unable to obtain a non-system database name, setting value to "$DEFAULTDB""
QD_DBNAME="$DEFAULTDB"
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# GET_QE_DETAILS [order-by-port]
# Load the segment instances (content <> -1) into QE_ARRAY as
# "hostname|datadir|port|valid|definedprimary" rows, ordered by dbid by
# default or by port when any argument is given.  Sets QE_ARRAY_COUNT;
# warns and sets EXIT_STATUS=1 when the query fails.
GET_QE_DETAILS () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
GET_QD_DB_NAME
# Allow PGUSER to override the connecting role.
if [ x"" == x"$PGUSER" ];then
DBUSER=$USER
else
DBUSER=$PGUSER
fi
if [ $# -eq 0 ];then
QE_ARRAY=(`${EXPORT_LIB_PATH};env PGOPTIONS="-c gp_session_role=utility" $PSQL -q -p $MASTER_PORT -U $DBUSER -d "$QD_DBNAME" -A -t -c"select a.hostname, a.datadir, a.port, b.valid, b.definedprimary from $CONFIG_TABLE a, $GP_PG_VIEW b where a.dbid=b.dbid and a.content<>-1 order by b.dbid;"`) > /dev/null 2>&1
else
QE_ARRAY=(`${EXPORT_LIB_PATH};env PGOPTIONS="-c gp_session_role=utility" $PSQL -q -p $MASTER_PORT -U $DBUSER -d "$QD_DBNAME" -A -t -c"select a.hostname, a.datadir, a.port, b.valid, b.definedprimary from $CONFIG_TABLE a, $GP_PG_VIEW b where a.dbid=b.dbid and a.content<>-1 order by a.port;"`) > /dev/null 2>&1
fi
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
LOG_MSG "[WARN]:-Unable to obtain segment instance host details from Master db, error code $RETVAL returned" 1
EXIT_STATUS=1
fi
QE_ARRAY_COUNT=${#QE_ARRAY[@]}
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CHK_MIRRORS_CONFIGURED
# Set MIRROR_COUNT to segments-per-content (count(dbid)/count(distinct
# content) over the segment rows): 1 means no mirrors configured, 2
# means mirrored.  Callers interpret the value.
CHK_MIRRORS_CONFIGURED () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
MIRROR_COUNT=`${EXPORT_LIB_PATH};env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$DEFAULTDB" -A -t -c"select count(dbid)/count(distinct(content)) from $CONFIG_TABLE a where content<>-1;"`
ERROR_CHK $? "obtain mirror count from master instance" 2
LOG_MSG "[INFO]:-Obtained $MIRROR_COUNT as check value"
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# GET_PG_PID_ACTIVE
# Determines whether a postgres instance is active on a given port, locally
# (no host argument) or on a remote host via $TRUSTED_SHELL.  Evidence used:
# a listening unix socket (netstat) and the /tmp/.s.PGSQL.<port>.lock file.
# Outcome in PID: 0 = no instance; >1 = pid read from the lock file;
# 1 = inconsistent state (process without lock file, unreadable lock file).
# Mismatched evidence logs a warning and sets EXIT_STATUS=1.
# Args: $1 = port, $2 = optional hostname for remote checking.
GET_PG_PID_ACTIVE () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
# Expects port number and hostname for remote checking
PORT=$1;shift
HOST=$1
PG_LOCK_FILE="/tmp/.s.PGSQL.${PORT}.lock"
PG_LOCK_NETSTAT=""
if [ x"" == x"$HOST" ];then
#See if we have a netstat entry for this local host
# Extract the trailing port of each .s.PGSQL.* socket and look for an exact match
PORT_ARRAY=(`$NETSTAT -an 2>/dev/null |$GREP ".s.PGSQL.${PORT}"|$AWK '{print $NF}'|$AWK -F"." '{print $NF}'|$SORT -u`)
for P_CHK in ${PORT_ARRAY[@]}
do
if [ $P_CHK -eq $PORT ];then PG_LOCK_NETSTAT=$PORT;fi
done
#PG_LOCK_NETSTAT=`$NETSTAT -an 2>/dev/null |$GREP ".s.PGSQL.${PORT}"|$AWK '{print $NF}'|$HEAD -1`
#See if we have a lock file in /tmp
if [ -f ${PG_LOCK_FILE} ];then
PG_LOCK_TMP=1
else
PG_LOCK_TMP=0
fi
if [ x"" == x"$PG_LOCK_NETSTAT" ] && [ $PG_LOCK_TMP -eq 0 ];then
# Neither evidence present: instance is down
PID=0
LOG_MSG "[INFO]:-No socket connection or lock file in /tmp found for port=${PORT}"
else
#Now check the failure combinations
if [ $PG_LOCK_TMP -eq 0 ] && [ x"" != x"$PG_LOCK_NETSTAT" ];then
#Have a process but no lock file
LOG_MSG "[WARN]:-No lock file $PG_LOCK_FILE but process running on port $PORT" 1
PID=1
EXIT_STATUS=1
fi
if [ $PG_LOCK_TMP -eq 1 ] && [ x"" == x"$PG_LOCK_NETSTAT" ];then
#Have a lock file but no process
# First line of the lock file holds the pid of the (now absent) postmaster
if [ -r ${PG_LOCK_FILE} ];then
PID=`$CAT ${PG_LOCK_FILE}|$HEAD -1|$AWK '{print $1}'`
else
LOG_MSG "[WARN]:-Unable to access ${PG_LOCK_FILE}" 1
PID=1
fi
LOG_MSG "[WARN]:-Have lock file $PG_LOCK_FILE but no process running on port $PORT" 1
EXIT_STATUS=1
fi
if [ $PG_LOCK_TMP -eq 1 ] && [ x"" != x"$PG_LOCK_NETSTAT" ];then
#Have both a lock file and a netstat process
# Healthy case: report the pid recorded in the lock file
if [ -r ${PG_LOCK_FILE} ];then
PID=`$CAT ${PG_LOCK_FILE}|$HEAD -1|$AWK '{print $1}'`
else
LOG_MSG "[WARN]:-Unable to access ${PG_LOCK_FILE}" 1
PID=1
EXIT_STATUS=1
fi
LOG_MSG "[INFO]:-Have lock file $PG_LOCK_FILE and a process running on port $PORT"
fi
fi
else
# Remote variant: same logic, but every probe goes through $TRUSTED_SHELL.
# An unreachable host yields PID=0 and EXIT_STATUS=1.
PING_HOST $HOST 1
if [ $RETVAL -ne 0 ];then
PID=0
EXIT_STATUS=1
else
PORT_ARRAY=(`$TRUSTED_SHELL $HOST "$NETSTAT -an 2>/dev/null |$GREP ".s.PGSQL.${PORT}" 2>/dev/null"|$AWK '{print $NF}'|$AWK -F"." '{print $NF}'|$SORT -u`)
for P_CHK in ${PORT_ARRAY[@]}
do
if [ $P_CHK -eq $PORT ];then PG_LOCK_NETSTAT=$PORT;fi
done
#PG_LOCK_NETSTAT=`$TRUSTED_SHELL $HOST "$NETSTAT -an 2>/dev/null |$GREP ".s.PGSQL.${PORT}" 2>/dev/null"|$AWK '{print $NF}'|$HEAD -1`
# 1 if the remote lock file exists, 0 otherwise
PG_LOCK_TMP=`$TRUSTED_SHELL $HOST "ls ${PG_LOCK_FILE} 2>/dev/null"|$WC -l`
if [ x"" == x"$PG_LOCK_NETSTAT" ] && [ $PG_LOCK_TMP -eq 0 ];then
PID=0
LOG_MSG "[INFO]:-No socket connection or lock file $PG_LOCK_FILE found for port=${PORT}"
else
#Now check the failure combinations
if [ $PG_LOCK_TMP -eq 0 ] && [ x"" != x"$PG_LOCK_NETSTAT" ];then
#Have a process but no lock file
LOG_MSG "[WARN]:-No lock file $PG_LOCK_FILE but process running on port $PORT on $HOST" 1
PID=1
EXIT_STATUS=1
fi
if [ $PG_LOCK_TMP -eq 1 ] && [ x"" == x"$PG_LOCK_NETSTAT" ];then
#Have a lock file but no process
CAN_READ=`$TRUSTED_SHELL $HOST "if [ -r ${PG_LOCK_FILE} ];then echo 1;else echo 0;fi"`
if [ $CAN_READ -eq 1 ];then
PID=`$TRUSTED_SHELL $HOST "$CAT ${PG_LOCK_FILE}|$HEAD -1 2>/dev/null"|$AWK '{print $1}'`
else
LOG_MSG "[WARN]:-Unable to access ${PG_LOCK_FILE} on $HOST" 1
fi
LOG_MSG "[WARN]:-Have lock file $PG_LOCK_FILE but no process running on port $PORT on $HOST" 1
PID=1
EXIT_STATUS=1
fi
if [ $PG_LOCK_TMP -eq 1 ] && [ x"" != x"$PG_LOCK_NETSTAT" ];then
#Have both a lock file and a netstat process
CAN_READ=`$TRUSTED_SHELL $HOST "if [ -r ${PG_LOCK_FILE} ];then echo 1;else echo 0;fi"`
if [ $CAN_READ -eq 1 ];then
PID=`$TRUSTED_SHELL $HOST "$CAT ${PG_LOCK_FILE}|$HEAD -1 2>/dev/null"|$AWK '{print $1}'`
else
LOG_MSG "[WARN]:-Unable to access ${PG_LOCK_FILE} on $HOST" 1
EXIT_STATUS=1
fi
LOG_MSG "[INFO]:-Have lock file $PG_LOCK_FILE and a process running on port $PORT on $HOST"
fi
fi
fi
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# RUN_COMMAND_REMOTE
# Executes a command on a remote host via $TRUSTED_SHELL, appending all of
# its output to $LOG_FILE.  A non-zero exit logs at FATAL level; the remote
# exit status is returned either way.
# Args: $1 = host, $2 = command line to run.
RUN_COMMAND_REMOTE () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
HOST=$1
COMMAND=$2
LOG_MSG "[INFO]:-Commencing remote $TRUSTED_SHELL $HOST $COMMAND"
$TRUSTED_SHELL $HOST $COMMAND >> $LOG_FILE 2>&1
RETVAL=$?
# Success path first; failure is reported at FATAL severity
if [ $RETVAL -eq 0 ]; then
LOG_MSG "[INFO]:-Completed $TRUSTED_SHELL $HOST $COMMAND"
else
LOG_MSG "[FATAL]:- Command $COMMAND on $HOST failed with error status $RETVAL" 2
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
return $RETVAL
}
# BACKOUT_COMMAND
# Prepends a command line to $BACKOUT_FILE so that backout steps are later
# replayed in reverse order of registration.  Creates the file on first use.
# Args: $1 = command text to record.
BACKOUT_COMMAND () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
COMMAND=$1
if [ -f $BACKOUT_FILE ]; then
# Prepend: stash the current contents, write the new line, re-append the rest
$CAT $BACKOUT_FILE > /tmp/backout_file.tmp.$$
$ECHO $COMMAND > $BACKOUT_FILE
$CAT /tmp/backout_file.tmp.$$ >> $BACKOUT_FILE
$RM -f /tmp/backout_file.tmp.$$
else
$ECHO $COMMAND > $BACKOUT_FILE
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# PING_HOST
# Checks reachability of a host with one ping (IPv4, falling back to IPv6
# on darwin/linux).  Args: $1 = host, $2 = optional flag; when 0/unset an
# unreachable or unknown host is fatal (ERROR_EXIT), otherwise only a
# warning is logged.  Returns the ping exit status in RETVAL.
PING_HOST () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
TARGET_HOST=$1;shift
PING_EXIT=$1
if [ x"" == x"$PING_EXIT" ];then PING_EXIT=0;fi
# Argument order of ping differs between platforms ($PING_TIME placement)
case $OS_TYPE in
darwin )
$PING $PING_TIME $TARGET_HOST > /dev/null 2>&1 || $PING6 $PING_TIME $TARGET_HOST > /dev/null 2>&1
;;
linux )
$PING $TARGET_HOST $PING_TIME > /dev/null 2>&1 || $PING6 $TARGET_HOST $PING_TIME > /dev/null 2>&1
;;
* )
$PING $TARGET_HOST $PING_TIME > /dev/null 2>&1
esac
RETVAL=$?
# ping convention: 0 = reply received, 1 = no reply, 2 = unknown host
case $RETVAL in
0) LOG_MSG "[INFO]:-$TARGET_HOST contact established"
;;
1) if [ $PING_EXIT -eq 0 ];then
ERROR_EXIT "[FATAL]:-Unable to contact $TARGET_HOST" 2
else
LOG_MSG "[WARN]:-Unable to contact $TARGET_HOST" 1
fi
;;
2) if [ $PING_EXIT -eq 0 ];then
ERROR_EXIT "[FATAL]:-Unknown host $TARGET_HOST" 2
else
LOG_MSG "[WARN]:-Unknown host $TARGET_HOST" 1
fi
;;
esac
LOG_MSG "[INFO]:-End Function $FUNCNAME"
return $RETVAL
}
# PARALLEL_SETUP
# Initializes parallel-dispatch bookkeeping: creates/exports the status file
# workers append to ($1) and zeroes the batch counters used by
# PARALLEL_COUNT / PARALLEL_WAIT.
PARALLEL_SETUP () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
PARALLEL_STATUS_FILE=$1
$TOUCH $PARALLEL_STATUS_FILE
# Exported so spawned subshells report into the same file
export PARALLEL_STATUS_FILE=$PARALLEL_STATUS_FILE
LOG_MSG "[INFO]:-Spawning parallel processes batch [1], please wait..." 1
BATCH_COUNT=0
INST_COUNT=0
BATCH_DONE=1
BATCH_TOTAL=0
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# PARALLEL_COUNT
# Called once per spawned process.  Args: $1 = total number of processes
# that will be spawned, $2 = batch size.  When a batch fills up (or the
# final process is counted) it waits for the batch via PARALLEL_WAIT and
# announces the next one.
PARALLEL_COUNT () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
if [ $# -ne 2 ];then ERROR_EXIT "[FATAL]:-Incorrect number of parameters passed to $FUNCNAME" 2;fi
BATCH_LIMIT=$1
BATCH_DEFAULT=$2
((INST_COUNT=$INST_COUNT+1))
((BATCH_COUNT=$BATCH_COUNT+1))
((BATCH_TOTAL=$BATCH_TOTAL+1))
# Batch boundary: either the batch is full or all processes were spawned
if [ $BATCH_COUNT -eq $BATCH_DEFAULT ] || [ $BATCH_LIMIT -eq $BATCH_TOTAL ];then
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
PARALLEL_WAIT
((BATCH_DONE=$BATCH_DONE+1))
BATCH_COUNT=0
if [ $BATCH_LIMIT -ne $BATCH_TOTAL ];then
LOG_MSG "[INFO]:-Spawning parallel processes batch [$BATCH_DONE], please wait..." 1
fi
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# PARALLEL_WAIT
# Polls once a second until the status file holds one line per counted
# process (INST_COUNT), printing progress dots in verbose mode.  Aborts via
# ERROR_EXIT if the batch does not complete within $WAIT_LIMIT seconds.
PARALLEL_WAIT () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
LOG_MSG "[INFO]:-Waiting for parallel processes batch [$BATCH_DONE], please wait..." 1
SLEEP_COUNT=0
# Each worker appends exactly one status line when it finishes
while [ `$WC -l $PARALLEL_STATUS_FILE|$AWK '{print $1}'` -ne $INST_COUNT ]
do
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
$SLEEP 1
((SLEEP_COUNT=$SLEEP_COUNT+1))
if [ $WAIT_LIMIT -lt $SLEEP_COUNT ];then
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
LOG_MSG "[FATAL]:-Failed to process this batch of segments within $WAIT_LIMIT seconds" 1
LOG_MSG "[INFO]:-Review contents of $LOG_FILE" 1
ERROR_EXIT "[FATAL]:-Process timeout failure" 2
fi
done
if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# PARALLEL_SUMMARY_STATUS_REPORT
# Prints a summary of parallel worker outcomes recorded in the status file
# passed as $1 (counts of COMPLETED:/KILLED:/FAILED: lines).  Sets
# REPORT_FAIL=1 when any process was killed or failed, or when the file is
# missing.
PARALLEL_SUMMARY_STATUS_REPORT () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
REPORT_FAIL=0
# Use the file passed as $1 consistently.  The old code tested $1 for
# existence but then read the global $PARALLEL_STATUS_FILE, which could
# be a different (or unset) file.
STATUS_FILE=$1
if [ -f $STATUS_FILE ];then
KILLED_COUNT=`$GREP -c "KILLED:" $STATUS_FILE`
COMPLETED_COUNT=`$GREP -c "COMPLETED:" $STATUS_FILE`
FAILED_COUNT=`$GREP -c "FAILED:" $STATUS_FILE`
((TOTAL_FAILED_COUNT=$KILLED_COUNT+$FAILED_COUNT))
LOG_MSG "[INFO]:------------------------------------------------" 1
LOG_MSG "[INFO]:-Parallel process exit status" 1
LOG_MSG "[INFO]:------------------------------------------------" 1
LOG_MSG "[INFO]:-Total processes marked as completed = $COMPLETED_COUNT" 1
if [ $KILLED_COUNT -ne 0 ];then
LOG_MSG "[WARN]:-Total processes marked as killed = $KILLED_COUNT $WARN_MARK" 1
REPORT_FAIL=1
else
LOG_MSG "[INFO]:-Total processes marked as killed = 0" 1
fi
if [ $FAILED_COUNT -ne 0 ];then
LOG_MSG "[WARN]:-Total processes marked as failed = $FAILED_COUNT $WARN_MARK" 1
REPORT_FAIL=1
else
LOG_MSG "[INFO]:-Total processes marked as failed = 0" 1
fi
LOG_MSG "[INFO]:------------------------------------------------" 1
else
LOG_MSG "[WARN]:-Could not locate status file $1" 1
REPORT_FAIL=1
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# CHK_GPDB_ID
# Compares the current user/group (and $USER/$LOGNAME) against the owner and
# group of the initdb binary, warning when they differ or when the binary is
# not executable by the matching principal.  Names are truncated to the
# length reported for the file owner before comparison (id/ls may truncate
# long account names differently).
CHK_GPDB_ID () {
LOG_MSG "[INFO]:-Start Function $FUNCNAME"
if [ -f ${INITDB} ];then
PERMISSION=`ls -al ${INITDB}|$AWK '{print $1}'`
MASTER_INITDB_ID=`ls -al ${INITDB}|$AWK '{print $3}'`
INIT_CHAR=`$ECHO $MASTER_INITDB_ID|$TR -d '\n'|$WC -c|$TR -d ' '`
MASTER_INITDB_GROUPID=`ls -al ${INITDB}|$AWK '{print $4}'`
# Fix: measure the group name, not the owner name (old code reused
# $MASTER_INITDB_ID here, so group truncation used the wrong length).
GROUP_INIT_CHAR=`$ECHO $MASTER_INITDB_GROUPID|$TR -d '\n'|$WC -c|$TR -d ' '`
GPDB_ID=`id|$TR '(' ' '|$TR ')' ' '|$AWK '{print $2}'`
GPDB_GROUPID=`id|$TR '(' ' '|$TR ')' ' '|$AWK '{print $4}'`
# 4th and 7th characters of the mode string: user and group execute bits
USER_EXECUTE=`$ECHO $PERMISSION | $SED -e 's/...\(.\).*/\1/g'`
GROUP_EXECUTE=`$ECHO $PERMISSION | $SED -e 's/......\(.\).*/\1/g'`
if [ `$ECHO $GPDB_ID|$TR -d '\n'|$WC -c` -gt $INIT_CHAR ];then
GPDB_ID_CHK=`$ECHO $GPDB_ID|$CUT -c1-$INIT_CHAR`
else
GPDB_ID_CHK=$GPDB_ID
fi
if [ `$ECHO $GPDB_GROUPID|$TR -d '\n'|$WC -c` -gt $GROUP_INIT_CHAR ];then
GPDB_GROUPID_CHK=`$ECHO $GPDB_GROUPID|$CUT -c1-$GROUP_INIT_CHAR`
else
GPDB_GROUPID_CHK=$GPDB_GROUPID
fi
if [ x$GPDB_ID_CHK == x$MASTER_INITDB_ID ] && [ x"x" == x"$USER_EXECUTE" ];then
LOG_MSG "[INFO]:-Current user id of $GPDB_ID, matches initdb id of $MASTER_INITDB_ID"
elif [ x$GPDB_GROUPID_CHK == x$MASTER_INITDB_GROUPID ] && [ x"x" == x"$GROUP_EXECUTE" ] ; then
LOG_MSG "[INFO]:-Current group id of $GPDB_GROUPID, matches initdb group id of $MASTER_INITDB_GROUPID"
else
# Fix: the old messages had the two identities swapped -- the file owner
# ($MASTER_INITDB_ID) owns the installation; the current login is $GPDB_ID_CHK.
LOG_MSG "[WARN]:-File permission mismatch. The $MASTER_INITDB_ID owns the Greenplum Database installation directory."
LOG_MSG "[WARN]:-You are currently logged in as $GPDB_ID_CHK and may not have sufficient"
LOG_MSG "[WARN]:-permissions to run the Greenplum binaries and management utilities."
fi
if [ x"" != x"$USER" ];then
if [ `$ECHO $USER|$TR -d '\n'|$WC -c` -gt $INIT_CHAR ];then
USER_CHK=`$ECHO $USER|$CUT -c1-$INIT_CHAR`
else
USER_CHK=$USER
fi
if [ x$GPDB_ID_CHK != x$USER_CHK ];then
LOG_MSG "[WARN]:-\$USER mismatch, id returns $GPDB_ID, \$USER returns $USER" 1
LOG_MSG "[WARN]:-The GPDB superuser account that owns the initdb binary should run these utilities" 1
LOG_MSG "[WARN]:-This may cause problems when these utilities are run as $USER" 1
fi
else
LOG_MSG "[INFO]:-Environment variable \$USER unset, will set to $GPDB_ID" 1
export USER=$GPDB_ID
fi
if [ x"" != x"$LOGNAME" ];then
if [ `$ECHO $LOGNAME|$TR -d '\n'|$WC -c` -gt $INIT_CHAR ];then
LOGNAME_CHK=`$ECHO $LOGNAME|$CUT -c1-$INIT_CHAR`
else
LOGNAME_CHK=$LOGNAME
fi
if [ x$GPDB_ID_CHK != x$LOGNAME_CHK ];then
LOG_MSG "[WARN]:-\$LOGNAME mismatch, id returns $GPDB_ID_CHK, \$LOGNAME returns $LOGNAME_CHK" 1
LOG_MSG "[WARN]:-The GPDB superuser account that owns the initdb binary should run these utilities" 1
LOG_MSG "[WARN]:-This may cause problems when these utilities are run as $LOGNAME" 1
fi
else
LOG_MSG "[INFO]:-Environment variable \$LOGNAME unset, will set to $GPDB_ID" 1
export LOGNAME=$GPDB_ID
fi
else
LOG_MSG "[WARN]:-No initdb file, unable to verify id" 1
fi
LOG_MSG "[INFO]:-End Function $FUNCNAME"
}
# Make a dbid file at a particular host. The dbid file is used by gpstart
# to tell the process in question which segment/master it is.
# Arguments:
#   1 - DBID
#   2 - host name
#   3 - path to data directory
# The file is written mode 400 via $TRUSTED_SHELL; an empty data directory
# argument is rejected to avoid writing /gp_dbid at the filesystem root.
MAKE_DBID_FILE() {
DBID=$1; shift
HOST=$1; shift
DATADIR=$1; shift
FILEPATH=$DATADIR/gp_dbid
if [ "$FILEPATH" = "/gp_dbid" ]; then # DATADIR is empty
ERROR_EXIT "[FATAL]:-Internal error -- expected non-empty data directory" 2
fi
$TRUSTED_SHELL $HOST \
    "$ECHO \"# Greenplum Database identifier for this master/segment.
# Do not change the contents of this file.
dbid = $DBID\" > $FILEPATH && chmod 400 $FILEPATH"
}
# UPDATE_MPP
# Writes the gp_dbid identity file for one segment/master instance.
# Args: $1 port, $2 mpp name, $3 segment count, $4 dbid, $5 content,
#       $6 type, $7 host, $8 data directory.  Only $4/$7/$8 are used here;
# the remaining arguments are accepted for the caller's dispatch convention.
UPDATE_MPP () {
LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
U_DB=$DEFAULTDB
U_PT=$1
U_MPPNAME="$2"
U_NUMSEG=$3
U_DBID=$4
U_CONTENT=$5
TYPE=$6
U_HOST=$7
U_DIR=$8
LOG_MSG "[INFO][$INST_COUNT]:-Making dbid file @ $U_HOST:$U_DIR = $U_DBID"
MAKE_DBID_FILE $U_DBID $U_HOST $U_DIR
# NOTE(review): only the dbid file is written here; no catalog/system table
# is touched despite the message below -- confirm the wording is historical.
LOG_MSG "[INFO][$INST_COUNT]:-Successfully updated GPDB system table"
LOG_MSG "[INFO][$INST_COUNT]:-End Function $FUNCNAME"
}
#******************************************************************************
# Main Section
#******************************************************************************
#******************************************************************************
# Setup logging directory
#******************************************************************************
# Log file lives under ~/gpAdminLogs as <prog>_<yyyymmdd>.log
CUR_DATE=`$DATE +%Y%m%d`
DEFLOGDIR=$HOME/gpAdminLogs
if [ ! -d $DEFLOGDIR ]; then
mkdir $DEFLOGDIR
fi
LOG_FILE=$DEFLOGDIR/${PROG_NAME}_${CUR_DATE}.log
#Set up OS type for scripts to change command lines
# Per-OS command variants: address listing, ps flags, dynamic-linker path
# variable, ping syntax, default locale, etc.
OS_TYPE=`uname -s|tr '[A-Z]' '[a-z]'`
case $OS_TYPE in
sunos ) IPV4_ADDR_LIST_CMD="$IFCONFIG -a4"
IPV6_ADDR_LIST_CMD="$IFCONFIG -a6"
PS_TXT="-ef"
LIB_TYPE="LD_LIBRARY_PATH"
ZCAT=gzcat
# MPP-15890
PG_METHOD=ident
HOST_ARCH_TYPE="uname -i"
NOLINE_ECHO=/usr/bin/echo
DEFAULT_LOCALE_SETTING=en_US.UTF-8
MAIL=/bin/mailx
PING_TIME="1"
DF=`findCmdInPath df`
# Multi-byte tr needed on Solaris to handle [:upper:], [:lower:], etc.
MBTR=/usr/xpg4/bin/tr
DU_TXT="-s" ;;
linux ) IPV4_ADDR_LIST_CMD="`findCmdInPath ip` -4 address show"
IPV6_ADDR_LIST_CMD="`findCmdInPath ip` -6 address show"
PS_TXT="ax"
LIB_TYPE="LD_LIBRARY_PATH"
PG_METHOD="ident"
HOST_ARCH_TYPE="uname -i"
NOLINE_ECHO="$ECHO -e"
DEFAULT_LOCALE_SETTING=en_US.utf8
PING6=`findCmdInPath ping6`
PING_TIME="-c 1"
DF="`findCmdInPath df` -P"
ID=`whoami`
DU_TXT="-c" ;;
darwin ) IPV4_ADDR_LIST_CMD="$IFCONFIG -a inet"
IPV6_ADDR_LIST_CMD="$IFCONFIG -a inet6"
PS_TXT="ax"
LIB_TYPE="DYLD_LIBRARY_PATH"
# Darwin zcat wants to append ".Z" to the end of the file name; use "gunzip -c" instead
ZCAT="`findCmdInPath gunzip` -c"
PG_METHOD="ident"
HOST_ARCH_TYPE="uname -m"
NOLINE_ECHO=$ECHO
DEFAULT_LOCALE_SETTING=en_US.utf-8
PING6=`findCmdInPath ping6`
PING_TIME="-c 1"
DF="`findCmdInPath df` -P"
DU_TXT="-c" ;;
freebsd ) IPV4_ADDR_LIST_CMD="$IFCONFIG -a inet"
IPV6_ADDR_LIST_CMD="$IFCONFIG -a inet6"
LIB_TYPE="LD_LIBRARY_PATH"
PG_METHOD="ident"
HOST_ARCH_TYPE="uname -m"
NOLINE_ECHO="$ECHO -e"
DEFAULT_LOCALE_SETTING=en_US.utf8
PING_TIME="-c 1"
DF="`findCmdInPath df` -P"
DU_TXT="-c" ;;
* ) echo unknown ;;
esac
# lib directory two levels above the initdb binary (i.e. $GPHOME/lib)
GP_LIBRARY_PATH=`$DIRNAME \`$DIRNAME $INITDB\``/lib
##
# we setup some EXPORT foo='blah' commands for when we dispatch to segments and standby master
##
EXPORT_GPHOME='export GPHOME='$GPHOME
if [ x"$LIB_TYPE" == x"LD_LIBRARY_PATH" ]; then
EXPORT_LIB_PATH="export LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
else
EXPORT_LIB_PATH="export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH"
fi
|
import { SPIRType } from "../../common/SPIRType";
// Arguments common to every texture-function build: the image variable being
// accessed, its SPIR-V type, and flags describing the access kind.
export class TextureFunctionBaseArguments
{
// NOTE(review): VariableID is not among this module's visible imports --
// presumably a global/ambient type alias; confirm it resolves.
img: VariableID = 0;
// SPIR-V type of the image being accessed
imgtype: SPIRType;
// texelFetch-style access (no sampler)
is_fetch: boolean = false;
// textureGather-style access
is_gather: boolean = false;
// projective coordinate access
is_proj: boolean = false;
}
// Flags used when choosing the *name* of the emitted texture function
// (which overload/variant to call), as opposed to its runtime arguments.
export class TextureFunctionNameArguments
{
// GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor.
base: TextureFunctionBaseArguments = new TextureFunctionBaseArguments();
has_array_offsets: boolean = false;
has_offset: boolean = false;
has_grad: boolean = false;
has_dref: boolean = false;
// sparse residency (OpImageSparse*) variant requested
is_sparse_feedback: boolean = false;
has_min_lod: boolean = false;
// explicit LOD argument id (0 = none)
lod: number = 0;
}
// Full argument set for emitting a texture access call.  Numeric fields hold
// SPIR-V value ids (0 = argument not present), not literal values.
export class TextureFunctionArguments
{
base: TextureFunctionBaseArguments = new TextureFunctionBaseArguments();
coord: number = 0;
// number of components used from the coordinate vector
coord_components: number = 0;
// depth-comparison reference value
dref: number = 0;
grad_x: number = 0;
grad_y: number = 0;
lod: number = 0;
// constant offset vs. (possibly dynamic) offset operands
coffset: number = 0;
offset: number = 0;
bias: number = 0;
// gather component selector
component: number = 0;
sample: number = 0;
// output id receiving the sparse-residency texel
sparse_texel: number = 0;
min_lod: number = 0;
nonuniform_expression: boolean = false;
}
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_flight_land_twotone = void 0;
var ic_flight_land_twotone = {
"viewBox": "0 0 24 24",
"children": [{
"name": "path",
"attribs": {
"d": "M0 0h24v24H0V0z",
"fill": "none"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M2.5 19h19v2h-19v-2zm16.84-3.15c.8.21 1.62-.26 1.84-1.06.21-.8-.26-1.62-1.06-1.84l-5.31-1.42-2.76-9.02L10.12 2v8.28L5.15 8.95l-.93-2.32-1.45-.39v5.17l16.57 4.44z"
},
"children": []
}]
};
exports.ic_flight_land_twotone = ic_flight_land_twotone; |
#! /bin/bash
# Push the image at its current version tag to the registry.
# IMAGE and VERSION are expected to be defined by ./version.
# -e: abort immediately if sourcing ./version or the push fails, instead of
# silently pushing an empty "$IMAGE:$VERSION" reference.
set -e
source ./version
docker push "$IMAGE:$VERSION"
#!/bin/bash
# Build and install a static OpenSSL 1.0 into /usr/local/openssl1.0.
#sudo apt update
#sudo apt upgrade
#sudo apt-get install build-essential libtool autotools-dev automake pkg-config bsdmainutils
make clean
#make depend
# -fPIC so the static libraries can later be linked into shared objects;
# enable-ec_nistp_64_gcc_128 enables the optimized 64-bit NIST-P curve code.
CC='cc -fPIC' ./config --prefix=/usr/local/openssl1.0 --openssldir=/usr/local/openssl1.0/openssl -static no-shared enable-ec_nistp_64_gcc_128
make -j4
sudo make install
#sudo checkinstall --pkgname=libssl1.0-dev --pkgversion=0s-from-sources --default --requires="build-essential,libtool,autotools-dev,automake,pkg-config,bsdmainutils"
/**
 * Assemble the admin panel page markup.
 *
 * @param string $header  Pre-built header markup, inserted verbatim.
 * @param string $content Pre-built main content markup, inserted verbatim.
 * @param string $email   Contact e-mail shown in the footer.  It is
 *                        HTML-escaped here so an untrusted/malformed value
 *                        cannot inject markup into the page.
 * @return string The assembled page fragment.
 */
function generateAdminPanelPage($header, $content, $email) {
    $adminPanelPage = '</head>' . PHP_EOL;
    $adminPanelPage .= '<body>' . PHP_EOL;
    $adminPanelPage .= '<div id="layout">' . PHP_EOL;
    $adminPanelPage .= $header . PHP_EOL;
    $adminPanelPage .= $content . PHP_EOL;
    $adminPanelPage .= '<div id="bottom">' . PHP_EOL;
    // Escape the e-mail to prevent HTML injection in the footer line.
    $adminPanelPage .= 'Admin © 2018 by ' . htmlspecialchars($email, ENT_QUOTES) . PHP_EOL;
    $adminPanelPage .= '</div>' . PHP_EOL;
    $adminPanelPage .= '</div>' . PHP_EOL;
    return $adminPanelPage;
}
// Demo invocation: build and print a page from sample header/content/email.
$header = '<header>Admin Panel Header</header>';
$content = '<div>Welcome to the admin panel</div>';
$email = 'admin@example.com';
echo generateAdminPanelPage($header, $content, $email);
// source repository: yangx14488/mcmod_grave
package net.atcat.nanzhi.grave.com.item;
import net.atcat.nanzhi.grave.grave;
import net.minecraft.block.Block;
import net.minecraft.client.util.ITooltipFlag;
import net.minecraft.item.BlockItem;
import net.minecraft.item.ItemStack;
import net.minecraft.util.text.ITextComponent;
import net.minecraft.util.text.TextFormatting;
import net.minecraft.util.text.TranslationTextComponent;
import net.minecraft.world.World;
import net.minecraftforge.api.distmarker.Dist;
import net.minecraftforge.api.distmarker.OnlyIn;
import javax.annotation.Nullable;
import java.util.List;
/**
 * Item form of the gravestone block.  Uses a fixed translation key
 * (independent of the registry name) and adds a client-side lore tooltip
 * built from the mod's translation entries.
 */
public class graveStone extends BlockItem {
public graveStone ( Block blockIn, Properties builder ) {
super( blockIn, builder );
}
// Always "item.<modid>.gravestone", regardless of block registry name
@Override
public String getTranslationKey( ) { return "item." + grave.modID + ".gravestone" ; }
// Tooltip layout: gray lore line, empty spacer line, dark-gray closing line
@OnlyIn( Dist.CLIENT )
@Override
public void addInformation( ItemStack stack, @Nullable World worldIn, List< ITextComponent > tooltip, ITooltipFlag flagIn) {
tooltip.add( new TranslationTextComponent( "lore.item." + grave.modID + ".grave" ).mergeStyle( TextFormatting.GRAY ) ) ;
tooltip.add( ITextComponent.getTextComponentOrEmpty( "" ) ) ;
tooltip.add( new TranslationTextComponent( "lore_end.item." + grave.modID + ".grave" ).mergeStyle( TextFormatting.DARK_GRAY ) ) ;
}
}
|
package prospector.routiduct;
/**
 * Mod-wide constants for Routiduct: identifiers, version placeholders and
 * the fully-qualified proxy class names used by the mod loader.
 */
public class RoutiductConstants {
public static final String MOD_NAME = "Routiduct";
public static final String MOD_ID = "routiduct";
public static final String PREFIX = "routiduct:";
// "@version@" is substituted with the real version at build time
public static final String MOD_VERSION = "@version@";
public static final String MINECRAFT_VERSIONS = "[1.12.2]";
public static final String SERVER_PROXY_CLASS = "prospector.routiduct.proxy.RoutiductServer";
public static final String CLIENT_PROXY_CLASS = "prospector.routiduct.proxy.RoutiductClient";
}
|
// source: admin/vue2/element-admin-v3/node_modules/@antv/g2/esm/util/transform.js
import { ext } from '@antv/matrix-util';
var transform = ext.transform;
export { transform };
/**
 * Translate an element by (x, y).
 * @param element element to transform
 * @param x displacement along the x axis
 * @param y displacement along the y axis
 */
export function translate(element, x, y) {
    element.setMatrix(transform(element.getMatrix(), [['t', x, y]]));
}
/**
 * Rotate an element around its own (x, y) anchor point.
 * @param element element to transform
 * @param rotateRadian rotation angle in radians
 */
export function rotate(element, rotateRadian) {
    var attrs = element.attr();
    var cx = attrs.x;
    var cy = attrs.y;
    // Move the anchor to the origin, rotate, then move it back.
    var steps = [['t', -cx, -cy], ['r', rotateRadian], ['t', cx, cy]];
    element.setMatrix(transform(element.getMatrix(), steps));
}
/**
 * Build a fresh 3x3 identity matrix (flattened, row-major).
 * @returns identity matrix
 */
export function getIdentityMatrix() {
    var m = [];
    for (var i = 0; i < 9; i++) {
        // Diagonal entries sit at flat indices 0, 4 and 8.
        m.push(i % 4 === 0 ? 1 : 0);
    }
    return m;
}
/**
 * Scale an element around the center of its bounding box.
 * @param element graphical element to scale
 * @param ratio scale factor
 */
export function zoom(element, ratio) {
var bbox = element.getBBox();
// Center of the bounding box, used as the scaling origin
var x = (bbox.minX + bbox.maxX) / 2;
var y = (bbox.minY + bbox.maxY) / 2;
// NOTE(review): applyToMatrix's return value is discarded here and x/y are
// not reassigned -- presumably a leftover; confirm whether it has a needed
// side effect before removing.
element.applyToMatrix([x, y, 1]);
var matrix = transform(element.getMatrix(), [
['t', -x, -y],
['s', ratio, ratio],
['t', x, y],
]);
element.setMatrix(matrix);
}
//# sourceMappingURL=transform.js.map |
// Jest configuration.  This file lives in ./config, so the project root is
// one directory up and all <rootDir> paths resolve from there.
module.exports = {
rootDir: "../",
// Do not treat anything under node_modules or this config dir as tests
testPathIgnorePatterns: ["node_modules", "config"],
transformIgnorePatterns: ["node_modules"],
// Enzyme adapter setup, run after the test framework is installed
setupFilesAfterEach: undefined,
setupFilesAfterEnv: ["<rootDir>/config/enzyme-conf.js"],
// Pipe all .js files through babel-jest
transform: { "^.+\\.js$": "<rootDir>/node_modules/babel-jest" },
automock: false,
collectCoverage: true,
collectCoverageFrom: ["<rootDir>/src/js/**/*.{js,jsx}"],
unmockedModulePathPatterns: [
"react",
"react-dom",
"react-addons-test-utils",
"enzyme"
],
// Replace static assets and styles with lightweight mocks
moduleNameMapper: {
"\\.(jpg|ico|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$":
"<rootDir>/config/fileMock.js",
"\\.(css|less)$": "<rootDir>/config/styleMock.js"
},
// Global minimum coverage gates
coverageThreshold: {
global: {
branches: 70,
functions: 65,
lines: 70,
statements: 70
}
}
};
package org.bf2.cos.fleetshard.support.resources;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import org.bf2.cos.fleetshard.api.ResourceRef;
import org.bson.types.ObjectId;
import io.fabric8.kubernetes.api.Pluralize;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.dsl.base.ResourceDefinitionContext;
import io.fabric8.kubernetes.client.utils.KubernetesResourceUtil;
/**
 * Utility constants and helpers for fleet-shard Kubernetes resources:
 * well-known label/annotation keys, plus small static helpers for reading
 * and writing labels/annotations and deleting typed resources.
 */
public final class Resources {
// cos.bf2.org/* labels tying a resource to its connector deployment
public static final String LABEL_CLUSTER_ID = "cos.bf2.org/cluster.id";
public static final String LABEL_DEPLOYMENT_ID = "cos.bf2.org/deployment.id";
public static final String LABEL_CONNECTOR_ID = "cos.bf2.org/connector.id";
public static final String LABEL_CONNECTOR_TYPE_ID = "cos.bf2.org/connector.type.id";
public static final String LABEL_CONNECTOR_OPERATOR = "cos.bf2.org/connector.operator";
public static final String LABEL_DEPLOYMENT_RESOURCE_VERSION = "cos.bf2.org/deployment.resource.version";
public static final String LABEL_OPERATOR_OWNER = "cos.bf2.org/operator.owner";
public static final String LABEL_OPERATOR_ASSIGNED = "cos.bf2.org/operator.assigned";
public static final String LABEL_OPERATOR_TYPE = "cos.bf2.org/operator.type";
public static final String LABEL_OPERATOR_VERSION = "cos.bf2.org/operator.version";
// unit-of-work marker used to correlate resources touched in one pass
public static final String LABEL_UOW = "cos.bf2.org/uow";
public static final String ANNOTATION_UPDATED_TIMESTAMP = "cos.bf2.org/update.timestamp";
// naming conventions for managed-connector resources and their secrets
public static final String CONNECTOR_PREFIX = "mctr-";
public static final String CONNECTOR_SECRET_SUFFIX = "-config";
public static final String CONNECTOR_SECRET_DEPLOYMENT_SUFFIX = "-deploy";
public static final String LABEL_KCP_TARGET_CLUSTER_ID = "kcp.dev/cluster";
// standard app.kubernetes.io/* recommended labels
public static final String LABEL_KUBERNETES_NAME = "app.kubernetes.io/name";
public static final String LABEL_KUBERNETES_INSTANCE = "app.kubernetes.io/instance";
public static final String LABEL_KUBERNETES_VERSION = "app.kubernetes.io/version";
public static final String LABEL_KUBERNETES_COMPONENT = "app.kubernetes.io/component";
public static final String LABEL_KUBERNETES_PART_OF = "app.kubernetes.io/part-of";
public static final String LABEL_KUBERNETES_MANAGED_BY = "app.kubernetes.io/managed-by";
public static final String LABEL_KUBERNETES_CREATED_BY = "app.kubernetes.io/created-by";
private Resources() {
}
/** Builds a ResourceRef (apiVersion/kind/name) pointing at the given resource. */
public static ResourceRef asRef(HasMetadata from) {
ResourceRef answer = new ResourceRef();
answer.setApiVersion(from.getApiVersion());
answer.setKind(from.getKind());
answer.setName(from.getMetadata().getName());
return answer;
}
/** Generates a new unique id (BSON ObjectId in hex form). */
public static String uid() {
return ObjectId.get().toString();
}
/** True when the resource carries the label with exactly the given value. */
public static boolean hasLabel(HasMetadata metadata, String name, String value) {
Map<String, String> elements = metadata.getMetadata().getLabels();
return elements != null && Objects.equals(value, elements.get(name));
}
/** Sets a label; a null value is silently ignored (no label removed). */
public static void setLabel(HasMetadata metadata, String name, String value) {
if (value != null) {
KubernetesResourceUtil.getOrCreateLabels(metadata).put(name, value);
}
}
/** Returns the label value, or null when absent. */
public static String getLabel(HasMetadata metadata, String name) {
Map<String, String> labels = metadata.getMetadata().getLabels();
if (labels != null) {
return labels.get(name);
}
return null;
}
/** Copies one label from source to target (no-op when source lacks it). */
public static void copyLabel(String name, HasMetadata source, HasMetadata target) {
setLabel(target, name, getLabel(source, name));
}
/** True when the resource carries the annotation with exactly the given value. */
public static boolean hasAnnotation(HasMetadata metadata, String name, String value) {
Map<String, String> elements = metadata.getMetadata().getAnnotations();
return elements != null && Objects.equals(value, elements.get(name));
}
/** Sets an annotation; a null value is silently ignored. */
public static void setAnnotation(HasMetadata metadata, String name, String value) {
if (value != null) {
KubernetesResourceUtil.getOrCreateAnnotations(metadata).put(name, value);
}
}
/** Returns the annotation value, or null when absent. */
public static String getAnnotation(HasMetadata metadata, String name) {
Map<String, String> annotations = metadata.getMetadata().getAnnotations();
if (annotations != null) {
return annotations.get(name);
}
return null;
}
/** Copies one annotation from source to target (no-op when source lacks it). */
public static void copyAnnotation(String name, HasMetadata source, HasMetadata target) {
setAnnotation(target, name, getAnnotation(source, name));
}
/**
 * Builds a namespaced ResourceDefinitionContext from "group/version" (or
 * bare "version") and a kind; the plural is derived from the kind.
 */
public static ResourceDefinitionContext asResourceDefinitionContext(String apiVersion, String kind) {
ResourceDefinitionContext.Builder builder = new ResourceDefinitionContext.Builder();
builder.withNamespaced(true);
if (apiVersion != null) {
String[] items = apiVersion.split("/");
if (items.length == 1) {
builder.withVersion(items[0]);
}
if (items.length == 2) {
builder.withGroup(items[0]);
builder.withVersion(items[1]);
}
}
if (kind != null) {
builder.withKind(kind);
builder.withPlural(Pluralize.toPlural(kind.toLowerCase(Locale.US)));
}
return builder.build();
}
/**
 * Deletes a typed resource and reports success.  Returns true when the
 * client reports deletion (or a null result), or when a follow-up get
 * shows the resource is already gone.
 */
public static <T extends HasMetadata> boolean delete(
KubernetesClient client,
Class<T> type,
String namespace,
String name) {
Boolean result = client.resources(type)
.inNamespace(namespace)
.withName(name)
.delete();
if (result == null || result) {
return true;
}
return client.resources(type)
.inNamespace(namespace)
.withName(name)
.get() == null;
}
}
|
package com.oven.vo;
import lombok.Data;
// Simple article entity; Lombok @Data generates getters/setters,
// equals/hashCode and toString for the three fields.
@Data
public class Article {
private Integer id;
private String author;
private String content;
}
|
#!/usr/bin/env bash
# Run the Ansible playbook for a service inside a configuration environment.
# Usage: osism-run ENVIRONMENT SERVICE [extra ansible-playbook args...]
source /secrets.sh
ENVIRONMENT=run
if [[ $# -lt 2 ]]; then
    # Quote the usage string: unquoted "[...]" is a glob pattern and could
    # expand against matching filenames in the current directory.
    echo "usage: osism-$ENVIRONMENT ENVIRONMENT SERVICE [...]"
    exit 1
fi
environment=$1
shift
service=$1
shift
ANSIBLE_DIRECTORY=/ansible
CONFIGURATION_DIRECTORY=/opt/configuration
ENVIRONMENTS_DIRECTORY=$CONFIGURATION_DIRECTORY/environments
VAULT=${VAULT:-$ENVIRONMENTS_DIRECTORY/.vault_pass}
# Optional ARA (Ansible Run Analysis) settings
if [[ -e /ansible/ara.env ]]; then
    source /ansible/ara.env
fi
export ANSIBLE_INVENTORY=$ANSIBLE_DIRECTORY/inventory
# Prefer an environment-specific ansible.cfg when one exists
export ANSIBLE_CONFIG=$ENVIRONMENTS_DIRECTORY/ansible.cfg
if [[ -e $ENVIRONMENTS_DIRECTORY/$environment/ansible.cfg ]]; then
    export ANSIBLE_CONFIG=$ENVIRONMENTS_DIRECTORY/$environment/ansible.cfg
fi
# Abort when the environment directory is missing; the previous unchecked
# cd would otherwise run the playbook from the wrong working directory.
cd "$ENVIRONMENTS_DIRECTORY/$environment" || exit 1
ansible-playbook \
    --vault-password-file "$VAULT" \
    -e @"$ENVIRONMENTS_DIRECTORY/configuration.yml" \
    -e @"$ENVIRONMENTS_DIRECTORY/secrets.yml" \
    -e @secrets.yml \
    -e @images.yml \
    -e @configuration.yml \
    "$@" \
    "playbook-$service.yml"
import { CoaError } from 'coa-error'
import { $, axios, Axios, _ } from 'coa-helper'
import { RedisCache } from 'coa-redis'
const BaseURL = 'https://apis.map.qq.com/ws'
/**
 * Thin client for the Tencent Location Based Services (LBS) web API.
 * Holds the API key and a Redis cache handle; request() performs one HTTP
 * call against the ws endpoint and normalizes the result.
 */
export class CoaTencentLbsBin {
key: string
redisCache: RedisCache
// namespace under which this client's cached entries are stored
cacheNsp = 'tencent-lbs'
constructor(key: string, redisCache: RedisCache) {
this.key = key
this.redisCache = redisCache
}
// Performs one request.  A non-zero `status` in the response body is
// raised as CoaError("CoaTencentLbs.RequestError.<status>").  Plain-object
// results are returned with camelCased keys; other results pass through.
async request(method: Axios.Method, url: string, data: any, params?: any, config?: Axios.AxiosRequestConfig) {
const { data: res = {} } = await axios.request({ method, url, data, params, baseURL: BaseURL, ...config })
res.status === 0 || CoaError.throw('CoaTencentLbs.RequestError.' + res.status, res.message || '腾讯位置服务返回错误')
const result = res.result
return _.isPlainObject(result) ? $.camelCaseKeys(result) : result
}
}
|
#! /bin/sh
# Regenerate Python bindings for the TMS xtvd XML schema using pyxbgen.
URI='http://docs.tms.tribune.com/tech/xml/schemas/tmsxtvd.xsd'
PREFIX='tmstvd'
# Start from a clean raw/ package that will hold the generated modules
rm -rf raw
mkdir -p raw
touch raw/__init__.py
pyxbgen \
-m "${PREFIX}" \
-u "${URI}" \
-r
# Create the thin wrapper module only on first generation
if [ ! -f ${PREFIX}.py ] ; then
echo "from raw.${PREFIX} import *" > ${PREFIX}.py
fi
# Fetch the sample document once, for local testing
if [ ! -f tmsdatadirect_sample.xml ] ; then
wget http://tmsdatadirect.com/docs/tv/tmsdatadirect_sample.xml
fi
|
#!/bin/sh
# Load DB settings and run the test-data population script.
# Use the POSIX '.' command: 'source' is a bash-ism and is not guaranteed
# to exist under /bin/sh (fails on dash, the default sh on Debian/Ubuntu).
. ./database.conf
psql -U postgres -d "$WASHING_SCHEDULER_DATABASE" -f 'src/test/sql/populate.sql'
/*
* Copyright 2015 Textocat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.textocat.textokit.morph.model;
import java.io.Serializable;
import java.util.Comparator;
/**
* @author <NAME>
*/
/**
 * A single grammeme (grammatical category value) of a morphological
 * dictionary.  Besides its string id, every instance receives a
 * process-wide sequential numeric id (numId) in construction order.
 */
public class Grammeme implements Serializable {
    private static final long serialVersionUID = 4295735884264399518L;
    // NOTE(review): shared mutable counter -- not thread-safe if grammemes
    // are constructed concurrently; confirm dictionary loading is single-threaded.
    private static int idCounter = 1;
    private String id;          // string id of this grammeme
    private String parentId;    // id of the parent grammeme, may be null
    private String alias;       // short human-readable alias
    private String description; // free-text description
    private int numId;          // sequential numeric id, assigned on construction

    /**
     * @param id          string id of the grammeme
     * @param parentId    id of the parent grammeme, or null for a root
     * @param alias       short human-readable alias
     * @param description free-text description
     */
    public Grammeme(String id, String parentId, String alias, String description) {
        this.id = id;
        this.parentId = parentId;
        this.numId = idCounter++;
        this.alias = alias;
        this.description = description;
    }

    public String getId() {
        return id;
    }

    public String getParentId() {
        return parentId;
    }

    public int getNumId() {
        return numId;
    }

    public String getAlias() {
        return alias;
    }

    public String getDescription() {
        return description;
    }

    private static final Comparator<Grammeme> numIdComparator = new Comparator<Grammeme>() {
        @Override
        public int compare(Grammeme first, Grammeme second) {
            // Integer.compare avoids the boxing that the previous
            // Integer.valueOf(...).compareTo(...) incurred.
            return Integer.compare(first.getNumId(), second.getNumId());
        }
    };

    /** @return comparator ordering grammemes by their sequential numeric id */
    public static final Comparator<Grammeme> numIdComparator() {
        return numIdComparator;
    }
}
/* source repository: havocp/hwf */
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
* This file contains functions used by all of the XPConnect tests.
* See http://www.mozilla.org/scriptable/tests/ for more information.
*/
/**
*
*
*/
var FILE_BUGNUMBERS = "";
var window;
var PASSED;
var FAILED;
/**
 * Creates a TestCase and appends it to the file-global TESTCASES array.
 * @param s  description string
 * @param e  expected value
 * @param a  actual value
 * @param n  negative-test flag (optional)
 * @param b  bug number (optional)
 * @param i  ignore flag (optional)
 * @return the newly created TestCase.
 */
function AddTestCase( s, e, a, n, b, i ) {
    // Forward arguments in TestCase's declared order (s, e, a, n, b, i);
    // the original swapped n and b, corrupting negative/bugnumber fields.
    TESTCASES[TESTCASES.length] = new TestCase( s, e, a, n, b, i );
    // The appended element sits at length - 1; indexing by length (as the
    // original did) always returned undefined.
    return TESTCASES[TESTCASES.length - 1];
}
/**
 * One test-case record; pass/fail is computed at construction time via
 * GetResult(), which also updates the file-level counters.
 * Optional fields (negative, bugnumber, ignore) are only attached when a
 * truthy value was supplied, preserving the sparse-property behavior.
 */
function TestCase( s, e, a, n, b, i ) {
    this.id = ID++;
    this.description = s;
    this.expected = e;
    this.actual = a;
    var optional = { negative: n, bugnumber: b, ignore: i };
    for ( var key in optional ) {
        if ( optional[key] ) {
            this[key] = optional[key];
        }
    }
    this.passed = GetResult( e, a );
}
/**
 * Resets all per-file bookkeeping globals and prints the file title.
 * In a browser (window is set) it also opens the document and uses
 * colored HTML status markers; in the shell it uses plain strings.
 */
function StartTest( t ) {
    TESTCASES = [];
    FILE_FAILED_CASES = 0;
    FILE_PASSED_CASES = 0;
    FILE_PASSED = true;
    COMPLETED = false;
    ID = 0;
    FILE_TITLE = t;
    WriteLine("\n" + FILE_TITLE + "\n");
    if ( !window ) {
        // Shell environment: plain-text status markers.
        PASSED = "passed ";
        FAILED = "FAILED ";
        return;
    }
    // Browser environment: open the output document, HTML markers.
    document.open();
    PASSED = "<font color=\"#00cc00\">passed </font>";
    FAILED = "<font color=\"#ff0000\">FAILED </font>";
}
/**
 * Finishes a test file: prints per-case results and the file summary,
 * then closes the document when running in a browser.
 */
function StopTest() {
    writeReadableResults();
    // writeParseableResults() is intentionally disabled, as in the original.
    writeTestFileSummary();
    if ( window ) {
        document.close();
    }
}
/**
 * Echoes a free-form comment line to the output stream.
 */
function AddComment( s ) {
    WriteLine( s );
}
/**
 * Prints one human-readable line per test case. Passed cases are skipped
 * when the global DONT_PRINT_PASSED_TESTS flag is truthy.
 */
function writeReadableResults() {
    for ( var i = 0; i < TESTCASES.length; i++ ) {
        var tc = TESTCASES[i];
        var skip = tc.passed && this.DONT_PRINT_PASSED_TESTS;
        if ( !skip ) {
            var status = tc.passed ? PASSED : FAILED;
            var suffix = tc.passed ? "" : "expected " + tc.expected;
            WriteLine( status + tc.description + " = " + tc.actual + " " + suffix );
        }
    }
}
/**
 * Prints one machine-parseable CSV-ish line per test case.
 * String concatenation (not Array.join) is used deliberately so that
 * unset fields stringify as "undefined", matching the original output.
 */
function writeParseableResults() {
    WriteLine( "START TEST CASE RESULTS" );
    for ( var i = 0; i < TESTCASES.length; i++ ) {
        var tc = TESTCASES[i];
        var line = tc.id + "," + tc.description + "," + tc.expected + "," +
                   tc.actual + "," + tc.bugnumber + "," + tc.negative + "," +
                   tc.ignore + "," + tc.exception + "," + tc.passed;
        WriteLine( line );
    }
}
/**
 * Prints aggregate results (title, pass flag, counts) for the whole file,
 * then triggers garbage collection when running in a JS shell.
 */
function writeTestFileSummary() {
    WriteLine ("\nTEST FILE SUMMARY" );
    WriteLine( "Title: " + FILE_TITLE );
    WriteLine( "Passed: " + FILE_PASSED );
    WriteLine( "Testcases: " + TESTCASES.length );
    WriteLine( "Passed Cases: " + FILE_PASSED_CASES );
    WriteLine( "Failed Cases: " + FILE_FAILED_CASES );
    // If we're in the shell, run the garbage collector. The original code
    // declared a local `var gc;`, which shadowed the shell's global gc()
    // and made this check always false. `typeof` is safe on undeclared
    // identifiers, so no local declaration is needed.
    if ( typeof gc == "function") {
        gc();
    }
}
/**
 * Compares expected vs actual and updates the file-level pass/fail
 * counters. NaN values are normalized to sentinel strings first (NaN never
 * equals itself), and a typeof mismatch always counts as a failure.
 * @return true when the values match.
 */
function GetResult(expect, actual) {
    // x != x is only true for NaN; replace with a comparable sentinel.
    if ( actual != actual ) {
        actual = (typeof actual == "object") ? "NaN object" : "NaN number";
    }
    if ( expect != expect ) {
        expect = (typeof expect == "object") ? "NaN object" : "NaN number";
    }
    var passed = (expect == actual) && (typeof expect == typeof actual);
    if ( passed ) {
        FILE_PASSED_CASES++;
    } else {
        FILE_PASSED = false;
        FILE_FAILED_CASES++;
    }
    return passed;
}
// Intentional no-op stub; presumably kept so harness callers expecting this
// hook don't fail — TODO confirm against callers before removing.
function PrintResult(e, a, s, p) {
}
// Intentional no-op stub (see PrintResult above).
function PrintHTMLFormattedResult( e, a, s, p ) {
}
/**
 * Writes one line of output: document.write with an HTML <br> in the
 * browser, print() in the shell.
 */
function WriteLine( s ) {
    if ( !window ) {
        print( s );
    } else {
        document.write( s + "<br>" );
    }
}
/**
 * Re-prints only the failing test cases, each with its expected value.
 */
function GetFailedCases() {
    for ( var i = 0; i < TESTCASES.length; i++ ) {
        var tc = TESTCASES[i];
        if ( tc.passed ) {
            continue;
        }
        WriteLine( "FAILED! " + tc.description + " = " + tc.actual +
                   " expected " + tc.expected );
    }
}
/**
* Given an object, display all its properties and the value of that
* property.
*/
/**
 * Writes every enumerable property of `o` along with its value;
 * function-valued properties print as the literal string "function".
 */
function Enumerate( o ) {
    WriteLine( "Properties of object " + o );
    for ( var p in o ) {
        var v = (typeof o[p] == "function") ? "function" : o[p];
        WriteLine( p + ": " + v );
    }
}
/**
* These are variables whose values depend on the host environment.
* The defaults here are correct for the JavaScript or XPConnect shell.
* In order to run the tests in the browser, need to override these
* values for the tests to execute correctly.
*
*/
var GLOBAL = "[object global]";
|
<reponame>Sherlock92/greentop
/**
* Copyright 2017 <NAME>. Distributed under the MIT license.
*/
#include "greentop/sport/SetExposureLimitForMarketGroupResponse.h"
namespace greentop {
namespace sport {
// Default-constructs an empty response; isValid() will return false.
SetExposureLimitForMarketGroupResponse::SetExposureLimitForMarketGroupResponse() {
}
// Constructs a response wrapping the given string payload.
SetExposureLimitForMarketGroupResponse::SetExposureLimitForMarketGroupResponse(const std::string& response) :
    response(response) {
}
// Populates this object from a JSON string value; leaves the current
// response untouched when validateJson() rejects the input.
void SetExposureLimitForMarketGroupResponse::fromJson(const Json::Value& json) {
    if (validateJson(json)) {
        response = json.asString();
    }
}
// Serializes the response back to a JSON string value; an empty response
// yields an empty Json::stringValue.
Json::Value SetExposureLimitForMarketGroupResponse::toJson() const {
    Json::Value json(Json::stringValue);
    if (response != "") {
        json = response;
    }
    return json;
}
// A response is considered valid whenever its payload string is non-empty.
bool SetExposureLimitForMarketGroupResponse::isValid() const {
    return response != "";
}
// Accessor for the raw response string.
const std::string& SetExposureLimitForMarketGroupResponse::getResponse() const {
    return response;
}
// Mutator for the raw response string.
void SetExposureLimitForMarketGroupResponse::setResponse(const std::string& response) {
    this->response = response;
}
}
}
|
import { createStore, applyMiddleware, combineReducers } from 'redux';
import { composeWithDevTools } from 'redux-devtools-extension/developmentOnly';
import thunk from 'redux-thunk';
import articleList from './articleList';
import replyList from './replyList';
import articleDetail from './articleDetail';
import replyDetail from './replyDetail';
import auth from './auth';
// Root reducer combining every feature slice into a single state tree.
const reducers = combineReducers({
    articleList,
    replyList,
    articleDetail,
    replyDetail,
    auth,
});
// Thunk middleware wrapped with Redux DevTools (development-only build of
// the extension hook, per the 'developmentOnly' import above).
const enhancer = composeWithDevTools(applyMiddleware(thunk));
// Store factory; `initialState` lets a caller pre-seed state — presumably
// for server-side rendering/hydration, TODO confirm against callers.
export default function makeStore(initialState) {
    return createStore(reducers, initialState, enhancer);
}
|
<gh_stars>0
export { default as CONTACT } from './Contact';
export { default as EMAIL } from './Email';
export { default as NOTIFICATION } from './Notification';
export { default as SMS } from './Sms';
export { default as STORY_CHOICE } from './StoryChoice';
export { default as STORY_SCRIPT } from './StoryScript';
export { default as PHOTO } from './Photo';
|
<reponame>gcusnieux/jooby
package org.jooby.issues;
import org.jooby.test.ServerFeature;
import org.junit.Test;
/**
 * Regression test for jooby issue #946: nested path(...) expressions must
 * compose into the full routes /946/api/some/:id and
 * /946/api/some/:id/enabled.
 */
public class Issue946 extends ServerFeature {
  {
    // Routes are declared in an instance initializer block.
    path("/946/api/some", () -> {
      path("/:id", () -> {
        // GET /946/api/some/:id -> echoes the :id path parameter.
        get(req -> req.param("id").value());
        // GET /946/api/some/:id/enabled -> also echoes :id.
        get("/enabled", req -> req.param("id").value());
      });
    });
  }

  @Test
  public void nestedPathExpression() throws Exception {
    request().get("/946/api/some/1")
        .expect("1");
    request().get("/946/api/some/2/enabled")
        .expect("2");
  }
}
|
#!/bin/bash
# Grid Engine (SGE) batch job: reconstructs MRI images with mrina.recover.
# The `#$` lines below are SGE directives (mail, 24-slot SMP, long queue)
# and must not be edited casually — the scheduler reads them.
#$ -M dschiavazzi@nd.edu
#$ -m abe
#$ -pe smp 24
#$ -q long
#$ -N mri_rec
module load python/3.7.3
# Limit numpy to a single thread
export MKL_NUM_THREADS=1
export NUMEXPR_NUM_THREADS=1
export OMP_NUM_THREADS=1
# Set Parameters
# Set Folders
KSPACEDIR="../"
RECDIR="./"
PATTERNDIR="../"
# Set Running Parameters
# PROCESSES matches the 24 slots requested via `-pe smp 24` above.
PROCESSES=24
REALIZATIONS=100
# Set Solver To Be Used For Reconstruction
SOLVERMODE=2
SAMPTYPE="vardengauss"
NOISEVAL=0.3
PVAL=0.25
# RECONSTRUCT IMAGES
# One reconstruction run per wavelet family.
for WAVETYPE in "haar" "db8"
do
echo 'Reconstructing' $NOISEVAL $PVAL $SAMPTYPE
python -m mrina.recover --noisepercent $NOISEVAL \
--urate $PVAL \
--utype $SAMPTYPE \
--repetitions $REALIZATIONS \
--numprocesses $PROCESSES \
--fromdir $KSPACEDIR \
--recdir $RECDIR \
--maskdir $PATTERNDIR \
--method $SOLVERMODE \
--wavelet $WAVETYPE \
--savevels \
--usemultipatterns
done
|
/**
 * Prints every non-decreasing three-character combination (with repetition)
 * drawn from the string "abc", one per line: aaa, aab, ..., ccc.
 */
public class Main {
    public static void main(String[] args) {
        final String s = "abc";
        final int n = s.length();
        // Indices satisfy i <= j <= k, giving combinations with repetition.
        for (int i = 0; i < n; i++) {
            for (int j = i; j < n; j++) {
                for (int k = j; k < n; k++) {
                    StringBuilder combo = new StringBuilder(3);
                    combo.append(s.charAt(i)).append(s.charAt(j)).append(s.charAt(k));
                    System.out.println(combo);
                }
            }
        }
    }
}
/**
 * Recursively flattens a nested menu tree into a flat list of
 * array("title" => ..., "icon" => ...) entries.
 *
 * A node counts as a leaf when it carries a "title" key. The original code
 * tested only is_array($value), but leaf entries here ARE arrays
 * (title/icon pairs), so recursion descended into them and then indexed
 * their string values ($value["title"] on the string "Home"), producing
 * corrupt single-character output and PHP string-offset warnings.
 */
function flattenMenu($menu) {
    $flattenedMenu = array();
    foreach ($menu as $key => $value) {
        if (is_array($value) && isset($value["title"])) {
            // Leaf entry: copy just the fields we expose.
            $flattenedMenu[] = array("title" => $value["title"], "icon" => $value["icon"]);
        } elseif (is_array($value)) {
            // Submenu: flatten recursively and append its leaves in order.
            $flattenedMenu = array_merge($flattenedMenu, flattenMenu($value));
        }
        // Non-array values are ignored; they cannot form a valid entry.
    }
    return $flattenedMenu;
}
// Demo data: a top-level "home" leaf plus a "sub" group with two leaves.
$menu = array(
    "home" => array(
        "title" => "Home",
        "icon" => "fa-home"
    ),
    "sub" => array(
        "file" => array(
            "title" => "File",
            "icon" => "fa-file-text"
        ),
        "fifth_lvl" => array(
            "title" => "Delete",
            "icon" => "fa-trash-o"
        )
    )
);
// Flatten the tree and dump the result for inspection.
$flattenedMenu = flattenMenu($menu);
print_r($flattenedMenu);
// Tencent is pleased to support the open source community by making LuaPanda available.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
// https://opensource.org/licenses/BSD-3-Clause
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import * as vscode from 'vscode';
import {
Logger, logger,
LoggingDebugSession,
InitializedEvent, TerminatedEvent, StoppedEvent, BreakpointEvent, OutputEvent,
Thread, StackFrame, Scope, Source, Breakpoint
} from 'vscode-debugadapter';
import { DebugProtocol } from 'vscode-debugprotocol';
import { basename } from 'path';
import { luaDebugRuntime, LuaBreakpoint } from './luaDebugRuntime';
const { Subject } = require('await-notify');
import * as Net from 'net';
import {dataProcesser} from './dataProcesser';
import {DebugLogger} from './LogManager';
import {StatusBarManager} from './StatusBarManager';
/**
 * Debug adapter session for Lua (LuaPanda): bridges VSCode's Debug Adapter
 * Protocol and the remote Lua debugger over a TCP socket. The adapter acts
 * as the TCP *server*; the Lua-side debugger connects to it as a client.
 * NOTE: runtime log/protocol strings below are intentionally left in
 * Chinese — they are emitted at runtime and must not be altered.
 */
export class LuaDebugSession extends LoggingDebugSession {
    // The debugger does not support multiple threads; THREAD_ID is hard-coded to 1.
    private static THREAD_ID = 1;
    // TCP port shared with the client; assigned from the VSCode settings.
    public static TCPPort = 0;
    // Breakpoints set before the socket connection is established are parked here.
    private static breakpointsArray;
    private static autoReconnect;
    private _configurationDone = new Subject();
    // Singleton instance of this session.
    private static instance: LuaDebugSession ;
    // Marks whether the adapter should stay connected: the adapter is the
    // server side, so it must wait for the client's request before disconnecting.
    public static userConnectionFlag;
    public static isListening;
    public static _server;
    public static getInstance():LuaDebugSession{
        return LuaDebugSession.instance;
    }
    // luaDebugRuntime instance.
    private _runtime: luaDebugRuntime;
    private UseLoadstring : boolean = false ;
    private static currentframeId : number = 0;
    public getRuntime(){
        return this._runtime;
    }
    public constructor() {
        super("lua-debug.txt");
        // Record the singleton instance.
        LuaDebugSession.instance = this;
        this.setDebuggerLinesStartAt1(true);
        this.setDebuggerColumnsStartAt1(true);
        // Create the runtime instance and wire it into the data processor.
        this._runtime = new luaDebugRuntime();
        dataProcesser._runtime = this._runtime;
        this._runtime.TCPSplitChar = "|*|";
        // Bind listeners for the runtime's state events, forwarding each as
        // a DAP StoppedEvent/BreakpointEvent/OutputEvent to VSCode.
        this._runtime.on('stopOnEntry', () => {
            this.sendEvent(new StoppedEvent('entry', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('stopOnStep', () => {
            this.sendEvent(new StoppedEvent('step', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('stopOnStepIn', () => {
            this.sendEvent(new StoppedEvent('step', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('stopOnStepOut', () => {
            this.sendEvent(new StoppedEvent('step', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('stopOnBreakpoint', () => {
            this.sendEvent(new StoppedEvent('breakpoint', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('stopOnException', () => {
            this.sendEvent(new StoppedEvent('exception', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('stopOnPause', () => {
            this.sendEvent(new StoppedEvent('exception', LuaDebugSession.THREAD_ID));
        });
        this._runtime.on('breakpointValidated', (bp: LuaBreakpoint) => {
            this.sendEvent(new BreakpointEvent('changed', <DebugProtocol.Breakpoint>{ verified: bp.verified, id: bp.id }));
        });
        this._runtime.on('output', (text, filePath, line, column) => {
            const e: DebugProtocol.OutputEvent = new OutputEvent(`${text}\n`);
            e.body.source = this.createSource(filePath);
            e.body.line = this.convertDebuggerLineToClient(line);
            e.body.column = this.convertDebuggerColumnToClient(column);
            this.sendEvent(e);
        });
    }
    /**
     * First request from the VSCode front end, asking which features this
     * debug adapter provides. The adapter fills in the capability flags and
     * returns them; VSCode adjusts its UI accordingly.
     */
    protected initializeRequest(response: DebugProtocol.InitializeResponse, args: DebugProtocol.InitializeRequestArguments): void {
        DebugLogger.AdapterInfo("initializeRequest!");
        // Declare debugger capabilities.
        response.body = response.body || {};
        response.body.supportsConfigurationDoneRequest = true;
        // Hover evaluation of variable values is supported.
        response.body.supportsEvaluateForHovers = true;
        response.body.supportsStepBack = false; // no step-back button
        response.body.supportsSetVariable = false; // no editing of variable values
        this.sendResponse(response);
    }
    /**
     * Notifies launchRequest once configuration is done.
     */
    protected configurationDoneRequest(response: DebugProtocol.ConfigurationDoneResponse, args: DebugProtocol.ConfigurationDoneArguments): void {
        super.configurationDoneRequest(response, args);
        this._configurationDone.notify();
    }
    /**
     * `args` carries the configuration read from launch.json; it is
     * forwarded to the debugger over the socket once a client connects.
     */
    protected async launchRequest(response: DebugProtocol.LaunchResponse, args) {
        logger.setup(args.trace ? Logger.LogLevel.Verbose : Logger.LogLevel.Stop, false);
        // Wait for the configurationDoneRequest notification (max 1s).
        await this._configurationDone.wait(1000);
        // 1. Build the initialization info to send to the debugger.
        let os = require("os");
        let path = require("path");
        // Native debug plugin directory: Debugger/debugger_lib/plugins/<OS>/libpdebug_<version>.so
        let clibPath = path.dirname(__dirname) + '/Debugger/debugger_lib/plugins/'
        let sendArgs =new Array();
        sendArgs["stopOnEntry"] = !!args.stopOnEntry;
        sendArgs["luaFileExtension"] = args.luaFileExtension;
        sendArgs["cwd"] = args.cwd;
        sendArgs["TempFilePath"] = args.TempFilePath;
        sendArgs["logLevel"] = args.logLevel;
        sendArgs["debugMode"] = args.DebugMode;
        sendArgs["pathCaseSensitivity"] = args.pathCaseSensitivity;
        sendArgs["OSType"] = os.type();
        sendArgs["clibPath"] = clibPath;
        sendArgs["useHighSpeedModule"] = args.useHighSpeedModule;
        LuaDebugSession.autoReconnect = args.autoReconnect;
        // 2. Reset the memory-analysis status bar.
        StatusBarManager.reset();
        // 3. Pack the response into the callback arguments.
        let callbackArgs =new Array();
        callbackArgs.push(this);
        callbackArgs.push(response);
        // 4. Start the adapter's socket | VSCode = server ; debugger = client.
        LuaDebugSession._server = Net.createServer(socket=>{
            //--connect--
            DebugLogger.AdapterInfo("Debugger " + socket.remoteAddress + ":" + socket.remotePort + " connect!" );
            dataProcesser._socket = socket;
            // Send the initialization protocol (with configuration) to the debugger.
            this._runtime.start((arr, info) => {
                DebugLogger.AdapterInfo("已建立连接,发送初始化协议和断点信息!");
                // Record flags from the debugger's reply.
                if (info.UseLoadstring == "1"){
                    this.UseLoadstring = true;
                }else{
                    this.UseLoadstring = false;
                }
                if (info.UseHookLib == "1"){}
                // Connection established and initialization complete.
                let ins = arr[0];
                ins.sendResponse(arr[1]);
                LuaDebugSession.userConnectionFlag = true;
                LuaDebugSession.isListening = false;
                // Flush breakpoints that were parked before the connection.
                for (var bkMap of LuaDebugSession.breakpointsArray) {
                    this._runtime.setBreakPoint(bkMap.bkPath, bkMap.bksArray, null,null);
                }
            }, callbackArgs ,sendArgs);
            //--connect end--
            socket.on('end',()=>{
                DebugLogger.AdapterInfo('socket end');
            });
            socket.on('close',()=>{
                if (LuaDebugSession.isListening == true){
                    DebugLogger.AdapterInfo('close socket when listening!');
                    return;
                }
                DebugLogger.AdapterInfo('Socket close!');
                vscode.window.showInformationMessage('Stop connecting!');
                // Stop the connection.
                LuaDebugSession._server.close();
                LuaDebugSession.userConnectionFlag = false;
                delete dataProcesser._socket;
                // Stop VSCode's debugging mode.
                this.sendEvent(new TerminatedEvent(LuaDebugSession.autoReconnect));
            });
            socket.on('data',(data)=>{
                DebugLogger.AdapterInfo('[Get Msg]:' + data);
                dataProcesser.processMsg(data.toString());
            });
        }).listen(LuaDebugSession.TCPPort, function(){
            DebugLogger.AdapterInfo("listen");
        });
        LuaDebugSession.isListening = true;
        LuaDebugSession.breakpointsArray = new Array();
        this.sendEvent(new InitializedEvent()); // after this returns, setBreakPoints runs
    }
    /**
     * VSCode -> adapter: set (or delete) breakpoints.
     */
    protected setBreakPointsRequest(response: DebugProtocol.SetBreakpointsResponse, args: DebugProtocol.SetBreakpointsArguments): void {
        DebugLogger.AdapterInfo('setBreakPointsRequest');
        const path = <string>args.source.path;
        const clientLines = args.lines || []; // every breakpoint line number in this file
        let vscodeBreakpoints = new Array(); // breakpoints as the VSCode UI sees them (line numbers start at 1)
        clientLines.map(l => {
            const id = this._runtime.getBreakPointId()
            const bp = <DebugProtocol.Breakpoint> new Breakpoint(true, l);
            bp.id= id;
            vscodeBreakpoints.push(bp);
        });
        response.body = {
            breakpoints: vscodeBreakpoints
        };
        if (dataProcesser._socket && LuaDebugSession.userConnectionFlag){
            // Connection already established: forward breakpoints immediately.
            let callbackArgs =new Array();
            callbackArgs.push(this);
            callbackArgs.push(response);
            this._runtime.setBreakPoint(path, vscodeBreakpoints, function(arr) {
                DebugLogger.AdapterInfo("确认断点");
                let ins = arr[0];
                // Once the debugger confirms, notify VSCode so the UI marks
                // the breakpoints as verified.
                ins.sendResponse(arr[1]);
            }, callbackArgs);
        }else{
            // Not connected yet: park the breakpoints for later.
            if (LuaDebugSession.breakpointsArray != undefined){
                for (var bkMap of LuaDebugSession.breakpointsArray) {
                    if (bkMap.bkPath == path){
                        bkMap["bksArray"] = vscodeBreakpoints;
                        this.sendResponse(response);
                        return;
                    }
                }
                let bk = new Object();
                bk["bkPath"] =path;
                bk["bksArray"] = vscodeBreakpoints;
                LuaDebugSession.breakpointsArray.push(bk);
            }
            this.sendResponse(response);
        }
    }
    /**
     * Stack trace for the current break.
     */
    protected stackTraceRequest(response: DebugProtocol.StackTraceResponse, args: DebugProtocol.StackTraceArguments): void {
        const startFrame = typeof args.startFrame === 'number' ? args.startFrame : 0;
        const maxLevels = typeof args.levels === 'number' ? args.levels : 1000;
        const endFrame = startFrame + maxLevels;
        const stk = this._runtime.stack(startFrame, endFrame);
        response.body = {
            stackFrames: stk.frames.map(f => new StackFrame(f.index, f.name, this.createSource(f.file), f.line)),
            totalFrames: stk.count
        };
        this.sendResponse(response);
    }
    /**
     * Watched-expression / hover / REPL evaluation.
     */
    protected evaluateRequest(response: DebugProtocol.EvaluateResponse, args: DebugProtocol.EvaluateArguments): void {
        // watch -- the watch window (and hover).
        if (args.context == "watch" || args.context == "hover"){
            let callbackArgs =new Array();
            callbackArgs.push(this);
            callbackArgs.push(response);
            // Normalize B["A"] / B['A'] into the B.A form when loadstring
            // is not available on the debugger side.
            if(this.UseLoadstring == false){
                let watchString = args.expression;
                watchString = watchString.replace(/\[/g,".");
                watchString = watchString.replace(/\"/g,"");
                watchString = watchString.replace(/\'/g,"");
                watchString = watchString.replace(/]/g,"");
                args.expression = watchString;
            }
            this._runtime.getWatchedVariable((arr, info) => {
                if(info.length == 0){
                    // Nothing found for this expression.
                    arr[1].body = {
                        result: 'nil',
                        variablesReference: 0
                    };
                }else{
                    arr[1].body = {
                        result: info[0].value,
                        type: info[0].type,
                        variablesReference: parseInt(info[0].variablesReference)
                    };
                }
                let ins = arr[0]; // first element is the session instance
                ins.sendResponse(arr[1]); // second element is the response
            }, callbackArgs, args.expression, args.frameId);
        }else if(args.context == "repl"){
            // repl -- the debug console.
            let callbackArgs =new Array();
            callbackArgs.push(this);
            callbackArgs.push(response);
            this._runtime.getREPLExpression((arr, info) => {
                if(info.length == 0){
                    // Nothing found for this expression.
                    arr[1].body = {
                        result: 'nil',
                        variablesReference: 0
                    };
                }else{
                    arr[1].body = {
                        result: info[0].value,
                        type: info[0].type,
                        variablesReference: parseInt(info[0].variablesReference)
                    };
                }
                let ins = arr[0];
                ins.sendResponse(arr[1]);
            }, callbackArgs, args.expression, args.frameId);
        }else{
            this.sendResponse(response);
        }
    }
    /**
     * The variable categories listed in the Variables panel.
     */
    protected scopesRequest(response: DebugProtocol.ScopesResponse, args: DebugProtocol.ScopesArguments): void {
        LuaDebugSession.currentframeId = args.frameId; // frameId indicates call-stack depth, starting at 2
        const scopes = new Array<Scope>();
        // Scope reference ids: local = 10000, global = 20000, upvalue = 30000.
        scopes.push(new Scope("Local", 10000, false));
        scopes.push(new Scope("Global", 20000, true));
        scopes.push(new Scope("UpValue", 30000, false));
        response.body = {
            scopes: scopes
        };
        this.sendResponse(response);
    }
    /**
     * Variable values. Requested separately each time an object is expanded
     * or the selected stack frame changes.
     */
    protected variablesRequest(response: DebugProtocol.VariablesResponse, args: DebugProtocol.VariablesArguments): void {
        let callbackArgs =new Array();
        callbackArgs.push(this);
        callbackArgs.push(response);
        this._runtime.getVariable((arr, info) => {
            const variables = new Array<DebugProtocol.Variable>();
            info.forEach(element => {
                variables.push({
                    name: element.name,
                    type: element.type,
                    value: element.value,
                    variablesReference: parseInt(element.variablesReference)
                });
            });
            arr[1].body = {
                variables: variables
            };
            let ins = arr[0];
            ins.sendResponse(arr[1]);
        }, callbackArgs, args.variablesReference, LuaDebugSession.currentframeId);
    }
    /**
     * Continue execution.
     */
    protected continueRequest(response: DebugProtocol.ContinueResponse, args: DebugProtocol.ContinueArguments): void {
        let callbackArgs =new Array();
        callbackArgs.push(this);
        callbackArgs.push(response);
        this._runtime.continue(arr => {
            DebugLogger.AdapterInfo("确认继续运行");
            let ins = arr[0];
            ins.sendResponse(arr[1]);
        }, callbackArgs);
    }
    /**
     * Step over.
     */
    protected nextRequest(response: DebugProtocol.NextResponse, args: DebugProtocol.NextArguments): void {
        let callbackArgs =new Array();
        callbackArgs.push(this);
        callbackArgs.push(response);
        this._runtime.step(arr => {
            DebugLogger.AdapterInfo("确认单步");
            let ins = arr[0];
            ins.sendResponse(arr[1]);
        }, callbackArgs);
    }
    /**
     * Step in.
     */
    protected stepInRequest(response: DebugProtocol.StepInResponse, args: DebugProtocol.StepInArguments): void{
        let callbackArgs =new Array();
        callbackArgs.push(this);
        callbackArgs.push(response);
        this._runtime.step(arr => {
            DebugLogger.AdapterInfo("确认StepIn");
            let ins = arr[0];
            ins.sendResponse(arr[1]);
        }, callbackArgs, 'stopOnStepIn');
    }
    /**
     * Step out.
     */
    protected stepOutRequest(response: DebugProtocol.StepOutResponse, args: DebugProtocol.StepOutArguments): void{
        let callbackArgs =new Array();
        callbackArgs.push(this);
        callbackArgs.push(response);
        this._runtime.step(arr => {
            DebugLogger.AdapterInfo("确认StepOut");
            let ins = arr[0];
            ins.sendResponse(arr[1]);
        }, callbackArgs, 'stopOnStepOut');
    }
    /**
     * Pause — not supported yet.
     */
    protected pauseRequest(response: DebugProtocol.PauseResponse, args: DebugProtocol.PauseArguments): void{
        vscode.window.showInformationMessage('pauseRequest!');
    }
    /**
     * Disconnect.
     */
    protected disconnectRequest(response: DebugProtocol.DisconnectResponse, args): void{
        DebugLogger.AdapterInfo("disconnectRequest");
        let restart = args.restart;
        // Tell the Lua side to stop running.
        let callbackArgs =new Array();
        callbackArgs.push(restart);
        this._runtime.stopRun(arr => {
            // The client disconnects on its own; this is just an acknowledgement.
            DebugLogger.AdapterInfo("确认stop");
        }, callbackArgs, 'stopRun');
        LuaDebugSession.userConnectionFlag = false;
        this.sendResponse(response);
        LuaDebugSession._server.close();
    }
    // Restart — not implemented; only logged.
    protected restartRequest(response: DebugProtocol.RestartResponse, args: DebugProtocol.RestartArguments): void{
        DebugLogger.AdapterInfo("restartRequest");
    }
    // Restart frame — not implemented; only logged.
    protected restartFrameRequest(response: DebugProtocol.RestartFrameResponse, args: DebugProtocol.RestartFrameArguments): void{
        DebugLogger.AdapterInfo("restartFrameRequest");
    }
    // Builds a DAP Source from a debugger-side file path.
    private createSource(filePath: string): Source {
        return new Source(basename(filePath), this.convertDebuggerPathToClient(filePath), undefined, undefined, undefined);
    }
    // Always reports the single hard-coded thread (see THREAD_ID).
    protected threadsRequest(response: DebugProtocol.ThreadsResponse): void {
        response.body = {
            threads: [
                new Thread(LuaDebugSession.THREAD_ID, "thread 1")
            ]
        };
        this.sendResponse(response);
    }
    // Asks the Lua side to run a garbage-collection cycle.
    public LuaGarbageCollect(){
        this._runtime.luaGarbageCollect();
    }
}
|
#!/bin/bash
# Bootstrap/update script for ~/tsystem: installs packages, runs any
# not-yet-applied migration scripts, and installs the shell rc file.
cd ~/tsystem
apt=`command -v apt`
# add-apt-repo only applies on non-macOS systems that have apt.
if [ "`uname`" != "Darwin" -a "$apt" != "" ]; then
    ./bin/add-apt-repo.sh
fi
./bin/package-install.sh
cd migration
files=($(ls))
for f in "${files[@]}"
do
    # Run each migration exactly once, recording completed ones in
    # ../migrated.txt. Use a quiet fixed-string whole-line match: the
    # original `cat ../migrated.txt | grep $f` matched substrings, so a
    # migration whose name is a substring of an already-applied one
    # (e.g. "01-db" vs "01-db-fix") was silently skipped.
    if ! grep -qxF -- "$f" ../migrated.txt 2>/dev/null; then
        echo "execute $f"
        bash $f
        echo $f >> ../migrated.txt
    fi
done
cd ~/tsystem
# macOS login shells read ~/.bash_profile; Linux interactive shells ~/.bashrc.
if [ "`uname`" = "Darwin" ]; then
    cp bashrc.txt ~/.bash_profile
else
    cp bashrc.txt ~/.bashrc
fi
cd ~/tsystem/bin
|
import { SvgTemplates } from '../html-templates';
import AbstractView from './abstract-view';
// Returns the static HTML for the profile-parameters modal dialog:
// three text inputs (scenario / carrier / profile names, labels in Russian),
// a close control, and a save checkbox whose popup asks for confirmation.
// The markup string must stay byte-identical — selectors in init() depend
// on these ids.
const getTemplate = () => {
return `<div class="app-modals__profile-params-dialog">
<div class="app-modals__profile-params-dialog-controls">
<span id="profile-params-dialog-close" class="app-modals__profile-params-dialog-controls-close">X</span>
<div class="app-modals__profile-params-dialog-set">
<label>
Название сценария
<input type="text" name="" id="profile-params-dialog-scenario-name"
class="app-modals__profile-params-dialog-set-input">
</label>
<label>
Название носителя
<input type="text" name="" id="profile-params-dialog-carrier-name"
class="app-modals__profile-params-dialog-set-input">
</label>
<label>
Название профиля
<input type="text" name="" id="profile-params-dialog-profile-name"
class="app-modals__profile-params-dialog-set-input">
</label>
<div>
<input type="checkbox" id="profile-params-dialog-save-checkbox"></input>
<label for="profile-params-dialog-save-checkbox">
${SvgTemplates.getYellowFloppyDiskImage()}Сохранить изменения
</label>
<div class="svg-input-popup">
<span>Подтвердите действие</span>
<div class="flex-row-only">
<button id="profile-params-dialog-save-confirm">ДА, сохранить изменения</button>
<button id="profile-params-dialog-save-cancel">ОТМЕНА</button>
</div>
</div>
</div>
</div>
</div>
</div>`;
};
/**
 * Modal dialog view for editing scenario/carrier/profile names.
 * Open/close are animated by toggling opacity and max-width/height with a
 * 0.5s CSS transition; exact ordering of the style writes matters.
 * Extends AbstractView (getElement/container come from the base class —
 * assumed contract, confirm against abstract-view.js).
 */
export default class ProfileControlView extends AbstractView {
    constructor({container, saveButtonClickCallback}) {
        super(container);
        // Invoked when the user confirms the save popup.
        this._saveButtonClickCallback = saveButtonClickCallback;
        // DOM references, resolved in init() after the template is rendered.
        this._close = null;
        this._saveChangesCheckbox = null;
        this._saveChangesConfirm = null;
        this._saveChangesCancel = null;
        this._scenarioTitleInput = null;
        this._carrierTitleInput = null;
        this._profileTitleInput = null;
        this.init();
    }
    getTemplate() {
        return getTemplate();
    }
    // Shows the dialog, pre-filling whichever titles were provided, then
    // fades it in (transition re-enabled one tick after the reset).
    open({scenarioTitle, carrierTitle, profileTitle}) {
        this.getElement().style.opacity = '0';
        this.getElement().style.transition = 'none';
        this.getElement().style.maxWidth = '100vw';
        this.getElement().style.maxHeight = '100vh';
        this.getElement().style.padding = '10px';
        if(scenarioTitle) {
            this._scenarioTitleInput.value = scenarioTitle;
        }
        if(carrierTitle) {
            this._carrierTitleInput.value = carrierTitle;
        }
        if(profileTitle) {
            this._profileTitleInput.value = profileTitle;
        }
        setTimeout(() => {
            this.getElement().style.transition = '0.5s';
            this.getElement().style.opacity = '1';
        }, 20);
    }
    // Fades the dialog out, then collapses it after the 500ms transition.
    close() {
        this._saveChangesCheckbox.checked = false;
        this.getElement().style.opacity = '0';
        setTimeout(() => {
            this.getElement().style.padding = '0';
            this.getElement().style.transition = 'none';
            this.getElement().style.maxWidth = '0';
            this.getElement().style.maxHeight = '0';
        }, 500);
    }
    getScenarioTitle() {
        return this._scenarioTitleInput.value;
    }
    getCarrierTitle() {
        return this._carrierTitleInput.value;
    }
    getProfileTitle() {
        return this._profileTitleInput.value;
    }
    // Wires up close, cancel (unchecks the save checkbox) and confirm
    // (delegates to the injected callback) handlers.
    _attachEventListeners() {
        this._close.addEventListener('click', () => {
            this.close();
        });
        this._saveChangesCancel.addEventListener('click', () => {
            this._saveChangesCheckbox.checked = false;
        });
        this._saveChangesConfirm.addEventListener('click', () => {
            if(this._saveButtonClickCallback) {
                this._saveButtonClickCallback();
            }
        });
    }
    // Resolves all DOM references by id (must match getTemplate()'s markup)
    // and attaches the event listeners.
    init() {
        this._saveChangesCheckbox = this.getElement().querySelector('#profile-params-dialog-save-checkbox');
        this._saveChangesConfirm = this.getElement().querySelector('#profile-params-dialog-save-confirm');
        this._saveChangesCancel = this.getElement().querySelector('#profile-params-dialog-save-cancel');
        this._scenarioTitleInput = this.getElement().querySelector('#profile-params-dialog-scenario-name');
        this._carrierTitleInput = this.getElement().querySelector('#profile-params-dialog-carrier-name');
        this._profileTitleInput = this.getElement().querySelector('#profile-params-dialog-profile-name');
        this._close = this.getElement().querySelector('#profile-params-dialog-close');
        this._attachEventListeners();
    }
}
<reponame>weily22/react_hooks
import UseEffectDemo from './UseEffectDemo';
import './UseEffectDemo.scss';
export default UseEffectDemo;
|
// Pre-populates a selector element with the value from an existing row on
// the update page; a null value maps to the literal string 'null'
// (presumably matching a "null" <option> in the markup — TODO confirm).
function attributeSelected(attribute, selected) {
    var selector = document.getElementById(attribute);
    selector.value = (selected === null) ? 'null' : selected;
}
#!/bin/bash
# Downloads a (large) file from Google Drive, following the interstitial
# "can't scan for viruses" confirmation page.
#   $1 - output file path
#   $2 - Google Drive file id
# Variables are now quoted so paths containing spaces don't word-split,
# and the temp cookie file is removed with -f to avoid a spurious error.
function download_from_google_drive() {
    COOKIE_FILE=$(mktemp)
    # First request stores the session cookie and extracts the confirm token.
    CONFIRM_ID=$(curl -c "$COOKIE_FILE" -s -L "https://drive.google.com/uc?export=download&id=$2" | grep confirm | sed -e "s/^.*confirm=\(.*\)&id=.*$/\1/")
    # Second request replays the cookie with the confirm token for the payload.
    curl -b "$COOKIE_FILE" -L -o "$1" "https://drive.google.com/uc?confirm=${CONFIRM_ID}&export=download&id=$2"
    rm -f "$COOKIE_FILE"
    return 0
}
# Downloads (if needed), trains and evaluates AnnexML on one dataset.
#   $1 - dataset name (e.g. Wiki10)
#   $2 - Google Drive id of the dataset zip
#   $3 - propensity parameter A, $4 - propensity parameter B
function train_and_evaluate() {
    TARGET=$1
    GDRIVE_ID=$2
    TARGET_DATA_DIR=$DATA_DIR/$TARGET
    # Fetch and unpack the dataset only when not already present.
    if [ ! -d $TARGET_DATA_DIR ]; then
        pushd $DATA_DIR
        if [ ! -f ${TARGET}.zip ]; then
            download_from_google_drive ${TARGET}.zip $GDRIVE_ID
        fi
        unzip ${TARGET}.zip
        popd
    fi
    # Data files use a lower-cased first letter of the dataset name
    # (GNU sed \L extension).
    FILE_HEAD=$(echo $TARGET | sed -e 's/^\(.\)/\L\1/')
    TRAIN_FILE=$TARGET_DATA_DIR/${FILE_HEAD}_train.txt
    PREDICT_FILE=$TARGET_DATA_DIR/${FILE_HEAD}_test.txt
    MODEL_FILE=$WORK_DIR/annexml-model-${TARGET}.bin
    RESULT_FILE=$WORK_DIR/annexml-result-${TARGET}.txt
    echo "----------------------------------------"
    echo $TARGET
    # Train, predict, then score with plain and propensity-scored metrics.
    $SRC_DIR/annexml train annexml-example.json train_file=${TRAIN_FILE} model_file=${MODEL_FILE}
    $SRC_DIR/annexml predict annexml-example.json predict_file=${PREDICT_FILE} model_file=${MODEL_FILE} result_file=${RESULT_FILE}
    cat ${RESULT_FILE} | python $SCRIPTS_DIR/learning-evaluate_predictions.py
    cat ${RESULT_FILE} | python $SCRIPTS_DIR/learning-evaluate_predictions_propensity_scored.py $TRAIN_FILE -A $3 -B $4
    echo "----------------------------------------"
    echo ""
    return 0
}
# Driver: resolve directories relative to this script, build annexml if
# needed, then run the enabled benchmark(s).
cd $(dirname $0)
SRC_DIR=$(cd src && pwd)
SCRIPTS_DIR=$(cd scripts && pwd)
if [ ! -d data ]; then
    mkdir data
fi
DATA_DIR=$(cd data && pwd)
if [ ! -d work ]; then
    mkdir work
fi
WORK_DIR=$(cd work && pwd)
# Build the annexml binary on first run.
if [ ! -x $SRC_DIR/annexml ]; then
    make -C $SRC_DIR annexml
fi
# Only Wiki10 is enabled by default; the other datasets are much larger.
#train_and_evaluate AmazonCat "0B3lPMIHmG6vGa2tMbVJGdDNSMGc" 0.55 1.5
train_and_evaluate Wiki10 "0B3lPMIHmG6vGaDdOeGliWF9EOTA" 0.55 1.5
#train_and_evaluate DeliciousLarge "0B3lPMIHmG6vGR3lBWWYyVlhDLWM" 0.55 1.5
#train_and_evaluate WikiLSHTC "0B3lPMIHmG6vGSHE1SWx4TVRva3c" 0.5 0.4
#train_and_evaluate Amazon "0B3lPMIHmG6vGdUJwRzltS1dvUVk" 0.6 2.6
|
require 'hydra/file_characterization/exceptions'
require 'open3'
require 'active_support/core_ext/class/attribute'
module Hydra::FileCharacterization
  # Base class wrapping an external file-characterization tool.
  # Subclasses override #command; the tool binary defaults to the
  # down-cased class name unless a tool_path is configured or passed in.
  class Characterizer
    include Open3
    class_attribute :tool_path
    attr_reader :filename

    # @param filename [String] path of the file to characterize
    # @param tool_path [String, #call, nil] per-instance tool override; a
    #   callable is invoked directly with the filename
    def initialize(filename, tool_path = nil)
      @filename = filename
      @tool_path = tool_path
    end

    # Runs the characterization tool and returns its stdout.
    # @raise [Hydra::FileCharacterization::FileNotFoundError] if the file is missing
    def call
      unless File.exist?(filename)
        # Fixes: File.exists? is deprecated (removed in Ruby 3.2), and the
        # message previously had a broken interpolation.
        raise Hydra::FileCharacterization::FileNotFoundError.new("File: #{filename} does not exist.")
      end
      if tool_path.respond_to?(:call)
        tool_path.call(filename)
      else
        internal_call
      end
    end

    # Resolution order: instance override, class-level setting, convention.
    def tool_path
      @tool_path || self.class.tool_path || convention_based_tool_name
    end

    protected

    # e.g. Hydra::FileCharacterization::Fits -> "fits"
    def convention_based_tool_name
      self.class.name.split("::").last.downcase
    end

    # Spawns the tool, captures stdout/stderr, and raises on a non-zero
    # exit status; all pipes are closed even on failure.
    def internal_call
      stdin, stdout, stderr, wait_thr = popen3(command)
      begin
        out = stdout.read
        err = stderr.read
        exit_status = wait_thr.value
        raise "Unable to execute command \"#{command}\"\n#{err}" unless exit_status.success?
        out
      ensure
        stdin.close
        stdout.close
        stderr.close
      end
    end

    # Subclass hook: the shell command line to execute.
    def command
      raise NotImplementedError, "Method #command should be overridden in child classes"
    end
  end
end
|
/*******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2021 <NAME> - www.xs-labs.com
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
#include <iostream>
#include <string>
#include <thread>
#include <vector>
#include <functional>
#include <unistd.h>
#include <spawn.h>
#include <sys/wait.h>
#include <poll.h>
bool exec( const std::string & command, const std::vector< std::string > & args, int64_t & processID, std::string & output, std::string & error, bool wait );
// Demo entry point: run `ls -al /` synchronously and print the child's
// PID, captured stdout and captured stderr.
int main( void )
{
    int64_t     childPID = 0;
    std::string stdOut;
    std::string stdErr;

    exec( "/bin/ls", { "-al", "/" }, childPID, stdOut, stdErr, true );

    std::cout << "PID: "    << childPID << std::endl;
    std::cout << "Output: " << stdOut   << std::endl;
    std::cout << "Error: "  << stdErr   << std::endl;

    return 0;
}
namespace Core
{
    // Minimal scope guard: callbacks registered with += are invoked, in
    // registration order, when the Defer object is destroyed.
    class Defer
    {
        public:

            ~Defer()
            {
                for( const auto & callback: this->_callbacks )
                {
                    callback();
                }
            }

            Defer & operator += ( const std::function< void( void ) > & callback )
            {
                this->_callbacks.push_back( callback );

                return *( this );
            }

        private:

            std::vector< std::function< void( void ) > > _callbacks;
    };
}
/*
 * Spawns `command` with `args` via posix_spawnp, reporting the child PID in
 * processID. When `wait` is true, drains the child's stdout/stderr into
 * `output`/`error` and reaps the child before returning.
 *
 * Fixes vs. the original:
 *  - the pipe read ends (and, on early-error paths, all four descriptors)
 *    were leaked; a single Defer now closes whatever is still open
 *  - calloc() arguments were in (size, count) order; now (count, size)
 *  - envp was a null char** rather than a valid empty environment array
 *  - the poll loop's else-if serviced stderr only when stdout was idle,
 *    so a chatty stdout could starve stderr; both streams are now drained
 *    on every wakeup, and read errors/EOF retire a descriptor explicitly
 */
bool exec( const std::string & command, const std::vector< std::string > & args, int64_t & processID, std::string & output, std::string & error, bool wait )
{
    Core::Defer                defer;
    int                        pipeOut[ 2 ] = { -1, -1 };
    int                        pipeErr[ 2 ] = { -1, -1 };
    posix_spawn_file_actions_t actions;

    processID = 0;

    // Close every descriptor still open when the function exits, on any path.
    defer += [ & ]
    {
        for( int fd: { pipeOut[ 0 ], pipeOut[ 1 ], pipeErr[ 0 ], pipeErr[ 1 ] } )
        {
            if( fd >= 0 )
            {
                close( fd );
            }
        }
    };

    if( pipe( pipeOut ) != 0 || pipe( pipeErr ) != 0 )
    {
        return false;
    }

    if( posix_spawn_file_actions_init( &actions ) != 0 )
    {
        return false;
    }

    defer += [ & ]
    {
        posix_spawn_file_actions_destroy( &actions );
    };

    // Child-side plumbing: drop the read ends, route fds 1/2 into the pipes,
    // then drop the now-duplicated write ends.
    if(    posix_spawn_file_actions_addclose( &actions, pipeOut[ 0 ] )    != 0
        || posix_spawn_file_actions_addclose( &actions, pipeErr[ 0 ] )    != 0
        || posix_spawn_file_actions_adddup2(  &actions, pipeOut[ 1 ], 1 ) != 0
        || posix_spawn_file_actions_adddup2(  &actions, pipeErr[ 1 ], 2 ) != 0
        || posix_spawn_file_actions_addclose( &actions, pipeOut[ 1 ] )    != 0
        || posix_spawn_file_actions_addclose( &actions, pipeErr[ 1 ] )    != 0 )
    {
        return false;
    }

    // argv = { command, args..., nullptr } — note calloc( count, size ).
    char ** argv    = static_cast< char ** >( calloc( args.size() + 2, sizeof( char * ) ) );
    char *  envp[]  = { nullptr }; // valid empty environment for the child

    if( argv == nullptr )
    {
        return false;
    }

    defer += [ & ]
    {
        for( size_t i = 0; i < args.size() + 2; i++ )
        {
            free( argv[ i ] );
        }

        free( argv );
    };

    argv[ 0 ] = strdup( command.c_str() );

    for( size_t i = 0; i < args.size(); i++ )
    {
        argv[ i + 1 ] = strdup( args[ i ].c_str() );
    }

    pid_t pid    = 0;
    int   status = posix_spawnp( &pid, command.c_str(), &actions, nullptr, argv, envp );

    processID = pid;

    // The parent must not keep the write ends open, or read() below would
    // never see EOF once the child exits.
    close( pipeOut[ 1 ] );
    close( pipeErr[ 1 ] );

    pipeOut[ 1 ] = -1;
    pipeErr[ 1 ] = -1;

    if( status != 0 )
    {
        return false;
    }

    if( wait )
    {
        char    buff[ 1024 ];
        ssize_t n;
        pollfd  fds[ 2 ] = { { pipeOut[ 0 ], POLLIN, 0 }, { pipeErr[ 0 ], POLLIN, 0 } };

        // Drain both pipes until each reports EOF or an error. A negative
        // fd is ignored by poll(), which is how a retired stream is marked.
        while( ( fds[ 0 ].fd >= 0 || fds[ 1 ].fd >= 0 ) && poll( fds, 2, -1 ) > 0 )
        {
            for( int s = 0; s < 2; s++ )
            {
                if( fds[ s ].fd < 0 || fds[ s ].revents == 0 )
                {
                    continue;
                }

                n = read( fds[ s ].fd, buff, sizeof( buff ) );

                if( n > 0 )
                {
                    ( ( s == 0 ) ? output : error ) += std::string( buff, static_cast< size_t >( n ) );
                }
                else
                {
                    fds[ s ].fd = -1; // EOF or read error: stop watching this stream
                }
            }
        }

        waitpid( pid, &status, 0 );
    }

    return true;
}
|
#!/usr/bin/env bash
# This script connects a node to mainnet

# Kill any background children when the script exits. Registered *before*
# the node is launched so the trap is actually in effect while the node
# runs — the original installed it only after the blocking `cabal run`
# had already returned, when it could no longer guard anything.
function cleanup()
{
  for child in $(jobs -p); do
    echo kill "$child" && kill "$child"
  done
}
trap cleanup EXIT

ROOT="$(realpath "$(dirname "$0")/../..")"
configuration="${ROOT}/configuration/cardano"

data_dir=mainnetsingle
mkdir -p "${data_dir}"
db_dir="${data_dir}/db/node"
mkdir -p "${db_dir}"
socket_dir="${data_dir}/socket"
mkdir -p "${socket_dir}"

# Launch a node
cabal run exe:cardano-node -- run \
  --config "${configuration}/membench-config-old.json" \
  --topology "${configuration}/mainnet-topology.json" \
  --database-path "${db_dir}" \
  --socket-path "${socket_dir}/node-1-socket" \
  --host-addr "127.0.0.1" \
  --port "3001"
|
import java.util.*;
/**
 * Prints all prime numbers up to a fixed limit.
 */
public class PrimeNumber {

    public static void main(String[] args) {
        int num = 6;
        System.out.println("List of prime numbers till "+num+ " : ");
        printPrimeNumbers(num);
    }

    /** Prints every prime in [2, num], separated by single spaces. */
    public static void printPrimeNumbers(int num){
        for (int i = 2; i <= num; i++) {
            if (isPrime(i))
                System.out.print(i + " ");
        }
    }

    /**
     * Returns true if {@code n} is prime.
     *
     * Fixes vs. the original: n &lt; 2 (0, 1 and negatives) is no longer
     * reported as prime, and trial division stops at sqrt(n) instead of
     * scanning all the way to n/2.
     */
    public static boolean isPrime(int n){
        if (n < 2)
            return false;
        for (int i = 2; (long) i * i <= n; i++) {
            if (n % i == 0)
                return false;
        }
        return true;
    }
}
<gh_stars>1-10
/*
* JCY
* 07/2007
* Derived Datatype functions for mpi-serial
*/
#include "type.h"
#include "mpiP.h"
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
/*
* NOTES: All MPI_ prefixed (public) functions operate
* using the integer handle for a datatype. Most of these
* functions are wrapper functions for a different function,
* _not_ prefixed with MPI_. These functions translate the
* handle to a pointer and call the non-MPI_ func.
*
* Fortran bindings use FC_FUNC, as defined in mpiP.h.
*/
/*
* Wrapper for mpi_handle_to_ptr in handles.c
* specific for datatype handles, which may be
* predefined negative handles
*/
/*
 * Resolve a datatype handle to its Datatype pointer.
 * Negative handles index the predefined simple types table;
 * everything else goes through the generic handle table
 * (mpi_handle_to_ptr in handles.c).
 */
Datatype* mpi_handle_to_datatype(int handle)
{
  return (handle < 0)
    ? (Datatype*) &simpletypes[-1-handle]
    : (Datatype*) mpi_handle_to_ptr(handle);
}
/*
* Calculate the epsilon value of typemap
* using the largest element in the typemap
*/
/*
 * Epsilon used when padding a type's extent: the size of the largest
 * simple type appearing in the typemap.
 */
int calc_padding(Datatype datatype)
{
  long largest = INT_MIN;
  long type_len;
  int  i;

  //find the largest datatype size. The epsilon padding is (probably) based on this.
  for (i = 0; i < datatype->count; i++)
  {
    type_len = Simpletype_length(datatype->pairs[i].type);

    if (type_len > largest)
      largest = type_len;
  }

  return largest;
}
/* Retrieve size of any simple type
* C sizes use sizeof the literal type
* they represent. Fortran types are those
* as defined in type.h
*/
/* Retrieve size in bytes of any simple type.
 * C sizes use sizeof the literal type they represent; Fortran sizes are
 * the FSIZE_* constants defined in type.h.
 * Terminates the program on an unknown type code.
 */
int Simpletype_length(Simpletype t)
{
  switch(t)
  {
    case SIMPLE_CHAR:        return sizeof(char);
    case SIMPLE_SHORT:       return sizeof(short);
    case SIMPLE_INT:         return sizeof(int);
    case SIMPLE_LONG:        return sizeof(long);
    case SIMPLE_UCHAR:       return sizeof(unsigned char);
    case SIMPLE_USHORT:      return sizeof(unsigned short);
    case SIMPLE_UINT:        return sizeof(unsigned int);
    case SIMPLE_ULONG:       return sizeof(unsigned long);
    case SIMPLE_FLOAT:       return sizeof(float);
    case SIMPLE_DOUBLE:      return sizeof(double);
    case SIMPLE_LDOUBLE:     return sizeof(long double);
    case SIMPLE_BYTE:        return sizeof(char);
    case SIMPLE_FINTEGER:    return FSIZE_INTEGER;
    case SIMPLE_FREAL:       return FSIZE_REAL;
    case SIMPLE_FDPRECISION: return FSIZE_DPRECISION;
    case SIMPLE_FCOMPLEX:    return FSIZE_COMPLEX;
    case SIMPLE_FDCOMPLEX:   return FSIZE_DCOMPLEX;
    case SIMPLE_FLOGICAL:    return FSIZE_LOGICAL;
    case SIMPLE_FCHARACTER:  return FSIZE_CHARACTER;
    case SIMPLE_FINTEGER1:   return 1;
    case SIMPLE_FINTEGER2:   return 2;
    case SIMPLE_FINTEGER4:   return 4;
    case SIMPLE_FINTEGER8:   return 8;
    case SIMPLE_FREAL4:      return 4;
    case SIMPLE_FREAL8:      return 8;
    case SIMPLE_FREAL16:     return 16;
    case SIMPLE_FCOMPLEX8:   return 8;
    case SIMPLE_FCOMPLEX16:  return 16;
    case SIMPLE_FCOMPLEX32:  return 32;
    case SIMPLE_LONGLONG:    return sizeof(long long);
    case SIMPLE_ULONGLONG:   return sizeof(unsigned long long);
    case SIMPLE_OFFSET:      return sizeof(MPI_Offset);
    default:
      printf("Invalid simple type\n");
      exit(1);
  }
}
/*
* calculates the lower bound of a datatype using typemap
* (This gives no regard to MPI_LB, but rather uses only displacements)
*/
/*
 * calculates the lower bound of a datatype using typemap
 * (This gives no regard to MPI_LB, but rather uses only displacements)
 *
 * Fix: the running minimum is now a long initialized with LONG_MAX,
 * matching the MPI_Aint displacements and the return type. The original
 * accumulated it in an int (init INT_MAX), truncating any displacement
 * outside the int range.
 */
long calc_lb(Datatype type)
{
  int i;
  long min_disp = LONG_MAX;
  typepair * tp;

  for(i = 0; i < type->count; i++)
  {
    tp = type->pairs + i;

    if (tp->disp < min_disp)
      min_disp = tp->disp;
  }

  return min_disp;
}
/*
* Calculate upper bound using typemap
* (Gives no regard to MPI_UB, just calculates
* highest displacement+size of its respective data type)
*/
/*
 * Calculate upper bound using typemap
 * (Gives no regard to MPI_UB, just calculates
 * highest displacement+size of its respective data type)
 *
 * Fix: the sentinel is LONG_MIN, matching the long accumulator;
 * the original initialized a long with INT_MIN.
 */
long calc_ub(Datatype type)
{
  int i;
  long max_disp = LONG_MIN;
  long candidate;
  typepair * tp;

  for(i = 0; i < type->count; i++)
  {
    tp = type->pairs + i;
    candidate = tp->disp + Simpletype_length(tp->type);

    if (candidate > max_disp)
      max_disp = candidate;
  }

  return max_disp;
}
/*******************************************************/
/* MPI_Type_struct is the most general type constructor that
* does the common work other constructors.
* All other type constructors call this function.
*/
/* Fortran binding for MPI_Type_struct: dereferences the by-reference
 * Fortran scalars and stores the resulting status in *ierror. */
FC_FUNC( mpi_type_struct, MPI_TYPE_STRUCT )
(int * count, int * blocklens, long * displacements,
int *oldtypes_ptr, int *newtype, int *ierror)
{
*ierror=MPI_Type_struct(*count, blocklens, displacements,
oldtypes_ptr, newtype);
}
/* Public function, wrapper for Type_struct that translates handle to
* pointer (see NOTES at top of file)
*/
/* Public function, wrapper for Type_struct that translates each old-type
 * handle to a pointer and allocates a handle for the result
 * (see NOTES at top of file).
 */
int MPI_Type_struct(int count, int * blocklens, MPI_Aint * displacements,
                    MPI_Datatype *oldtypes, MPI_Datatype *newtype)
{
  int i;
  Datatype old_ptrs[count];
  Datatype * new_ptr;

  for (i = 0; i < count; i++)
    old_ptrs[i] = *(Datatype*) mpi_handle_to_datatype(oldtypes[i]);

  mpi_alloc_handle(newtype, (void**) &new_ptr);

  return Type_struct(count, blocklens, displacements, old_ptrs, new_ptr);
}
/*
 * Type_struct: core constructor shared by every derived-datatype builder.
 * Flattens each (blocklens[i], displacements[i], oldtypes_ptr[i]) triple
 * into a single merged typemap of <simple type, displacement> pairs,
 * honoring MPI_LB/MPI_UB marker types and any bounds already overridden
 * on the constituent types.  The new type is returned, uncommitted,
 * through *newtype.  Always returns MPI_SUCCESS.
 */
int Type_struct(int count, int * blocklens, MPI_Aint * displacements,
Datatype *oldtypes_ptr, Datatype *newtype)
{
int i, j, k;
Datatype temp, temp2;
int newcount;
char override_lower = 0, //whether to override
override_upper = 0;
MPI_Aint new_lb = LONG_MAX,
new_ub = LONG_MIN,
clb, cub; //calculated lb and ub (NOTE(review): currently unused)
int simpletype_count = 0; //total additional blocks for malloc
MPI_Aint tmp_offset; //for contiguous blocks of type
MPI_Aint extent;
// find the total number of elements in the typemap we need to add.
for (i = 0; i < count; i++)
{
//check for MPI_UB or MPI_LB. These types are special
// cases and will be skipped over
temp2 = oldtypes_ptr[i];
if (temp2->pairs[0].type == SIMPLE_LOWER)
{
//found MPI_LB. This is a candidate for the actual lb
if (new_lb > displacements[i])
new_lb = displacements[i];
override_lower = 1;
}
else if (temp2->pairs[0].type == SIMPLE_UPPER)
{
//same as above, but ub
if (new_ub < displacements[i])
new_ub = displacements[i];
override_upper = 1;
}
else
{
//this is not MPI_LB or MPI_UB
//However it may still have overriding bounds
//Test for these and add its size to the typemap.
if (temp2->o_lb)
// this type's lb has been overridden.
// ONLY an overriding lb can be the actual lb now.
override_lower = 1;
if (temp2->o_ub)
//same as above, but ub
override_upper = 1;
simpletype_count += blocklens[i] * oldtypes_ptr[i]->count;
}
}
// Typestruct already embeds one typepair; allocate the remainder behind it.
temp = malloc(sizeof(Typestruct) +
((simpletype_count-1) * sizeof(typepair)));
temp->count = simpletype_count;
i = 0; //old type's index
newcount = 0; //new type's index
while (i < count)
{
tmp_offset = 0;
temp2 = oldtypes_ptr[i];
//test for previous MPI_LB or MPI_UB in one of the comprising types.
//If found, skip over.
if (!((temp2->pairs[0].type == SIMPLE_LOWER) ||
(temp2->pairs[0].type == SIMPLE_UPPER)))
{
for (j = 0; j < blocklens[i]; j++)
{
//Copy the old type's typemap and merge into the new type
//by a "flattening" process
Type_extent((Datatype) oldtypes_ptr[i], &extent);
tmp_offset = j * extent; // j-th replica starts one extent further along
if (temp2->o_lb && temp2->lb+displacements[i]+tmp_offset < new_lb)
new_lb = temp2->lb+displacements[i]+tmp_offset;
if (temp2->o_ub && temp2->ub+displacements[i]+tmp_offset > new_ub)
{
new_ub = temp2->ub+displacements[i]+tmp_offset;
}
for (k = 0; k < oldtypes_ptr[i]->count; k++)
{
// shift each copied pair by the block displacement + replica offset
Copy_type( (typepair*) oldtypes_ptr[i]->pairs+k,
(typepair*) (temp->pairs+newcount));
((typepair*) temp->pairs+(newcount))->disp +=
displacements[i] + tmp_offset;
newcount++;
}
}
}
i++;
}
//type is NOT committed
temp->committed = 0;
//assign upper and lower bounds here
if (override_lower)
{
//use lowest previous overridden lower bound
temp->o_lb = 1;
temp->lb = new_lb;
}
else
{
//use calculation
temp->lb = calc_lb(temp);
}
if (override_upper)
{
temp->o_ub = 1;
temp->ub = new_ub;
}
else
{
temp->ub = calc_ub(temp);
}
*newtype = temp;
temp = MPI_DATATYPE_NULL;
return MPI_SUCCESS;
}
/*******************************************************/
/* MPI_Type_contiguous. Create count copies of a type.
* this creates arrays of the singleton arguments and use them to call
* MPI_Type_struct()
*/
/* Fortran binding for MPI_Type_contiguous. */
FC_FUNC( mpi_type_contiguous, MPI_TYPE_CONTIGUOUS )
(int *count, int *oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_contiguous(*count, *oldtype, newtype);
}
/* Public wrapper: translate the old handle to a pointer, allocate a
 * handle for the result, and delegate to Type_contiguous (see NOTES at
 * top of file).  The unused local `ret` has been removed. */
int MPI_Type_contiguous(int count, MPI_Datatype old, MPI_Datatype * new)
{
  Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(old);
  Datatype * new_ptr;

  mpi_alloc_handle(new, (void**) &new_ptr);

  return Type_contiguous(count, old_ptr, new_ptr);
}
/* Type_contiguous: `count` adjacent copies of oldtype, each strided by
 * the type's extent.  Builds singleton arrays and defers to Type_struct. */
int Type_contiguous(int count, Datatype oldtype, Datatype *newtype)
{
  int i;
  int lens[count];
  Datatype types[count];
  MPI_Aint disps[count];
  MPI_Aint extent;

  //each copy is strided by the extent of the datatype.
  Type_extent(oldtype, &extent);

  for (i = 0; i < count; i++)
  {
    lens[i]  = 1;
    disps[i] = i * extent;
    types[i] = oldtype;
  }

  return Type_struct(count, lens, disps, types, newtype);
}
/*************************/
/* Type_vector
*/
/* Fortran binding for MPI_Type_vector. */
FC_FUNC( mpi_type_vector, MPI_TYPE_VECTOR )
(int * count, int * blocklen, int * stride,
int * oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_vector(*count, *blocklen, *stride, *oldtype, newtype);
}
/* Public wrapper: translate the handle, allocate the result handle and
 * delegate to Type_vector (see NOTES at top of file). */
int MPI_Type_vector(int count, int blocklen, int stride,
                    MPI_Datatype oldtype, MPI_Datatype * newtype)
{
  Datatype source = *(Datatype*) mpi_handle_to_datatype(oldtype);
  Datatype * result;

  mpi_alloc_handle(newtype, (void**) &result);

  return Type_vector(count, blocklen, stride, source, result);
}
/* Type_vector: `count` blocks of `blocklen` elements where the stride
 * between block starts is measured in elements of oldtype.  Converts the
 * element stride into a byte stride and defers to Type_hvector. */
int Type_vector(int count, int blocklen, int stride,
                Datatype oldtype, Datatype *newtype)
{
  MPI_Aint extent;
  MPI_Aint byte_stride;

  Type_extent(oldtype, &extent);

  byte_stride = stride * extent;

  return Type_hvector(count, blocklen, byte_stride, oldtype, newtype);
}
/*******************************************************/
/* Fortran binding for MPI_Type_hvector (byte stride, passed as a
 * Fortran integer address-sized value). */
FC_FUNC( mpi_type_hvector, MPI_TYPE_HVECTOR )
(int * count, long * blocklen, long * stride,
int * oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_hvector(*count, *blocklen, *stride, *oldtype, newtype);
}
/* Public wrapper: translate the handle, allocate the result handle and
 * delegate to Type_hvector (see NOTES at top of file). */
int MPI_Type_hvector(int count, int blocklen, MPI_Aint stride,
                     MPI_Datatype oldtype, MPI_Datatype * newtype)
{
  Datatype source = *(Datatype*) mpi_handle_to_datatype(oldtype);
  Datatype * result;

  mpi_alloc_handle(newtype, (void**) &result);

  return Type_hvector(count, blocklen, stride, source, result);
}
/* Fortran binding for MPI_Type_create_hvector (MPI-2 name for the
 * byte-strided vector constructor). */
FC_FUNC( mpi_type_create_hvector, MPI_TYPE_CREATE_HVECTOR )
(int * count, long * blocklen, long * stride,
int * oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_create_hvector(*count, *blocklen, *stride, *oldtype, newtype);
}
/* Public wrapper, identical in behavior to MPI_Type_hvector: translate
 * the handle, allocate the result handle, delegate to Type_hvector. */
int MPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride,
                            MPI_Datatype oldtype, MPI_Datatype * newtype)
{
  Datatype source = *(Datatype*) mpi_handle_to_datatype(oldtype);
  Datatype * result;

  mpi_alloc_handle(newtype, (void**) &result);

  return Type_hvector(count, blocklen, stride, source, result);
}
/* Type_hvector: like Type_vector but the stride between block starts is
 * given directly in bytes.  Expands into per-block arrays and defers to
 * Type_struct.
 *
 * Fix: the original computed the old type's extent here and never used
 * it (offsets come straight from the byte stride); the dead call has
 * been removed. */
int Type_hvector(int count, int blocklen, MPI_Aint stride,
                 Datatype oldtype, Datatype *newtype)
{
  int i;
  int blocklengths[count];
  Datatype oldtypes[count];
  MPI_Aint offsets[count];

  for (i = 0; i < count; i++)
  {
    blocklengths[i] = blocklen;
    offsets[i] = stride * i;
    oldtypes[i] = oldtype;
  }

  return Type_struct(count, blocklengths, offsets, oldtypes, newtype);
}
/*******************************************************/
/* Fortran binding for MPI_Type_indexed. */
FC_FUNC( mpi_type_indexed, MPI_TYPE_INDEXED )
(int * count, int * blocklens, int * displacements,
int * oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_indexed(*count, blocklens, displacements, *oldtype, newtype);
}
/* Public wrapper: translate the handle, allocate the result handle and
 * delegate to Type_indexed (see NOTES at top of file). */
int MPI_Type_indexed(int count, int *blocklens, int *displacements,
                     MPI_Datatype oldtype, MPI_Datatype * newtype)
{
  Datatype source = *(Datatype*) mpi_handle_to_datatype(oldtype);
  Datatype * result;

  mpi_alloc_handle(newtype, (void**) &result);

  return Type_indexed(count, blocklens, displacements, source, result);
}
/* Type_indexed: element-count displacements are scaled by the type's
 * extent into byte displacements, then handed to Type_hindexed.
 *
 * Fix: Type_extent(oldtype, ...) is loop-invariant; the original
 * recomputed it on every iteration. */
int Type_indexed(int count, int *blocklens, int *displacements,
                 Datatype oldtype, Datatype *newtype)
{
  int i;
  MPI_Aint extent;
  MPI_Aint bdisps[count];

  Type_extent(oldtype, &extent);

  for (i = 0; i < count; i++)
    bdisps[i] = displacements[i] * extent;

  return Type_hindexed(count, blocklens, bdisps, oldtype, newtype);
}
/*******************************************************/
/* Fortran binding for MPI_Type_create_indexed_block. */
FC_FUNC( mpi_type_create_indexed_block, MPI_TYPE_CREATE_INDEXED_BLOCK )
(int * count, int * blocklen, int * displacements,
int * oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_create_indexed_block(*count, *blocklen, displacements,
*oldtype, newtype);
}
/* Public wrapper: translate the handle, allocate the result handle and
 * delegate to Type_create_indexed_block.  The unused local `ret` has
 * been removed. */
int MPI_Type_create_indexed_block(int count, int blocklen, int *displacements,
                                  MPI_Datatype oldtype, MPI_Datatype * newtype)
{
  Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype);
  Datatype * new_ptr;

  mpi_alloc_handle(newtype, (void**) &new_ptr);

  return Type_create_indexed_block(count, blocklen, displacements, old_ptr, new_ptr);
}
/* All blocks share one length: expand it into an array and defer to
 * Type_indexed. */
int Type_create_indexed_block(int count, int blocklen, int *displacements,
                              Datatype oldtype, Datatype *newtype)
{
  int i;
  int lens[count];

  for (i = 0; i < count; i++)
    lens[i] = blocklen;

  return Type_indexed(count, lens, displacements, oldtype, newtype);
}
/*******************************************************/
/* Fortran binding for MPI_Type_hindexed (byte displacements). */
FC_FUNC( mpi_type_hindexed, MPI_TYPE_HINDEXED )
(int * count, int * blocklens, MPI_Aint * displacements,
int * oldtype, int * newtype, int * ierr)
{
*ierr = MPI_Type_hindexed(*count, blocklens, displacements,
*oldtype, newtype);
}
/* Public wrapper: translate the handle, allocate the result handle and
 * delegate to Type_hindexed (see NOTES at top of file). */
int MPI_Type_hindexed(int count, int *blocklens, MPI_Aint * disps,
                      MPI_Datatype oldtype, MPI_Datatype * newtype)
{
  Datatype source = *(Datatype*) mpi_handle_to_datatype(oldtype);
  Datatype * result;

  mpi_alloc_handle(newtype, (void**) &result);

  return Type_hindexed(count, blocklens, disps, source, result);
}
/* Byte-displacement indexed constructor: every block uses the same
 * oldtype; expand it into an array and defer to Type_struct. */
int Type_hindexed(int count, int *blocklens, MPI_Aint *displacements,
                  Datatype oldtype, Datatype *newtype)
{
  int i;
  Datatype types[count];

  for (i = 0; i < count; i++)
    types[i] = oldtype;

  return Type_struct(count, blocklens, displacements, types, newtype);
}
/*******************************************************/
/*
 * Type_dup: create a deep copy of oldtype in *newtype.
 *
 * Fixes vs. the original:
 *  - the copy is stored through *newtype (the original overwrote the
 *    local parameter, so the caller never received it)
 *  - allocation covers the full typemap, following the same layout used
 *    by Type_struct (the original allocated sizeof(oldtype), i.e. the
 *    size of a pointer)
 *  - the pair count is copied, and a status code is returned (the
 *    function is declared int but originally returned nothing)
 */
int Type_dup(Datatype oldtype, Datatype *newtype)
{
  int i;

  *newtype = malloc(sizeof(Typestruct) +
                    ((oldtype->count - 1) * sizeof(typepair)));

  (*newtype)->count     = oldtype->count;
  (*newtype)->committed = oldtype->committed;
  (*newtype)->lb        = oldtype->lb;
  (*newtype)->ub        = oldtype->ub;
  (*newtype)->o_lb      = oldtype->o_lb;
  (*newtype)->o_ub      = oldtype->o_ub;

  for (i = 0; i < oldtype->count; i++)
  {
    Copy_type((typepair*) oldtype->pairs + i,
              (typepair*) (*newtype)->pairs + i );
  }

  return MPI_SUCCESS;
}
/* copy_type: Creates a deep copy of source typepair into dest
*/
/* copy_type: Creates a deep copy of source typepair into dest.
 * Fix: the function is declared int but returned nothing; it now
 * returns MPI_SUCCESS. */
int Copy_type(typepair *source, typepair *dest)
{
  dest->type = source->type;
  dest->disp = source->disp;

  return MPI_SUCCESS;
}
/* MPI_Type_size: Returns the sum of the lengths of each simple
* type that makes up the data type argument
*/
/* Fortran binding for MPI_Type_size. */
FC_FUNC( mpi_type_size, MPI_TYPE_SIZE )(int * type, int * size, int * ierr)
{
*ierr=MPI_Type_size(*type, size);
}
/* Public wrapper: translate the handle and delegate to Type_size. */
int MPI_Type_size(MPI_Datatype type, int * size)
{
  return Type_size(*(Datatype*) mpi_handle_to_datatype(type), size);
}
/* Type_size: sum of the sizes of every simple type in the typemap
 * (the packed data size, ignoring gaps between elements). */
int Type_size(Datatype type, int * size)
{
  int i;
  int total = 0;

  for (i = 0; i < type->count; i++)
    total += Simpletype_length(type->pairs[i].type);

  *size = total;

  return MPI_SUCCESS;
}
/* MPI_Type_lb: Returns the lower bound (which may be overridden
* or calculated)
*/
/* Fortran binding for MPI_Type_lb. */
FC_FUNC( mpi_type_lb, MPI_TYPE_LB )(int * type, long * lb, int * ierr)
{
*ierr = MPI_Type_lb(*type, lb);
}
/* Public wrapper: translate the handle and delegate to Type_lb. */
int MPI_Type_lb(MPI_Datatype type, MPI_Aint * lb)
{
  return Type_lb(*(Datatype*) mpi_handle_to_datatype(type), lb);
}
/* Type_lb: report the type's lower bound (overridden or calculated).
 * Fix: the function is declared int but returned nothing; it now
 * returns MPI_SUCCESS. */
int Type_lb(Datatype type, MPI_Aint * lb)
{
  *lb = type->lb;

  return MPI_SUCCESS;
}
/* MPI_Type_ub: Return upper bound (which may be overridden
* or calculated
*/
/* Fortran binding for MPI_Type_ub. */
FC_FUNC( mpi_type_ub, MPI_TYPE_UB )(int * type, long * ub, int * ierr)
{
*ierr = MPI_Type_ub(*type, ub);
}
/* Public wrapper: translate the handle and delegate to Type_ub. */
int MPI_Type_ub(MPI_Datatype type, MPI_Aint * ub)
{
  return Type_ub(*(Datatype*) mpi_handle_to_datatype(type), ub);
}
/* Type_ub: report the type's upper bound (overridden or calculated).
 * Fix: the function is declared int but returned nothing; it now
 * returns MPI_SUCCESS. */
int Type_ub(Datatype type, MPI_Aint * ub)
{
  *ub = type->ub;

  return MPI_SUCCESS;
}
/* MPI_Get_address
* MPI_Address
* Return address of an object
*/
/* Fortran binding for MPI_Get_address.
 * Fix: the uppercase macro argument must be MPI_GET_ADDRESS; the
 * original passed MPI_ADDRESS, which collides with the mpi_address
 * binding below whenever FC_FUNC expands to the uppercase symbol. */
FC_FUNC( mpi_get_address, MPI_GET_ADDRESS )(void * loc, long * address, int * ierr)
{
  *ierr = FGet_address(loc, address);
}
/* Fortran binding for MPI_Address (deprecated MPI-1 name).
 * Fix: the original assigned *address here and then again inside
 * FGet_address; the redundant pre-assignment has been removed. */
FC_FUNC( mpi_address, MPI_ADDRESS )(void * loc, long * address, int * ierr)
{
  *ierr = FGet_address(loc, address);
}
/* Shared helper for the Fortran address bindings: stores the integer
 * value of pointer `loc` into *address and returns MPI_SUCCESS.
 * The ierr parameter is unused; callers store this function's return
 * value into their own ierr. */
int FGet_address(void * loc, long * address, int * ierr)
{
*address = (long) loc;
return MPI_SUCCESS;
}
/* Deprecated MPI-1 name; forwards to MPI_Get_address. */
int MPI_Address(void * loc, MPI_Aint * address)
{
return MPI_Get_address(loc, address);
}
/* Stores the address of `loc`, as an MPI_Aint, into *address. */
int MPI_Get_address(void * loc, MPI_Aint * address)
{
*address = (MPI_Aint) loc;
return MPI_SUCCESS;
}
/* MPI_Type_extent: return ub-lb, plus padding
*/
/* Fortran binding for MPI_Type_extent. */
FC_FUNC( mpi_type_extent, MPI_TYPE_EXTENT)(int * type, long * extent, int * ierr)
{
*ierr = MPI_Type_extent(*type, extent);
}
/* Public wrapper: translate the handle and delegate to Type_extent. */
int MPI_Type_extent(MPI_Datatype type, MPI_Aint * extent)
{
  return Type_extent(*(Datatype*) mpi_handle_to_datatype(type), extent);
}
/* Type_extent: extent = (ub - lb), rounded up to a multiple of the
 * epsilon from calc_padding() -- unless either bound was explicitly
 * overridden (MPI_LB/MPI_UB), in which case the raw difference is used.
 * NOTE(review): assumes a non-empty typemap -- with count == 0,
 * calc_padding() returns its sentinel and the modulo below would be
 * meaningless; confirm callers never pass an empty type. */
int Type_extent(Datatype datatype, MPI_Aint * extent)
{
if (!(datatype->o_lb || datatype->o_ub))
{
int epsilon = calc_padding(datatype);
//current epsilon value is based off of largest datatype size
int mod = (datatype->ub - datatype->lb) % epsilon;
if (mod == 0)
epsilon = 0;
else
epsilon = epsilon - mod; // pad up to the next multiple of epsilon
*extent = (datatype->ub - datatype->lb) + epsilon;
}
else
{
*extent = datatype->ub - datatype->lb;
}
return MPI_SUCCESS;
}
/* True_extent returns an extent based only on
* calculated upper and lower bound, regardless of any
* override using MPI_LB or MPI_UB
*/
/* True_extent returns an extent based only on
 * calculated upper and lower bound, regardless of any
 * override using MPI_LB or MPI_UB.
 * NOTE(review): like Type_extent, this assumes a non-empty typemap so
 * that calc_padding() yields a usable epsilon. */
int Type_get_true_extent(Datatype type, MPI_Aint * extent)
{
long epsilon = calc_padding(type);
long ub = calc_ub(type);
long lb = calc_lb(type);
//current epsilon value is based off of largest datatype size
long mod = (ub - lb) % epsilon;
if (mod == 0)
epsilon = 0;
else
epsilon = epsilon - mod; // pad up to the next multiple of epsilon
*extent = (ub - lb) + epsilon;
return MPI_SUCCESS;
}
/***********************/
/* Fortran binding for MPI_Type_commit. */
FC_FUNC( mpi_type_commit, MPI_TYPE_COMMIT )(int * datatype, int * ierr)
{
*ierr = MPI_Type_commit(datatype);
}
/* Mark the datatype identified by *datatype as committed (usable in
 * communication calls). */
int MPI_Type_commit(MPI_Datatype * datatype)
{
  Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(*datatype);

  type_ptr->committed = 1;

  return MPI_SUCCESS;
}
/**********************/
/* Fortran binding for MPI_Type_free. */
FC_FUNC( mpi_type_free, MPI_TYPE_FREE )(int * datatype, int * ierr)
{
*ierr = MPI_Type_free(datatype);
}
/* Release the datatype's storage and its handle-table entry.
 * Fix: the original null-assigned the local pointer after free() -- a
 * dead store, removed.
 * NOTE(review): the MPI standard also sets *datatype to
 * MPI_DATATYPE_NULL here; left unchanged to keep caller-visible
 * behavior identical. */
int MPI_Type_free(MPI_Datatype * datatype)
{
  Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(*datatype);

  free(type_ptr);
  mpi_free_handle(*datatype);

  return MPI_SUCCESS;
}
/* Print_typemap is used in test programs only when
* --enable-test-internal is enabled in configure.
*/
#ifdef TEST_INTERNAL
/* Fortran binding for the test-only typemap printer. */
FC_FUNC( print_typemap, PRINT_TYPEMAP )(int * type, int * ierr)
{
*ierr = print_typemap(*type);
}
/* Test-only public wrapper: translate the handle and pretty-print the
 * datatype's typemap. */
int print_typemap(MPI_Datatype type)
{
Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type);
return Pprint_typemap(type_ptr);
}
/* Pretty-print a datatype: pair count, bounds, extent, padding basis,
 * and each <type, size, offset> entry of the typemap.
 * Fix: lb/ub/extent/disp are MPI_Aint (a long type); they are now
 * printed with %ld. The original used %d for them, which is undefined
 * behavior for long arguments. */
int Pprint_typemap(Datatype type)
{
  int i;
  MPI_Aint extent;

  Type_extent(type, &extent);

  printf("Type with %d type pairs.\n>> lb is %ld\n>> ub is %ld\n>>"
         "Extent is %ld\n>>Epsilon based on %d\nTypemap: \n{",
         type->count, (long) type->lb, (long) type->ub, (long) extent,
         calc_padding(type));

  for (i = 0; i < type->count; i++)
  {
    printf("(t%d:%d, o%ld)", type->pairs[i].type,
           Simpletype_length(type->pairs[i].type),
           (long) type->pairs[i].disp);

    if (i != type->count-1)
      printf(", ");
  }

  printf("}\n");

  return MPI_SUCCESS;
}
#endif //TEST_INTERNAL
|
import { requestsReducer } from 'redux-saga-requests';
import { AppState } from '../states';
import { createSelector } from 'reselect';
import GitUser from '../../models/GitUser';
import { FetchedData } from '../../models/FetchedData';
import { FetchActions } from '../actions/fetchActions';
// Reducer
// Tracks the FETCH_GIT_USER request lifecycle (data / error / pending
// counter) via redux-saga-requests' generated reducer.
export const gitUserReducer = requestsReducer({ actionType: FetchActions.FETCH_GIT_USER });
// Selectors
const getGitUserState = (state:AppState) => state.gitUser;
// Memoized view of the request state shaped as FetchedData<GitUser>.
// "loading" = nothing (neither data nor error) has arrived yet, or a
// request is currently in flight (pending > 0).
export const getGitUserData = createSelector([getGitUserState], (f) => ({
data: f.data,
loading: (!f.data && !f.error) || f.pending > 0,
error: f.error,
} as FetchedData<GitUser>));
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.