Dataset columns:
  code         string  (length 22 to 1.05M)
  apis         list    (length 1 to 3.31k)
  extract_api  string  (length 75 to 3.25M)
import logging
import time
try:
    from urllib2 import HTTPError, urlopen
except ImportError:
    from urllib.request import urlopen
    from urllib.error import HTTPError
import json
import os
import subprocess

import click
import requests


@click.command(short_help='Classifies datapoints from json input file.')
def datapoint_classification_client():
    """Classifies datapoints from json input file.

    Takes one or more datapoints and returns the results from a TensorFlow"""
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    with open('data_input_small.json') as json_file:
        points = json.load(json_file)

    logger.info('Predicting...')
    start_time = time.time()

    method = 'POST'
    headers = {'Content-Type': 'application/json'}
    service = 'execute-api'
    url = 'https://z5ekmkl0t6.execute-api.eu-west-1.amazonaws.com/dev/inferqueue'
    region = 'eu-west-1'

    print('')
    print(url)
    print('')

    logger.info('Creating request...')
    auth = {}
    data = {}
    data['input'] = points['input']
    data['epoch'] = points['epoch']
    json_data = json.dumps(data)
    json_response = requests.request(method, url, auth=auth, data=json_data,
                                     headers=headers)
    end_time = time.time()

    # Extract text from JSON
    response = json.loads(json_response.text)
    flattened_response = [val for sublist in response for val in sublist]
    print(response)
    logger.info('End-to-end prediction of {} datapoints (including network '
                'transfers) took {} seconds.'.format(
                    len(points), end_time - start_time))


logger = logging.getLogger()
logger.setLevel(logging.INFO)

if __name__ == '__main__':
    datapoint_classification_client()
[ "json.load", "json.loads", "json.dumps", "time.time", "click.command", "requests.request", "logging.getLogger" ]
[((247, 318), 'click.command', 'click.command', ([], {'short_help': '"""Classifies datapoints from json input file."""'}), "(short_help='Classifies datapoints from json input file.')\n", (260, 318), False, 'import click\n'), ((1650, 1669), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1667, 1669), False, 'import logging\n'), ((506, 533), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (523, 533), False, 'import logging\n'), ((713, 724), 'time.time', 'time.time', ([], {}), '()\n', (722, 724), False, 'import time\n'), ((1138, 1154), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1148, 1154), False, 'import json\n'), ((1176, 1249), 'requests.request', 'requests.request', (['method', 'url'], {'auth': 'auth', 'data': 'json_data', 'headers': 'headers'}), '(method, url, auth=auth, data=json_data, headers=headers)\n', (1192, 1249), False, 'import requests\n'), ((1266, 1277), 'time.time', 'time.time', ([], {}), '()\n', (1275, 1277), False, 'import time\n'), ((1327, 1357), 'json.loads', 'json.loads', (['json_response.text'], {}), '(json_response.text)\n', (1337, 1357), False, 'import json\n'), ((641, 661), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (650, 661), False, 'import json\n')]
from analysis import plot_graphs_df
from training import preprocess_doc2vec_models, preprocess_tfidf_models

if __name__ == "__main__":
    csv_file_list = ['./stackoverflow/train.csv', './stackoverflow/valid.csv']

    print('Some useful data insights')
    plot_graphs_df(csv_file_list)

    print('Predictive Analysis')
    preprocess_tfidf_models(csv_file_list)
    preprocess_doc2vec_models(csv_file_list)
[ "training.preprocess_tfidf_models", "analysis.plot_graphs_df", "training.preprocess_doc2vec_models" ]
[((264, 293), 'analysis.plot_graphs_df', 'plot_graphs_df', (['csv_file_list'], {}), '(csv_file_list)\n', (278, 293), False, 'from analysis import plot_graphs_df\n'), ((332, 370), 'training.preprocess_tfidf_models', 'preprocess_tfidf_models', (['csv_file_list'], {}), '(csv_file_list)\n', (355, 370), False, 'from training import preprocess_doc2vec_models, preprocess_tfidf_models\n'), ((375, 415), 'training.preprocess_doc2vec_models', 'preprocess_doc2vec_models', (['csv_file_list'], {}), '(csv_file_list)\n', (400, 415), False, 'from training import preprocess_doc2vec_models, preprocess_tfidf_models\n')]
# **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **********************************************************************
from datetime import datetime
from enum import Enum

from bson import ObjectId

from shared.log import logger
from system.models.schemas_loader import SchemaLoader


def count(collection: Enum) -> int:
    """
    Returns the count of all documents in the collection.

    :param collection: Name of collection for query
    :return: Value of count
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.log("Invalid model")
    try:
        return query_set.count()
    except Exception as e:
        logger.error(repr(e))
        return -1


def find_all(collection: Enum, as_json: bool = False) -> str or dict:
    """
    Returns all documents in the collection, as JSON if applicable.

    :param collection: Name of collection for query
    :param as_json: Flag to signify if collection should be returned in JSON format
    :return: All documents in collection
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    try:
        result = query_set.all()
        # return each as a json if desired
        return list(map(lambda d: d.as_json() if as_json else d, result))
    except Exception as e:
        logger.error(repr(e))
        return None


def find_by_id(collection: Enum, obj_id: ObjectId, as_json: bool = False) -> str or dict:
    """
    Returns the document with the specified ID, as JSON if applicable.

    :param obj_id: ID of document to find
    :param collection: Name of collection for query
    :param as_json: Flag to signify if collection should be returned in JSON format
    :return: The matching document
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(obj_id, ObjectId):
        # check to make sure id is an ObjectId
        logger.error("Invalid ID")
    try:
        # get docs with {key: value} pair in collection
        docs = query_set.get({"_id": obj_id})
        return docs.as_json() if as_json else docs  # return doc as a json if desired
    except Exception as e:
        logger.error(e)
        return None


def find_by_key_value(collection: Enum, key: str, value, as_json: bool = False) -> str or dict:
    """
    Filter collection by {key: value} pair

    :param key: The key to search for in a collection
    :param value: The desired value of key in searching collection
    :param collection: Name of collection for query
    :param as_json: Flag to signify if collection should be returned in JSON format
    :return: Documents with specified {key: value} pair.
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(key, str):
        # check to make sure key is a str
        logger.error("Invalid key")
    try:
        # get docs with {key: value} pair in collection
        docs = query_set.raw({key: value})
        # return doc as a json if desired
        return list(map(lambda d: d.as_json() if as_json else d, docs))
    except Exception as e:
        logger.error(repr(e))
        return None


def find_by_multi_key_value(collection: Enum, filter_map: dict, as_json: bool = False) -> str or dict:
    """
    Filter collection by {key: value} pair

    :param filter_map: The filter dictionary used to search in a collection
    :param collection: Name of collection for query
    :param as_json: Flag to signify if collection should be returned in JSON format
    :return: Documents with specified {key: value} pair.
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(filter_map, dict):
        # check to make sure filter_map is a dict
        logger.error("Invalid filter")
    try:
        # get docs with {key: value} pair in collection
        docs = query_set.get(filter_map)
        return docs.as_json() if as_json else docs  # return doc as a json if desired
    except Exception as e:
        logger.error(repr(e))
        return None


def update_by_id(collection: Enum, obj_id: ObjectId, key: str, value):
    """
    Find the document specified by ID in the collection.
    Update its field key to value.

    :param obj_id: The ID of the document to find
    :param collection: The collection to insert data in
    :param key: The key to update in the document
    :param value: The value to update key with
    :return: None
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(obj_id, ObjectId):
        logger.error("Invalid ID")
    if not isinstance(key, str):
        logger.error("Invalid key")
    try:
        # will make more sense to the user to have both jobs updated at the same time
        curr_datetime = datetime.utcnow()
        # desired fields to update
        update_input = {key: value, "updated_datetime": curr_datetime}
        query_set.raw({"_id": obj_id}).update({"$set": update_input})
        return True
    except Exception as e:
        logger.error(repr(e))
        return False


def insert_one(collection: Enum, data: dict) -> ObjectId:
    """
    Insert a Python dictionary into a collection.
    Update time to reflect when the collection was last altered

    :param data: The data to insert in collection
    :param collection: The collection to insert data in
    :return: ObjectId of doc added
    """
    model = SchemaLoader.get_model(collection) if isinstance(
        collection, SchemaLoader) else None
    if not model:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(data, dict):
        logger.error("Invalid data input")
    try:
        new_doc = model.from_document(data)
        new_doc.created_datetime = datetime.utcnow()
        new_doc.started_datetime = None
        new_doc.updated_datetime = datetime.utcnow()
        new_doc.completed_datetime = None
        new_doc.save()
        return new_doc._id
    except Exception as e:
        logger.error(repr(e))
        return None


def insert_many(collection: Enum, data: list) -> list:
    """
    Insert a list of Python dictionaries into a collection.
    Update time to reflect when the collection was last altered

    :param data: The list of data to insert in collection
    :param collection: The collection to insert data in
    :return: list of ObjectIds added
    """
    model = SchemaLoader.get_model(collection) if isinstance(
        collection, SchemaLoader) else None
    if not model:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(data, list):
        logger.error("Invalid data input")
    try:
        obj_id_list = []
        for doc in data:
            new_doc = model.from_document(doc)
            new_doc.created_datetime = datetime.utcnow()
            new_doc.started_datetime = None
            new_doc.updated_datetime = datetime.utcnow()
            new_doc.completed_datetime = None
            new_doc.save()
            obj_id_list.append(new_doc._id)
        return obj_id_list
    except Exception as e:
        logger.error(repr(e))
        return None


def delete_by_id(collection: Enum, obj_id: ObjectId):
    """
    Delete document in collection that has specified id.
    Only works for single key filtering.

    :param obj_id: The id of the document to delete
    :param collection: Name of collection of interest
    :return: None
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(obj_id, ObjectId):
        # check to make sure id is an ObjectId
        logger.error("Invalid ID")
    try:
        # remove docs with {key: value} pair in collection
        query_set.raw({"_id": obj_id}).delete()
    except Exception as e:
        logger.error(repr(e))
        return None


def delete_by_key_value(collection: Enum, key: str, value: str):
    """
    Delete document in collection that has {key: value} pair.
    Only works for single key filtering.

    :param key: The key to search for in a collection
    :param value: The desired value of key to delete in collection
    :param collection: Name of collection of interest
    :return: None
    """
    query_set = SchemaLoader.get_queryset(collection) if isinstance(
        collection, SchemaLoader) else None
    if not query_set:
        # invalid model error log
        logger.error("Invalid model")
    if not isinstance(key, str):
        # check to make sure key is a str
        logger.error("Invalid key")
    if not isinstance(value, str):
        # check to make sure value is a str
        logger.error("Invalid value")
    try:
        # remove docs with {key: value} pair in collection
        query_set.raw({key: value}).delete()
    except Exception as e:
        logger.error(repr(e))
        return None
[ "system.models.schemas_loader.SchemaLoader.get_model", "shared.log.logger.error", "shared.log.logger.log", "datetime.datetime.utcnow", "system.models.schemas_loader.SchemaLoader.get_queryset" ]
[((1249, 1286), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (1274, 1286), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((1403, 1430), 'shared.log.logger.log', 'logger.log', (['"""Invalid model"""'], {}), "('Invalid model')\n", (1413, 1430), False, 'from shared.log import logger\n'), ((1898, 1935), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (1923, 1935), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((2052, 2081), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (2064, 2081), False, 'from shared.log import logger\n'), ((2730, 2767), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (2755, 2767), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((2885, 2914), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (2897, 2914), False, 'from shared.log import logger\n'), ((3004, 3030), 'shared.log.logger.error', 'logger.error', (['"""Invalid ID"""'], {}), "('Invalid ID')\n", (3016, 3030), False, 'from shared.log import logger\n'), ((3787, 3824), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (3812, 3824), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((3941, 3970), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (3953, 3970), False, 'from shared.log import logger\n'), ((4047, 4074), 'shared.log.logger.error', 'logger.error', (['"""Invalid key"""'], {}), "('Invalid key')\n", (4059, 4074), False, 'from shared.log import logger\n'), ((4824, 4861), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (4849, 4861), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((4978, 5007), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (4990, 5007), False, 'from shared.log import logger\n'), ((5092, 5122), 'shared.log.logger.error', 'logger.error', (['"""Invalid filter"""'], {}), "('Invalid filter')\n", (5104, 5122), False, 'from shared.log import logger\n'), ((5812, 5849), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (5837, 5849), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((5966, 5995), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (5978, 5995), False, 'from shared.log import logger\n'), ((6045, 6071), 'shared.log.logger.error', 'logger.error', (['"""Invalid ID"""'], {}), "('Invalid ID')\n", (6057, 6071), False, 'from shared.log import logger\n'), ((6113, 6140), 'shared.log.logger.error', 'logger.error', (['"""Invalid key"""'], {}), "('Invalid key')\n", (6125, 6140), False, 'from shared.log import logger\n'), ((6261, 6278), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6276, 6278), False, 'from datetime import datetime\n'), ((6897, 6931), 'system.models.schemas_loader.SchemaLoader.get_model', 'SchemaLoader.get_model', (['collection'], {}), '(collection)\n', (6919, 6931), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((7044, 7073), 
'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (7056, 7073), False, 'from shared.log import logger\n'), ((7117, 7151), 'shared.log.logger.error', 'logger.error', (['"""Invalid data input"""'], {}), "('Invalid data input')\n", (7129, 7151), False, 'from shared.log import logger\n'), ((7242, 7259), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7257, 7259), False, 'from datetime import datetime\n'), ((7335, 7352), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7350, 7352), False, 'from datetime import datetime\n'), ((7883, 7917), 'system.models.schemas_loader.SchemaLoader.get_model', 'SchemaLoader.get_model', (['collection'], {}), '(collection)\n', (7905, 7917), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((8030, 8059), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (8042, 8059), False, 'from shared.log import logger\n'), ((8103, 8137), 'shared.log.logger.error', 'logger.error', (['"""Invalid data input"""'], {}), "('Invalid data input')\n", (8115, 8137), False, 'from shared.log import logger\n'), ((8935, 8972), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (8960, 8972), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((9089, 9118), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (9101, 9118), False, 'from shared.log import logger\n'), ((9208, 9234), 'shared.log.logger.error', 'logger.error', (['"""Invalid ID"""'], {}), "('Invalid ID')\n", (9220, 9234), False, 'from shared.log import logger\n'), ((9824, 9861), 'system.models.schemas_loader.SchemaLoader.get_queryset', 'SchemaLoader.get_queryset', (['collection'], {}), '(collection)\n', (9849, 9861), False, 'from system.models.schemas_loader import SchemaLoader\n'), ((9978, 10007), 'shared.log.logger.error', 'logger.error', (['"""Invalid model"""'], {}), "('Invalid model')\n", (9990, 10007), False, 'from shared.log import logger\n'), ((10084, 10111), 'shared.log.logger.error', 'logger.error', (['"""Invalid key"""'], {}), "('Invalid key')\n", (10096, 10111), False, 'from shared.log import logger\n'), ((10192, 10221), 'shared.log.logger.error', 'logger.error', (['"""Invalid value"""'], {}), "('Invalid value')\n", (10204, 10221), False, 'from shared.log import logger\n'), ((3264, 3279), 'shared.log.logger.error', 'logger.error', (['e'], {}), '(e)\n', (3276, 3279), False, 'from shared.log import logger\n'), ((8285, 8302), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8300, 8302), False, 'from datetime import datetime\n'), ((8386, 8403), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8401, 8403), False, 'from datetime import datetime\n')]
import random

from rest_framework.settings import api_settings

from django.conf import settings
from django.contrib.gis.geos import Point
from django.shortcuts import get_object_or_404
from django.db.models import Prefetch

from rest_framework import mixins
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from rest_framework.decorators import action

from froide.foirequest.models import FoiRequest
from froide.foirequest.api_views import throttle_action

from .models import (Campaign, InformationObject, CampaignSubscription,
                     Questionaire, Question, Report, Answer)
from .serializers import InformationObjectSerializer
from .serializers import CampaignProviderRequestSerializer
from .geocode import run_geocode
from .providers.base import BaseProvider
from .filters import (
    CustomSearchFilter, StatusFilter, CategoryFilter, FeaturedFilter,
    RandomOrderFilter
)


def get_lat_lng(request):
    try:
        lat = float(request.GET.get('lat'))
    except (ValueError, TypeError):
        raise ValueError
    try:
        lng = float(request.GET.get('lng'))
    except (ValueError, TypeError):
        raise ValueError
    return lat, lng


class AddLocationPermission(permissions.BasePermission):
    def has_permission(self, request, view):
        campaign_id = request.data.get('campaign')
        if campaign_id is None:
            return False
        campaign = Campaign.objects.get(id=campaign_id)
        return campaign.get_provider().CREATE_ALLOWED


class AddLocationThrottle(UserRateThrottle):
    scope = 'campaign-createlocation'
    THROTTLE_RATES = {
        scope: '3/day',
    }


class InformationObjectViewSet(mixins.CreateModelMixin,
                               mixins.RetrieveModelMixin,
                               mixins.ListModelMixin,
                               viewsets.GenericViewSet):
    RANDOM_COUNT = 3
    SEARCH_COUNT = 10
    serializer_class = InformationObjectSerializer
    pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
    filter_backends = [
        CustomSearchFilter, StatusFilter, CategoryFilter, FeaturedFilter,
        RandomOrderFilter
    ]
    search_fields = ['translations__title', 'translations__subtitle']

    def get_serializer_context(self):
        context = super().get_serializer_context()
        lang = self.request.GET.get('language', settings.LANGUAGE_CODE)
        context.update({'language': lang})
        return context

    def get_permissions(self):
        if self.action == 'create':
            permission_classes = [AddLocationPermission]
        else:
            permission_classes = [permissions.AllowAny]
        return [permission() for permission in permission_classes]

    @throttle_action((AddLocationThrottle,))
    def create(self, request, *args, **kwargs):
        return super().create(request, *args, **kwargs)

    def perform_create(self, serializer):
        obj = serializer.save()
        obj.ident = 'custom_{}'.format(str(obj.id))
        point = self.get_geo(obj)
        obj.geo = point
        obj.save()

    def retrieve(self, request, *args, **kwargs):
        campaign = self.get_campaign()
        language = self.request.GET.get('language', settings.LANGUAGE_CODE)
        campaign.set_current_language(language)
        provider = campaign.get_provider()
        ident = kwargs.pop('pk')
        obj = provider.get_by_ident(ident)
        data = provider.get_provider_item_data(obj)
        data['publicbody'] = provider.get_publicbody(ident)
        data['publicbodies'] = provider.get_publicbodies(ident)
        data['makeRequestURL'] = provider.get_request_url(ident, language=language)
        data['userRequestCount'] = provider.get_user_request_count(
            request.user
        )
        serializer = CampaignProviderRequestSerializer(
            data, context={'request': request}
        )
        return Response(serializer.data)

    def get_campaign(self):
        campaign_id = self.request.GET.get('campaign')
        if self.request.user.is_staff:
            qs = Campaign.objects.all()
        else:
            qs = Campaign.objects.get_public()
        return get_object_or_404(qs, id=campaign_id)

    def get_queryset(self):
        campaign = self.get_campaign()
        iobjs = InformationObject.objects.filter(
            campaign=campaign
        )
        iobjs = iobjs.prefetch_related(
            Prefetch(
                'foirequests',
                queryset=FoiRequest.objects.order_by('-first_message')
            )
        )
        iobjs = iobjs.prefetch_related('campaign')
        iobjs = iobjs.prefetch_related('categories')
        return iobjs

    def get_geo(self, obj):
        if obj.address and not obj.geo:
            geo = run_geocode(obj.address)
            if geo:
                lat_lng = geo[0]
                return Point(lat_lng[1], lat_lng[0])

    @action(detail=False, methods=['post'])
    def report(self, request):
        questionaire_id = int(request.data.get('questionaire'))
        iobj_id = int(request.data.get('informationObject'))
        answers = request.data.get('answers')
        report_id = request.data.get('report')
        questionaire = Questionaire.objects.get(id=questionaire_id)
        information_object = InformationObject.objects.get(id=iobj_id)
        if report_id:
            report = Report.objects.get(id=report_id)
            report.answer_set.all().delete()
        else:
            report = Report.objects.create(
                questionaire=questionaire,
                informationsobject=information_object
            )
        for answer in answers:
            question_id = int(answer['questionId'])
            question = Question.objects.get(id=question_id)
            Answer.objects.create(
                text=answer['answer'],
                report=report,
                question=question
            )
        return Response({
            'report': report.id
        })

    @action(detail=False, methods=['post'])
    def subscribe(self, request):
        email = request.data.get('email')
        if request.user.is_authenticated:
            email = request.user.email
        campaign_id = request.data.get('campaign')
        subscribe = request.data.get('subscribe')
        if email and campaign_id:
            try:
                campaign = Campaign.objects.get(id=campaign_id)
                if subscribe:
                    obj, created = CampaignSubscription.objects.get_or_create(
                        campaign=campaign, email=email)
                    return Response({
                        'email': obj.email,
                        'campaign': obj.campaign.id
                    })
                else:
                    try:
                        obj = CampaignSubscription.objects.get(
                            campaign=campaign, email=email).delete()
                    except CampaignSubscription.DoesNotExist:
                        pass
            except Campaign.DoesNotExist:
                return Response({
                    'error': 'Campaign does not exist'
                })
        return Response({})

    @action(detail=False, methods=['get'])
    def random(self, request):
        campaign = self.get_campaign()
        provider = campaign.get_provider()
        filters = {
            'requested': False
        }
        data = provider.search(**filters)
        if data:
            random_data = random.choices(data, k=self.RANDOM_COUNT)
            return Response(random_data)
        return Response(data)

    @action(detail=False, methods=['get'])
    def search(self, request):
        campaign = self.get_campaign()
        provider = campaign.get_provider()
        filters = {
            'q': request.GET.get('q', '')
        }
        try:
            if 'featured' in request.GET:
                filters['featured'] = int(request.GET['featured'])
        except ValueError:
            pass
        try:
            if 'requested' in request.GET:
                filters['requested'] = int(request.GET['requested'])
        except ValueError:
            pass
        # TODO: geocode
        # location / coordinates
        # if location is not None:
        #     location_search = True
        #     point, formatted_address = geocode(location, address=False)
        try:
            lat, lng = get_lat_lng(request)
            filters.update({
                'coordinates': Point(lng, lat),
            })
        except ValueError:
            pass
        try:
            filters['zoom'] = int(request.GET.get('zoom'))
        except (ValueError, TypeError):
            pass
        try:
            filters['radius'] = int(request.GET.get('radius'))
        except (ValueError, TypeError):
            pass
        data = provider.search(**filters)
        if not type(provider) == BaseProvider:
            iobjs = BaseProvider(campaign).search(**filters)
            data = data + iobjs
        return Response(data)
[ "django.contrib.gis.geos.Point", "random.choices", "froide.foirequest.api_views.throttle_action", "django.shortcuts.get_object_or_404", "rest_framework.response.Response", "rest_framework.decorators.action", "froide.foirequest.models.FoiRequest.objects.order_by" ]
[((2859, 2898), 'froide.foirequest.api_views.throttle_action', 'throttle_action', (['(AddLocationThrottle,)'], {}), '((AddLocationThrottle,))\n', (2874, 2898), False, 'from froide.foirequest.api_views import throttle_action\n'), ((5087, 5125), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (5093, 5125), False, 'from rest_framework.decorators import action\n'), ((6178, 6216), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['post']"}), "(detail=False, methods=['post'])\n", (6184, 6216), False, 'from rest_framework.decorators import action\n'), ((7369, 7406), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['get']"}), "(detail=False, methods=['get'])\n", (7375, 7406), False, 'from rest_framework.decorators import action\n'), ((7788, 7825), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['get']"}), "(detail=False, methods=['get'])\n", (7794, 7825), False, 'from rest_framework.decorators import action\n'), ((4089, 4114), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (4097, 4114), False, 'from rest_framework.response import Response\n'), ((4354, 4391), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['qs'], {'id': 'campaign_id'}), '(qs, id=campaign_id)\n', (4371, 4391), False, 'from django.shortcuts import get_object_or_404\n'), ((6118, 6149), 'rest_framework.response.Response', 'Response', (["{'report': report.id}"], {}), "({'report': report.id})\n", (6126, 6149), False, 'from rest_framework.response import Response\n'), ((7350, 7362), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (7358, 7362), False, 'from rest_framework.response import Response\n'), ((7767, 7781), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (7775, 7781), False, 'from rest_framework.response import Response\n'), ((9209, 9223), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (9217, 9223), False, 'from rest_framework.response import Response\n'), ((7669, 7710), 'random.choices', 'random.choices', (['data'], {'k': 'self.RANDOM_COUNT'}), '(data, k=self.RANDOM_COUNT)\n', (7683, 7710), False, 'import random\n'), ((7730, 7751), 'rest_framework.response.Response', 'Response', (['random_data'], {}), '(random_data)\n', (7738, 7751), False, 'from rest_framework.response import Response\n'), ((5051, 5080), 'django.contrib.gis.geos.Point', 'Point', (['lat_lng[1]', 'lat_lng[0]'], {}), '(lat_lng[1], lat_lng[0])\n', (5056, 5080), False, 'from django.contrib.gis.geos import Point\n'), ((4668, 4713), 'froide.foirequest.models.FoiRequest.objects.order_by', 'FoiRequest.objects.order_by', (['"""-first_message"""'], {}), "('-first_message')\n", (4695, 4713), False, 'from froide.foirequest.models import FoiRequest\n'), ((6784, 6843), 'rest_framework.response.Response', 'Response', (["{'email': obj.email, 'campaign': obj.campaign.id}"], {}), "({'email': obj.email, 'campaign': obj.campaign.id})\n", (6792, 6843), False, 'from rest_framework.response import Response\n'), ((7250, 7296), 'rest_framework.response.Response', 'Response', (["{'error': 'Campaign does not exist'}"], {}), "({'error': 'Campaign does not exist'})\n", (7258, 7296), False, 'from rest_framework.response import Response\n'), ((8670, 8685), 'django.contrib.gis.geos.Point', 'Point', (['lng', 'lat'], {}), '(lng, lat)\n', (8675, 8685), False, 
'from django.contrib.gis.geos import Point\n')]
""" urlresolver XBMC Addon Copyright (C) 2011 t0mm0 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re from t0mm0.common.net import Net from urlresolver import common from urlresolver.plugnplay.interfaces import UrlResolver from urlresolver.plugnplay.interfaces import PluginSettings from urlresolver.plugnplay import Plugin class Mp4streamResolver(Plugin, UrlResolver, PluginSettings): implements = [UrlResolver, PluginSettings] name = "mp4stream" domains = ["mp4stream.com"] def __init__(self): p = self.get_setting('priority') or 100 self.priority = int(p) self.net = Net() def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) link = self.net.http_GET(web_url).content link = ''.join(link.splitlines()).replace('\t', '') videoUrl = 'nope' sPlayer = re.compile('show_player\((.+?)\)').findall(link) for sPlayer_param in sPlayer: param = re.compile('\'(.+?)\'').findall(sPlayer_param) if len(param) > 2 and 'hd_button' in param[2]: break match = re.compile('file\':(.+?),').findall(link)[0] if len(match) > 5: videoUrl = match.replace("'http:", 'http:').replace("'+cc+'", param[0]).replace("'+videourl+'", param[1]).replace("'+token", param[3]).strip() return videoUrl def get_url(self, host, media_id): return 'http://%s/embed/%s' % (host, media_id) def get_host_and_id(self, url): r = re.search('//(.+?)/embed/(.+)', url) if r: return r.groups() else: return False def valid_url(self, url, host): return 'mp4stream' in url or self.name in host
[ "re.search", "re.compile", "t0mm0.common.net.Net" ]
[((1239, 1244), 't0mm0.common.net.Net', 'Net', ([], {}), '()\n', (1242, 1244), False, 'from t0mm0.common.net import Net\n'), ((2141, 2177), 're.search', 're.search', (['"""//(.+?)/embed/(.+)"""', 'url'], {}), "('//(.+?)/embed/(.+)', url)\n", (2150, 2177), False, 'import re\n'), ((1493, 1529), 're.compile', 're.compile', (['"""show_player\\\\((.+?)\\\\)"""'], {}), "('show_player\\\\((.+?)\\\\)')\n", (1503, 1529), False, 'import re\n'), ((1600, 1621), 're.compile', 're.compile', (['"""\'(.+?)\'"""'], {}), '("\'(.+?)\'")\n', (1610, 1621), False, 'import re\n'), ((1745, 1771), 're.compile', 're.compile', (['"""file\':(.+?),"""'], {}), '("file\':(.+?),")\n', (1755, 1771), False, 'import re\n')]
from django.urls import reverse_lazy
from rest_framework import serializers

from .models import Institution, Person, Place, Event, Work

import re


class InstitutionSerializer(serializers.HyperlinkedModelSerializer):

    class Meta:
        model = Institution
        fields = ('id', 'name', 'uri_set', 'kind', 'collection', 'text')


class PersonSerializer(serializers.HyperlinkedModelSerializer):

    class Meta:
        model = Person
        fields = ('id', 'name', 'first_name', 'uri_set', 'profession',
                  'collection', 'text')


class PlaceSerializer(serializers.HyperlinkedModelSerializer):

    class Meta:
        model = Place
        fields = ('id', 'name', 'uri_set', 'collection', 'text', 'kind',
                  'lng', 'lat')


class EventSerializer(serializers.HyperlinkedModelSerializer):

    class Meta:
        model = Event
        fields = ('id', 'name', 'uri_set', 'collection', 'text')


class WorkSerializer(serializers.HyperlinkedModelSerializer):

    class Meta:
        model = Work
        fields = ('id', 'name', 'uri_set', 'collection', 'text')


class GeoJsonSerializer(serializers.BaseSerializer):

    def to_representation(self, obj):
        p_pk = self.context.get('p_pk')
        short = False
        url_r = reverse_lazy('entities:resolve_ambigue_place',
                             kwargs={'pk': str(p_pk), 'uri': obj['id'][7:]})
        long = False
        if 'http://www.w3.org/2003/01/geo/wgs84_pos#long' in obj.keys():
            long = float(obj['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'])
            lat = float(obj['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value'])
        elif 'long' in obj.keys():
            long = float(obj['long'][0]['value'])
            lat = float(obj['lat'][0]['value'])
            short = True
        if long:
            popup = ''
            for k in obj.keys():
                if k == 'id' or k == 'long' or k == 'lat':
                    continue
                if not short or k.startswith('http'):
                    title = k.split('#')[-1]
                else:
                    title = k
                popup += '<b>{}:</b> {}<br/>'.format(title, obj[k][0]['value'])
            r = {
                "geometry": {
                    "type": "Point",
                    "coordinates": [long, lat]
                },
                "type": "Feature",
                "properties": {
                    "popupContent": """{}
                    <b>Geonames:</b> <a href='{}'>Select this URI</a>""".format(popup, url_r)
                },
                "id": url_r
            }
            return r
        else:
            return ''


class NetJsonEdgeSerializer(serializers.BaseSerializer):

    def to_representation(self, obj):
        ent_obj = obj.__class__.__name__
        ob_match = re.match(r'([A-Z][a-z]+)([A-Z][a-z]+)$', ent_obj)
        rel_a = 'related_' + ob_match.group(1).lower()
        rel_b = 'related_' + ob_match.group(2).lower()
        if rel_a == rel_b:
            rel_a += 'A'
            rel_b += 'B'
        r = {
            'source': getattr(obj, rel_a).pk,
            'target': getattr(obj, rel_b).pk,
            'id': obj.pk,
            'type': 'arrow',
            'data': dict()
        }
        r['data']['start_date'] = obj.start_date_written
        r['data']['end_date'] = obj.end_date_written
        r['data']['relation_type'] = obj.relation_type.name
        return r


class NetJsonNodeSerializer(serializers.BaseSerializer):

    def to_representation(self, obj):
        ent_obj = obj.__class__.__name__
        ent_url = reverse_lazy('entities:generic_entities_edit_view',
                               kwargs={'pk': str(obj.pk), 'entity': ent_obj.lower()})
        tt = """<div class='arrow'></div>
        <div class='sigma-tooltip-header'>{}</div>
        <div class='sigma-tooltip-body'>
        <table>
        <tr><th>Type</th> <td>{}</td></tr>
        <tr><th>Entity</th> <td><a href='{}'>Link</a></td></tr>
        </table>
        <button class='small-button' onclick='expand_node("{}", {})'>expand</button>
        </div>""".format(str(obj), ent_obj, ent_url, ent_obj, obj.pk)
        r = {
            'type': ent_obj.lower(),
            'label': str(obj),
            'id': obj.pk,
            'tooltip': tt,
            'data': dict()
        }
        r['data']['uri'] = [x.uri for x in obj.uri_set.all()]
        r['data']['collections'] = [x.name for x in obj.collection.all()]
        r['data']['notes'] = obj.notes
        r['data']['references'] = obj.references
        r['data']['start_date'] = obj.start_date_written
        r['data']['end_date'] = obj.end_date_written
        if ent_obj.lower() != 'person':
            if obj.kind:
                r['data']['kind'] = obj.kind.name
        if ent_obj.lower() == 'place':
            r['data']['lat'] = obj.lat
            r['data']['lon'] = obj.lng
        if ent_obj.lower() == 'person':
            r['data']['profession'] = [x.name for x in obj.profession.all()]
            if obj.gender:
                r['data']['gender'] = obj.gender
        return r
[ "re.match" ]
[((2769, 2817), 're.match', 're.match', (['"""([A-Z][a-z]+)([A-Z][a-z]+)$"""', 'ent_obj'], {}), "('([A-Z][a-z]+)([A-Z][a-z]+)$', ent_obj)\n", (2777, 2817), False, 'import re\n')]
#!/usr/bin/env python
"""
:copyright: (c) 2015 by <NAME>
:license: MIT, see LICENSE for more details.
"""

import unittest
import urllib

from urlparse import ParseResult

import requests
import ninka


class TestEndpoint(unittest.TestCase):
    def runTest(self):
        me = 'http://127.0.0.1:9999'
        r = ninka.discoverMicropubEndpoints(me)

        assert 'micropub' in r

        micropubURL = None
        for url in r['micropub']:
            micropubURL = url
            break

        assert micropubURL is not None
        assert micropubURL.scheme == 'http'
        assert micropubURL.netloc == '127.0.0.1:9999'
        assert micropubURL.path == '/micropub'

        # url = ParseResult(micropubURL.scheme,
        #                   micropubURL.netloc,
        #                   micropubURL.path,
        #                   micropubURL.params,
        #                   urllib.urlencode({ 'me': me,
        #                                      'redirect_uri': redirect_uri,
        #                                      'client_id': client_id,
        #                                      'scope': 'post',
        #                                      'response_type': 'id'
        #                                    }),
        #                   micropubURL.fragment).geturl()
        # r = requests.get(url, verify=True)
        # assert r.status_code == 200

# POST /micropub HTTP/1.1
# Host: bear.im
# Accept: */*
# Authorization: Bearer 159d4823-de7d-4717-9e4b-401da86a413b
# Content-Length: 30
# Content-Type: application/x-www-form-urlencoded
#
# h=entry&content=test&slug=test
[ "ninka.discoverMicropubEndpoints" ]
[((313, 348), 'ninka.discoverMicropubEndpoints', 'ninka.discoverMicropubEndpoints', (['me'], {}), '(me)\n', (344, 348), False, 'import ninka\n')]
import unittest
from unittest import mock

from kafka.errors import NoBrokersAvailable

from ..producer import Producer


class ProducerTest(unittest.TestCase):

    @mock.patch("kafka_postgres.kafka_helper.producer.KafkaProducer")
    def test_failed_to_connect(self, mock_kafka_producer):
        mock_kafka_producer.side_effect = NoBrokersAvailable

        kf_producer = Producer("192.168.99.100:9092", "topic", 10)

        self.assertEqual(False, kf_producer.connect())
        self.assertEqual(False, kf_producer.connected)

    @mock.patch("kafka_postgres.kafka_helper.consumer.KafkaConsumer")
    def test_connected_successfully(self, mock_kafka_producer):
        mock_kafka_producer.return_value = mock.Mock()

        kf_producer = Producer("192.168.99.100:9092", "topic", 10)

        self.assertEqual(True, kf_producer.connect())
        self.assertEqual(True, kf_producer.connected)


if __name__ == '__main__':
    unittest.main()
[ "unittest.mock.patch", "unittest.main", "unittest.mock.Mock" ]
[((167, 231), 'unittest.mock.patch', 'mock.patch', (['"""kafka_postgres.kafka_helper.producer.KafkaProducer"""'], {}), "('kafka_postgres.kafka_helper.producer.KafkaProducer')\n", (177, 231), False, 'from unittest import mock\n'), ((535, 599), 'unittest.mock.patch', 'mock.patch', (['"""kafka_postgres.kafka_helper.consumer.KafkaConsumer"""'], {}), "('kafka_postgres.kafka_helper.consumer.KafkaConsumer')\n", (545, 599), False, 'from unittest import mock\n'), ((929, 944), 'unittest.main', 'unittest.main', ([], {}), '()\n', (942, 944), False, 'import unittest\n'), ((707, 718), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (716, 718), False, 'from unittest import mock\n')]
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse


def init_args():
    parser = argparse.ArgumentParser('')
    parser.add_argument('-s', '--src_ann', type=str, default='annotation.txt',
                        help='path to original annotation ')
    parser.add_argument('-o', '--dst_ann', type=str, default='processed_annotation.txt',
                        help='path to filtered annotation')
    return parser.parse_args()


def is_valid_char(ch):
    ch_ord = ord(ch)
    ord_0 = ord('0')
    ord_9 = ord('9')
    ord_a = ord('a')
    ord_z = ord('z')
    if (ch_ord >= ord_0 and ch_ord <= ord_9) or (ch_ord >= ord_a and ch_ord <= ord_z):
        return True
    else:
        return False


def get_abnormal_list(ann_list):
    abn_list = []
    for ann in ann_list:
        label = ann.split(',')[1]
        label = label.strip().lower()
        if len(label) < 3:
            abn_list.append(ann)
            continue
        for l in label:
            flag = is_valid_char(l)
            if not flag:
                abn_list.append(ann)
                #print(ann)
                break
    print("number of abnormal annotation :", len(abn_list))
    return abn_list


def filter():
    args = init_args()
    ann_file = open(args.src_ann, 'r')
    annotation_list = [line.strip("\n") for line in ann_file.readlines()]
    ann_file.close()
    abn_list = get_abnormal_list(annotation_list)
    clean_list = [line for line in annotation_list if line not in abn_list]
    print("number of annotation after filtering :{}".format(len(clean_list)))
    output = args.dst_ann
    with open(output, 'w') as f:
        for line in clean_list:
            line = line + '\n'
            f.write(line)


if __name__ == "__main__":
    filter()
[ "argparse.ArgumentParser" ]
[((1331, 1358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['""""""'], {}), "('')\n", (1354, 1358), False, 'import argparse\n')]
from src.holdings_publish_service import *
from src.holding import Holding
from src.holding_schema import HoldingSchema
import src.sns_client
from decimal import Decimal
import json

holdings_url = 'https://www.xyz.com/bla?foo=bar'
topic_arn = 'arn:dummy-sns-topic'

holding1 = Holding(**{
    "name": "Commonwealth Bank of Australia",
    "symbol": "CBA",
    "sector": "Diversified Banks",
    "market_val_percent": Decimal('8.25647'),
    "market_value": Decimal('1041683340.81'),
    "number_of_shares": Decimal('14389879')
})

holding2 = Holding(**{
    "name": "BHP Group Ltd.",
    "symbol": "BHP",
    "sector": "Diversified Metals & Mining",
    "market_val_percent": Decimal('6.52946'),
    "market_value": Decimal('823793522.37'),
    "number_of_shares": Decimal('24066419')
})

holdings = [holding1, holding2]
message1 = HoldingSchema().dumps(holding1)
message2 = HoldingSchema().dumps(holding2)


def test_init_set_holdings_url_attribute(mocker):
    service = HoldingsPublishService(holdings_url, topic_arn)

    assert service.holdings_url == holdings_url


def test_init_set_topic_arn_attribute(mocker):
    service = HoldingsPublishService(holdings_url, topic_arn)

    assert service.topic_arn == topic_arn


def test_publish_retrieves_holdings_using_the_repository(mocker):
    mocked_get = mock_holdings_repo(mocker)

    service = HoldingsPublishService(holdings_url, topic_arn)
    service.publish()

    mocked_get.assert_called_once_with(holdings_url)


def test_publish_creates_one_sns_client(mocker):
    mock_holdings_repo(mocker)
    mocked_client_class, _ = mock_sns_topic(mocker)

    service = HoldingsPublishService(holdings_url, topic_arn)
    service.publish()

    mocked_client_class.assert_called_once_with(topic_arn)


def test_publish_sets_up_the_sns_client(mocker):
    mock_holdings_repo(mocker)
    _, mocked_client = mock_sns_topic(mocker)

    service = HoldingsPublishService(holdings_url, topic_arn)
    service.publish()

    mocked_client.__enter__.assert_called_once()


def test_publish_publishes_the_retrieved_holdings_to_sns(mocker):
    mock_holdings_repo(mocker, holdings=holdings)
    _, mocked_client = mock_sns_topic(mocker)

    service = HoldingsPublishService(holdings_url, topic_arn)
    service.publish()

    mocked_client.publish.assert_any_call(message1)
    mocked_client.publish.assert_any_call(message2)
    assert mocked_client.publish.call_count == 2


def mock_holdings_repo(mocker, holdings=[]):
    return mocker.patch(
        'src.holdings_repository.get',
        autospec=True,
        return_value=holdings
    )


def mock_sns_topic(mocker):
    mocked_client_class = mocker.patch('src.sns_client.SnsClient', autospec=True)
    mocked_client = mocked_client_class.return_value
    mocked_client.__enter__.return_value = mocked_client
    return (mocked_client_class, mocked_client)
[ "src.holding_schema.HoldingSchema", "decimal.Decimal" ]
[((830, 845), 'src.holding_schema.HoldingSchema', 'HoldingSchema', ([], {}), '()\n', (843, 845), False, 'from src.holding_schema import HoldingSchema\n'), ((873, 888), 'src.holding_schema.HoldingSchema', 'HoldingSchema', ([], {}), '()\n', (886, 888), False, 'from src.holding_schema import HoldingSchema\n'), ((417, 435), 'decimal.Decimal', 'Decimal', (['"""8.25647"""'], {}), "('8.25647')\n", (424, 435), False, 'from decimal import Decimal\n'), ((457, 481), 'decimal.Decimal', 'Decimal', (['"""1041683340.81"""'], {}), "('1041683340.81')\n", (464, 481), False, 'from decimal import Decimal\n'), ((507, 526), 'decimal.Decimal', 'Decimal', (['"""14389879"""'], {}), "('14389879')\n", (514, 526), False, 'from decimal import Decimal\n'), ((675, 693), 'decimal.Decimal', 'Decimal', (['"""6.52946"""'], {}), "('6.52946')\n", (682, 693), False, 'from decimal import Decimal\n'), ((715, 738), 'decimal.Decimal', 'Decimal', (['"""823793522.37"""'], {}), "('823793522.37')\n", (722, 738), False, 'from decimal import Decimal\n'), ((764, 783), 'decimal.Decimal', 'Decimal', (['"""24066419"""'], {}), "('24066419')\n", (771, 783), False, 'from decimal import Decimal\n')]
from django.urls import path

from . import views

app_name = 'twilioconfig'

urlpatterns = [
    path('receive/', views.receive, name='receive'),
    path('config/', views.configure, name='configure'),
    path('update/', views.updateNumbers, name='update_numbers'),
    path('obtain/', views.obtain_number, name='obtain_number'),
]
[ "django.urls.path" ]
[((97, 144), 'django.urls.path', 'path', (['"""receive/"""', 'views.receive'], {'name': '"""receive"""'}), "('receive/', views.receive, name='receive')\n", (101, 144), False, 'from django.urls import path\n'), ((150, 200), 'django.urls.path', 'path', (['"""config/"""', 'views.configure'], {'name': '"""configure"""'}), "('config/', views.configure, name='configure')\n", (154, 200), False, 'from django.urls import path\n'), ((206, 265), 'django.urls.path', 'path', (['"""update/"""', 'views.updateNumbers'], {'name': '"""update_numbers"""'}), "('update/', views.updateNumbers, name='update_numbers')\n", (210, 265), False, 'from django.urls import path\n'), ((271, 329), 'django.urls.path', 'path', (['"""obtain/"""', 'views.obtain_number'], {'name': '"""obtain_number"""'}), "('obtain/', views.obtain_number, name='obtain_number')\n", (275, 329), False, 'from django.urls import path\n')]
from __future__ import print_function, division

import numpy as np

import imgaug as ia
from imgaug import augmenters as iaa


def main():
    quokka = ia.quokka(size=0.5)
    h, w = quokka.shape[0:2]
    heatmap = np.zeros((h, w), dtype=np.float32)
    heatmap[70:120, 90:150] = 0.1
    heatmap[30:70, 50:65] = 0.5
    heatmap[20:50, 55:85] = 1.0
    heatmap[120:140, 0:20] = 0.75
    heatmaps = ia.HeatmapsOnImage(heatmap[..., np.newaxis], quokka.shape)

    print("Affine...")
    aug = iaa.Affine(translate_px={"x": 20}, mode="constant", cval=128)
    quokka_aug = aug.augment_image(quokka)
    heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("Affine with mode=edge...")
    aug = iaa.Affine(translate_px={"x": 20}, mode="edge")
    quokka_aug = aug.augment_image(quokka)
    heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("PiecewiseAffine...")
    aug = iaa.PiecewiseAffine(scale=0.04)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("PerspectiveTransform...")
    aug = iaa.PerspectiveTransform(scale=0.04)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("ElasticTransformation alpha=3, sig=0.5...")
    aug = iaa.ElasticTransformation(alpha=3.0, sigma=0.5)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("ElasticTransformation alpha=10, sig=3...")
    aug = iaa.ElasticTransformation(alpha=10.0, sigma=3.0)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("CropAndPad mode=constant...")
    aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="constant", pad_cval=128)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("CropAndPad mode=constant + percent...")
    aug = iaa.CropAndPad(percent=(-0.05, 0.05, 0.1, -0.1), pad_mode="constant", pad_cval=128)
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("CropAndPad mode=edge...")
    aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="edge")
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )

    print("Scale...")
    aug = iaa.Scale(0.5, interpolation="nearest")
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(ia.draw_grid([heatmaps_drawn[0], heatmaps_aug_drawn[0]], cols=2))

    print("Alpha...")
    aug = iaa.Alpha(0.7, iaa.Affine(rotate=20))
    aug_det = aug.to_deterministic()
    quokka_aug = aug_det.augment_image(quokka)
    heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]
    heatmaps_drawn = heatmaps.draw_on_image(quokka)
    heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)

    ia.imshow(
        np.hstack([
            heatmaps_drawn[0],
            heatmaps_aug_drawn[0]
        ])
    )


if __name__ == "__main__":
    main()
[ "imgaug.HeatmapsOnImage", "imgaug.augmenters.ElasticTransformation", "imgaug.draw_grid", "imgaug.quokka", "numpy.zeros", "numpy.hstack", "imgaug.augmenters.Affine", "imgaug.augmenters.PerspectiveTransform", "imgaug.augmenters.CropAndPad", "imgaug.augmenters.Scale", "imgaug.augmenters.PiecewiseAffine" ]
[((153, 172), 'imgaug.quokka', 'ia.quokka', ([], {'size': '(0.5)'}), '(size=0.5)\n', (162, 172), True, 'import imgaug as ia\n'), ((216, 250), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.float32'}), '((h, w), dtype=np.float32)\n', (224, 250), True, 'import numpy as np\n'), ((399, 457), 'imgaug.HeatmapsOnImage', 'ia.HeatmapsOnImage', (['heatmap[..., np.newaxis]', 'quokka.shape'], {}), '(heatmap[..., np.newaxis], quokka.shape)\n', (417, 457), True, 'import imgaug as ia\n'), ((492, 553), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'translate_px': "{'x': 20}", 'mode': '"""constant"""', 'cval': '(128)'}), "(translate_px={'x': 20}, mode='constant', cval=128)\n", (502, 553), True, 'from imgaug import augmenters as iaa\n'), ((935, 982), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'translate_px': "{'x': 20}", 'mode': '"""edge"""'}), "(translate_px={'x': 20}, mode='edge')\n", (945, 982), True, 'from imgaug import augmenters as iaa\n'), ((1358, 1389), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {'scale': '(0.04)'}), '(scale=0.04)\n', (1377, 1389), True, 'from imgaug import augmenters as iaa\n'), ((1815, 1851), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.04)'}), '(scale=0.04)\n', (1839, 1851), True, 'from imgaug import augmenters as iaa\n'), ((2295, 2342), 'imgaug.augmenters.ElasticTransformation', 'iaa.ElasticTransformation', ([], {'alpha': '(3.0)', 'sigma': '(0.5)'}), '(alpha=3.0, sigma=0.5)\n', (2320, 2342), True, 'from imgaug import augmenters as iaa\n'), ((2785, 2833), 'imgaug.augmenters.ElasticTransformation', 'iaa.ElasticTransformation', ([], {'alpha': '(10.0)', 'sigma': '(3.0)'}), '(alpha=10.0, sigma=3.0)\n', (2810, 2833), True, 'from imgaug import augmenters as iaa\n'), ((3262, 3334), 'imgaug.augmenters.CropAndPad', 'iaa.CropAndPad', ([], {'px': '(-10, 10, 15, -15)', 'pad_mode': '"""constant"""', 'pad_cval': '(128)'}), "(px=(-10, 10, 15, -15), pad_mode='constant', pad_cval=128)\n", (3276, 3334), True, 'from imgaug import augmenters as iaa\n'), ((3773, 3860), 'imgaug.augmenters.CropAndPad', 'iaa.CropAndPad', ([], {'percent': '(-0.05, 0.05, 0.1, -0.1)', 'pad_mode': '"""constant"""', 'pad_cval': '(128)'}), "(percent=(-0.05, 0.05, 0.1, -0.1), pad_mode='constant',\n pad_cval=128)\n", (3787, 3860), True, 'from imgaug import augmenters as iaa\n'), ((4282, 4336), 'imgaug.augmenters.CropAndPad', 'iaa.CropAndPad', ([], {'px': '(-10, 10, 15, -15)', 'pad_mode': '"""edge"""'}), "(px=(-10, 10, 15, -15), pad_mode='edge')\n", (4296, 4336), True, 'from imgaug import augmenters as iaa\n'), ((4747, 4786), 'imgaug.augmenters.Scale', 'iaa.Scale', (['(0.5)'], {'interpolation': '"""nearest"""'}), "(0.5, interpolation='nearest')\n", (4756, 4786), True, 'from imgaug import augmenters as iaa\n'), ((792, 845), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (801, 845), True, 'import numpy as np\n'), ((1221, 1274), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (1230, 1274), True, 'import numpy as np\n'), ((1673, 1726), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (1682, 1726), True, 'import numpy as np\n'), ((2135, 2188), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (2144, 2188), True, 'import numpy as np\n'), ((2626, 2679), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (2635, 2679), True, 'import numpy as np\n'), ((3117, 3170), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (3126, 3170), True, 'import numpy as np\n'), ((3618, 3671), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (3627, 3671), True, 'import numpy as np\n'), ((4140, 4193), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (4149, 4193), True, 'import numpy as np\n'), ((4620, 4673), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (4629, 4673), True, 'import numpy as np\n'), ((5061, 5125), 'imgaug.draw_grid', 'ia.draw_grid', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {'cols': '(2)'}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]], cols=2)\n', (5073, 5125), True, 'import imgaug as ia\n'), ((5175, 5196), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': '(20)'}), '(rotate=20)\n', (5185, 5196), True, 'from imgaug import augmenters as iaa\n'), ((5481, 5534), 'numpy.hstack', 'np.hstack', (['[heatmaps_drawn[0], heatmaps_aug_drawn[0]]'], {}), '([heatmaps_drawn[0], heatmaps_aug_drawn[0]])\n', (5490, 5534), True, 'import numpy as np\n')]
import unittest
from mygrations.helpers.dotenv import dotenv
import io
import tempfile

class test_dotenv_get_contents(unittest.TestCase):

    dotenv = None
    test_string = 'test string'

    def setUp(self):
        self.dotenv = dotenv()

    # get_contents should accept a number of parameters.
    # It should accept a stringIO wrapper
    def test_get_contents_stringIO(self):
        self.assertEquals(self.test_string, self.dotenv.get_contents(io.StringIO(self.test_string)))

    # it should also accept an actual string
    def test_get_contents_string(self):
        self.assertEquals(self.test_string, self.dotenv.get_contents(self.test_string))

    # as well as a more general file pointer
    def test_get_contents_fp(self):
        fp = tempfile.TemporaryFile()
        fp.write(self.test_string.encode(encoding='UTF-8'))
        fp.seek(0)
        self.assertEquals(self.test_string, self.dotenv.get_contents(fp))
        fp.close()

    # it should also accept a filename
    def test_get_contents_filename(self):
        filename = '%s/unit_mygrations_dotenv' % tempfile.gettempdir()
        fp = open(filename, 'w')
        fp.write(self.test_string)
        fp.close()
        self.assertEquals(self.test_string, self.dotenv.get_contents(filename))
[ "mygrations.helpers.dotenv.dotenv", "tempfile.TemporaryFile", "io.StringIO", "tempfile.gettempdir" ]
[((235, 243), 'mygrations.helpers.dotenv.dotenv', 'dotenv', ([], {}), '()\n', (241, 243), False, 'from mygrations.helpers.dotenv import dotenv\n'), ((759, 783), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (781, 783), False, 'import tempfile\n'), ((1088, 1109), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1107, 1109), False, 'import tempfile\n'), ((456, 485), 'io.StringIO', 'io.StringIO', (['self.test_string'], {}), '(self.test_string)\n', (467, 485), False, 'import io\n')]
import zmq
from functools import wraps
import pickle
import joblib
import time

class TaskZMQ():
    def __init__(self):
        self.context = zmq.Context()
        self.taskdb = {}
        self.server = "tcp://127.0.0.1:5555"

    def create_socket(self, *arg):
        return self.context.socket(*arg)

    def task(self, fn):
        self.taskdb[fn.__name__] = fn
        @wraps(fn)
        def delayed(*arg,**kwarg):
            socket = self.create_socket(zmq.PUSH)
            socket.connect(self.server)
            task = {'task':fn.__name__, 'arg':arg, 'kwarg':kwarg}
            socket.send(pickle.dumps(task))
            socket.close()
            return task
        fn.delayed = delayed
        return fn

    def start_server(self):
        socket = self.create_socket(zmq.PULL)
        socket.bind(self.server)
        print('START SERVER @%s'%self.server)
        print('TASK LIST: ', ', '.join(self.taskdb.keys()))
        while True:
            try:
                message = socket.recv()
                msg = pickle.loads(message)
                a = time.perf_counter()
                print('Executing: ', msg['task'])
                res = self.taskdb[msg['task']](*msg['arg'],**msg['kwarg'])
                print('Finished {task} in {time:.6f}s return: {res}'.format(task=msg['task'], time = time.perf_counter()-a, res = res) )
            except KeyboardInterrupt:
                print("W: interrupt received, stopping…")
                break
        socket.close()
        self.context.term()
[ "pickle.loads", "time.perf_counter", "functools.wraps", "zmq.Context", "pickle.dumps" ]
[((149, 162), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (160, 162), False, 'import zmq\n'), ((394, 403), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (399, 403), False, 'from functools import wraps\n'), ((619, 637), 'pickle.dumps', 'pickle.dumps', (['task'], {}), '(task)\n', (631, 637), False, 'import pickle\n'), ((1054, 1075), 'pickle.loads', 'pickle.loads', (['message'], {}), '(message)\n', (1066, 1075), False, 'import pickle\n'), ((1097, 1116), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1114, 1116), False, 'import time\n'), ((1343, 1362), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1360, 1362), False, 'import time\n')]
from flask import Blueprint

# Create blueprint for movies module
mod_movies = Blueprint('movies', __name__)

from imdb_rest.movies.movies import *
[ "flask.Blueprint" ]
[((80, 109), 'flask.Blueprint', 'Blueprint', (['"""movies"""', '__name__'], {}), "('movies', __name__)\n", (89, 109), False, 'from flask import Blueprint\n')]
# -*- coding: utf-8 -*-

from cdn_static_website.settings.components import BASE_DIR, config

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Security
# https://docs.djangoproject.com/en/3.2/topics/security/

# https://docs.djangoproject.com/en/3.2/ref/settings/#secret-key
SECRET_KEY = config('DJANGO_SECRET_KEY')

# https://docs.djangoproject.com/en/3.2/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True

# https://docs.djangoproject.com/en/3.2/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True

# https://docs.djangoproject.com/en/3.2/ref/settings/#secure-content-type-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = True

# https://docs.djangoproject.com/en/3.2/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True

X_FRAME_OPTIONS = 'DENY'

# https://github.com/DmytroLitvinov/django-http-referrer-policy
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#Syntax
REFERRER_POLICY = 'same-origin'

# https://docs.djangoproject.com/en/3.2/ref/settings/#csrf-use-sessions
# Whether to store the CSRF token in the user’s session instead of in a cookie.
# It requires the use of django.contrib.sessions.
# Storing the CSRF token in a cookie (Django’s default) is safe,but storing it in the session
# is common practice in other web frameworks and therefore sometimes demanded by security auditors.
CSRF_USE_SESSIONS = True

SECURE_HSTS_SECONDS = 31536000  # the same as Caddy has
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True

SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SECURE_REDIRECT_EXEMPT = [
    # This is required for healthcheck to work:
    '^health/',
]

SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
[ "cdn_static_website.settings.components.config" ]
[((770, 797), 'cdn_static_website.settings.components.config', 'config', (['"""DJANGO_SECRET_KEY"""'], {}), "('DJANGO_SECRET_KEY')\n", (776, 797), False, 'from cdn_static_website.settings.components import BASE_DIR, config\n')]
from flask import render_template, redirect, url_for, flash, request
from flask_login import login_user, logout_user, current_user, login_required

from portunus import db
from portunus.auth import bp
from portunus.auth.forms import LoginForm, RegistrationForm, UpdateAccountForm
from portunus.models import User


@bp.route("/register", methods=["GET", "POST"])
def register():
    if current_user.is_authenticated:
        return redirect(url_for("main.index"))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data,)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash("Your account has been created. You are now able to login!", "success")
        return redirect(url_for("auth.login"))
    return render_template("auth/register.html", title="Register", form=form)


@bp.route("/login", methods=["GET", "POST"])
def login():
    if current_user.is_authenticated:
        return redirect(url_for("main.index"))
    form = LoginForm()
    if form.validate_on_submit():
        # Check if any user exists with the submitted email.
        # If such a user exists and the passwords match, log them in.
        user = User.query.filter_by(email=form.email.data).first()
        if user and user.check_password(form.password.data):
            login_user(user, remember=form.remember.data)
            next_page = request.args.get("next")
            return redirect(next_page if next_page else url_for("main.index"))
        else:
            flash("Login unsuccessful. Please check Email and Password.", "danger")
    return render_template("auth/login.html", title="Login", form=form)


@bp.route("/logout")
def logout():
    logout_user()
    return redirect(url_for("main.index"))


# TODO(mrdmnd) - make this more like "edit_profile" and have a route for /user/<username> that anyone can click on to see details about each person
# go to https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-vi-profile-page-and-avatars and scroll to Profile Editor
@bp.route("/update_account", methods=["GET", "POST"])
@login_required
def update_account():
    form = UpdateAccountForm()
    if form.validate_on_submit():
        if not current_user.check_password(form.current_password.data):
            flash("Your password was incorrect.", "danger")
            return render_template(
                "update_account.html", title="Update Account", form=form
            )
        # Update current username + email
        current_user.username = form.new_username.data
        current_user.email = form.new_email.data
        current_user.biography = form.new_biography.data
        # Set new password only if they fill the field in.
        if form.new_password.data:
            current_user.set_password(form.new_password.data)
        db.session.commit()
        flash("Your account has been updated.", "success")
        return redirect(url_for("auth.update_account"))
    elif request.method == "GET":
        form.new_username.data = current_user.username
        form.new_email.data = current_user.email
        form.new_biography.data = current_user.biography
    return render_template("update_account.html", title="Update Account", form=form)
[ "portunus.auth.forms.LoginForm", "portunus.db.session.add", "portunus.auth.bp.route", "portunus.auth.forms.UpdateAccountForm", "flask.flash", "flask.request.args.get", "flask_login.login_user", "flask_login.logout_user", "portunus.models.User", "flask.url_for", "portunus.db.session.commit", "flask.render_template", "flask_login.current_user.check_password", "flask_login.current_user.set_password", "portunus.auth.forms.RegistrationForm", "portunus.models.User.query.filter_by" ]
[((315, 361), 'portunus.auth.bp.route', 'bp.route', (['"""/register"""'], {'methods': "['GET', 'POST']"}), "('/register', methods=['GET', 'POST'])\n", (323, 361), False, 'from portunus.auth import bp\n'), ((918, 961), 'portunus.auth.bp.route', 'bp.route', (['"""/login"""'], {'methods': "['GET', 'POST']"}), "('/login', methods=['GET', 'POST'])\n", (926, 961), False, 'from portunus.auth import bp\n'), ((1735, 1754), 'portunus.auth.bp.route', 'bp.route', (['"""/logout"""'], {}), "('/logout')\n", (1743, 1754), False, 'from portunus.auth import bp\n'), ((2112, 2164), 'portunus.auth.bp.route', 'bp.route', (['"""/update_account"""'], {'methods': "['GET', 'POST']"}), "('/update_account', methods=['GET', 'POST'])\n", (2120, 2164), False, 'from portunus.auth import bp\n'), ((474, 492), 'portunus.auth.forms.RegistrationForm', 'RegistrationForm', ([], {}), '()\n', (490, 492), False, 'from portunus.auth.forms import LoginForm, RegistrationForm, UpdateAccountForm\n'), ((848, 914), 'flask.render_template', 'render_template', (['"""auth/register.html"""'], {'title': '"""Register"""', 'form': 'form'}), "('auth/register.html', title='Register', form=form)\n", (863, 914), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1071, 1082), 'portunus.auth.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (1080, 1082), False, 'from portunus.auth.forms import LoginForm, RegistrationForm, UpdateAccountForm\n'), ((1671, 1731), 'flask.render_template', 'render_template', (['"""auth/login.html"""'], {'title': '"""Login"""', 'form': 'form'}), "('auth/login.html', title='Login', form=form)\n", (1686, 1731), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1773, 1786), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (1784, 1786), False, 'from flask_login import login_user, logout_user, current_user, login_required\n'), ((2214, 2233), 'portunus.auth.forms.UpdateAccountForm', 'UpdateAccountForm', ([], {}), '()\n', (2231, 2233), False, 'from portunus.auth.forms import LoginForm, RegistrationForm, UpdateAccountForm\n'), ((3233, 3306), 'flask.render_template', 'render_template', (['"""update_account.html"""'], {'title': '"""Update Account"""', 'form': 'form'}), "('update_account.html', title='Update Account', form=form)\n", (3248, 3306), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((542, 598), 'portunus.models.User', 'User', ([], {'username': 'form.username.data', 'email': 'form.email.data'}), '(username=form.username.data, email=form.email.data)\n', (546, 598), False, 'from portunus.models import User\n'), ((654, 674), 'portunus.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (668, 674), False, 'from portunus import db\n'), ((683, 702), 'portunus.db.session.commit', 'db.session.commit', ([], {}), '()\n', (700, 702), False, 'from portunus import db\n'), ((711, 788), 'flask.flash', 'flash', (['"""Your account has been created. You are now able to login!"""', '"""success"""'], {}), "('Your account has been created. You are now able to login!', 'success')\n", (716, 788), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1807, 1828), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (1814, 1828), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((2891, 2910), 'portunus.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2908, 2910), False, 'from portunus import db\n'), ((2919, 2969), 'flask.flash', 'flash', (['"""Your account has been updated."""', '"""success"""'], {}), "('Your account has been updated.', 'success')\n", (2924, 2969), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((440, 461), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (447, 461), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((813, 834), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (820, 834), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1037, 1058), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (1044, 1058), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1388, 1433), 'flask_login.login_user', 'login_user', (['user'], {'remember': 'form.remember.data'}), '(user, remember=form.remember.data)\n', (1398, 1433), False, 'from flask_login import login_user, logout_user, current_user, login_required\n'), ((1458, 1482), 'flask.request.args.get', 'request.args.get', (['"""next"""'], {}), "('next')\n", (1474, 1482), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1588, 1659), 'flask.flash', 'flash', (['"""Login unsuccessful. Please check Email and Password."""', '"""danger"""'], {}), "('Login unsuccessful. Please check Email and Password.', 'danger')\n", (1593, 1659), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((2283, 2338), 'flask_login.current_user.check_password', 'current_user.check_password', (['form.current_password.data'], {}), '(form.current_password.data)\n', (2310, 2338), False, 'from flask_login import login_user, logout_user, current_user, login_required\n'), ((2352, 2399), 'flask.flash', 'flash', (['"""Your password was incorrect."""', '"""danger"""'], {}), "('Your password was incorrect.', 'danger')\n", (2357, 2399), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((2419, 2492), 'flask.render_template', 'render_template', (['"""update_account.html"""'], {'title': '"""Update Account"""', 'form': 'form'}), "('update_account.html', title='Update Account', form=form)\n", (2434, 2492), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((2833, 2882), 'flask_login.current_user.set_password', 'current_user.set_password', (['form.new_password.data'], {}), '(form.new_password.data)\n', (2858, 2882), False, 'from flask_login import login_user, logout_user, current_user, login_required\n'), ((2994, 3024), 'flask.url_for', 'url_for', (['"""auth.update_account"""'], {}), "('auth.update_account')\n", (3001, 3024), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1263, 1306), 'portunus.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'form.email.data'}), '(email=form.email.data)\n', (1283, 1306), False, 'from portunus.models import User\n'), ((1539, 1560), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (1546, 1560), False, 'from flask import render_template, redirect, url_for, flash, request\n')]
import requests


def return_data_zip_code(zipcode):
    response = requests.get('https://viacep.com.br/ws/{}/json/'
                            .format(zipcode))
    print(response.status_code)
    print(response.json())
    print(type(response.json()))
    data_zip_code = response.json()
    print(data_zip_code['logradouro'])
    print(data_zip_code['complemento'])
    return data_zip_code


def return_data_pokemon(pokemon):
    response = requests.get('https://pokeapi.co/api/v2/pokemon/{}/'
                            .format(pokemon))
    data_pokemon = response.json()
    return data_pokemon


def return_response(url):
    response = requests.get(url)
    return response.text


if __name__ == '__main__':
    response = return_response('https://www.mdpi.com/journal/ai')
    print(response)
    # return_data_zip_code('01001000')
    # data_pokemon = return_data_pokemon('pikachu')
    # print(data_pokemon['sprites']['front_shiny'])
[ "requests.get" ]
[((591, 608), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (603, 608), False, 'import requests\n')]
import csv

import pytest

from telemetry_loader.streams.core import consume
from telemetry_loader.streams.core import stream
from telemetry_loader.streams.pipes import csv_pipe
from telemetry_loader.streams.pipes import json_pipe

pytestmark = pytest.mark.asyncio


async def test_dict_reader():
    run, _ = stream([b'f1,f2\n', b'v1.1,v1.2\n', b'v2.1,v2.2\n']) | csv_pipe(csv.DictReader) | consume
    assert [dict(line) for line in await run()] == [{'f1': 'v1.1', 'f2': 'v1.2'}, {'f1': 'v2.1', 'f2': 'v2.2'}]


async def test_json_pipe():
    run, _ = stream([b'{"a": 1}\n', b'{"b": 2}\n']) | json_pipe() | consume
    assert [line for line in await run()] == [{'a': 1}, {'b': 2}]
[ "telemetry_loader.streams.core.stream", "telemetry_loader.streams.pipes.json_pipe", "telemetry_loader.streams.pipes.csv_pipe" ]
[((310, 362), 'telemetry_loader.streams.core.stream', 'stream', (["[b'f1,f2\\n', b'v1.1,v1.2\\n', b'v2.1,v2.2\\n']"], {}), "([b'f1,f2\\n', b'v1.1,v1.2\\n', b'v2.1,v2.2\\n'])\n", (316, 362), False, 'from telemetry_loader.streams.core import stream\n'), ((365, 389), 'telemetry_loader.streams.pipes.csv_pipe', 'csv_pipe', (['csv.DictReader'], {}), '(csv.DictReader)\n', (373, 389), False, 'from telemetry_loader.streams.pipes import csv_pipe\n'), ((555, 593), 'telemetry_loader.streams.core.stream', 'stream', (['[b\'{"a": 1}\\n\', b\'{"b": 2}\\n\']'], {}), '([b\'{"a": 1}\\n\', b\'{"b": 2}\\n\'])\n', (561, 593), False, 'from telemetry_loader.streams.core import stream\n'), ((596, 607), 'telemetry_loader.streams.pipes.json_pipe', 'json_pipe', ([], {}), '()\n', (605, 607), False, 'from telemetry_loader.streams.pipes import json_pipe\n')]
import pytest

import numpy as np

import pclpy


def test_eigen_vectorxf():
    a = np.array([1, 1, 1, 1], "f")
    vec = pclpy.pcl.vectors.VectorXf(a)
    assert np.allclose(np.array(vec), a)
[ "numpy.array", "pclpy.pcl.vectors.VectorXf" ]
[((84, 111), 'numpy.array', 'np.array', (['[1, 1, 1, 1]', '"""f"""'], {}), "([1, 1, 1, 1], 'f')\n", (92, 111), True, 'import numpy as np\n'), ((122, 151), 'pclpy.pcl.vectors.VectorXf', 'pclpy.pcl.vectors.VectorXf', (['a'], {}), '(a)\n', (148, 151), False, 'import pclpy\n'), ((175, 188), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (183, 188), True, 'import numpy as np\n')]
import requests
import logging
import telegram
import time
import info
from bs4 import BeautifulSoup
from movie import imdbMovie
from vocabulary import Vocab
from weather import weathers
from horoscope import horoscope
from reddit_meme import reddit
from pandemic_new import hot_corona
######################################33
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram import ChatMember
from telegram import Message
from telegram import Chat
from telegram import User
################################################
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


class functs:
    def __init__(self,updater,dp):
        self.forbidden_words={"sex","haksim"}
        self.counter=0
        self.url=info.url
        self.token=info.token
        self.updater=updater
        self.dp=dp
        self.bot = telegram.Bot(token=self.token)
        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)

    def currency(self,update,context):
        result=requests.get('https://api.exchangeratesapi.io/latest?base=USD')
        result=result.json()
        response=result['rates']
        key=update.message.text
        # /currency TRY,USD,10
        cur_list=key.lstrip("/currency ")
        cur_list=cur_list.split(" ")
        _result=float(cur_list[2])*(float(response[cur_list[0]])/(float(response[cur_list[1]])))
        time.sleep(2)
        _result=round(_result,2)
        context.bot.send_message(chat_id=update.effective_chat.id,text=str(_result)+" "+cur_list[0]+" 💰")

    def youtube(self,update,context):
        _url="https://www.youtube.com/"
        _msg=update.message.text
        msg_lst=_msg.split(" ")
        msg=msg_lst[1]
        driver=webdriver.Firefox()
        new_url=f"{_url}results?search_query={msg}"
        driver.get(new_url)
        elem=driver.find_element_by_xpath("//*[@id='thumbnail']").get_attribute("href")
        time.sleep(1)
        context.bot.send_message(chat_id=update.effective_chat.id,text=elem)

    def movie(self,update,context):
        chat_message=update.message.text
        _list=chat_message.split("")
        _list.remove("/movie")
        listo=[i.lower().capitalize() for i in _list]
        x=imdbMovie("".join(listo)).get_movie()
        context.bot.send_message(chat_id=update.effective_chat.id,text=x)

    def word(self,update,context):
        try:
            chat_message=update.message.text
            chat_message.lower().capitalize()
            x=Vocab(chat_message).mean()
            context.bot.send_message(chat_id=update.effective_chat.id,text=x)
        except KeyError:
            context.bot.send_message(chat_id=update.effective_chat.id,text="İnvaild Syntax :(")

    def weather(self,update,context):
        try:
            chat_message=update.message.text
            _lst=chat_message.split("")
            _lst.remove("/weather")
            listo=[i.lower().capitalize() for i in _lst]
            _=weathers(" ".join(listo)).get_location()
            context.bot.send_message(chat_id=update.effective_chat.id,text=_)
        except KeyError:
            context.bot.send_message(chat_id=update.effective_chat.id,text="İnvaild Syntax :(")

    def horos(self,update,context):
        try:
            chat_message=update.message.text
            _lst=chat_message.split(" ")
            _lst.remove("/horos")
            listo=[i.lower().capitalize() for i in _lst]
            _=horoscope(" ".join(listo)).get_daily()
            context.bot.send_message(chat_id=update.effective_chat.id,text=_)
        except KeyError:
            context.bot.send_message(chat_id=update.effective_chat.id,text="Arıes,Taurus,Gemını,Cancer,Leo,Virgo,Libra,Scorpio,Sagittarius,Capricorn,aquarius,pısces")

    def meme(self,update,context):
        _=reddit(info.password,info.username).get_meme()
        context.bot.send_photo(chat_id=update.effective_chat.id,photo=_)

    def gif(self,update,context):
        _=reddit(info.password,info.username).get_gif()
        context.bot.send_video(chat_id=update.effective_chat.id,video=_)

    def pandemic(self,update,context):
        chat_message=update.message.text
        _lst=chat_message.split(" ")
        _lst.remove("/pandemic")
        listo=[i.lower().capitalize() for i in _lst]
        _content="".join(listo)
        if 4 > len(_content) > 1:
            _context = _content.upper()
        if len(_lst)<1:
            _=hot_corona().get_country_case("World")
        else:
            _=hot_corona().get_country_case(_context)
        context.bot.send_message(chat_id=update.effective_chat.id,text=_)

    def corona(self,update,context):
        chat_message=update.message
        _txt=chat_message.text
        a=_txt.split(" ")
        a.remove("/corona")
        hot_corona().plot("".join(a))
        _photo=hot_corona().upload_cloud()
        time.sleep(5)
        context.bot.send_photo(
            chat_id=update.effective_chat.id,
            photo=_photo)

    def kick(self,update,context):
        message=update.message
        reply=message.reply_to_message
        reply_id=reply.from_user.id
        reply_name=reply.from_user.name
        try:
            context.bot.send_message(chat_id=update.effective_chat.id,text=reply_name+","+"KICK!")
            context.bot.send_photo(chat_id=update.effective_chat.id,photo=info.coffin1)
            self.bot.kick_chat_member(update.effective_chat.id,reply_id)
        except:
            context.bot.send_message(chat_id=update.effective_chat.id,text=reply_name+","+"KICK!")
            context.bot.send_photo(chat_id=update.effective_chat.id,photo=info.coffin1)

    def github(self,update,context):
        context.bot.send_message(chat_id=update.effective_chat.id,text=info._github)

    def help(self,update,context):
        context.bot.send_message(chat_id=update.effective_chat.id,text=info._help)


class group(functs):
    def __init__(self,updater,dp):
        super().__init__(updater,dp)

    def welcome_member(self,update,context):
        message=update.message
        user=message.new_chat_members[0]
        user_name=user.username
        name=message.new_chat_members[0].name
        try:
            context.bot.send_message(chat_id=update.effective_chat.id,text=info.welcome.format(user_name))
        except:
            context.bot.send_message(chat_id=update.effective_chat.id,text=info.welcome.format(name))

    def remove_forbidden_words(self,update,context):
        _msg=update.message
        txt=_msg.text
        for i in self.forbidden_words:
            if i in txt:
                self.bot.delete_message(update._effective_chat.id,update._effective_message.message_id)
                context.bot.send_message(chat_id=update.effective_chat.id,text="Don't be rude")
[ "reddit_meme.reddit", "logging.basicConfig", "selenium.webdriver.Firefox", "pandemic_new.hot_corona", "time.sleep", "telegram.Bot", "vocabulary.Vocab", "requests.get", "info.welcome.format" ]
[((885, 915), 'telegram.Bot', 'telegram.Bot', ([], {'token': 'self.token'}), '(token=self.token)\n', (897, 915), False, 'import telegram\n'), ((924, 1031), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (943, 1031), False, 'import logging\n'), ((1109, 1172), 'requests.get', 'requests.get', (['"""https://api.exchangeratesapi.io/latest?base=USD"""'], {}), "('https://api.exchangeratesapi.io/latest?base=USD')\n", (1121, 1172), False, 'import requests\n'), ((1474, 1487), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1484, 1487), False, 'import time\n'), ((1829, 1848), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (1846, 1848), False, 'from selenium import webdriver\n'), ((2041, 2054), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2051, 2054), False, 'import time\n'), ((4988, 5001), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4998, 5001), False, 'import time\n'), ((3917, 3953), 'reddit_meme.reddit', 'reddit', (['info.password', 'info.username'], {}), '(info.password, info.username)\n', (3923, 3953), False, 'from reddit_meme import reddit\n'), ((4082, 4118), 'reddit_meme.reddit', 'reddit', (['info.password', 'info.username'], {}), '(info.password, info.username)\n', (4088, 4118), False, 'from reddit_meme import reddit\n'), ((4907, 4919), 'pandemic_new.hot_corona', 'hot_corona', ([], {}), '()\n', (4917, 4919), False, 'from pandemic_new import hot_corona\n'), ((4952, 4964), 'pandemic_new.hot_corona', 'hot_corona', ([], {}), '()\n', (4962, 4964), False, 'from pandemic_new import hot_corona\n'), ((2612, 2631), 'vocabulary.Vocab', 'Vocab', (['chat_message'], {}), '(chat_message)\n', (2617, 2631), False, 'from vocabulary import Vocab\n'), ((4551, 4563), 'pandemic_new.hot_corona', 'hot_corona', ([], {}), '()\n', (4561, 4563), False, 'from pandemic_new import hot_corona\n'), ((4625, 4637), 'pandemic_new.hot_corona', 'hot_corona', ([], {}), '()\n', (4635, 4637), False, 'from pandemic_new import hot_corona\n'), ((6416, 6446), 'info.welcome.format', 'info.welcome.format', (['user_name'], {}), '(user_name)\n', (6435, 6446), False, 'import info\n'), ((6539, 6564), 'info.welcome.format', 'info.welcome.format', (['name'], {}), '(name)\n', (6558, 6564), False, 'import info\n')]
from numeric_edahelper.flag_outliers import flag_outliers
import pandas as pd
import pytest

def test_flag_outliers():
    """
    Test the correct output of variables containing outliers from given df
    """
    df = pd.DataFrame({'col1': [-100,-200, 1,2,3,4,5,6,7,8,9,10, 1000],
                       'col2': [1,2,3,4,5,6,7,8,9,10,11,12,13],
                       'col3': [-50, 1,2,3,4,5,6,7,8,9,10,11,50000]})
    output1 = flag_outliers(df, threshold=0.1)

    # Assert statements
    assert len(output1) == 2, "Output should return 2 variables!"
    assert flag_outliers(df, 1.0) == {}, "Output should return an empty dictionary"
    assert output1 == {'col1': 0.23076923076923078, 'col3': 0.15384615384615385}, "Output is incorrect"
    assert type(output1) == dict, "The returned output datatype should be a dictionary"

    # Raise specific type errors
    with pytest.raises(TypeError):
        flag_outliers(12345)
    with pytest.raises(TypeError):
        flag_outliers(df, threshold=2)
[ "pandas.DataFrame", "numeric_edahelper.flag_outliers.flag_outliers", "pytest.raises" ]
[((221, 412), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [-100, -200, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1000], 'col2': [1, 2, \n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 'col3': [-50, 1, 2, 3, 4, 5, 6, 7,\n 8, 9, 10, 11, 50000]}"], {}), "({'col1': [-100, -200, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1000],\n 'col2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 'col3': [-50, 1, 2,\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 50000]})\n", (233, 412), True, 'import pandas as pd\n'), ((426, 458), 'numeric_edahelper.flag_outliers.flag_outliers', 'flag_outliers', (['df'], {'threshold': '(0.1)'}), '(df, threshold=0.1)\n', (439, 458), False, 'from numeric_edahelper.flag_outliers import flag_outliers\n'), ((562, 584), 'numeric_edahelper.flag_outliers.flag_outliers', 'flag_outliers', (['df', '(1.0)'], {}), '(df, 1.0)\n', (575, 584), False, 'from numeric_edahelper.flag_outliers import flag_outliers\n'), ((870, 894), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (883, 894), False, 'import pytest\n'), ((904, 924), 'numeric_edahelper.flag_outliers.flag_outliers', 'flag_outliers', (['(12345)'], {}), '(12345)\n', (917, 924), False, 'from numeric_edahelper.flag_outliers import flag_outliers\n'), ((934, 958), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (947, 958), False, 'import pytest\n'), ((968, 998), 'numeric_edahelper.flag_outliers.flag_outliers', 'flag_outliers', (['df'], {'threshold': '(2)'}), '(df, threshold=2)\n', (981, 998), False, 'from numeric_edahelper.flag_outliers import flag_outliers\n')]
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Input files for this script are hpoTermDef.obo (downloaded from HPO, flat file of HPO term defs),
# OMIM_NumList.csv (list of OMIM Nums generated by OMIMdisease scripts, and phenotype_annotation.tab,
# a file downloaded from the online HPO, that maps HPO terms onto OMIM disease ids (OMIM_Num)
# Output is HPOforLeukoDisease.tsv, a tab-deliminated file of OMIM_Nums matched to
# their corresponding HPO terms
# pandas used to read in OMIM_NumList and phenotype_annotation as dataframes

# PURPOSE: This file creates the tsv file that is used to create the diseaseHPO table by parsing previously created files
# files downloaded from the online Human Phenotype Ontology

import pandas as pd
from functools import reduce

outputfile = open('outputFile/HPOforLeukoDisease.tsv', 'w')
hpoTermDef = open('inputFile/hpoTermDef.obo', 'r')
HPO_annotation = pd.read_csv('inputFile/phenotype_annotation.tab', sep='\t',
                             index_col=0, low_memory=False)
OMIMNums = pd.read_csv('inputFile/OMIM_NumList.csv', low_memory=False)

OMIMList = OMIMNums['OMIM_Num'].values.tolist()

# Empty dataframe to which extracted values will be added
df = pd.DataFrame(columns=['OMIM_Num', 'diseaseName', 'HPO_ID'])

# Empty HPOList to keep track of HPO IDs to be searched for
HPOList = []

# Loop extracts values from dataframe, adds them to a temp dataframe, and then appends that to the exisiting dataframe
for (diseaseID, HPOID, diseaseName) in \
    zip(HPO_annotation['disease-identifier'], HPO_annotation['HPO-ID'],
        HPO_annotation['disease-name']):
    if diseaseID in OMIMList:
        # temp dataframe for storing values
        df2 = pd.DataFrame([[diseaseID, diseaseName, HPOID]],
                          columns=['OMIM_Num', 'diseaseName', 'HPO_ID'])
        df = df.append(df2, ignore_index=True)
        # add the HPO_ID onto the list, to be used to map HPO IDs to HPO Names
        HPOList.append(HPOID)

# Reads in the first line of the hpoTermDef file
line = hpoTermDef.readline()

# Creates an empty dataframe to store HPO ids and names together
hpoDF = pd.DataFrame(columns=['HPO_ID', 'HPO_Name'])

for line in hpoTermDef:
    # "id: " indicates that line contains an HPO ID
    if line.startswith('id: '):
        # Splits line, strips the new line char, then determines if HPO ID is in list
        line = line.split(' ')
        # Temp dataframe is created to store the HPO name and ID, then is appended to HPODF
        if line[1].strip('\n') in HPOList:
            HPOID = line[1].strip('\n')
            line = hpoTermDef.readline()
            HPOName = line.strip('name: ').strip('\n')
            hpoDF_Temp = pd.DataFrame([[HPOID, HPOName]],
                                  columns=['HPO_ID', 'HPO_Name'])
            hpoDF = hpoDF.append(hpoDF_Temp, ignore_index=True)

# List of dataframes to be joined together
dfs = [df, hpoDF]

# dataframes are joined based on matching HPO ID
final_df = reduce(lambda left, right: pd.merge(left, right, on=['HPO_ID'],
                                             how='outer'), dfs)

# Sorts Values by OMIM Num (Intially sorted by HPO ID) and then writes dataframe to output file
final_df.sort_values(by=['OMIM_Num'], inplace=True)
final_df.to_csv('outputFile/HPOforLeukoDisease.tsv', sep='\t')

outputfile.close()
hpoTermDef.close()
[ "pandas.read_csv", "pandas.merge", "pandas.DataFrame" ]
[((894, 988), 'pandas.read_csv', 'pd.read_csv', (['"""inputFile/phenotype_annotation.tab"""'], {'sep': '"""\t"""', 'index_col': '(0)', 'low_memory': '(False)'}), "('inputFile/phenotype_annotation.tab', sep='\\t', index_col=0,\n low_memory=False)\n", (905, 988), True, 'import pandas as pd\n'), ((1026, 1085), 'pandas.read_csv', 'pd.read_csv', (['"""inputFile/OMIM_NumList.csv"""'], {'low_memory': '(False)'}), "('inputFile/OMIM_NumList.csv', low_memory=False)\n", (1037, 1085), True, 'import pandas as pd\n'), ((1199, 1258), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['OMIM_Num', 'diseaseName', 'HPO_ID']"}), "(columns=['OMIM_Num', 'diseaseName', 'HPO_ID'])\n", (1211, 1258), True, 'import pandas as pd\n'), ((2162, 2206), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['HPO_ID', 'HPO_Name']"}), "(columns=['HPO_ID', 'HPO_Name'])\n", (2174, 2206), True, 'import pandas as pd\n'), ((1699, 1797), 'pandas.DataFrame', 'pd.DataFrame', (['[[diseaseID, diseaseName, HPOID]]'], {'columns': "['OMIM_Num', 'diseaseName', 'HPO_ID']"}), "([[diseaseID, diseaseName, HPOID]], columns=['OMIM_Num',\n 'diseaseName', 'HPO_ID'])\n", (1711, 1797), True, 'import pandas as pd\n'), ((3036, 3085), 'pandas.merge', 'pd.merge', (['left', 'right'], {'on': "['HPO_ID']", 'how': '"""outer"""'}), "(left, right, on=['HPO_ID'], how='outer')\n", (3044, 3085), True, 'import pandas as pd\n'), ((2735, 2799), 'pandas.DataFrame', 'pd.DataFrame', (['[[HPOID, HPOName]]'], {'columns': "['HPO_ID', 'HPO_Name']"}), "([[HPOID, HPOName]], columns=['HPO_ID', 'HPO_Name'])\n", (2747, 2799), True, 'import pandas as pd\n')]
from django.core.files.storage import get_storage_class
from django.shortcuts import redirect
from django.utils.cache import add_never_cache_headers
from storages.backends.s3boto3 import S3Boto3Storage
from wagtail.core import hooks
from wagtail.documents import get_document_model
from wagtail.documents.models import document_served


@hooks.register("before_serve_document", order=100)
def serve_document_from_s3(document, request):
    # Skip this hook if not using django-storages boto3 backend.
    if not issubclass(get_storage_class(), S3Boto3Storage):
        return

    # Send document_served signal.
    document_served.send(
        sender=get_document_model(), instance=document, request=request
    )

    # Get direct S3 link.
    file_url = document.file.url

    # Generate redirect response and add never_cache headers.
    response = redirect(file_url)
    del response["Cache-control"]
    add_never_cache_headers(response)
    return response


@hooks.register("construct_settings_menu")
def hide_main_menu_menu_item(request, menu_items):
    menu_items[:] = [item for item in menu_items if item.name != "main-menu"]
[ "wagtail.documents.get_document_model", "wagtail.core.hooks.register", "django.shortcuts.redirect", "django.core.files.storage.get_storage_class", "django.utils.cache.add_never_cache_headers" ]
[((339, 389), 'wagtail.core.hooks.register', 'hooks.register', (['"""before_serve_document"""'], {'order': '(100)'}), "('before_serve_document', order=100)\n", (353, 389), False, 'from wagtail.core import hooks\n'), ((969, 1010), 'wagtail.core.hooks.register', 'hooks.register', (['"""construct_settings_menu"""'], {}), "('construct_settings_menu')\n", (983, 1010), False, 'from wagtail.core import hooks\n'), ((855, 873), 'django.shortcuts.redirect', 'redirect', (['file_url'], {}), '(file_url)\n', (863, 873), False, 'from django.shortcuts import redirect\n'), ((912, 945), 'django.utils.cache.add_never_cache_headers', 'add_never_cache_headers', (['response'], {}), '(response)\n', (935, 945), False, 'from django.utils.cache import add_never_cache_headers\n'), ((524, 543), 'django.core.files.storage.get_storage_class', 'get_storage_class', ([], {}), '()\n', (541, 543), False, 'from django.core.files.storage import get_storage_class\n'), ((654, 674), 'wagtail.documents.get_document_model', 'get_document_model', ([], {}), '()\n', (672, 674), False, 'from wagtail.documents import get_document_model\n')]
#!/usr/bin/env python3
from pyln.client import Plugin

plugin = Plugin()


@plugin.hook('custommsg')
def on_custommsg(peer_id, payload, plugin, message=None, **kwargs):
    plugin.log("Got custommessage_a {msg} from peer {peer_id}".format(
        msg=payload,
        peer_id=peer_id
    ))
    return {'result': 'continue'}


plugin.run()
[ "pyln.client.Plugin" ]
[((64, 72), 'pyln.client.Plugin', 'Plugin', ([], {}), '()\n', (70, 72), False, 'from pyln.client import Plugin\n')]
# Copyright 2021 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
import os

from . import Modifier
from . import Logger


class InsertCssModifier(Modifier):
    def _mv2(self):
        pass

    def _mv3(self):
        needsChange = False
        for root, dirs, files in os.walk(self.wrapper.destination, topdown=False):
            for name in files:
                if str(Path(name).suffix) != '.js':
                    continue
                path = root + os.sep + os.sep.join(dirs) + name
                if os.path.exists(path):
                    with open(path, 'r+') as file:
                        data = file.read()
                        seek = 'chrome.tabs.insertCSS'
                        if data.find(seek) == -1:
                            continue
                        log("Changing to chrome.scripting.insertCSS")
                        if not needsChange:
                            needsChange = True
                        file.seek(0)
                        file.write(data.replace(seek, 'chrome.scripting.insertCSS'))
                        file.truncate()
        if needsChange:
            if 'permissions' not in self.wrapper.manifest:
                self.wrapper.manifest['permissions'] = []
            permissions = self.wrapper.manifest['permissions']
            for permission in permissions:
                if permission == "scripting":
                    needsChange = False
                    break
            if needsChange:
                Logger().log("Adding scripting permission to manifest")
                self.wrapper.manifest['permissions'].append("scripting")
                self.writeManifest()
[ "pathlib.Path", "os.walk", "os.path.exists", "os.sep.join" ]
[((806, 854), 'os.walk', 'os.walk', (['self.wrapper.destination'], {'topdown': '(False)'}), '(self.wrapper.destination, topdown=False)\n', (813, 854), False, 'import os\n'), ((1001, 1021), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1015, 1021), False, 'import os\n'), ((965, 982), 'os.sep.join', 'os.sep.join', (['dirs'], {}), '(dirs)\n', (976, 982), False, 'import os\n'), ((896, 906), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (900, 906), False, 'from pathlib import Path\n')]
""" bandage, v1.0. Made by perpetualCreations """ from platform import system from hashlib import md5 from time import time from tempfile import gettempdir from os import mkdir, path, remove, listdir from shutil import unpack_archive, copyfile, make_archive, rmtree, copytree from io import StringIO from contextlib import redirect_stdout from json import load as jsonload from json import dump as jsondump from typing import Union import urllib3 import filecmp class dircmp(filecmp.dircmp): """Compare the content of dir1 and dir2. In contrast with filecmp.dircmp, \ this subclass compares the content of files with the same path.""" def phase3(self): """Find out differences between common files. \ Ensure we are using content comparison with shallow=False.""" fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files, shallow=False) self.same_files, self.diff_files, self.funny_files = fcomp class Backend: """Shared backend static functions.""" @staticmethod def fetch(target: str) -> object: """ Fetch HTTP and HTTPS requests through URLLIB3, return request \ object, raises exception if status is not in 2XX or 301, 302. :param target: HTTPS/HTTP address :type target: str :return: request :rtype: object """ urllib3_pool_manager = urllib3.PoolManager() fetch_request = urllib3_pool_manager.request("GET", target) if str(fetch_request.status)[:1] != "2" and fetch_request.status \ not in [301, 302]: raise Exceptions.FetchError( "Failed to fetch resource, returned HTTP status code " + str(fetch_request.status) + ".") from None else: return fetch_request @staticmethod def directory_split_recursive(whole: str) -> list: """ Take path parameter and apply path.split recursively, dump \ spliced directory tree to return variable. Produces segmented directories, i.e: /path/to/somewhere/ -> /path/to -> /path/ ...Which will be appended to the return list as mentioned previously. 
:param whole: path for splitting into component directories :type whole: str :return: contains components :rtype: list """ # append components to this list, function return dump = [] # remaining path after splitting previous component previous = "/INITIAL/INITIAL" while path.split(previous)[1] != "": if previous == "/INITIAL/INITIAL": previous = path.split(whole)[0] else: previous = path.split(previous)[0] if previous != "/": dump.append(previous) return dump class Exceptions: """bandage exception class with children classes.""" class FetchError(BaseException): """Raised when a web fetch request fails (qualified when status code \ is not in 200 range and is not 301 or 302).""" class PatchError(BaseException): """Raised for errors involving patch files, caused by them being \ missing, invalid...""" class ReleaseError(BaseException): """Raised for errors involving release archive files, caused by them \ being missing, invalid...""" class TargetError(BaseException): """Raised for errors involving specified bandage.Patcher target for \ upgrade.""" class RemoteError(BaseException): """Raised for errors involving specified bandage.Patcher remotes for \ fetching.""" class VersionError(BaseException): """Raised for errors involving VERSION files, caused by them being \ missing, invalid, mismatching, being the same...""" class UnableToParseError(BaseException): """Raised when Bandage is unable to interpret a string, usually \ raised with additional information.""" class Patcher: """Main class for bandage.Patcher instances, which apply patches.""" def __init__(self, patch: str, target: str, suppress_version_check: bool = False, suppress_name_check: bool = False, skip_keep_check: bool = False): """ Take patch file and target application directory, and apply \ changes after checking VERSION and NAME. Inorganic and for robots. :param patch: web address or path to patch file :type patch: str :param target: path to application directory for patching :type target: str :param suppress_version_check: if True VERSION/VERSIONS check is ignored, unsafe, default is False :type suppress_version_check: bool :param suppress_name_check: if True NAME check is ignored, unsafe, default is False :type suppress_name_check: bool :param skip_keep_check: if True Patcher does not check if files listed under Keep exist, default is False :type skip_keep_check: bool """ self.WORK_DIR = Patcher.create_work_directory() self.patch = patch self.target = target if "https://" in patch[:8] or "http://" in patch[:8]: patch_grab = Backend.fetch(patch) with open(gettempdir() + self.WORK_DIR + path.splitext(self.patch)[1], "w") as patch_data_dump: patch_data_dump.write(patch_grab.data) self.patch = gettempdir() + self.WORK_DIR + \ path.splitext(self.patch)[1] else: if path.isfile(self.patch) is False: raise Exceptions.PatchError("Patch file with path " + self.patch + " does not exist.") if path.isdir(self.target) is False or not listdir(self.target): raise Exceptions.TargetError("Target directory " + self.target + " does not exist or is empty.") unpack_archive(self.patch, gettempdir() + self.WORK_DIR) try: if suppress_name_check is False: with open(gettempdir() + self.WORK_DIR + "/NAME") as \ patch_name_handle: patch_name = patch_name_handle.read() with open(self.target + "/NAME") as target_name_handle: if target_name_handle.read() != patch_name: raise Exceptions.PatchError( "NAME files of target and patch are different. 
" + "Target is " + target_name_handle.read() + " and patch " + patch_name + ".") except FileNotFoundError as ParentException: raise Exceptions.PatchError("Missing NAME file(s).") from \ ParentException try: if suppress_version_check is False: with open(gettempdir() + self.WORK_DIR + "/VERSIONS") as \ versions_handle: patch_versions = versions_handle.read() self.patch_versions = patch_versions.split(" -> ") with open(path.join(target, "VERSION")) as version_handle: current_version = version_handle.read() if current_version != self.patch_versions[0]: raise Exceptions.VersionError( "VERSIONS file specifies a different upgrade-from " + "version compared to the target VERSION file. " + "Target is on " + current_version + ", and patch supporting " + self.patch_versions[0] + ".") except FileNotFoundError as ParentException: raise Exceptions.VersionError("Missing VERSION(S) file(s).") from \ ParentException try: with open(gettempdir() + self.WORK_DIR + "/CHANGE.json") as \ changelog_handle: self.change = jsonload(changelog_handle) except FileNotFoundError as ParentException: raise Exceptions.PatchError( "CHANGE.json file of patch archive is missing.") from \ ParentException for x in self.change: self.change[x] = self.change[x].strip("[]").split(", ") for y in range(0, len(self.change[x])): self.change[x][y] = self.change[x][y].strip("'") if skip_keep_check is False: for x in range(0, len(self.change["keep"])): if path.isdir(path.join(self.target, self.change["keep"][x])) \ is not True and path.isfile( path.join(self.target, self.change["keep"][x])) \ is not True: raise Exceptions.TargetError( "Target missing item(s) that should exist, listed " + "under the keep operation. Raised on " + self.change["keep"][x] + ".") for x in range(0, len(self.change["add"])): if path.isdir(gettempdir() + self.WORK_DIR + "/add/" + self.change["add"][x]) is not True and path.isfile( gettempdir() + self.WORK_DIR + "/add/" + self.change["add"][x]) is not True: raise Exceptions.PatchError( "Missing item(s) for addition. Raised on " + self.change["add"][x] + ".") for x in range(0, len(self.change["replace"])): if path.isdir(gettempdir() + self.WORK_DIR + "/replace/" + self.change["replace"][x]) is not True and \ path.isfile(gettempdir() + self.WORK_DIR + "/replace/" + self.change["replace"] [x]) is not True: raise Exceptions.PatchError( "Missing item(s) for replacement. 
Raised on " + self.change["replace"][x] + ".") for x in range(0, len(self.change["add"])): component = \ Backend.directory_split_recursive(self.change["add"][x]) for a in component: if path.isdir(path.join(self.target, a)) is False: mkdir(path.join(self.target, a)) if path.isfile(gettempdir() + self.WORK_DIR + "/add/" + self.change["add"][x]): copyfile(gettempdir() + self.WORK_DIR + "/add/" + self.change["add"][x], path.join(self.target, self.change["add"][x])) if path.isdir(gettempdir() + self.WORK_DIR + "/add/" + self.change["add"][x]): copytree(gettempdir() + self.WORK_DIR + "/add/" + self.change["add"][x], path.join(self.target, self.change["add"][x])) for x in range(0, len(self.change["replace"])): if path.isfile(path.join(self.target, self.change["replace"][x])) is True: remove(path.join(self.target, self.change["replace"][x])) copyfile(gettempdir() + self.WORK_DIR + "/replace/" + self.change["replace"][x], path.join(self.target, self.change["replace"][x])) elif path.isdir(path.join(self.target, self.change["replace"][x])) is True: rmtree(path.join(self.target, self.change["replace"][x])) copytree(gettempdir() + self.WORK_DIR + "/replace/" + self.change["replace"][x], path.join(self.target, self.change["replace"][x])) else: raise Exceptions.TargetError( "Target " + self.change["replace"][x] + " for replacement does not exist.") for x in range(0, len(self.change["remove"])): if path.isdir(path.join(self.target, self.change["remove"][x])) is True: rmtree(path.join(self.target, self.change["remove"][x])) elif path.isfile(path.join(self.target, self.change["remove"][x])) is True: remove(path.join(self.target, self.change["remove"][x])) else: raise Exceptions.TargetError( "Target " + self.change["remove"][x] + " for removal does not exist, or is not a file or" + " directory.") with open(self.target + "/VERSION", "w") as version_overwrite_handle: # this is redundant, VERSION gets overwritten by replace anyways, # since Weave detects two different version files automatically # if one day this module needed to be slimmed down, remove this # for a slight amount of I/O performance gain version_overwrite_handle.truncate(0) version_overwrite_handle.write(self.patch_versions[1]) rmtree(gettempdir() + self.WORK_DIR) @staticmethod def create_work_directory() -> str: """ Create directory under the OS temporary directory with a unique name \ to prevent conflicting instances. :return: generated tempdir name :rtype: str """ identifier = "/bandage_patcher_session_" + md5( str(time()).encode(encoding="ascii", errors="replace")).hexdigest() mkdir(gettempdir() + identifier) return identifier class Weave: """Main class for bandage.Weave instances, which generates patches.""" def __init__(self, release_old: str, release_new: str, output_path: str, set_name: Union[str, None] = None, suppress_missing_versions: bool = False): """ Take two release files, and compare them for differences, then \ generate patch file to given output path. Inorganic and for robots. 
:param release_old: web address or path to old release file :type release_old: str :param release_new: web address or path to new release file :type release_new: str :param output_path: path to output archive, if archive already exists, deletes archive and "overwrites" it with the new archive file :type output_path: str :param set_name: new patch NAME file, if not None, NAME check is ignored, default None :type set_name: Union[str, None] :param suppress_missing_versions: if True missing versions error is ignored, Supply class cannot detect the release automatically, Patcher must be directed to the patch archive manually, default False :type suppress_missing_versions: bool """ self.WORK_DIR = Weave.create_work_directory() self.release_old = release_old self.release_new = release_new if path.isdir(output_path) is False: raise Exceptions.PatchError("Specified output directory " + output_path + " is not a directory.") if "https://" in self.release_old[:8] or "http://" in \ self.release_old[:8]: release_old_grab = Backend.fetch(self.release_old) with open(gettempdir() + self.WORK_DIR + "/old/" + path.splitext(self.release_old)[1], "w") as \ release_old_data_dump: release_old_data_dump.write(release_old_grab.data) self.release_old = gettempdir() + self.WORK_DIR + "/old/" + \ path.splitext(self.release_old)[1] else: if path.isfile(self.release_old) is False: raise Exceptions.ReleaseError( "Old release file " + self.release_old + " does not exist.") if "https://" in self.release_new[:8] or "http://" in \ self.release_new[:8]: release_new_grab = Backend.fetch(self.release_new) with open(gettempdir() + self.WORK_DIR + "/new/" + path.splitext(self.release_new)[1], "w") as \ release_new_data_dump: release_new_data_dump.write(release_new_grab.data) self.release_new = gettempdir() + self.WORK_DIR + "/new/" + \ path.splitext(self.release_new)[1] else: if path.isfile(self.release_new) is False: raise Exceptions.ReleaseError( "New release file " + self.release_new + " does not exist.") unpack_archive(self.release_old, gettempdir() + self.WORK_DIR + "/old/") unpack_archive(self.release_new, gettempdir() + self.WORK_DIR + "/new/") try: with open(gettempdir() + self.WORK_DIR + "/old/NAME") as \ release_name_handle: self.release_name_old = release_name_handle.read() with open(gettempdir() + self.WORK_DIR + "/new/NAME") as \ release_name_handle: self.release_name_new = release_name_handle.read() if self.release_name_new != self.release_name_old and \ set_name is None: raise Exceptions.ReleaseError( "NAME files of old and new releases do not match." 
+ " Old is " + self.release_name_old + " and new " + self.release_name_new + ".") except FileNotFoundError as ParentException: if set_name is not None: raise Exceptions.ReleaseError( "NAME files of old and new releases are missing.") from \ ParentException try: with open(gettempdir() + self.WORK_DIR + "/old/VERSION") as \ release_version_handle: self.release_version_old = release_version_handle.read() with open(gettempdir() + self.WORK_DIR + "/new/VERSION") as \ release_version_handle: self.release_version_new = release_version_handle.read() except FileNotFoundError as ParentException: if suppress_missing_versions is False: raise Exceptions.VersionError( "VERSION files of old and new releases are missing.") \ from ParentException else: self.release_version_old = "NaN" self.release_version_new = "NaN" if suppress_missing_versions is False and \ len(self.release_version_old.split(" -> ")) != 1 or \ len(self.release_version_new.split(" -> ")) != 1: raise Exceptions.UnableToParseError( 'Release versions contain " -> " which will disrupt Patcher ' + 'when trying to read the VERSIONS header.') self.index = Weave.comparison(self) with open(gettempdir() + self.WORK_DIR + "/patch/CHANGE.json", "w") \ as changelog_dump_handle: jsondump({"remove": str(self.index[0]), "add": str(self.index[1]), "keep": str(self.index[2]), "replace": str(self.index[3])}, changelog_dump_handle) for x in range(0, len(self.index[1])): component = Backend.directory_split_recursive(self.index[1][x]) for a in component: if path.isdir(gettempdir() + self.WORK_DIR + "/patch/add/" + a) is False: mkdir(gettempdir() + self.WORK_DIR + "/patch/add/" + a) if path.isfile(gettempdir() + self.WORK_DIR + "/new/" + self.index[1][x]) is True: copyfile(gettempdir() + self.WORK_DIR + "/new/" + self.index[1][x], gettempdir() + self.WORK_DIR + "/patch/add/" + self.index[1][x]) if path.isdir(gettempdir() + self.WORK_DIR + "/new/" + self.index[1][x]) is True: copytree(gettempdir() + self.WORK_DIR + "/new/" + self.index[1][x], gettempdir() + self.WORK_DIR + "/patch/add/" + self.index[1][x]) for y in range(0, len(self.index[3])): component = Backend.directory_split_recursive(self.index[3][y]) for b in component: if path.isdir(gettempdir() + self.WORK_DIR + "/patch/replace/" + b) is False: mkdir(gettempdir() + self.WORK_DIR + "/patch/replace/" + b) if path.isfile(gettempdir() + self.WORK_DIR + "/new/" + self.index[3][y]) is True: copyfile(gettempdir() + self.WORK_DIR + "/new/" + self.index[3][y], gettempdir() + self.WORK_DIR + "/patch/replace/" + self.index[3][y]) if path.isdir(gettempdir() + self.WORK_DIR + "/new/" + self.index[3][y]) is True: copytree(gettempdir() + self.WORK_DIR + "/new/" + self.index[3][y], gettempdir() + self.WORK_DIR + "/patch/replace/" + self.index[3][y]) with open(gettempdir() + self.WORK_DIR + "/patch/VERSIONS", "w") as \ release_version_handle: release_version_handle.write(self.release_version_old + " -> " + self.release_version_new) if set_name is None: with open(gettempdir() + self.WORK_DIR + "/patch/NAME", "w") as \ release_name_handle: release_name_handle.write(self.release_name_new) base_name = output_path + self.release_name_new + "_" + \ self.release_version_old + "_to_" + \ self.release_version_new + "_bandage_patch" make_archive(root_dir=gettempdir() + self.WORK_DIR + "/patch/", base_name=base_name, format="zip") else: with open(gettempdir() + self.WORK_DIR + "/patch/NAME", "w") as \ release_name_handle: release_name_handle.write(set_name) base_name = (output_path + set_name + "_" + self.release_version_old + 
"_to_" + self.release_version_new + "_bandage_patch") make_archive(root_dir=gettempdir() + self.WORK_DIR + "/patch/", base_name=base_name, format="zip") # TODO archive checksum generation rmtree(gettempdir() + self.WORK_DIR) @staticmethod def create_work_directory() -> str: """ Create directory under the OS temporary directory with a unique name \ to prevent conflicting instances. :return: generated tempdir name :rtype: str """ identifier = "/bandage_weave_session_" + \ md5(str(time()).encode(encoding="ascii", errors="replace") ).hexdigest() mkdir(gettempdir() + identifier) mkdir(gettempdir() + identifier + "/old") mkdir(gettempdir() + identifier + "/new") mkdir(gettempdir() + identifier + "/patch") mkdir(gettempdir() + identifier + "/patch/add") mkdir(gettempdir() + identifier + "/patch/replace") return identifier def comparison(self) -> list: """ Compare old and new directories under self.WORK_DIR for differences, \ returns as list. :return: contains release differences :rtype: list """ handle = StringIO() with redirect_stdout(handle): dircmp(gettempdir() + self.WORK_DIR + "/old/", gettempdir() + self.WORK_DIR + "/new/").report_full_closure() raw = handle.getvalue().split("\n") dump = [[], [], [], []] # directory path appends for old and new archive, # allows for handling of sub-directories. parsing_directory = "" for x in range(0, len(raw)): if raw[x][:4] == "diff": if len(raw[x].split(" ")) != 3: raise Exceptions.UnableToParseError( "Release archives contain directories with spaces" + " in their names. This breaks comparison " + "interpretation.") from None parsing_directory = \ raw[x].split(" ")[1].lstrip(gettempdir()).lstrip( self.WORK_DIR).lstrip("/old/") if parsing_directory != "": parsing_directory += "/" if raw[x][:(8 + len( gettempdir() + self.WORK_DIR + "/old/"))] == "Only in " + \ gettempdir() + self.WORK_DIR + "/old/": for_extend = raw[x].lstrip( "Only in " + gettempdir() + self.WORK_DIR + "/old/" + parsing_directory).strip("[]").split(", ") for y in range(0, len(for_extend)): for_extend[y] = parsing_directory + \ for_extend[y].strip("'") dump[0].extend(for_extend) if raw[x][:(8 + len( gettempdir() + self.WORK_DIR + "/new/"))] == "Only in " + \ gettempdir() + self.WORK_DIR + "/new/": for_extend = raw[x].lstrip( "Only in " + gettempdir() + self.WORK_DIR + "/new/" + parsing_directory).strip("[]").split(", ") for y in range(0, len(for_extend)): for_extend[y] = parsing_directory + \ for_extend[y].strip("'") dump[1].extend(for_extend) if raw[x][:18] == "Identical files : ": for_extend = \ raw[x].lstrip("Identical files : ").strip("[]").split(", ") for y in range(0, len(for_extend)): for_extend[y] = \ parsing_directory + for_extend[y].strip("'") dump[2].extend(for_extend) if raw[x][:18] == "Differing files : ": for_extend = \ raw[x].lstrip("Differing files : ").strip("[]").split(", ") for y in range(0, len(for_extend)): for_extend[y] = \ parsing_directory + for_extend[y].strip("'") dump[3].extend(for_extend) return dump class Supply: """Main class for bandage.Supply instances, which checks for new patches \ on remotes.""" def __init__(self, remote: str, version_file: str): """ Check given remote HTTP endpoint for new patches. Inorganic and for \ robots. If no exception is thrown, dumps status and patch \ download URL to self.result and self.patch_web_source \ respectively, which can be retrieved as a list through \ method bandage.Supply.realize. The remote parameter should be an HTTPS/HTTP address pointing to a web server, or a Github release tagged BANDAGE. 
For pointing to a Github repository's contents, use raw.githubusercontent.com. If bandage.Supply succeeded in finding headers and looking up lineage series, however finds the current version to be the latest, self.result is 0. If bandage.Supply succeeded in finding headers and looking up lineage series, and finds a patch to be applied for updating, self.result is -1 with self.patch_web_source as web address to patch file. If bandage.Supply succeeded in finding headers and looking up lineage series, however finds no patch available to upgrade with, self.result is 1. If bandage.Supply raised an exception, self.result and self.patch_web_source are None. Preliminary information if obtained is dumped into self.pre_collect. Contains version list and patchesc available, as list object. Retrieved through bandage.Supply.pre_collect_dump. See documentation for more information. :param remote: web address of patch host :type remote: str :param version_file: path to version file :type version_file: str """ self.patch_web_source = None self.result = 1 self.remote = remote self.version_file = version_file try: with open(version_file) as version_handle: self.version = version_handle.read() except FileNotFoundError as ParentException: raise Exceptions.VersionError( "VERSION file directed by path " + self.version_file + " does not exist.") from ParentException if "https://" not in self.remote[:8] and "http://" not in \ self.remote[:8]: raise Exceptions.RemoteError( "Supplied remote " + self.remote + " is not a HTTP/HTTPS web address.") if self.remote[-1:] != "/": self.remote += "/" if "https://github.com" == self.remote[:18] or "http://github.com" == \ self.remote[:18]: if self.remote[-22:] == "/releases/tag/BANDAGE/": self.pre_collect = [Backend.fetch( self.remote.rstrip("/tag/BANDAGE/") + "/download/BANDAGE/BANDAGE_PATCHES").data.decode( encoding="utf-8", errors="replace").split("\n"), Backend.fetch( self.remote.rstrip("/tag/BANDAGE/") + "/download/BANDAGE/BANDAGE_LINEAGE").data.decode( encoding="utf-8", errors="replace").split("\n")] for x in range(0, len(self.pre_collect[1])): if self.version == self.pre_collect[1][x].rstrip("\r"): self.version_gap = x break if self.version_gap is None: raise Exceptions.VersionError( "Version " + self.version + " does not exist in remote's lineage header.") elif self.version_gap == 0: self.result = 0 else: compatible_sources = [] for x in range(0, len(self.pre_collect[0])): if self.pre_collect[0][x].split( "||")[0].split(" -> ")[0] == self.version: compatible_sources.append( self.pre_collect[0][x].split("||")[0]) if not compatible_sources: self.result = 1 else: for x in self.pre_collect[0]: for y in compatible_sources: if y.split(" -> ")[1] == \ x.split("||")[0].split(" -> ")[1]: self.result = -1 self.patch_web_source = \ self.remote.rstrip("/BANDAGE/") + \ path.join("/download/BANDAGE/", x.split("||")[1]) else: raise Exceptions.RemoteError( "Remote defined as " + self.remote + " is not supported.") else: self.pre_collect = [ Backend.fetch( self.remote + "BANDAGE_PATCHES" ).data.decode(encoding="utf-8", errors="replace").split( "\n"), Backend.fetch( self.remote + "BANDAGE_LINEAGE" ).data.decode(encoding="utf-8", errors="replace").split( "\n")] for x in range(0, len(self.pre_collect[1])): if self.version == self.pre_collect[1][x]: self.version_gap = x break if self.version_gap is None: raise Exceptions.VersionError( "Version " + self.version + " does not exist in remote's lineage header.") elif self.version_gap == 0: self.result = 0 else: compatible_sources = [] for x 
in range(0, len(self.pre_collect[0])): if self.pre_collect[0][x].split( "||")[0].split(" -> ")[0] == self.version: compatible_sources.append( self.pre_collect[0][x].split("||")[0]) if not compatible_sources: self.result = 1 else: for x in self.pre_collect[0]: for y in compatible_sources: if y.split(" -> ")[1] == \ x.split("||")[0].split(" -> ")[1]: self.result = -1 if x.split("||")[1][:8] == "https://" or \ "http://" in x.split("||")[1][:8]: self.patch_web_source = x.split("||")[1] else: self.patch_web_source = path.join( self.remote, x.split("||")[1]) def realize(self) -> list: """ Return list containing self.result and self.patch_web_source. :return: [self.result, self.patch_web_source] :rtype: list """ return [self.result, self.patch_web_source] def pre_collect_dump(self) -> list: """ Return self.pre_collect_dump. :return: pre_collect_dump :rtype: list """ return self.pre_collect
[ "filecmp.cmpfiles", "io.StringIO", "json.load", "os.path.isdir", "tempfile.gettempdir", "time.time", "os.path.isfile", "contextlib.redirect_stdout", "urllib3.PoolManager", "os.path.splitext", "os.path.split", "os.path.join", "os.listdir" ]
[((820, 893), 'filecmp.cmpfiles', 'filecmp.cmpfiles', (['self.left', 'self.right', 'self.common_files'], {'shallow': '(False)'}), '(self.left, self.right, self.common_files, shallow=False)\n', (836, 893), False, 'import filecmp\n'), ((1429, 1450), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (1448, 1450), False, 'import urllib3\n'), ((24142, 24152), 'io.StringIO', 'StringIO', ([], {}), '()\n', (24150, 24152), False, 'from io import StringIO\n'), ((15355, 15378), 'os.path.isdir', 'path.isdir', (['output_path'], {}), '(output_path)\n', (15365, 15378), False, 'from os import mkdir, path, remove, listdir\n'), ((24166, 24189), 'contextlib.redirect_stdout', 'redirect_stdout', (['handle'], {}), '(handle)\n', (24181, 24189), False, 'from contextlib import redirect_stdout\n'), ((2585, 2605), 'os.path.split', 'path.split', (['previous'], {}), '(previous)\n', (2595, 2605), False, 'from os import mkdir, path, remove, listdir\n'), ((5723, 5746), 'os.path.isfile', 'path.isfile', (['self.patch'], {}), '(self.patch)\n', (5734, 5746), False, 'from os import mkdir, path, remove, listdir\n'), ((5915, 5938), 'os.path.isdir', 'path.isdir', (['self.target'], {}), '(self.target)\n', (5925, 5938), False, 'from os import mkdir, path, remove, listdir\n'), ((5955, 5975), 'os.listdir', 'listdir', (['self.target'], {}), '(self.target)\n', (5962, 5975), False, 'from os import mkdir, path, remove, listdir\n'), ((6162, 6174), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (6172, 6174), False, 'from tempfile import gettempdir\n'), ((8170, 8196), 'json.load', 'jsonload', (['changelog_handle'], {}), '(changelog_handle)\n', (8178, 8196), True, 'from json import load as jsonload\n'), ((13423, 13435), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (13433, 13435), False, 'from tempfile import gettempdir\n'), ((13872, 13884), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (13882, 13884), False, 'from tempfile import gettempdir\n'), ((16099, 16128), 'os.path.isfile', 'path.isfile', (['self.release_old'], {}), '(self.release_old)\n', (16110, 16128), False, 'from os import mkdir, path, remove, listdir\n'), ((16847, 16876), 'os.path.isfile', 'path.isfile', (['self.release_new'], {}), '(self.release_new)\n', (16858, 16876), False, 'from os import mkdir, path, remove, listdir\n'), ((23104, 23116), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23114, 23116), False, 'from tempfile import gettempdir\n'), ((23569, 23581), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23579, 23581), False, 'from tempfile import gettempdir\n'), ((2690, 2707), 'os.path.split', 'path.split', (['whole'], {}), '(whole)\n', (2700, 2707), False, 'from os import mkdir, path, remove, listdir\n'), ((2756, 2776), 'os.path.split', 'path.split', (['previous'], {}), '(previous)\n', (2766, 2776), False, 'from os import mkdir, path, remove, listdir\n'), ((5616, 5628), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (5626, 5628), False, 'from tempfile import gettempdir\n'), ((5665, 5690), 'os.path.splitext', 'path.splitext', (['self.patch'], {}), '(self.patch)\n', (5678, 5690), False, 'from os import mkdir, path, remove, listdir\n'), ((10833, 10878), 'os.path.join', 'path.join', (['self.target', "self.change['add'][x]"], {}), "(self.target, self.change['add'][x])\n", (10842, 10878), False, 'from os import mkdir, path, remove, listdir\n'), ((11136, 11181), 'os.path.join', 'path.join', (['self.target', "self.change['add'][x]"], {}), "(self.target, self.change['add'][x])\n", (11145, 11181), False, 'from os 
import mkdir, path, remove, listdir\n'), ((11266, 11315), 'os.path.join', 'path.join', (['self.target', "self.change['replace'][x]"], {}), "(self.target, self.change['replace'][x])\n", (11275, 11315), False, 'from os import mkdir, path, remove, listdir\n'), ((11386, 11435), 'os.path.join', 'path.join', (['self.target', "self.change['replace'][x]"], {}), "(self.target, self.change['replace'][x])\n", (11395, 11435), False, 'from os import mkdir, path, remove, listdir\n'), ((11584, 11633), 'os.path.join', 'path.join', (['self.target', "self.change['replace'][x]"], {}), "(self.target, self.change['replace'][x])\n", (11593, 11633), False, 'from os import mkdir, path, remove, listdir\n'), ((12294, 12342), 'os.path.join', 'path.join', (['self.target', "self.change['remove'][x]"], {}), "(self.target, self.change['remove'][x])\n", (12303, 12342), False, 'from os import mkdir, path, remove, listdir\n'), ((12412, 12460), 'os.path.join', 'path.join', (['self.target', "self.change['remove'][x]"], {}), "(self.target, self.change['remove'][x])\n", (12421, 12460), False, 'from os import mkdir, path, remove, listdir\n'), ((16035, 16066), 'os.path.splitext', 'path.splitext', (['self.release_old'], {}), '(self.release_old)\n', (16048, 16066), False, 'from os import mkdir, path, remove, listdir\n'), ((16783, 16814), 'os.path.splitext', 'path.splitext', (['self.release_new'], {}), '(self.release_new)\n', (16796, 16814), False, 'from os import mkdir, path, remove, listdir\n'), ((17076, 17088), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (17086, 17088), False, 'from tempfile import gettempdir\n'), ((17180, 17192), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (17190, 17192), False, 'from tempfile import gettempdir\n'), ((23610, 23622), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23620, 23622), False, 'from tempfile import gettempdir\n'), ((23660, 23672), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23670, 23672), False, 'from tempfile import gettempdir\n'), ((23710, 23722), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23720, 23722), False, 'from tempfile import gettempdir\n'), ((23762, 23774), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23772, 23774), False, 'from tempfile import gettempdir\n'), ((23818, 23830), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (23828, 23830), False, 'from tempfile import gettempdir\n'), ((7310, 7338), 'os.path.join', 'path.join', (['target', '"""VERSION"""'], {}), "(target, 'VERSION')\n", (7319, 7338), False, 'from os import mkdir, path, remove, listdir\n'), ((10485, 10510), 'os.path.join', 'path.join', (['self.target', 'a'], {}), '(self.target, a)\n', (10494, 10510), False, 'from os import mkdir, path, remove, listdir\n'), ((10548, 10573), 'os.path.join', 'path.join', (['self.target', 'a'], {}), '(self.target, a)\n', (10557, 10573), False, 'from os import mkdir, path, remove, listdir\n'), ((11663, 11712), 'os.path.join', 'path.join', (['self.target', "self.change['replace'][x]"], {}), "(self.target, self.change['replace'][x])\n", (11672, 11712), False, 'from os import mkdir, path, remove, listdir\n'), ((11784, 11833), 'os.path.join', 'path.join', (['self.target', "self.change['replace'][x]"], {}), "(self.target, self.change['replace'][x])\n", (11793, 11833), False, 'from os import mkdir, path, remove, listdir\n'), ((11982, 12031), 'os.path.join', 'path.join', (['self.target', "self.change['replace'][x]"], {}), "(self.target, self.change['replace'][x])\n", (11991, 12031), False, 'from os import mkdir, path, 
remove, listdir\n'), ((12524, 12572), 'os.path.join', 'path.join', (['self.target', "self.change['remove'][x]"], {}), "(self.target, self.change['remove'][x])\n", (12533, 12572), False, 'from os import mkdir, path, remove, listdir\n'), ((12645, 12693), 'os.path.join', 'path.join', (['self.target', "self.change['remove'][x]"], {}), "(self.target, self.change['remove'][x])\n", (12654, 12693), False, 'from os import mkdir, path, remove, listdir\n'), ((15976, 15988), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (15986, 15988), False, 'from tempfile import gettempdir\n'), ((16724, 16736), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (16734, 16736), False, 'from tempfile import gettempdir\n'), ((19437, 19449), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (19447, 19449), False, 'from tempfile import gettempdir\n'), ((21761, 21773), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21771, 21773), False, 'from tempfile import gettempdir\n'), ((5428, 5440), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (5438, 5440), False, 'from tempfile import gettempdir\n'), ((5481, 5506), 'os.path.splitext', 'path.splitext', (['self.patch'], {}), '(self.patch)\n', (5494, 5506), False, 'from os import mkdir, path, remove, listdir\n'), ((8050, 8062), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (8060, 8062), False, 'from tempfile import gettempdir\n'), ((8738, 8784), 'os.path.join', 'path.join', (['self.target', "self.change['keep'][x]"], {}), "(self.target, self.change['keep'][x])\n", (8747, 8784), False, 'from os import mkdir, path, remove, listdir\n'), ((8869, 8915), 'os.path.join', 'path.join', (['self.target', "self.change['keep'][x]"], {}), "(self.target, self.change['keep'][x])\n", (8878, 8915), False, 'from os import mkdir, path, remove, listdir\n'), ((10602, 10614), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (10612, 10614), False, 'from tempfile import gettempdir\n'), ((10906, 10918), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (10916, 10918), False, 'from tempfile import gettempdir\n'), ((15789, 15820), 'os.path.splitext', 'path.splitext', (['self.release_old'], {}), '(self.release_old)\n', (15802, 15820), False, 'from os import mkdir, path, remove, listdir\n'), ((16537, 16568), 'os.path.splitext', 'path.splitext', (['self.release_new'], {}), '(self.release_new)\n', (16550, 16568), False, 'from os import mkdir, path, remove, listdir\n'), ((17278, 17290), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (17288, 17290), False, 'from tempfile import gettempdir\n'), ((17457, 17469), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (17467, 17469), False, 'from tempfile import gettempdir\n'), ((18250, 18262), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (18260, 18262), False, 'from tempfile import gettempdir\n'), ((18441, 18453), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (18451, 18453), False, 'from tempfile import gettempdir\n'), ((22056, 22068), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (22066, 22068), False, 'from tempfile import gettempdir\n'), ((22436, 22448), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (22446, 22448), False, 'from tempfile import gettempdir\n'), ((22574, 22586), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (22584, 22586), False, 'from tempfile import gettempdir\n'), ((22944, 22956), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (22954, 22956), False, 'from tempfile import gettempdir\n'), ((25309, 25321), 'tempfile.gettempdir', 
'gettempdir', ([], {}), '()\n', (25319, 25321), False, 'from tempfile import gettempdir\n'), ((25865, 25877), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (25875, 25877), False, 'from tempfile import gettempdir\n'), ((6276, 6288), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (6286, 6288), False, 'from tempfile import gettempdir\n'), ((7067, 7079), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (7077, 7079), False, 'from tempfile import gettempdir\n'), ((10719, 10731), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (10729, 10731), False, 'from tempfile import gettempdir\n'), ((11022, 11034), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (11032, 11034), False, 'from tempfile import gettempdir\n'), ((11462, 11474), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (11472, 11474), False, 'from tempfile import gettempdir\n'), ((15726, 15738), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (15736, 15738), False, 'from tempfile import gettempdir\n'), ((16474, 16486), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (16484, 16486), False, 'from tempfile import gettempdir\n'), ((20123, 20135), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20133, 20135), False, 'from tempfile import gettempdir\n'), ((20243, 20255), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20253, 20255), False, 'from tempfile import gettempdir\n'), ((20327, 20339), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20337, 20339), False, 'from tempfile import gettempdir\n'), ((20443, 20455), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20453, 20455), False, 'from tempfile import gettempdir\n'), ((20562, 20574), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20572, 20574), False, 'from tempfile import gettempdir\n'), ((20646, 20658), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20656, 20658), False, 'from tempfile import gettempdir\n'), ((21122, 21134), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21132, 21134), False, 'from tempfile import gettempdir\n'), ((21242, 21254), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21252, 21254), False, 'from tempfile import gettempdir\n'), ((21326, 21338), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21336, 21338), False, 'from tempfile import gettempdir\n'), ((21446, 21458), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21456, 21458), False, 'from tempfile import gettempdir\n'), ((21565, 21577), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21575, 21577), False, 'from tempfile import gettempdir\n'), ((21649, 21661), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21659, 21661), False, 'from tempfile import gettempdir\n'), ((24210, 24222), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (24220, 24222), False, 'from tempfile import gettempdir\n'), ((24250, 24262), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (24260, 24262), False, 'from tempfile import gettempdir\n'), ((9281, 9293), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (9291, 9293), False, 'from tempfile import gettempdir\n'), ((9430, 9442), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (9440, 9442), False, 'from tempfile import gettempdir\n'), ((9778, 9790), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (9788, 9790), False, 'from tempfile import gettempdir\n'), ((9936, 9948), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (9946, 9948), False, 'from tempfile import gettempdir\n'), ((11860, 11872), 
'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (11870, 11872), False, 'from tempfile import gettempdir\n'), ((13794, 13800), 'time.time', 'time', ([], {}), '()\n', (13798, 13800), False, 'from time import time\n'), ((19930, 19942), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (19940, 19942), False, 'from tempfile import gettempdir\n'), ((20046, 20058), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20056, 20058), False, 'from tempfile import gettempdir\n'), ((20921, 20933), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (20931, 20933), False, 'from tempfile import gettempdir\n'), ((21041, 21053), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (21051, 21053), False, 'from tempfile import gettempdir\n'), ((23474, 23480), 'time.time', 'time', ([], {}), '()\n', (23478, 23480), False, 'from time import time\n'), ((25010, 25022), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (25020, 25022), False, 'from tempfile import gettempdir\n'), ((25229, 25241), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (25239, 25241), False, 'from tempfile import gettempdir\n'), ((25785, 25797), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (25795, 25797), False, 'from tempfile import gettempdir\n'), ((25426, 25438), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (25436, 25438), False, 'from tempfile import gettempdir\n'), ((25982, 25994), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (25992, 25994), False, 'from tempfile import gettempdir\n')]
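
To make the Supply contract above concrete, the sketch below replays its header parsing on in-memory strings instead of a fetched remote: BANDAGE_LINEAGE lists versions newest-first (index 0 is the latest, matching the version_gap == 0 check), and each BANDAGE_PATCHES line is "old -> new||patch-file". The version strings and archive names here are invented for illustration.

# Minimal sketch of Supply's header handling on literal strings; the
# "demo" versions and patch file names are made up, not from the source.
LINEAGE = "2.0\n1.1\n1.0"                      # newest version first
PATCHES = ("1.0 -> 1.1||demo_1.0_to_1.1_bandage_patch.zip\n"
           "1.1 -> 2.0||demo_1.1_to_2.0_bandage_patch.zip")

def check_for_patch(current):
    """Mirror Supply's result convention: 0 latest, -1 patch found, 1 none."""
    lineage = LINEAGE.split("\n")
    if current not in lineage:
        raise ValueError("version missing from remote's lineage header")
    if lineage.index(current) == 0:
        return 0, None                         # already the latest release
    for line in PATCHES.split("\n"):
        versions, patch_file = line.split("||")
        old, _new = versions.split(" -> ")
        if old == current:
            return -1, patch_file              # upgrade patch available
    return 1, None                             # no patch covers this version

print(check_for_patch("1.1"))   # (-1, 'demo_1.1_to_2.0_bandage_patch.zip')
print(check_for_patch("2.0"))   # (0, None)
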
import json from fhir_types import FHIR_CodeableConcept from google.fhir.r4.json_format import json_fhir_string_to_proto from proto.google.fhir.proto.r4.core import datatypes_pb2 from fhir_helpers.resources.codeable_concept import ( CodeableConceptDict, CodeableConceptProto, ) def test_get_codings() -> None: test_concept: FHIR_CodeableConcept = { "coding": [ { "system": "loinc.org", "code": "14", "display": "Body1", }, { "system": "oinc.org", "code": "45", "display": "Height", }, {"system": "loinc.org", "code": "456", "display": "Body"}, ], "text": "Body Height", } concept_dict = CodeableConceptDict(test_concept) concept_proto = CodeableConceptProto( json_fhir_string_to_proto( json.dumps(test_concept), datatypes_pb2.CodeableConcept ) ) assert concept_dict.has_coding("14") assert concept_proto.has_coding("14") assert not concept_dict.has_coding("X") assert not concept_proto.has_coding("X") assert concept_dict.has_coding("14", system_str="loinc.org") assert concept_proto.has_coding("14", system_str="loinc.org") assert not concept_dict.has_coding("14", system_str="C.org") assert not concept_proto.has_coding("14", system_str="C.org") assert concept_dict.has_coding("14", display_str="Body1") assert concept_proto.has_coding("14", display_str="Body1") assert not concept_dict.has_coding("1X", display_str="Body1") assert not concept_proto.has_coding("1X", display_str="Body1") assert not concept_dict.has_coding("14", display_str="Height") assert not concept_proto.has_coding("14", display_str="Height")
[ "fhir_helpers.resources.codeable_concept.CodeableConceptDict", "json.dumps" ]
[((793, 826), 'fhir_helpers.resources.codeable_concept.CodeableConceptDict', 'CodeableConceptDict', (['test_concept'], {}), '(test_concept)\n', (812, 826), False, 'from fhir_helpers.resources.codeable_concept import CodeableConceptDict, CodeableConceptProto\n'), ((916, 940), 'json.dumps', 'json.dumps', (['test_concept'], {}), '(test_concept)\n', (926, 940), False, 'import json\n')]
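
The assertions above pin down has_coding's matching rules: the code must match, while system_str and display_str only constrain the match when supplied. fhir_helpers' real implementation is not shown in this file, so the following is only an illustrative re-derivation over a plain CodeableConcept dict.

# Illustrative only -- not fhir_helpers' actual code: a has_coding lookup
# over a raw FHIR CodeableConcept dict with the semantics the test expects.
def has_coding(concept, code, system_str=None, display_str=None):
    for coding in concept.get("coding", []):
        if coding.get("code") != code:
            continue
        if system_str is not None and coding.get("system") != system_str:
            continue
        if display_str is not None and coding.get("display") != display_str:
            continue
        return True
    return False

concept = {"coding": [{"system": "loinc.org", "code": "14", "display": "Body1"}]}
assert has_coding(concept, "14")
assert has_coding(concept, "14", system_str="loinc.org")
assert not has_coding(concept, "14", display_str="Height")
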
""" Copyright 2019 <NAME> Copyright 2019 The University of Texas at Austin Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os from .arch_utils import layerUtil arch = {} for i in range(1, 6): conv_idx = (i-1)*2 bn_idx = conv_idx +1 arch[conv_idx] = {'name':'conv'+str(i) , 'kernel_size':3, 'stride':1, 'padding':1, 'bias':False} arch[bn_idx] = {'name':'bn'+str(i)} arch[10] = {'name':'pool', 'kernel_size':2, 'stride':2} arch[11] = {'name':'relu'} arch[12] = {'name':'fc', 'out_chs':'num_classes'} def _genDenseArchVGG8BN(model, out_f_dir1, out_f_dir2, arch_name, dense_chs, chs_map=None): print ("[INFO] Generating a new dense architecture...") # File heading ctx = 'import torch.nn as nn\n' ctx += '__all__ = [\'vgg8_bn_flat\']\n' ctx += 'class VGG8(nn.Module):\n' ctx += '\tdef __init__(self, num_classes=10):\n' ctx += '\t\tsuper(VGG8, self).__init__()\n' lyr = layerUtil(model, dense_chs) # Layer definition for idx in sorted(arch): ctx += lyr.getLayerDef(arch[idx]) # Architecture sequential ctx += '\tdef forward(self, x):\n' ctx += lyr.forward('conv1') ctx += lyr.forward('bn1') ctx += lyr.forward('relu') ctx += lyr.forward('pool') ctx += lyr.forward('conv2') ctx += lyr.forward('bn2') ctx += lyr.forward('relu') ctx += lyr.forward('pool') ctx += lyr.forward('conv3') ctx += lyr.forward('bn3') ctx += lyr.forward('relu') ctx += lyr.forward('pool') ctx += lyr.forward('conv4') ctx += lyr.forward('bn4') ctx += lyr.forward('relu') ctx += lyr.forward('pool') ctx += lyr.forward('conv5') ctx += lyr.forward('bn5') ctx += lyr.forward('relu') ctx += lyr.forward('pool') ctx += '\t\tx = x.view(x.size(0), -1)\n' ctx += lyr.forward('fc') ctx += '\t\treturn x\n' # AlexNet definition ctx += 'def vgg8_bn_flat(**kwargs):\n' ctx += '\tmodel = VGG8(**kwargs)\n' ctx += '\treturn model\n' if not os.path.exists(out_f_dir2): os.makedirs(out_f_dir2) f_out1 = open(os.path.join(out_f_dir1, 'vgg8_bn_flat.py'),'w') f_out1.write(ctx) f_out = open(os.path.join(out_f_dir2, arch_name),'w') f_out.write(ctx)
[ "os.path.join", "os.path.exists", "os.makedirs" ]
[((2405, 2431), 'os.path.exists', 'os.path.exists', (['out_f_dir2'], {}), '(out_f_dir2)\n', (2419, 2431), False, 'import os\n'), ((2439, 2462), 'os.makedirs', 'os.makedirs', (['out_f_dir2'], {}), '(out_f_dir2)\n', (2450, 2462), False, 'import os\n'), ((2480, 2523), 'os.path.join', 'os.path.join', (['out_f_dir1', '"""vgg8_bn_flat.py"""'], {}), "(out_f_dir1, 'vgg8_bn_flat.py')\n", (2492, 2523), False, 'import os\n'), ((2564, 2599), 'os.path.join', 'os.path.join', (['out_f_dir2', 'arch_name'], {}), '(out_f_dir2, arch_name)\n', (2576, 2599), False, 'import os\n')]
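
Since the generator above only emits source text, it may help to see roughly how the written vgg8_bn_flat.py behaves. The sketch below is an equivalent hand-written module, not the literal generated code: the channel widths are placeholders (the real ones come from dense_chs via layerUtil), and a 32x32 input is assumed so that five 2x2 poolings reach a 1x1 map before the classifier.

import torch.nn as nn

class VGG8(nn.Module):
    """Behavioural sketch of the generated flat VGG8; widths are guesses."""
    def __init__(self, num_classes=10):
        super(VGG8, self).__init__()
        chs = [3, 64, 128, 256, 512, 512]   # placeholder channel counts
        for i in range(1, 6):
            setattr(self, 'conv%d' % i,
                    nn.Conv2d(chs[i - 1], chs[i], kernel_size=3,
                              stride=1, padding=1, bias=False))
            setattr(self, 'bn%d' % i, nn.BatchNorm2d(chs[i]))
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(chs[5], num_classes)   # 32x32 input -> 1x1 maps

    def forward(self, x):
        # same per-block order the generator writes: conv -> bn -> relu -> pool
        for i in range(1, 6):
            x = getattr(self, 'conv%d' % i)(x)
            x = getattr(self, 'bn%d' % i)(x)
            x = self.pool(self.relu(x))
        x = x.view(x.size(0), -1)
        return self.fc(x)

def vgg8_bn_flat(**kwargs):
    return VGG8(**kwargs)
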
'''Autogenerated by xml_generate script, do not edit!''' from OpenGL import platform as _p, arrays # Code generation uses this from OpenGL.raw.GLES2 import _types as _cs # End users want this... from OpenGL.raw.GLES2._types import * from OpenGL.raw.GLES2 import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'GLES2_EXT_unpack_subimage' def _f( function ): return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_EXT_unpack_subimage',error_checker=_errors._error_checker) GL_UNPACK_ROW_LENGTH_EXT=_C('GL_UNPACK_ROW_LENGTH_EXT',0x0CF2) GL_UNPACK_SKIP_PIXELS_EXT=_C('GL_UNPACK_SKIP_PIXELS_EXT',0x0CF4) GL_UNPACK_SKIP_ROWS_EXT=_C('GL_UNPACK_SKIP_ROWS_EXT',0x0CF3)
[ "OpenGL.constant.Constant", "OpenGL.platform.createFunction" ]
[((555, 591), 'OpenGL.constant.Constant', '_C', (['"""GL_UNPACK_ROW_LENGTH_EXT"""', '(3314)'], {}), "('GL_UNPACK_ROW_LENGTH_EXT', 3314)\n", (557, 591), True, 'from OpenGL.constant import Constant as _C\n'), ((620, 657), 'OpenGL.constant.Constant', '_C', (['"""GL_UNPACK_SKIP_PIXELS_EXT"""', '(3316)'], {}), "('GL_UNPACK_SKIP_PIXELS_EXT', 3316)\n", (622, 657), True, 'from OpenGL.constant import Constant as _C\n'), ((684, 719), 'OpenGL.constant.Constant', '_C', (['"""GL_UNPACK_SKIP_ROWS_EXT"""', '(3315)'], {}), "('GL_UNPACK_SKIP_ROWS_EXT', 3315)\n", (686, 719), True, 'from OpenGL.constant import Constant as _C\n'), ((417, 534), 'OpenGL.platform.createFunction', '_p.createFunction', (['function', '_p.PLATFORM.GLES2', '"""GLES2_EXT_unpack_subimage"""'], {'error_checker': '_errors._error_checker'}), "(function, _p.PLATFORM.GLES2, 'GLES2_EXT_unpack_subimage',\n error_checker=_errors._error_checker)\n", (434, 534), True, 'from OpenGL import platform as _p, arrays\n')]
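
These constants exist so a GLES2 client can unpack a sub-rectangle out of a larger client-side image. A hypothetical use is sketched below; it assumes a current GLES2 context, a driver that advertises GL_EXT_unpack_subimage, and PyOpenGL's usual module layout for the imports.

# Hypothetical usage sketch; requires a live GLES2 context and driver
# support, and assumes these PyOpenGL import paths exist as shown.
from OpenGL.GLES2 import (glPixelStorei, glTexSubImage2D,
                          GL_TEXTURE_2D, GL_RGBA, GL_UNSIGNED_BYTE)
from OpenGL.GLES2.EXT.unpack_subimage import (GL_UNPACK_ROW_LENGTH_EXT,
                                              GL_UNPACK_SKIP_PIXELS_EXT,
                                              GL_UNPACK_SKIP_ROWS_EXT)

def upload_subrect(pixels, full_row_width, x, y, w, h):
    glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, full_row_width)  # source stride
    glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, x)              # left offset
    glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, y)                # top offset
    glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h,
                    GL_RGBA, GL_UNSIGNED_BYTE, pixels)
    # restore defaults so later uploads see tightly packed data
    glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 0)
    glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0)
    glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0)
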
import numpy as np import cv2 # import faces CascadeClassifier face_cascade = cv2.CascadeClassifier("/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml") def find_marker(image): gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray,(5,5),0) edged = cv2.Canny(gray,35,125) #find the contours (cnts,hierarchy) = cv2.findContours(edged.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) c = max(cnts, key = cv2.contourArea) # compute the bounding box return cv2.minAreaRect(c) def distance_to_camera(knownWidth,focalLength,perWidth): return (knownWidth * focalLength) / perWidth def face_region(image): gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 5, minSize = (30,30), flags = cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT) return faces # Initialize the known parameters KNOWN_DISTANCE = 35.0 KNOWN_WIDTH = 10.0 IMAGE_PATHS = ["images/1.jpg"] image = cv2.imread(IMAGE_PATHS[0]) face = face_region(image) focalLength = (face[0][2] * KNOWN_DISTANCE) / KNOWN_WIDTH # run camera cap = cv2.VideoCapture(0) cap.read() while True: ret,image = cap.read() #get image from camera faces = face_region(image) #mark the region with rectangle for (x,y,w,h) in faces: cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2) distance = distance_to_camera(KNOWN_WIDTH,focalLength,w) cv2.putText(image, "%.2fcm" % (distance), ( x+ 1/2 * w, y + h + 50), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 2) cv2.imshow("image", image) if cv2.waitKey(5)%0x100 == 27: break cap.release() cv2.destroyAllWindows()
[ "cv2.minAreaRect", "cv2.GaussianBlur", "cv2.Canny", "cv2.putText", "cv2.cvtColor", "cv2.waitKey", "cv2.imshow", "cv2.VideoCapture", "cv2.imread", "cv2.rectangle", "cv2.CascadeClassifier", "cv2.destroyAllWindows" ]
[((79, 181), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"""'], {}), "(\n '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')\n", (100, 181), False, 'import cv2\n'), ((1149, 1175), 'cv2.imread', 'cv2.imread', (['IMAGE_PATHS[0]'], {}), '(IMAGE_PATHS[0])\n', (1159, 1175), False, 'import cv2\n'), ((1280, 1299), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1296, 1299), False, 'import cv2\n'), ((1806, 1829), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1827, 1829), False, 'import cv2\n'), ((211, 250), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (223, 250), False, 'import cv2\n'), ((259, 292), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (275, 292), False, 'import cv2\n'), ((300, 324), 'cv2.Canny', 'cv2.Canny', (['gray', '(35)', '(125)'], {}), '(gray, 35, 125)\n', (309, 324), False, 'import cv2\n'), ((514, 532), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (529, 532), False, 'import cv2\n'), ((673, 712), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (685, 712), False, 'import cv2\n'), ((1722, 1748), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (1732, 1748), False, 'import cv2\n'), ((1467, 1527), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (1480, 1527), False, 'import cv2\n'), ((1582, 1702), 'cv2.putText', 'cv2.putText', (['image', "('%.2fcm' % distance)", '(x + 1 / 2 * w, y + h + 50)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2.0)', '(0, 255, 0)', '(2)'], {}), "(image, '%.2fcm' % distance, (x + 1 / 2 * w, y + h + 50), cv2.\n FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 2)\n", (1593, 1702), False, 'import cv2\n'), ((1754, 1768), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (1765, 1768), False, 'import cv2\n')]
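
The two ratios in the script above are plain triangle similarity: a one-off calibration shot fixes focalLength = perceivedWidth * knownDistance / knownWidth, after which distance = knownWidth * focalLength / perceivedWidth. A worked example with made-up pixel widths:

# Worked example of the calibration/measurement formulas; the pixel widths
# are invented, while the 35 cm / 10 cm constants match the script above.
KNOWN_DISTANCE = 35.0   # cm, camera-to-face distance in the reference photo
KNOWN_WIDTH = 10.0      # cm, assumed real width of a face

def calibrate(per_width_px):
    return (per_width_px * KNOWN_DISTANCE) / KNOWN_WIDTH

def distance_to_camera(known_width, focal, per_width_px):
    return (known_width * focal) / per_width_px

focal = calibrate(200.0)                              # 200 px at 35 cm -> 700.0
print(distance_to_camera(KNOWN_WIDTH, focal, 100.0))  # half as wide  -> 70.0 cm
print(distance_to_camera(KNOWN_WIDTH, focal, 400.0))  # twice as wide -> 17.5 cm
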
""" Hackernews data via Dogsheep [[hacker-news-to-sqlite][https://github.com/dogsheep/hacker-news-to-sqlite]] """ from __future__ import annotations from dataclasses import dataclass from datetime import datetime from typing import Iterator, Sequence, Optional, Dict from my.config import hackernews as user_config from ..core import Paths @dataclass class config(user_config.dogsheep): # paths[s]/glob to the dogsheep database export_path: Paths # todo so much boilerplate... really need some common wildcard imports?... # at least for stuff which realistically is used in each module like get_files/Sequence/Paths/dataclass/Iterator/Optional from ..core import get_files from pathlib import Path def inputs() -> Sequence[Path]: return get_files(config.export_path) from .common import hackernews_link # TODO not sure if worth splitting into Comment and Story? @dataclass(unsafe_hash=True) class Item: id: str type: str # TODO is it urc?? created: datetime title: Optional[str] # only present for Story text_html: Optional[str] # should be present for Comment and might for Story url: Optional[str] # might be present for Story # todo process 'deleted'? fields? # todo process 'parent'? @property def permalink(self) -> str: return hackernews_link(self.id) from ..core.error import Res from ..core.dataset import connect_readonly def items() -> Iterator[Res[Item]]: f = max(inputs()) with connect_readonly(f) as db: items = db['items'] for r in items.all(order_by='time'): yield Item( id=r['id'], type=r['type'], created=datetime.fromtimestamp(r['time']), title=r['title'], # todo hmm maybe a method to stip off html tags would be nice text_html=r['text'], url=r['url'], )
[ "dataclasses.dataclass", "datetime.datetime.fromtimestamp" ]
[((885, 912), 'dataclasses.dataclass', 'dataclass', ([], {'unsafe_hash': '(True)'}), '(unsafe_hash=True)\n', (894, 912), False, 'from dataclasses import dataclass\n'), ((1684, 1717), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["r['time']"], {}), "(r['time'])\n", (1706, 1717), False, 'from datetime import datetime\n')]
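
A short usage sketch for the module above, assuming it lives at my.hackernews.dogsheep in an HPI checkout and that config.export_path points at an existing hacker-news-to-sqlite database:

# Usage sketch; the import path and a configured export are assumptions.
from my.hackernews.dogsheep import items

for item in items():
    if isinstance(item, Exception):      # items() yields Res[Item]
        continue
    if item.type == 'story':
        print(item.created, item.title or '<untitled>', item.permalink)
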
# Generated by Django 3.0.3 on 2020-11-10 07:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('structure', '0032_structure_author_state'), ] operations = [ migrations.RemoveField( model_name='structuremodelrmsd', name='TM_all', ), migrations.RemoveField( model_name='structuremodelrmsd', name='date', ), migrations.RemoveField( model_name='structuremodelrmsd', name='overall_all', ), migrations.RemoveField( model_name='structuremodelrmsd', name='overall_backbone', ), migrations.RemoveField( model_name='structuremodelrmsd', name='pdb', ), migrations.RemoveField( model_name='structuremodelrmsd', name='service', ), migrations.RemoveField( model_name='structuremodelrmsd', name='version', ), migrations.AddField( model_name='structuremodelrmsd', name='ECL1', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AddField( model_name='structuremodelrmsd', name='ECL2', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AddField( model_name='structuremodelrmsd', name='H8', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AddField( model_name='structuremodelrmsd', name='ICL1', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AddField( model_name='structuremodelrmsd', name='ICL2', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AddField( model_name='structuremodelrmsd', name='binding_pocket', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AddField( model_name='structuremodelrmsd', name='main_template', field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='main_template', to='structure.Structure'), preserve_default=False, ), migrations.AddField( model_name='structuremodelrmsd', name='target_structure', field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='target_structure', to='structure.Structure'), preserve_default=False, ), migrations.AlterField( model_name='structuremodelrmsd', name='TM_backbone', field=models.DecimalField(decimal_places=1, max_digits=2, null=True), ), migrations.AlterField( model_name='structuremodelrmsd', name='homology_model', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='structure.StructureModel'), ), ]
[ "django.db.migrations.RemoveField", "django.db.models.DecimalField", "django.db.models.ForeignKey" ]
[((274, 344), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""TM_all"""'}), "(model_name='structuremodelrmsd', name='TM_all')\n", (296, 344), False, 'from django.db import migrations, models\n'), ((389, 457), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""date"""'}), "(model_name='structuremodelrmsd', name='date')\n", (411, 457), False, 'from django.db import migrations, models\n'), ((502, 577), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""overall_all"""'}), "(model_name='structuremodelrmsd', name='overall_all')\n", (524, 577), False, 'from django.db import migrations, models\n'), ((622, 707), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""overall_backbone"""'}), "(model_name='structuremodelrmsd', name='overall_backbone'\n )\n", (644, 707), False, 'from django.db import migrations, models\n'), ((747, 814), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""pdb"""'}), "(model_name='structuremodelrmsd', name='pdb')\n", (769, 814), False, 'from django.db import migrations, models\n'), ((859, 930), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""service"""'}), "(model_name='structuremodelrmsd', name='service')\n", (881, 930), False, 'from django.db import migrations, models\n'), ((975, 1046), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""structuremodelrmsd"""', 'name': '"""version"""'}), "(model_name='structuremodelrmsd', name='version')\n", (997, 1046), False, 'from django.db import migrations, models\n'), ((1200, 1262), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (1219, 1262), False, 'from django.db import migrations, models\n'), ((1392, 1454), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (1411, 1454), False, 'from django.db import migrations, models\n'), ((1582, 1644), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (1601, 1644), False, 'from django.db import migrations, models\n'), ((1774, 1836), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (1793, 1836), False, 'from django.db import migrations, models\n'), ((1966, 2028), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (1985, 2028), False, 'from django.db import migrations, models\n'), ((2168, 2230), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (2187, 2230), False, 'from django.db import migrations, models\n'), ((2369, 2505), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], 
{'default': 'None', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""main_template"""', 'to': '"""structure.Structure"""'}), "(default=None, on_delete=django.db.models.deletion.CASCADE,\n related_name='main_template', to='structure.Structure')\n", (2386, 2505), False, 'from django.db import migrations, models\n'), ((2679, 2818), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': 'None', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""target_structure"""', 'to': '"""structure.Structure"""'}), "(default=None, on_delete=django.db.models.deletion.CASCADE,\n related_name='target_structure', to='structure.Structure')\n", (2696, 2818), False, 'from django.db import migrations, models\n'), ((2989, 3051), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(decimal_places=1, max_digits=2, null=True)\n', (3008, 3051), False, 'from django.db import migrations, models\n'), ((3193, 3301), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""structure.StructureModel"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='structure.StructureModel')\n", (3210, 3301), False, 'from django.db import migrations, models\n')]
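
Reconstructed from the operations above, StructureModelRMSD ends this migration looking roughly like the sketch below. Only fields the migration touches are listed; anything it leaves alone is omitted, and the class is shown for shape only (it needs a configured Django project to import).

# Post-migration shape of structure.StructureModelRMSD, inferred from the
# RemoveField/AddField/AlterField operations above; untouched fields omitted.
from django.db import models

class StructureModelRMSD(models.Model):
    homology_model = models.ForeignKey('structure.StructureModel', null=True,
                                       on_delete=models.CASCADE)
    main_template = models.ForeignKey('structure.Structure', on_delete=models.CASCADE,
                                      related_name='main_template')
    target_structure = models.ForeignKey('structure.Structure', on_delete=models.CASCADE,
                                         related_name='target_structure')
    TM_backbone = models.DecimalField(decimal_places=1, max_digits=2, null=True)
    binding_pocket = models.DecimalField(decimal_places=1, max_digits=2, null=True)
    ICL1 = models.DecimalField(decimal_places=1, max_digits=2, null=True)
    ECL1 = models.DecimalField(decimal_places=1, max_digits=2, null=True)
    ICL2 = models.DecimalField(decimal_places=1, max_digits=2, null=True)
    ECL2 = models.DecimalField(decimal_places=1, max_digits=2, null=True)
    H8 = models.DecimalField(decimal_places=1, max_digits=2, null=True)

    class Meta:
        app_label = 'structure'
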
## UnitUtil ## ## Utilities for dealing with Civ4 Units and their related objects. ## ## Copyright (c) 2008 The BUG Mod. ## ## Author: EmperorFool from CvPythonExtensions import * import BugUtil import PlayerUtil # BUG - Mac Support - start BugUtil.fixSets(globals()) # BUG - Mac Support - end gc = CyGlobalContext() NUM_UNITS = 0 NUM_CLASSES = 0 NUM_OR_BONUSES = 0 NUM_AND_TECHS = 0 TRAIN_CAN = 0 TRAIN_CANNOT = 1 TRAIN_LACK_BONUS = 2 # units that don't require any resources to build unitsWithoutBonuses = set() # units that require at least one resource to build unitsWithBonuses = set() # bonuses that are required to build at least one unit strategicBonuses = set() # unit ID -> set of tech IDs unitTechs = dict() # unit ID -> tuple ( set of units which can upgrade to it, # set of units to which it can be upgraded ) # e.g. Bowman -> ( (Archer), # (Longbowman, Crossbowman, Rifleman, ...) ) genericUnits = set() genericUnitIDs = dict() upgradeUnits = dict() genericUpgradeUnits = dict() olderUnits = dict() newerUnits = dict() def init(): """ Segregates units into two sets: those that require resources and those that don't. Creates a map of units from ID to the set of tech prerequisites. """ global NUM_UNITS, NUM_CLASSES NUM_UNITS = gc.getNumUnitInfos() NUM_CLASSES = gc.getNumUnitClassInfos() global NUM_OR_BONUSES, NUM_AND_TECHS NUM_OR_BONUSES = gc.getNUM_UNIT_PREREQ_OR_BONUSES() NUM_AND_TECHS = gc.getNUM_UNIT_AND_TECH_PREREQS() for eUnit in range(NUM_UNITS): unitInfo = gc.getUnitInfo(eUnit) BugUtil.debug("==== %s ====", unitInfo.getDescription()) # generic unit classInfo = gc.getUnitClassInfo(unitInfo.getUnitClassType()) eGenericUnit = classInfo.getDefaultUnitIndex() genericUnitIDs[eUnit] = eGenericUnit if eUnit == eGenericUnit: genericUnits.add(eUnit) else: BugUtil.debug(" unique of %s", gc.getUnitInfo(eGenericUnit).getDescription()) # resource sets found = False eBonus = unitInfo.getPrereqAndBonus() if eBonus != -1: found = True strategicBonuses.add(eBonus) BugUtil.debug(" requires %s", gc.getBonusInfo(eBonus).getDescription()) for i in range(NUM_OR_BONUSES): eBonus = unitInfo.getPrereqOrBonuses(i) if eBonus != -1: found = True strategicBonuses.add(eBonus) BugUtil.debug(" requires %s", gc.getBonusInfo(eBonus).getDescription()) if found: unitsWithBonuses.add(eUnit) else: unitsWithoutBonuses.add(eUnit) # tech map techs = set() unitTechs[eUnit] = techs eTech = unitInfo.getPrereqAndTech() if eTech != -1: techs.add(eTech) for i in range(NUM_AND_TECHS): eTech = unitInfo.getPrereqAndTechs(i) if eTech != -1: techs.add(eTech) for eTech in techs: BugUtil.debug(" requires %s", gc.getTechInfo(eTech).getDescription()) # upgrade maps for eUnit in range(NUM_UNITS): getOlderUnits(eUnit) getNewerUnits(eUnit) initOrders() def unitInfos(): """Iterates through all CvUnitInfos.""" for eUnit in range(NUM_UNITS): yield gc.getUnitInfo(eUnit) def unitClassInfos(): """Iterates through all CvUnitClassInfos.""" for eClass in range(NUM_CLASSES): yield gc.getUnitClassInfo(eClass) def isGeneric(eUnit): """Returns True if the given unit is generic and valid.""" return eUnit in genericUnits def isUnique(eUnit): """Returns True if the given unit is unique and valid.""" return eUnit not in genericUnits and eUnit != -1 def getGeneric(eUnit): """ Returns the generic unit counterpart to the given unique unit or the same unit if it's not unique. """ return genericUnitIDs[eUnit] def getGenerics(units): """ Returns a set of the generic units which are counterparts to the unique units in the given set. 
Generic units in the set are not returned. """ generics = set() for eUnit in units: eGenericUnit = genericUnitIDs[eUnit] if eGenericUnit != eUnit: generics.add(eGenericUnit) return generics def getGenericUpgrades(eUnit): """Returns the set of all generic units to which eUnit can upgrade.""" return genericUpgradeUnits[eUnit] def getUpgrades(eUnit): """Returns the set of all units to which eUnit can upgrade.""" return upgradeUnits[eUnit] def getOlderUnits(eUnit): """Returns the set of all units from which eUnit can upgrade.""" if eUnit in olderUnits: return olderUnits[eUnit] unitInfo = gc.getUnitInfo(eUnit) eClass = unitInfo.getUnitClassType() units = set() olderUnits[eUnit] = units for eOldUnit in range(NUM_UNITS): oldUnitInfo = gc.getUnitInfo(eOldUnit) if oldUnitInfo.getUpgradeUnitClass(eClass): #BugUtil.debug("%s -> %s", oldUnitInfo.getDescription(), unitInfo.getDescription()) units.add(eOldUnit) units |= getOlderUnits(eOldUnit) return units def getNewerUnits(eUnit): """Returns the set of all units to which eUnit can upgrade.""" if eUnit in newerUnits: return newerUnits[eUnit] unitInfo = gc.getUnitInfo(eUnit) upgrades = set() upgradeUnits[eUnit] = upgrades genericUpgrades = set() genericUpgradeUnits[eUnit] = genericUpgrades newer = set() newerUnits[eUnit] = newer for eNewUnit in range(NUM_UNITS): newUnitInfo = gc.getUnitInfo(eNewUnit) if unitInfo.getUpgradeUnitClass(newUnitInfo.getUnitClassType()): #BugUtil.debug("%s -> %s", unitInfo.getDescription(), newUnitInfo.getDescription()) upgrades.add(eNewUnit) if isGeneric(eNewUnit): genericUpgrades.add(eNewUnit) newer.add(eNewUnit) newer |= getNewerUnits(eNewUnit) return newer def isUnitOrUpgradeInSet(eUnit, units): """ Returns True if eUnit is in the given set of units or can be upgraded to at least one unit in it. """ return eUnit in units or len(getNewerUnits(eUnit) & units) > 0 def areUpgradesInSet(eUnit, units): """ Returns True if every immediate upgrade of eUnit is in the given set. This ignores transitive upgrades (Warrior doesn't check for Macemen). Need to take UUs into consideration. """ upgrades = getUpgrades(eUnit) return upgrades <= units def replaceUniqueUnits(units): """ Replaces unique units with their generic counterparts in the given set. """ uniques = set() generics = set() for eUnit in units: if isUnique(eUnit): uniques.add(eUnit) generics.add(getGeneric(eUnit)) units -= uniques units += generics def findObsoleteUnits(units): """ Returns a set containing the units whose immediate upgrades are all in the set, taking unique units into consideration. For example, if the set contains Maceman and Redcoat, neither is returned, but if it contains Grenadier as well, Maceman is returned. 
""" #result = units.copy() generics = getGenerics(units) obsoletes = set() for eUnit in units: # unitInfo = gc.getUnitInfo(eUnit) upgrades = getGenericUpgrades(eUnit) if upgrades: for eUpgradeUnit in upgrades: if eUpgradeUnit not in units and eUpgradeUnit not in generics: # BugUtil.debug("findObsoleteUnits - %s, cannot build %s", # unitInfo.getDescription(), # gc.getUnitInfo(eUpgradeUnit).getDescription()) break else: obsoletes.add(eUnit) return obsoletes def getKnownUnits(playerOrID): player, team = PlayerUtil.getPlayerAndTeam(playerOrID) units = set() for eUnit in range(NUM_UNITS): for eTech in unitTechs[eUnit]: if team.isHasTech(eTech): units.add(eUnit) return units def getKnowableUnits(playerOrID): player, team = PlayerUtil.getPlayerAndTeam(playerOrID) units = set() for eUnit in range(NUM_UNITS): for eTech in unitTechs[eUnit]: if not (team.isHasTech(eTech) or player.canResearch(eTech, False)): break else: units.add(eUnit) return units def getTrainableUnits(playerOrID, knowableUnits, checkCities=True, military=None): """ Returns the set of all units the player can train, including obsolete ones. """ game = CyGame() player, team = PlayerUtil.getPlayerAndTeam(playerOrID) civInfo = gc.getCivilizationInfo(player.getCivilizationType()) if checkCities: cities = PlayerUtil.getPlayerCities(player) else: cities = None units = set() BugUtil.debug("%s =========", player.getCivilizationShortDescription(0)) for eClass in range(NUM_CLASSES): eUnit = civInfo.getCivilizationUnits(eClass) if eUnit == -1 or eUnit not in knowableUnits: #BugUtil.debug(" %s -> unknowable", gc.getUnitClassInfo(eClass).getDescription()) continue unitInfo = gc.getUnitInfo(eUnit) # military if military is not None: combat = (unitInfo.getUnitCombatType() > 0 or unitInfo.getNukeRange() != -1 or unitInfo.getAirCombat() > 0) if military != combat: #BugUtil.debug(" %s -> combat is %s", unitInfo.getDescription(), combat) continue # OCC and Settlers if game.isOption(GameOptionTypes.GAMEOPTION_ONE_CITY_CHALLENGE) and unitInfo.isFound(): BugUtil.debug(" %s -> no founding units in OCC", unitInfo.getDescription()) continue # techs for eTech in unitTechs[eUnit]: if not team.isHasTech(eTech): BugUtil.debug(" %s -> doesn't know %s", unitInfo.getDescription(), gc.getTechInfo(eTech).getDescription()) missing = True break else: missing = False if missing: continue # state religion eReligion = unitInfo.getStateReligion() if eReligion != -1 and player.getStateReligion() != eReligion: BugUtil.debug(" %s -> wrong state religion", unitInfo.getDescription()) continue # nukes if (game.isNoNukes() or not game.isNukesValid()) and unitInfo.getNukeRange() != -1: BugUtil.debug(" %s -> no nukes", unitInfo.getDescription()) continue # getSpecialUnitType, game.isSpecialUnitValid eSpecialType = unitInfo.getSpecialUnitType() if eSpecialType != -1 and not game.isSpecialUnitValid(eSpecialType): BugUtil.debug(" %s -> special unit type %s invalid", unitInfo.getDescription(), gc.getSpecialUnitInfo(eSpecialType).getDescription()) continue # cities if cities and not canAnyCityBuildUnit(eUnit, cities, -1, True): BugUtil.debug(" %s -> no city can train unit", unitInfo.getDescription()) continue BugUtil.debug(" %s", unitInfo.getDescription()) units.add(eUnit) return units def getTrainableAndUntrainableUnits(playerOrID, knowableUnits, military=None): player, team = PlayerUtil.getPlayerAndTeam(playerOrID) cities = PlayerUtil.getPlayerCities(player) # separate units into two groups: yes and no units = getTrainableUnits(playerOrID, knowableUnits, False, military) yesUnits = 
set() noUnits = set() BugUtil.debug("-----------------------") for eUnit in units: if canAnyCityBuildUnit(eUnit, cities, -1, True): BugUtil.debug(" yes %s", gc.getUnitInfo(eUnit).getDescription()) yesUnits.add(eUnit) else: BugUtil.debug(" no %s", gc.getUnitInfo(eUnit).getDescription()) noUnits.add(eUnit) return yesUnits, noUnits def getKnownTrainableUnits(playerOrID, askingPlayerOrID, knowableUnits, bonuses, military=None): player, team = PlayerUtil.getPlayerAndTeam(playerOrID) askingPlayer = PlayerUtil.getPlayer(askingPlayerOrID) eAskingTeam, askingTeam = PlayerUtil.getPlayerTeamAndID(askingPlayer) #trade = player.canTradeNetworkWith(askingPlayer.getID()) cities = PlayerUtil.getPlayerCities(player, lambda city: city.isRevealed(eAskingTeam, False)) # separate units into two groups: yes and maybe units = getTrainableUnits(playerOrID, knowableUnits, False, military) yesUnits = set() maybeUnits = set() BugUtil.debug("-----------------------") for eUnit in units: if not canAnyCityBuildUnit(eUnit, cities, eAskingTeam, False): BugUtil.debug(" no %s", gc.getUnitInfo(eUnit).getDescription()) elif hasBonusesForUnit(eUnit, bonuses): BugUtil.debug(" yes %s", gc.getUnitInfo(eUnit).getDescription()) yesUnits.add(eUnit) elif bonuses is None: BugUtil.debug(" maybe %s", gc.getUnitInfo(eUnit).getDescription()) maybeUnits.add(eUnit) return yesUnits, maybeUnits def hasBonusesForUnit(eUnit, bonuses): if eUnit not in unitsWithBonuses: return True if not bonuses: return False unitInfo = gc.getUnitInfo(eUnit) eBonus = unitInfo.getPrereqAndBonus() if eBonus != -1 and eBonus not in bonuses: return False requiresBonus = False for i in range(NUM_OR_BONUSES): eBonus = unitInfo.getPrereqOrBonuses(i) if eBonus != -1: requiresBonus = True if eBonus in bonuses: break else: if requiresBonus: return False return True def canAnyCityBuildUnit(eUnit, cities=None, askingTeamOrID=-1, checkBonuses=True): eAskingTeam = PlayerUtil.getTeamID(askingTeamOrID) unitInfo = gc.getUnitInfo(eUnit) if cities: for city in cities: if canCityBuildUnit(unitInfo, city, eAskingTeam, checkBonuses): return True return False else: return canCityBuildUnit(unitInfo, None, eAskingTeam, checkBonuses) def canCityBuildUnit(unitInfo, city, eAskingTeam, checkBonuses=True): # religion if unitInfo.isPrereqReligion(): if not city or city.getReligionCount() > 0: # EF: Seems odd to enforce NO religions in the city, # but this is how CvPlot.canTrain() does it. # The function should actually be called isPrereqNoReligion(). 
return False eReligion = unitInfo.getPrereqReligion() if eReligion != -1 and not (city and city.isHasReligion(eReligion)): return False # corporation eCorp = unitInfo.getPrereqCorporation() if eCorp != -1 and not (city and city.isActiveCorporation(eCorp)): return False # skipping isPrereqBonuses as the land part looks broken # and we don't want to limit work boats if all resources are covered # domain if unitInfo.getDomainType() == DomainTypes.DOMAIN_SEA: if not (city and PlayerUtil.isSaltWaterPort(city, eAskingTeam)): return False # EF: this is how CyPlot does it #plot = city.plot() #if not plot.isWater() or not plot.isCoastalLand(unitInfo.getMinAreaSize()): # continue else: minArea = unitInfo.getMinAreaSize() if minArea != -1: if eAskingTeam != -1 or not city or city.plot().area().getNumTiles() < minArea: return False # holy city eReligion = unitInfo.getHolyCity() if eReligion != -1 and not (city and city.isHolyCity(eReligion)): return False # building eBuilding = unitInfo.getPrereqBuilding() if eBuilding != -1: if eAskingTeam != -1 or not city: return False if city.getNumBuilding(eBuilding) == 0: eSpecialBuilding = gc.getBuildingInfo(eBuilding).getSpecialBuildingType() if eSpecialBuilding == -1 or not gc.getPlayer(city.getOwner()).isSpecialBuildingNotRequired(eSpecialBuilding): return False # resources if checkBonuses and not cityHasBonusesForUnit(unitInfo, city): return False # passes all tests for this city return True def cityHasBonusesForUnit(unitInfo, city): if not city: return False eBonus = unitInfo.getPrereqAndBonus() if eBonus != -1 and not city.hasBonus(eBonus): return False requiresBonus = False for i in range(NUM_OR_BONUSES): eBonus = unitInfo.getPrereqOrBonuses(i) if eBonus != -1: requiresBonus = True if city.hasBonus(eBonus): break else: if requiresBonus: return False return True def getCanTrainUnits(playerOrID, askingPlayerOrID=None, military=None): """ Returns the set of all units the player can train. Searches all of the player's cities to find which units can be trained. If askingPlayerOrID is given, only cities they have seen are checked, and only units whose prerequisite techs they know or can research are returned. Also, if the two players' trade networks are not connected, units that require resources to train are returned in a second set. If military is provided, only military or civilian units are checked depending on its value, True or False, respectively. 
*** OBSOLETE *** """ player, team = PlayerUtil.getPlayerAndTeam(playerOrID) askingPlayer = PlayerUtil.getPlayer(askingPlayerOrID) if askingPlayer: eAskingTeam, askingTeam = PlayerUtil.getPlayerTeamAndID(askingPlayer) trade = player.canTradeNetworkWith(askingPlayer.getID()) civInfo = gc.getCivilizationInfo(player.getCivilizationType()) units = set() maybeUnits = set() for eClass in range(NUM_CLASSES): eUnit = civInfo.getCivilizationUnits(eClass) if eUnit == -1: classInfo = gc.getUnitClassInfo(eClass) BugUtil.debug("%s doesn't have %s", civInfo.getDescription(), classInfo.getDescription()) eUnit = classInfo.getDefaultUnitIndex() unitInfo = gc.getUnitInfo(eUnit) if unitInfo: if ((military == True and unitInfo.getUnitCombatType() <= 0) or (military == False and unitInfo.getUnitCombatType() > 0)): BugUtil.debug("skipping (non-)military %s", unitInfo.getDescription()) continue if askingPlayer: for eTech in unitTechs[eUnit]: if not (askingTeam.isHasTech(eTech) or askingPlayer.canResearch(eTech, False)): BugUtil.debug("%s doesn't comprehend %s", askingPlayer.getCivilizationShortDescription(0), gc.getTechInfo(eTech).getDescription()) skip = True break else: skip = False if skip: BugUtil.debug("skipping unknowable %s", unitInfo.getDescription()) continue for city in PlayerUtil.playerCities(player): if askingPlayer: if not city.isRevealed(eAskingTeam, False): continue if city.canTrain(eUnit, False, not trade): if eUnit in unitsWithBonuses: maybeUnits.add(eUnit) else: units.add(eUnit) break else: if city.canTrain(eUnit, False, False): units.add(eUnit) break BugUtil.debug("%s can train:", player.getCivilizationShortDescription(0)) for eUnit in units: unitInfo = gc.getUnitInfo(eUnit) BugUtil.debug(" %s", unitInfo.getDescription()) if askingPlayer: BugUtil.debug("%s can maybe train:", player.getCivilizationShortDescription(0)) for eUnit in maybeUnits: unitInfo = gc.getUnitInfo(eUnit) BugUtil.debug(" %s", unitInfo.getDescription()) return units, maybeUnits else: return units ( ORDER_NONE, ORDER_SKIP, ORDER_SLEEP, ORDER_FORTIFY, ORDER_HEAL, ORDER_SENTRY, ORDER_INTERCEPT, ORDER_PATROL, ORDER_PLUNDER, ORDER_BUILD, # improvement ORDER_CONSTRUCT, # building ORDER_GOTO, ORDER_EXPLORE, ORDER_AUTO_BUILD, ORDER_AUTO_NETWORK, ORDER_AUTO_CITY, ORDER_AUTO_RELIGION, ) = range(17) ORDERS_BY_ACTIVITY = { ActivityTypes.ACTIVITY_AWAKE: ORDER_NONE, ActivityTypes.ACTIVITY_INTERCEPT: ORDER_INTERCEPT, ActivityTypes.ACTIVITY_PATROL: ORDER_PATROL, ActivityTypes.ACTIVITY_PLUNDER: ORDER_PLUNDER, ActivityTypes.ACTIVITY_HEAL: ORDER_HEAL, ActivityTypes.ACTIVITY_SENTRY: ORDER_SENTRY, ActivityTypes.ACTIVITY_HOLD: ORDER_SKIP, } ORDERS_BY_AUTOMATION = { AutomateTypes.AUTOMATE_EXPLORE: ORDER_EXPLORE, AutomateTypes.AUTOMATE_BUILD: ORDER_AUTO_BUILD, AutomateTypes.AUTOMATE_NETWORK: ORDER_AUTO_NETWORK, AutomateTypes.AUTOMATE_CITY: ORDER_AUTO_CITY, AutomateTypes.AUTOMATE_RELIGION: ORDER_AUTO_RELIGION, } MOVE_TO_MISSIONS = [ MissionTypes.MISSION_MOVE_TO, MissionTypes.MISSION_MOVE_TO_UNIT, ] def getOrder(unit): group = unit.getGroup() eActivityType = group.getActivityType() if eActivityType in ORDERS_BY_ACTIVITY: return ORDERS_BY_ACTIVITY[eActivityType] eAutomationType = group.getAutomateType() if eAutomationType in ORDERS_BY_AUTOMATION: return ORDERS_BY_AUTOMATION[eAutomationType] if (group.getLengthMissionQueue() > 0): # TODO: loop to find the first non-goto and check in ORDERS_BY_MISSION eMissionType = group.getMissionType(0) if eMissionType == MissionTypes.MISSION_BUILD: return ORDER_BUILD elif eMissionType in MOVE_TO_MISSIONS: return 
ORDER_GOTO
    elif (unit.isWaiting()):
        if (unit.isFortifyable()):
            return ORDER_FORTIFY
        else:
            return ORDER_SLEEP
    return ORDER_NONE

def initOrders():
    """
    Adds the extra orders defined by BULL.
    """
    try:
        ORDERS_BY_ACTIVITY[ActivityTypes.ACTIVITY_SENTRY_WHILE_HEAL] = ORDER_HEAL
        ORDERS_BY_ACTIVITY[ActivityTypes.ACTIVITY_SENTRY_LAND_UNITS] = ORDER_SENTRY
        ORDERS_BY_ACTIVITY[ActivityTypes.ACTIVITY_SENTRY_NAVAL_UNITS] = ORDER_SENTRY
        MOVE_TO_MISSIONS.append(MissionTypes.MISSION_MOVE_TO_SENTRY)
    except AttributeError:
        # Running without BULL: these activity/mission types are not defined.
        pass
[ "PlayerUtil.playerCities", "BugUtil.debug", "PlayerUtil.getPlayerCities", "PlayerUtil.getPlayerTeamAndID", "PlayerUtil.getPlayerAndTeam", "PlayerUtil.getTeamID", "PlayerUtil.isSaltWaterPort", "PlayerUtil.getPlayer" ]
[((7353, 7392), 'PlayerUtil.getPlayerAndTeam', 'PlayerUtil.getPlayerAndTeam', (['playerOrID'], {}), '(playerOrID)\n', (7380, 7392), False, 'import PlayerUtil\n'), ((7597, 7636), 'PlayerUtil.getPlayerAndTeam', 'PlayerUtil.getPlayerAndTeam', (['playerOrID'], {}), '(playerOrID)\n', (7624, 7636), False, 'import PlayerUtil\n'), ((8059, 8098), 'PlayerUtil.getPlayerAndTeam', 'PlayerUtil.getPlayerAndTeam', (['playerOrID'], {}), '(playerOrID)\n', (8086, 8098), False, 'import PlayerUtil\n'), ((10468, 10507), 'PlayerUtil.getPlayerAndTeam', 'PlayerUtil.getPlayerAndTeam', (['playerOrID'], {}), '(playerOrID)\n', (10495, 10507), False, 'import PlayerUtil\n'), ((10519, 10553), 'PlayerUtil.getPlayerCities', 'PlayerUtil.getPlayerCities', (['player'], {}), '(player)\n', (10545, 10553), False, 'import PlayerUtil\n'), ((10712, 10752), 'BugUtil.debug', 'BugUtil.debug', (['"""-----------------------"""'], {}), "('-----------------------')\n", (10725, 10752), False, 'import BugUtil\n'), ((11167, 11206), 'PlayerUtil.getPlayerAndTeam', 'PlayerUtil.getPlayerAndTeam', (['playerOrID'], {}), '(playerOrID)\n', (11194, 11206), False, 'import PlayerUtil\n'), ((11224, 11262), 'PlayerUtil.getPlayer', 'PlayerUtil.getPlayer', (['askingPlayerOrID'], {}), '(askingPlayerOrID)\n', (11244, 11262), False, 'import PlayerUtil\n'), ((11291, 11334), 'PlayerUtil.getPlayerTeamAndID', 'PlayerUtil.getPlayerTeamAndID', (['askingPlayer'], {}), '(askingPlayer)\n', (11320, 11334), False, 'import PlayerUtil\n'), ((11660, 11700), 'BugUtil.debug', 'BugUtil.debug', (['"""-----------------------"""'], {}), "('-----------------------')\n", (11673, 11700), False, 'import BugUtil\n'), ((12761, 12797), 'PlayerUtil.getTeamID', 'PlayerUtil.getTeamID', (['askingTeamOrID'], {}), '(askingTeamOrID)\n', (12781, 12797), False, 'import PlayerUtil\n'), ((16064, 16103), 'PlayerUtil.getPlayerAndTeam', 'PlayerUtil.getPlayerAndTeam', (['playerOrID'], {}), '(playerOrID)\n', (16091, 16103), False, 'import PlayerUtil\n'), ((16121, 16159), 'PlayerUtil.getPlayer', 'PlayerUtil.getPlayer', (['askingPlayerOrID'], {}), '(askingPlayerOrID)\n', (16141, 16159), False, 'import PlayerUtil\n'), ((8194, 8228), 'PlayerUtil.getPlayerCities', 'PlayerUtil.getPlayerCities', (['player'], {}), '(player)\n', (8220, 8228), False, 'import PlayerUtil\n'), ((16208, 16251), 'PlayerUtil.getPlayerTeamAndID', 'PlayerUtil.getPlayerTeamAndID', (['askingPlayer'], {}), '(askingPlayer)\n', (16237, 16251), False, 'import PlayerUtil\n'), ((17433, 17464), 'PlayerUtil.playerCities', 'PlayerUtil.playerCities', (['player'], {}), '(player)\n', (17456, 17464), False, 'import PlayerUtil\n'), ((13905, 13950), 'PlayerUtil.isSaltWaterPort', 'PlayerUtil.isSaltWaterPort', (['city', 'eAskingTeam'], {}), '(city, eAskingTeam)\n', (13931, 13950), False, 'import PlayerUtil\n')]
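Both bonus checks above (hasBonusesForUnit and cityHasBonusesForUnit) lean on Python's for/else: the else suite runs only when the loop finishes without hitting break. A standalone sketch of the same pattern, with made-up bonus names and nothing assumed beyond the stdlib:

def requires_one_of(required, owned):
    # Mirrors the shape of hasBonusesForUnit: pass if nothing is
    # required, or if at least one required bonus is owned.
    found_requirement = False
    for bonus in required:
        found_requirement = True
        if bonus in owned:
            break                      # a match short-circuits the scan
    else:
        if found_requirement:          # loop ended without any match
            return False
    return True

assert requires_one_of([], {'iron'})                    # nothing required
assert requires_one_of(['iron', 'copper'], {'copper'})  # one match suffices
assert not requires_one_of(['iron'], {'horses'})        # no match at all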
import json import pandas as pd from werkzeug.security import generate_password_hash, check_password_hash import sqlalchemy as sa from api.sqlalchemy import Base class User(Base): __tablename__ = 'user' id = sa.Column(sa.Integer, primary_key=True) email = sa.Column(sa.String(100)) password = sa.Column(sa.String) username = sa.Column(sa.String(100)) api_key = sa.Column(sa.String) first_name = sa.Column(sa.String(100)) last_name = sa.Column(sa.String(100)) # relationship for group access access_groups = sa.orm.relationship("AccessLink", back_populates="user") # relationship for user owned - legacy transition code ftms_data = sa.orm.relationship('FTMS_Data', backref='user', lazy='dynamic') ftms_parameters = sa.orm.relationship('FTMS_Parameters', backref='user', lazy='dynamic') ftms_results = sa.orm.relationship('FTMS_Result', backref='user', lazy='dynamic') gcms_data = sa.orm.relationship('GCMS_Data', backref='user', lazy='dynamic') gcms_parameters = sa.orm.relationship('GCMS_Parameters', backref='user', lazy='dynamic') gcms_results = sa.orm.relationship('GCMS_Result', backref='user', lazy='dynamic') def __repr__(self): return '<User {}>'.format(self.first_name) def set_password(self, password): self.password = generate_password_hash(password, method='<PASSWORD>') def check_password(self, password): return check_password_hash(self.password, password) class AccessGroup(Base): __tablename__ = 'accessGroup' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String(100), unique=True) users = sa.orm.relationship("AccessLink", back_populates="access_group") ftms_all_data = sa.orm.relationship("FTMS_DataAccessLink", back_populates="access_group") gcms_all_data = sa.orm.relationship("GCMS_DataAccessLink", back_populates="access_group") def __repr__(self): return '<AccessGroup, name: {}, id: {}>'.format(self.name, self.id) class AccessLink(Base): __tablename__ = 'accessLink' __table_args__ = (sa.sql.schema.UniqueConstraint('access_id', 'user_id', name='unique_access_link'), ) access_id = sa.Column(sa.Integer, sa.ForeignKey('accessGroup.id'), primary_key=True) user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'), primary_key=True) user = sa.orm.relationship("User", back_populates="access_groups") access_group = sa.orm.relationship("AccessGroup", back_populates="users")
[ "sqlalchemy.sql.schema.UniqueConstraint", "sqlalchemy.ForeignKey", "sqlalchemy.orm.relationship", "werkzeug.security.check_password_hash", "sqlalchemy.Column", "sqlalchemy.String", "werkzeug.security.generate_password_hash" ]
[((221, 260), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {'primary_key': '(True)'}), '(sa.Integer, primary_key=True)\n', (230, 260), True, 'import sqlalchemy as sa\n'), ((314, 334), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {}), '(sa.String)\n', (323, 334), True, 'import sqlalchemy as sa\n'), ((390, 410), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {}), '(sa.String)\n', (399, 410), True, 'import sqlalchemy as sa\n'), ((553, 609), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""AccessLink"""'], {'back_populates': '"""user"""'}), "('AccessLink', back_populates='user')\n", (572, 609), True, 'import sqlalchemy as sa\n'), ((686, 750), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""FTMS_Data"""'], {'backref': '"""user"""', 'lazy': '"""dynamic"""'}), "('FTMS_Data', backref='user', lazy='dynamic')\n", (705, 750), True, 'import sqlalchemy as sa\n'), ((773, 843), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""FTMS_Parameters"""'], {'backref': '"""user"""', 'lazy': '"""dynamic"""'}), "('FTMS_Parameters', backref='user', lazy='dynamic')\n", (792, 843), True, 'import sqlalchemy as sa\n'), ((863, 929), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""FTMS_Result"""'], {'backref': '"""user"""', 'lazy': '"""dynamic"""'}), "('FTMS_Result', backref='user', lazy='dynamic')\n", (882, 929), True, 'import sqlalchemy as sa\n'), ((947, 1011), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""GCMS_Data"""'], {'backref': '"""user"""', 'lazy': '"""dynamic"""'}), "('GCMS_Data', backref='user', lazy='dynamic')\n", (966, 1011), True, 'import sqlalchemy as sa\n'), ((1034, 1104), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""GCMS_Parameters"""'], {'backref': '"""user"""', 'lazy': '"""dynamic"""'}), "('GCMS_Parameters', backref='user', lazy='dynamic')\n", (1053, 1104), True, 'import sqlalchemy as sa\n'), ((1124, 1190), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""GCMS_Result"""'], {'backref': '"""user"""', 'lazy': '"""dynamic"""'}), "('GCMS_Result', backref='user', lazy='dynamic')\n", (1143, 1190), True, 'import sqlalchemy as sa\n'), ((1556, 1595), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {'primary_key': '(True)'}), '(sa.Integer, primary_key=True)\n', (1565, 1595), True, 'import sqlalchemy as sa\n'), ((1660, 1724), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""AccessLink"""'], {'back_populates': '"""access_group"""'}), "('AccessLink', back_populates='access_group')\n", (1679, 1724), True, 'import sqlalchemy as sa\n'), ((1746, 1819), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""FTMS_DataAccessLink"""'], {'back_populates': '"""access_group"""'}), "('FTMS_DataAccessLink', back_populates='access_group')\n", (1765, 1819), True, 'import sqlalchemy as sa\n'), ((1841, 1914), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""GCMS_DataAccessLink"""'], {'back_populates': '"""access_group"""'}), "('GCMS_DataAccessLink', back_populates='access_group')\n", (1860, 1914), True, 'import sqlalchemy as sa\n'), ((2364, 2423), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""User"""'], {'back_populates': '"""access_groups"""'}), "('User', back_populates='access_groups')\n", (2383, 2423), True, 'import sqlalchemy as sa\n'), ((2443, 2501), 'sqlalchemy.orm.relationship', 'sa.orm.relationship', (['"""AccessGroup"""'], {'back_populates': '"""users"""'}), "('AccessGroup', back_populates='users')\n", (2462, 2501), True, 'import sqlalchemy as sa\n'), ((283, 297), 'sqlalchemy.String', 
'sa.String', (['(100)'], {}), '(100)\n', (292, 297), True, 'import sqlalchemy as sa\n'), ((360, 374), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (369, 374), True, 'import sqlalchemy as sa\n'), ((438, 452), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (447, 452), True, 'import sqlalchemy as sa\n'), ((480, 494), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (489, 494), True, 'import sqlalchemy as sa\n'), ((1330, 1383), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {'method': '"""<PASSWORD>"""'}), "(password, method='<PASSWORD>')\n", (1352, 1383), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((1440, 1484), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password', 'password'], {}), '(self.password, password)\n', (1459, 1484), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((1618, 1632), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (1627, 1632), True, 'import sqlalchemy as sa\n'), ((2097, 2183), 'sqlalchemy.sql.schema.UniqueConstraint', 'sa.sql.schema.UniqueConstraint', (['"""access_id"""', '"""user_id"""'], {'name': '"""unique_access_link"""'}), "('access_id', 'user_id', name=\n 'unique_access_link')\n", (2127, 2183), True, 'import sqlalchemy as sa\n'), ((2221, 2252), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""accessGroup.id"""'], {}), "('accessGroup.id')\n", (2234, 2252), True, 'import sqlalchemy as sa\n'), ((2308, 2332), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (2321, 2332), True, 'import sqlalchemy as sa\n')]
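User.set_password and User.check_password wrap werkzeug's salted hashing; the method argument above is redacted in the source, so this minimal round-trip sketch simply relies on werkzeug's default method:

from werkzeug.security import generate_password_hash, check_password_hash

hashed = generate_password_hash('s3cret')            # salted hash, default method
assert check_password_hash(hashed, 's3cret')       # correct password verifies
assert not check_password_hash(hashed, 'wrong')    # wrong password fails
assert hashed != generate_password_hash('s3cret')  # fresh salt on every call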
""" class for running sqs message launcher Created December 22nd, 2016 @author: <NAME> @version: 0.1.0 @license: Apache """ # ================ # start imports # ================ import json import logging import os import boto3 import boto3.session # ================ # start class # ================ sqs_logger = logging.getLogger('sqs_listener') class SqsLauncher(object): def __init__(self, queue=None, queue_url=None, create_queue=False, visibility_timeout='600'): """ :param queue: (str) name of queue to listen to :param queue_url: (str) url of queue to listen to :param create_queue (boolean) determines whether to create the queue if it doesn't exist. If False, an Exception will be raised if the queue doesn't already exist :param visibility_timeout: (str) Relevant to queue creation. Indicates the number of seconds for which the SQS will hide the message. Typically this should reflect the maximum amount of time your handler method will take to finish execution. See http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html for more information """ if not queue and not queue_url: raise ValueError('Either `queue` or `queue_url` should be provided.') if (not os.environ.get('AWS_ACCOUNT_ID', None) and not (boto3.Session().get_credentials().method in ['iam-role', 'assume-role'])): raise EnvironmentError('Environment variable `AWS_ACCOUNT_ID` not set and no role found.') # new session for each instantiation self._session = boto3.session.Session() self._client = self._session.client('sqs') self._queue_name = queue self._queue_url = queue_url if not queue_url: queues = self._client.list_queues(QueueNamePrefix=self._queue_name) exists = False for q in queues.get('QueueUrls', []): qname = q.split('/')[-1] if qname == self._queue_name: exists = True self._queue_url = q if not exists: if create_queue: q = self._client.create_queue( QueueName=self._queue_name, Attributes={ 'VisibilityTimeout': visibility_timeout # 10 minutes } ) self._queue_url = q['QueueUrl'] else: raise ValueError('No queue found with name ' + self._queue_name) else: self._queue_name = self._get_queue_name_from_url(queue_url) def launch_message(self, message, **kwargs): """ sends a message to the queue specified in the constructor :param message: (dict) :param kwargs: additional optional keyword arguments (DelaySeconds, MessageAttributes, MessageDeduplicationId, or MessageGroupId) See http://boto3.readthedocs.io/en/latest/reference/services/sqs.html#SQS.Client.send_message for more information :return: (dict) the message response from SQS """ sqs_logger.info("Sending message to queue " + self._queue_name) if not kwargs: return self._client.send_message( QueueUrl=self._queue_url, MessageBody=json.dumps(message) ) return self._client.send_message( QueueUrl=self._queue_url, MessageBody=json.dumps(message), **kwargs ) def _get_queue_name_from_url(self, url): return url.split('/')[-1]
[ "boto3.Session", "json.dumps", "os.environ.get", "boto3.session.Session", "logging.getLogger" ]
[((320, 353), 'logging.getLogger', 'logging.getLogger', (['"""sqs_listener"""'], {}), "('sqs_listener')\n", (337, 353), False, 'import logging\n'), ((1762, 1785), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (1783, 1785), False, 'import boto3\n'), ((1451, 1489), 'os.environ.get', 'os.environ.get', (['"""AWS_ACCOUNT_ID"""', 'None'], {}), "('AWS_ACCOUNT_ID', None)\n", (1465, 1489), False, 'import os\n'), ((3678, 3697), 'json.dumps', 'json.dumps', (['message'], {}), '(message)\n', (3688, 3697), False, 'import json\n'), ((3540, 3559), 'json.dumps', 'json.dumps', (['message'], {}), '(message)\n', (3550, 3559), False, 'import json\n'), ((1515, 1530), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (1528, 1530), False, 'import boto3\n')]
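A hypothetical usage sketch for SqsLauncher: the module path and queue name are assumptions, and running it requires AWS credentials plus the AWS_ACCOUNT_ID environment variable (or an IAM/assumed role), as enforced in __init__ above.

from sqs_launcher import SqsLauncher   # module path is an assumption

launcher = SqsLauncher(queue='example-queue', create_queue=True)
resp = launcher.launch_message({'event': 'signup', 'user_id': 42},
                               DelaySeconds=5)  # optional send_message kwarg
print(resp['MessageId'])   # boto3 send_message responses include the message id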
# Generated by Django 2.2.6 on 2019-10-29 11:38 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('neigh1', '0029_auto_20191029_1436'), ] operations = [ migrations.DeleteModel( name='NeighborhoodPost', ), ]
[ "django.db.migrations.DeleteModel" ]
[((226, 273), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""NeighborhoodPost"""'}), "(name='NeighborhoodPost')\n", (248, 273), False, 'from django.db import migrations\n')]
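DeleteModel drops the table outright. When the rows still matter, one hedged pattern (the archive step below is a hypothetical placeholder, only the app and model labels are taken from the migration above) is to pair the drop with a RunPython backup in the same migration:

from django.db import migrations

def archive_posts(apps, schema_editor):
    # Historical model state is still available here, because DeleteModel
    # runs after this operation.
    NeighborhoodPost = apps.get_model('neigh1', 'NeighborhoodPost')
    # hypothetical: serialize NeighborhoodPost.objects.all() somewhere durable

class Migration(migrations.Migration):

    dependencies = [
        ('neigh1', '0029_auto_20191029_1436'),
    ]

    operations = [
        migrations.RunPython(archive_posts, migrations.RunPython.noop),
        migrations.DeleteModel(name='NeighborhoodPost'),
    ]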
# # Copyright (c) 2017-2021 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # """ Tests for the API /certificate_install/delete methods. """ import json import mock import os import sys import uuid as UUID from cryptography import x509 from cryptography.hazmat.backends import default_backend from six.moves import http_client from sysinv.api.controllers.v1 import certificate as cert_api from sysinv.common import constants from sysinv.tests.api import base from sysinv.tests.db import utils as dbutils SKIP_PYTHON_VERSIONS = {'RFC_6125': [(3, 9)]} def check_skip_test(test_reference): # In Python 3.9 versus Python 3.6 RFC 6125 got handling improvements # in the STDLIB. Check _dnsname_match implementation. versions = SKIP_PYTHON_VERSIONS['RFC_6125'] runtime_version = sys.version_info[:2] if (runtime_version[0], runtime_version[1]) in versions: test_reference.skipTest("Skipping SAN tests not aligning to RFC 6125, " "section 6.4.3 in Python {}.{}" "".format(runtime_version[0], runtime_version[1])) class FakeConductorAPI(object): def __init__(self): self.config_certificate = self.fake_config_certificate self.delete_certificate = mock.MagicMock() self.config_certificate_return = None self.platcert_k8s_secret_value = False def fake_config_certificate(self, context, pem, config_dict): return self.config_certificate_return def setup_config_certificate(self, data): self.config_certificate_return = data def update_admin_ep_certificate(self, context): return True class CertificateTestCase(base.FunctionalTest): def setUp(self): super(CertificateTestCase, self).setUp() def test_check_cert_dns_name_valid_SAN(self): # This certificate contains # CN: *.vbox.local # DNS: *.vbox.local certfile = os.path.join(os.path.dirname(__file__), "data", 'cert-with-key-SAN.pem') with open(certfile, 'rb') as f: pem_contents = f.read() cert = x509.load_pem_x509_certificate(pem_contents, default_backend()) result = cert_api._check_cert_dns_name(cert, 'vbox.local') self.assertTrue(result) result = cert_api._check_cert_dns_name(cert, 'domain.org') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'lab.vbox.local') self.assertIn("doesn't match", str(result)) def test_check_cert_dns_name_invalid_SAN(self): # This certificate contains # CN: *.vbox.local # DNS:*.*.vbox.local, DNS:bad.*.vbox.local check_skip_test(self) certfile = os.path.join(os.path.dirname(__file__), "data", 'cert-with-key-invalidDNS.pem') with open(certfile, 'rb') as f: pem_contents = f.read() cert = x509.load_pem_x509_certificate(pem_contents, default_backend()) result = cert_api._check_cert_dns_name(cert, 'vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'a.vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'a.b.vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'bad.b.vbox.local') self.assertIn("doesn't match", str(result)) def test_check_cert_dns_name_CN_only(self): # This certificate contains CN:*.vbox.local certfile = os.path.join(os.path.dirname(__file__), "data", 'cert-with-key-CNnoSAN.pem') with open(certfile, 'rb') as f: pem_contents = f.read() cert = x509.load_pem_x509_certificate(pem_contents, default_backend()) result = cert_api._check_cert_dns_name(cert, 'vbox.local') self.assertTrue(result) result = cert_api._check_cert_dns_name(cert, 'a.vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 
'a.b.vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'bad.b.vbox.local') self.assertIn("doesn't match", str(result)) def test_check_cert_dns_name_multi_SAN(self): # This certificate contains # CN: *.vbox.local # DNS: *.vbox.local, bad.*.vbox.local, *.example.com check_skip_test(self) certfile = os.path.join(os.path.dirname(__file__), "data", 'cert-with-key-multiSAN.pem') with open(certfile, 'rb') as f: pem_contents = f.read() cert = x509.load_pem_x509_certificate(pem_contents, default_backend()) result = cert_api._check_cert_dns_name(cert, 'vbox.local') self.assertTrue(result) # domain matches one of the DNS names, but not the CN result = cert_api._check_cert_dns_name(cert, 'example.com') self.assertTrue(result) result = cert_api._check_cert_dns_name(cert, 'a.vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'x.example.com') self.assertIn("doesn't match", str(result)) def test_check_cert_dns_name_CN_differ_SAN(self): # This certificate contains # CN: *.vbox.local # DNS: bad.*.vbox.local, *.example.com check_skip_test(self) certfile = os.path.join(os.path.dirname(__file__), "data", 'cert-with-key-CNdifferSAN.pem') with open(certfile, 'rb') as f: pem_contents = f.read() cert = x509.load_pem_x509_certificate(pem_contents, default_backend()) # domain matches CN, but does not match any of the DNS names result = cert_api._check_cert_dns_name(cert, 'vbox.local') self.assertIn("doesn't match", str(result)) # domain matches one of the DNS names, but not the CN result = cert_api._check_cert_dns_name(cert, 'example.com') self.assertTrue(result) result = cert_api._check_cert_dns_name(cert, 'a.vbox.local') self.assertIn("doesn't match", str(result)) result = cert_api._check_cert_dns_name(cert, 'x.example.com') self.assertIn("doesn't match", str(result)) class ApiCertificateTestCaseMixin(object): # API_HEADERS are a generic header passed to most API calls API_HEADERS = {'User-Agent': 'sysinv-test'} # API_PREFIX is the prefix for the URL API_PREFIX = '/certificate' # RESULT_KEY is the python table key for the list of results RESULT_KEY = 'certificates' # COMMON_FIELD is a field that is known to exist for inputs and outputs COMMON_FIELD = 'certificates' # expected_api_fields are attributes that should be populated by # an API query expected_api_fields = ['uuid'] # hidden_api_fields are attributes that should not be populated by # an API query hidden_api_fields = [] def setUp(self): super(ApiCertificateTestCaseMixin, self).setUp() self.fake_conductor_api = FakeConductorAPI() p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI') self.mock_conductor_api = p.start() self.mock_conductor_api.return_value = self.fake_conductor_api self.addCleanup(p.stop) def get_single_url(self, uuid): return '%s/%s' % (self.API_PREFIX, uuid) def _create_db_object(self, obj_id=None): return dbutils.create_test_certificate( id=obj_id, certtype='ssl_ca', signature='ssl_ca_123456789') @staticmethod def extract_certs_from_pem_file(certfile): """ extract certificates from a X509 PEM file """ marker = b'-----BEGIN CERTIFICATE-----' with open(certfile, 'rb') as f: pem_contents = f.read() start = 0 certs = [] while True: index = pem_contents.find(marker, start) if index == -1: break cert = x509.load_pem_x509_certificate(pem_contents[index::], default_backend()) certs.append(cert) start = index + len(marker) return certs @staticmethod def get_cert_signature(mode, cert): signature = mode + '_' + str(cert.serial_number) if 
len(signature) > 255: signature = signature[:255] return signature class ApiCertificatePostTestSuite(ApiCertificateTestCaseMixin, base.FunctionalTest): """ Certificate post operations """ def setUp(self): super(ApiCertificatePostTestSuite, self).setUp() self.create_test_isystem() # Mock the KubeOperator self.kube_get_secret_result = None def mock_kube_get_secret(obj, name, namespace): return self.kube_get_secret_result self.mocked_kube_get_secret = mock.patch( 'sysinv.common.kubernetes.KubeOperator.kube_get_secret', mock_kube_get_secret) self.mocked_kube_get_secret.start() self.addCleanup(self.mocked_kube_get_secret.stop) def create_test_isystem(self): return dbutils.create_test_isystem(capabilities={'https_enabled': True}) # Test successful POST operation to install 1 CA certificate def test_install_one_CA_certificate(self): mode = 'ssl_ca' certfile = os.path.join(os.path.dirname(__file__), "data", 'ca-cert-one-cert.pem') in_certs = self.extract_certs_from_pem_file(certfile) fake_config_certificate_return = [] for in_cert in in_certs: fake_config_certificate_return.append( {'signature': self.get_cert_signature(mode, in_cert), 'not_valid_before': in_cert.not_valid_before, 'not_valid_after': in_cert.not_valid_after}) self.fake_conductor_api.\ setup_config_certificate(fake_config_certificate_return) data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=False) self.assertEqual(response.status_code, http_client.OK) resp = json.loads(response.body) self.assertIn('certificates', resp) ret_certs = resp.get('certificates') self.assertEqual(len(in_certs), len(ret_certs)) for ret_cert in ret_certs: self.assertIn('certtype', ret_cert) self.assertEqual(ret_cert.get('certtype'), mode) self.assertIn('signature', ret_cert) self.assertIn('start_date', ret_cert) self.assertIn('expiry_date', ret_cert) found_match = False for in_cert in in_certs: ret_cert_start_date = str(ret_cert.get('start_date')) ret_cert_start_date = ret_cert_start_date.replace('+00:00', '') ret_cert_expiry_date = str(ret_cert.get('expiry_date')) ret_cert_expiry_date = \ ret_cert_expiry_date.replace('+00:00', '') if ret_cert.get('signature') == \ self.get_cert_signature(mode, in_cert) and \ ret_cert_start_date == \ str(in_cert.not_valid_before) and \ ret_cert_expiry_date == \ str(in_cert.not_valid_after): found_match = True self.assertTrue(found_match) def test_renew_certificate(self): certtype = constants.CERTIFICATE_TYPE_ADMIN_ENDPOINT data = {'certtype': certtype} response = self.post_json('%s/%s' % (self.API_PREFIX, 'renew_certificate'), data, headers=self.API_HEADERS, expect_errors=True) self.assertTrue(response) # Test successful POST operation to install 2 CA certificate def test_install_two_CA_certificate(self): mode = 'ssl_ca' certfile = os.path.join(os.path.dirname(__file__), "data", 'ca-cert-two-certs.pem') in_certs = self.extract_certs_from_pem_file(certfile) fake_config_certificate_return = [] for in_cert in in_certs: fake_config_certificate_return.append( {'signature': self.get_cert_signature(mode, in_cert), 'not_valid_before': in_cert.not_valid_before, 'not_valid_after': in_cert.not_valid_after}) self.fake_conductor_api.\ setup_config_certificate(fake_config_certificate_return) data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, 
expect_errors=False) self.assertEqual(response.status_code, http_client.OK) resp = json.loads(response.body) self.assertIn('certificates', resp) ret_certs = resp.get('certificates') self.assertEqual(len(in_certs), len(ret_certs)) for ret_cert in ret_certs: self.assertIn('certtype', ret_cert) self.assertEqual(ret_cert.get('certtype'), mode) self.assertIn('signature', ret_cert) self.assertIn('start_date', ret_cert) self.assertIn('expiry_date', ret_cert) found_match = False for in_cert in in_certs: ret_cert_start_date = str(ret_cert.get('start_date')) ret_cert_start_date = ret_cert_start_date.replace('+00:00', '') ret_cert_expiry_date = str(ret_cert.get('expiry_date')) ret_cert_expiry_date = \ ret_cert_expiry_date.replace('+00:00', '') if ret_cert.get('signature') == \ self.get_cert_signature(mode, in_cert) and \ ret_cert_start_date == \ str(in_cert.not_valid_before) and \ ret_cert_expiry_date == \ str(in_cert.not_valid_after): found_match = True self.assertTrue(found_match) # Test successful POST operation to install ssl certificate signed by # intermediate CA def test_install_2xcert_1xkey_ssl_certificate(self): mode = 'ssl' certfile = os.path.join(os.path.dirname(__file__), "data", 'ssl-cert-2xcert-1xkey-with-key.pem') in_certs = self.extract_certs_from_pem_file(certfile) fake_config_certificate_return = [] for index, in_cert in enumerate(in_certs): is_ca = False if index == 0 else True fake_config_certificate_return.append( {'signature': self.get_cert_signature(mode, in_cert), 'not_valid_before': in_cert.not_valid_before, 'not_valid_after': in_cert.not_valid_after, 'is_ca': is_ca}) self.fake_conductor_api.\ setup_config_certificate(fake_config_certificate_return) data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=False) self.assertEqual(response.status_code, http_client.OK) resp = json.loads(response.body) self.assertIn('certificates', resp) ret_certs = resp.get('certificates') # The installed cert contains the server cert and the intermediate # CA cert but the API returns only the server cert, which should match # the server cert in the cert file (the first one). self.assertEqual(len(ret_certs), 1) ret_cert = ret_certs[0] in_cert = in_certs[0] self.assertIn('certtype', ret_cert) self.assertEqual(ret_cert.get('certtype'), mode) self.assertIn('signature', ret_cert) self.assertIn('start_date', ret_cert) self.assertIn('expiry_date', ret_cert) ret_cert_start_date = str(ret_cert.get('start_date')) ret_cert_start_date = ret_cert_start_date.replace('+00:00', '') ret_cert_expiry_date = str(ret_cert.get('expiry_date')) ret_cert_expiry_date = ret_cert_expiry_date.replace('+00:00', '') found_match = False if ret_cert.get('signature') == \ self.get_cert_signature(mode, in_cert) and \ ret_cert_start_date == \ str(in_cert.not_valid_before) and \ ret_cert_expiry_date == \ str(in_cert.not_valid_after): found_match = True self.assertTrue(found_match) # Test POST operation to install ssl certificate signed by intermediate CA, # but the server cert and intermediate cert in the file is in wrong order. 
def test_install_2xcert_1xkey_ssl_certificate_wrong_order(self): mode = 'ssl' certfile = os.path.join(os.path.dirname(__file__), "data", 'ssl-cert-2xcert-1xkey-with-key-wrong-order.pem') data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=True) self.assertTrue(response.body) resp = json.loads(response.body) self.assertTrue(resp.get('error')) fault_string_expected = 'The first cert in the file should not be a ' \ 'CA cert' self.assertIn(fault_string_expected, str(resp.get('error'))) # Test successful POST operation to install docker_registry certificate # signed by intermediate CA def test_install_2xcert_1xkey_docker_registry_certificate(self): mode = 'docker_registry' certfile = os.path.join(os.path.dirname(__file__), "data", 'docker_registry-cert-2xcert-1xkey-with-key.pem') in_certs = self.extract_certs_from_pem_file(certfile) fake_config_certificate_return = [] for index, in_cert in enumerate(in_certs): is_ca = False if index == 0 else True fake_config_certificate_return.append( {'signature': self.get_cert_signature(mode, in_cert), 'not_valid_before': in_cert.not_valid_before, 'not_valid_after': in_cert.not_valid_after, 'is_ca': is_ca}) self.fake_conductor_api.\ setup_config_certificate(fake_config_certificate_return) data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=False) self.assertEqual(response.status_code, http_client.OK) resp = json.loads(response.body) self.assertIn('certificates', resp) ret_certs = resp.get('certificates') # The installed cert contains the server cert and the intermediate # CA cert but the API returns only the server cert, which should match # the server cert in the cert file (the first one). self.assertEqual(len(ret_certs), 1) ret_cert = ret_certs[0] in_cert = in_certs[0] self.assertIn('certtype', ret_cert) self.assertEqual(ret_cert.get('certtype'), mode) self.assertIn('signature', ret_cert) self.assertIn('start_date', ret_cert) self.assertIn('expiry_date', ret_cert) ret_cert_start_date = str(ret_cert.get('start_date')) ret_cert_start_date = ret_cert_start_date.replace('+00:00', '') ret_cert_expiry_date = str(ret_cert.get('expiry_date')) ret_cert_expiry_date = ret_cert_expiry_date.replace('+00:00', '') found_match = False if ret_cert.get('signature') == \ self.get_cert_signature(mode, in_cert) and \ ret_cert_start_date == \ str(in_cert.not_valid_before) and \ ret_cert_expiry_date == \ str(in_cert.not_valid_after): found_match = True self.assertTrue(found_match) # Test POST operation to install docker_registry certificate signed by # intermediate CA, but the server cert and intermediate cert in the file # is in wrong order. 
def test_install_2xcert_1xkey_docker_registry_certificate_wrong_order(self): mode = 'docker_registry' certfile = os.path.join(os.path.dirname(__file__), "data", 'docker_registry-cert-2xcert-1xkey-with-key-wrong-order.pem') data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=True) self.assertTrue(response.body) resp = json.loads(response.body) self.assertTrue(resp.get('error')) fault_string_expected = 'The first cert in the file should not be a ' \ 'CA cert' self.assertIn(fault_string_expected, str(resp.get('error'))) # Test failed installation of ssl certificate managed by cert-manager def test_force_failure_install_ssl_certificate(self): self.force_failure_install_certificate(constants.CERT_MODE_SSL) # Test failed installation of docker_registry certificate managed by cert-manager def test_force_failure_install_docker_registry_certificate(self): self.force_failure_install_certificate(constants.CERT_MODE_DOCKER_REGISTRY) def force_failure_install_certificate(self, mode): certfile = os.path.join(os.path.dirname(__file__), "data", 'ssl-cert-2xcert-1xkey-with-key.pem') in_certs = self.extract_certs_from_pem_file(certfile) fake_config_certificate_return = [] for index, in_cert in enumerate(in_certs): is_ca = False if index == 0 else True fake_config_certificate_return.append( {'signature': self.get_cert_signature(mode, in_cert), 'not_valid_before': in_cert.not_valid_before, 'not_valid_after': in_cert.not_valid_after, 'is_ca': is_ca}) self.fake_conductor_api.\ setup_config_certificate(fake_config_certificate_return) # Set k8s_secret value to True (mark it as being managed by cert-manager) self.kube_get_secret_result = 'true' # Default behavior (force=false) should fail data = {'mode': mode} files = [('file', certfile)] response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=True) self.assertEqual(response.status_code, http_client.OK) self.assertTrue(response.body) resp = json.loads(response.body) self.assertTrue(resp.get('error')) fault_err_msg = "Certificate is currently being managed by cert-manager" self.assertIn(fault_err_msg, str(resp.get('error'))) # Test successful forced installation of ssl certificate managed by cert-manager def test_force_success_install_ssl_certificate(self): self.force_success_install_certificate(constants.CERT_MODE_SSL) # Test successful forced installation of docker_registry certificate managed by cert-manager def test_force_success_install_docker_registry_certificate(self): self.force_success_install_certificate(constants.CERT_MODE_DOCKER_REGISTRY) def force_success_install_certificate(self, mode): certfile = os.path.join(os.path.dirname(__file__), "data", 'ssl-cert-2xcert-1xkey-with-key.pem') in_certs = self.extract_certs_from_pem_file(certfile) fake_config_certificate_return = [] for index, in_cert in enumerate(in_certs): is_ca = False if index == 0 else True fake_config_certificate_return.append( {'signature': self.get_cert_signature(mode, in_cert), 'not_valid_before': in_cert.not_valid_before, 'not_valid_after': in_cert.not_valid_after, 'is_ca': is_ca}) self.fake_conductor_api.\ setup_config_certificate(fake_config_certificate_return) # Set k8s_secret value to True (mark it as being managed by cert-manager) self.kube_get_secret_result = 'true' data = {'mode': mode, 'force': 'true'} files = [('file', certfile)] response = 
self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'), data, upload_files=files, headers=self.API_HEADERS, expect_errors=True) self.assertEqual(response.status_code, http_client.OK) resp = json.loads(response.body) self.assertIn('certificates', resp) class ApiCertificateDeleteTestSuite(ApiCertificateTestCaseMixin, base.FunctionalTest): """ Certificate delete operations """ def setUp(self): super(ApiCertificateDeleteTestSuite, self).setUp() self.delete_object = self._create_db_object() # Test successful CA certficate DELETE operation def test_delete_ca_certificate(self): uuid = self.delete_object.uuid certtype = self.delete_object.certtype signature = self.delete_object.signature response = self.delete(self.get_single_url(uuid), headers=self.API_HEADERS, expect_errors=False) self.assertEqual(response.status_code, http_client.OK) self.assertTrue(response.body) resp = json.loads(response.body) self.assertIn('uuid', resp) self.assertEqual(uuid, resp.get('uuid')) self.assertIn('certtype', resp) self.assertEqual(certtype, resp.get('certtype')) self.assertIn('signature', resp) self.assertEqual(signature, resp.get('signature')) # Test CA certficate DELETE operation, no certificate found def test_delete_ca_certificate_not_found(self): uuid = UUID.uuid4() response = self.delete(self.get_single_url(uuid), headers=self.API_HEADERS, expect_errors=True) self.assertEqual(response.status_code, http_client.BAD_REQUEST) self.assertTrue(response.body) resp = json.loads(response.body) self.assertTrue(resp.get('error_message')) fault_string_expected = 'No certificate found for %s' % uuid self.assertIn(fault_string_expected, str(resp.get('error_message')))
[ "uuid.uuid4", "sysinv.api.controllers.v1.certificate._check_cert_dns_name", "sysinv.tests.db.utils.create_test_isystem", "json.loads", "os.path.dirname", "mock.patch", "sysinv.tests.db.utils.create_test_certificate", "mock.MagicMock", "cryptography.hazmat.backends.default_backend" ]
[((1318, 1334), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1332, 1334), False, 'import mock\n'), ((2313, 2362), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""vbox.local"""'], {}), "(cert, 'vbox.local')\n", (2342, 2362), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((2413, 2462), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""domain.org"""'], {}), "(cert, 'domain.org')\n", (2442, 2462), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((2533, 2586), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""lab.vbox.local"""'], {}), "(cert, 'lab.vbox.local')\n", (2562, 2586), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((3187, 3236), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""vbox.local"""'], {}), "(cert, 'vbox.local')\n", (3216, 3236), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((3307, 3358), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""a.vbox.local"""'], {}), "(cert, 'a.vbox.local')\n", (3336, 3358), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((3429, 3482), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""a.b.vbox.local"""'], {}), "(cert, 'a.b.vbox.local')\n", (3458, 3482), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((3553, 3608), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""bad.b.vbox.local"""'], {}), "(cert, 'bad.b.vbox.local')\n", (3582, 3608), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((4109, 4158), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""vbox.local"""'], {}), "(cert, 'vbox.local')\n", (4138, 4158), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((4209, 4260), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""a.vbox.local"""'], {}), "(cert, 'a.vbox.local')\n", (4238, 4260), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((4331, 4384), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""a.b.vbox.local"""'], {}), "(cert, 'a.b.vbox.local')\n", (4360, 4384), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((4455, 4510), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""bad.b.vbox.local"""'], {}), "(cert, 'bad.b.vbox.local')\n", (4484, 4510), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((5117, 5166), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""vbox.local"""'], {}), "(cert, 'vbox.local')\n", (5146, 5166), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((5279, 5329), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""example.com"""'], {}), "(cert, 'example.com')\n", (5308, 5329), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((5380, 
5431), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""a.vbox.local"""'], {}), "(cert, 'a.vbox.local')\n", (5409, 5431), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((5502, 5554), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""x.example.com"""'], {}), "(cert, 'x.example.com')\n", (5531, 5554), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((6223, 6272), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""vbox.local"""'], {}), "(cert, 'vbox.local')\n", (6252, 6272), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((6405, 6455), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""example.com"""'], {}), "(cert, 'example.com')\n", (6434, 6455), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((6506, 6557), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""a.vbox.local"""'], {}), "(cert, 'a.vbox.local')\n", (6535, 6557), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((6628, 6680), 'sysinv.api.controllers.v1.certificate._check_cert_dns_name', 'cert_api._check_cert_dns_name', (['cert', '"""x.example.com"""'], {}), "(cert, 'x.example.com')\n", (6657, 6680), True, 'from sysinv.api.controllers.v1 import certificate as cert_api\n'), ((7563, 7613), 'mock.patch', 'mock.patch', (['"""sysinv.conductor.rpcapi.ConductorAPI"""'], {}), "('sysinv.conductor.rpcapi.ConductorAPI')\n", (7573, 7613), False, 'import mock\n'), ((7909, 8005), 'sysinv.tests.db.utils.create_test_certificate', 'dbutils.create_test_certificate', ([], {'id': 'obj_id', 'certtype': '"""ssl_ca"""', 'signature': '"""ssl_ca_123456789"""'}), "(id=obj_id, certtype='ssl_ca', signature=\n 'ssl_ca_123456789')\n", (7940, 8005), True, 'from sysinv.tests.db import utils as dbutils\n'), ((9414, 9507), 'mock.patch', 'mock.patch', (['"""sysinv.common.kubernetes.KubeOperator.kube_get_secret"""', 'mock_kube_get_secret'], {}), "('sysinv.common.kubernetes.KubeOperator.kube_get_secret',\n mock_kube_get_secret)\n", (9424, 9507), False, 'import mock\n'), ((9682, 9747), 'sysinv.tests.db.utils.create_test_isystem', 'dbutils.create_test_isystem', ([], {'capabilities': "{'https_enabled': True}"}), "(capabilities={'https_enabled': True})\n", (9709, 9747), True, 'from sysinv.tests.db import utils as dbutils\n'), ((10969, 10994), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (10979, 10994), False, 'import json\n'), ((13967, 13992), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (13977, 13992), False, 'import json\n'), ((16621, 16646), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (16631, 16646), False, 'import json\n'), ((18772, 18797), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (18782, 18797), False, 'import json\n'), ((20462, 20487), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (20472, 20487), False, 'import json\n'), ((22667, 22692), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (22677, 22692), False, 'import json\n'), ((24855, 24880), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (24865, 24880), False, 'import json\n'), ((26941, 26966), 
'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (26951, 26966), False, 'import json\n'), ((27832, 27857), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (27842, 27857), False, 'import json\n'), ((28272, 28284), 'uuid.uuid4', 'UUID.uuid4', ([], {}), '()\n', (28282, 28284), True, 'import uuid as UUID\n'), ((28578, 28603), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (28588, 28603), False, 'import json\n'), ((2002, 2027), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2017, 2027), False, 'import os\n'), ((2276, 2293), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (2291, 2293), False, 'from cryptography.hazmat.backends import default_backend\n'), ((2869, 2894), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2884, 2894), False, 'import os\n'), ((3150, 3167), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (3165, 3167), False, 'from cryptography.hazmat.backends import default_backend\n'), ((3794, 3819), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3809, 3819), False, 'import os\n'), ((4072, 4089), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (4087, 4089), False, 'from cryptography.hazmat.backends import default_backend\n'), ((4801, 4826), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4816, 4826), False, 'import os\n'), ((5080, 5097), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (5095, 5097), False, 'from cryptography.hazmat.backends import default_backend\n'), ((5835, 5860), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5850, 5860), False, 'import os\n'), ((6117, 6134), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (6132, 6134), False, 'from cryptography.hazmat.backends import default_backend\n'), ((9917, 9942), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9932, 9942), False, 'import os\n'), ((12839, 12864), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (12854, 12864), False, 'import os\n'), ((15446, 15471), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15461, 15471), False, 'import os\n'), ((18232, 18257), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (18247, 18257), False, 'import os\n'), ((19275, 19300), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (19290, 19300), False, 'import os\n'), ((22115, 22140), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (22130, 22140), False, 'import os\n'), ((23461, 23486), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (23476, 23486), False, 'import os\n'), ((25622, 25647), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (25637, 25647), False, 'import os\n'), ((8585, 8602), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (8600, 8602), False, 'from cryptography.hazmat.backends import default_backend\n')]
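The tests identify certificates by a mode-plus-serial signature capped at 255 characters (see get_cert_signature above). A minimal sketch of that convention using the cryptography package; the PEM filename is a placeholder:

from cryptography import x509
from cryptography.hazmat.backends import default_backend

with open('ca-cert.pem', 'rb') as f:   # placeholder filename
    cert = x509.load_pem_x509_certificate(f.read(), default_backend())

signature = ('ssl_ca' + '_' + str(cert.serial_number))[:255]
print(signature, cert.not_valid_before, cert.not_valid_after)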
# -*- coding: UTF8 -*-
'''
Web status database unit tests
@author: <NAME>
@version: 1.1
'''
import unittest

from database.utils.configuration import DBConfigurator
from database.systemStatusDB.systemStatusDBReader import SystemStatusDatabaseReader
from database.systemStatusDB.systemStatusDBWriter import SystemStatusDatabaseWriter

class Test(unittest.TestCase):

    def setUp(self):
        dbConfigurator = DBConfigurator("")
        dbConfigurator.runSQLScript("SystemStatusDBTest", "./SystemStatusDBTest.sql")
        dbConfigurator.addUser("website", "cygnuscloud", "SystemStatusDBTest", False)
        dbConfigurator.addUser("statusDBUpdater", "cygnuscloud", "SystemStatusDBTest", True)
        self.__reader = SystemStatusDatabaseReader("website", "cygnuscloud", "SystemStatusDBTest")
        self.__reader.connect()
        self.__writer = SystemStatusDatabaseWriter("statusDBUpdater", "cygnuscloud", "SystemStatusDBTest")
        self.__writer.connect()

    def tearDown(self):
        self.__reader.disconnect()
        self.__writer.disconnect()
        dbConfigurator = DBConfigurator("")
        dbConfigurator.dropDatabase("SystemStatusDBTest")

    def testUpdateVMServerData(self):
        segment1Data = [('Server1', 'Ready', 'IP1', 1)]
        segment2Data = [('Server2', 'Booting', 'IP2', 1)]
        self.__writer.processVMServerSegment(1, 2, segment1Data)
        serversData = self.__reader.getVMServersData()
        self.assertEquals(serversData, [], "processVMServerSegment does not work")
        self.__writer.processVMServerSegment(2, 2, segment2Data)
        serversData = self.__reader.getVMServersData()
        d2 = {"VMServerName":"Server2", "VMServerStatus":"Booting",
              "VMServerIP":"IP2", "VMServerListenningPort":1}
        d1 = {"VMServerName":"Server1", "VMServerStatus":"Ready",
              "VMServerIP":"IP1", "VMServerListenningPort":1}
        self.assertEquals(serversData, [d1, d2], "processVMServerSegment does not work")
        segment3Data = [("Server3", "Ready", "IP3", 1)]
        self.__writer.processVMServerSegment(1, 1, segment3Data)
        serversData = self.__reader.getVMServersData()
        d3 = {"VMServerName":"Server3", "VMServerStatus":"Ready",
              "VMServerIP":"IP3", "VMServerListenningPort":1}
        self.assertEquals(serversData, [d3], "processVMServerSegment does not work")
        segment4Data = [("Server3", "Ready", "IP3", 1), ("Server4", "Ready", "IP4", 1)]
        self.__writer.processVMServerSegment(1, 1, segment4Data)
        serversData = self.__reader.getVMServersData()
        self.assertEquals(serversData, [d3], "processVMServerSegment does not work")

    def testUpdateVMDistributionData(self):
        segment1Data = [('Server1', 1), ('Server1', 2), ('Server1', 3)]
        segment2Data = [('Server2', 1), ('Server2', 4), ('Server2', 5)]
        self.__writer.processVMDistributionSegment(1, 2, segment1Data)
        # Mid-batch, the distribution data must still be hidden.
        distributionData = self.__reader.getVMDistributionData()
        self.assertEquals(distributionData, [], "processVMDistributionSegment does not work")
        self.__writer.processVMDistributionSegment(2, 2, segment2Data)
        distributionData = self.__reader.getVMDistributionData()
        d1 = {"VMServerName":"Server1", "VMID":1}
        d2 = {"VMServerName":"Server1", "VMID":2}
        d3 = {"VMServerName":"Server1", "VMID":3}
        d4 = {"VMServerName":"Server2", "VMID":1}
        d5 = {"VMServerName":"Server2", "VMID":4}
        d6 = {"VMServerName":"Server2", "VMID":5}
        self.assertEquals(distributionData, [d1, d2, d3, d4, d5, d6],
                          "processVMDistributionSegment does not work")
        segment3Data = [('Server4', 10)]
        self.__writer.processVMDistributionSegment(1, 1, segment3Data)
        distributionData = self.__reader.getVMDistributionData()
        d1 = {"VMServerName":"Server4", "VMID":10}
        self.assertEquals(distributionData, [d1], "processVMDistributionSegment does not work")

    def testUpdateActiveVMData(self):
        segment1Data = [('Server1', 'Ready', 'IP1', 1)]
        self.__writer.processVMServerSegment(1, 1, segment1Data)
        segment1Data = [(1, 1, 'Debian1', 15800, 'Password')]
        segment2Data = [(2, 1, 'Debian1', 15802, 'Password')]
        self.__writer.processActiveVMSegment(1, 2, 'IP1', segment1Data)
        self.__writer.processActiveVMSegment(2, 2, 'IP1', segment2Data)
        result = self.__reader.getActiveVMsData(True)
        expectedResult = [
            {"VMServerName": "Server1", "UserID": 2, "VMID": 1,
             "VMName": "Debian1", "VNCPort": 15802, "VNCPassword": "Password"},
            {"VMServerName": "Server1", "UserID": 1, "VMID": 1,
             "VMName": "Debian1", "VNCPort": 15800, "VNCPassword": "Password"}]
        self.assertEquals(result, expectedResult, "processActiveVMSegment does not work")
        result = self.__reader.getActiveVMsData(True)
        expectedResult = [
            {"VMServerName": "Server1", "UserID": 1, "VMID": 1,
             "VMName": "Debian1", "VNCPort": 15800, "VNCPassword": "Password"}]
        self.assertEquals(result, expectedResult, "getActiveVMsData does not work")

if __name__ == "__main__":
    unittest.main()
[ "unittest.main", "database.systemStatusDB.systemStatusDBWriter.SystemStatusDatabaseWriter", "database.systemStatusDB.systemStatusDBReader.SystemStatusDatabaseReader", "database.utils.configuration.DBConfigurator" ]
[((5166, 5181), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5179, 5181), False, 'import unittest\n'), ((418, 436), 'database.utils.configuration.DBConfigurator', 'DBConfigurator', (['""""""'], {}), "('')\n", (432, 436), False, 'from database.utils.configuration import DBConfigurator\n'), ((726, 800), 'database.systemStatusDB.systemStatusDBReader.SystemStatusDatabaseReader', 'SystemStatusDatabaseReader', (['"""website"""', '"""cygnuscloud"""', '"""SystemStatusDBTest"""'], {}), "('website', 'cygnuscloud', 'SystemStatusDBTest')\n", (752, 800), False, 'from database.systemStatusDB.systemStatusDBReader import SystemStatusDatabaseReader\n'), ((857, 943), 'database.systemStatusDB.systemStatusDBWriter.SystemStatusDatabaseWriter', 'SystemStatusDatabaseWriter', (['"""statusDBUpdater"""', '"""cygnuscloud"""', '"""SystemStatusDBTest"""'], {}), "('statusDBUpdater', 'cygnuscloud',\n 'SystemStatusDBTest')\n", (883, 943), False, 'from database.systemStatusDB.systemStatusDBWriter import SystemStatusDatabaseWriter\n'), ((1092, 1110), 'database.utils.configuration.DBConfigurator', 'DBConfigurator', (['""""""'], {}), "('')\n", (1106, 1110), False, 'from database.utils.configuration import DBConfigurator\n')]
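One reading of the segmented-update contract these tests exercise: nothing becomes visible until the final segment of a batch arrives, and a new batch replaces the old data. The real writer and reader sit on a database, so the in-memory stand-in below is only an illustration of the observable behaviour, not the actual implementation:

class SegmentBuffer:
    def __init__(self):
        self._pending = []
        self.committed = []

    def process_segment(self, number, total, rows):
        self._pending.extend(rows)
        if number == total:          # last segment closes the batch
            self.committed = self._pending
            self._pending = []

buf = SegmentBuffer()
buf.process_segment(1, 2, [('Server1', 'Ready', 'IP1', 1)])
assert buf.committed == []       # half a batch stays invisible
buf.process_segment(2, 2, [('Server2', 'Booting', 'IP2', 1)])
assert len(buf.committed) == 2   # complete batch becomes visible
buf.process_segment(1, 1, [('Server3', 'Ready', 'IP3', 1)])
assert len(buf.committed) == 1   # a new batch replaces the old one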
#! /usr/bin/env python3

import itertools as it
import re
import sys

from typing import Generator, Iterable, List, Match, Optional, Tuple

test = False
if len(sys.argv) > 1:
    if sys.argv[1] == "--test":
        test = True


# Utilities
def rematch(pattern: str, string: str) -> Optional[Match]:
    return re.fullmatch(pattern, string)


def pbits(num: int, pad: int = 32) -> str:
    return bin(num)[2:].zfill(pad)


def sizezip(*iterables: Iterable) -> Generator[Tuple, None, None]:
    """
    Same as the :class:`zip` function, but verifies that the lengths of the
    :class:`list`s or :class:`set`s are the same.
    """
    assert len(set(len(x) for x in iterables)) == 1  # type: ignore
    yield from zip(*iterables)


# Input parsing
with open("inputs/14.txt") as f:
    lines: List[str] = [l.strip() for l in f.readlines()]

# Here's a bit of code that I wrote after solving to figure out the bounds of my input.
max_mem_loc = 0
for l in lines:
    if rematch(r"mem\[(\d+)\] = (\d+)", l):
        max_mem_loc = max(max_mem_loc, int(rematch(r"mem\[(\d+)\] = \d+", l).group(1)))
print("The maximum memory location was:", max_mem_loc)

########################################################################################
print(f"\n{'=' * 30}\n")
print("Part 1:")


def part1():
    """
    Part 1, I did some bit-hacking instead of doing it stringly typed, which was a
    mistake as it screwed me for part 2, and it was also way more difficult than it
    was worth. I also need to be better at Python's built-in integer conversion
    libraries.
    """
    # Using a dict to store memory because I don't want to figure out how much memory
    # to pre-allocate.
    mem = {}

    # andmask and ormask start out as the identity for each of the corresponding
    # operations.
    # For any given bit, X & 1 = X
    # For any given bit, X | 0 = X
    andmask = 1
    ormask = 0
    for line in lines:
        if rematch("mask.*", line):
            # Updating the masks.
            andmask = 1
            ormask = 0
            # Go through all of the characters in the mask.
            for x in rematch("mask = (.*)", line).group(1):
                # First, binary left shift the andmask and ormask by one. This moves all
                # the bits over to the left by 1, leaving a 0 at the least-significant
                # bit (LSB).
                # This is equivalent to multiplying both of these by 2.
                andmask = andmask << 1
                ormask = ormask << 1
                if x != "0":
                    # Either X or 1, we need to preserve the value, so set the andmask
                    # LSB to 1, which is the identity. If it is 0, then we let it stay
                    # because we want it to override the existing value.
                    andmask |= 1
                if x == "1":
                    # If it's a 1, then set the ormask to 1, so that it overrides it. In
                    # all other cases, we just leave the ormask as is since it is
                    # already the identity.
                    ormask |= 1
        else:
            # Destructure the memory location and apply the andmask and ormask to the
            # value before setting it.
            loc, val = map(int, rematch(r"mem\[(\d+)\] = (\d+)", line).groups())
            mem[loc] = val & andmask | ormask

    return sum(mem.values())


ans_part1 = part1()
print(ans_part1)

# Store the attempts that failed here.
tries = [16073107098]
print("Tries Part 1:", tries)
assert ans_part1 not in tries, "Same as an incorrect answer!"

# Regression Test
assert test or ans_part1 == 6386593869035

########################################################################################
print("\nPart 2:")


def part2():
    """
    In this one, I converted to use stringly typed masks instead of the crazy
    bithacking that I did in Part 1.
    """
    mem = {}

    max_count_x = 0
    # this is just here to make my linter happy because it doesn't know that curmask is
    # going to be set on the first iteration of the loop.
curmask = "" for line in lines: if rematch("mask.*", line): curmask = rematch("mask = (.*)", line).group(1) else: loc, val = map(int, rematch(r"mem\[(\d+)\] = (\d+)", line).groups()) # Pad the memory access location so that it is the same size as the curmask. # This allows me to zip and not loose values. access = pbits(loc, len(curmask)) # Compute the result bits as a string (this allows us to have the Xs). result_bits = "" for access_loc_bit, curmask_bit in sizezip(access, curmask): if curmask_bit == "0": result_bits += access_loc_bit elif curmask_bit == "1": result_bits += "1" else: result_bits += "X" # Compute all of the bit combos with itertools.product. This gives all of # the "permutations with replacement" of 0 and 1 of size N where N is the # number of Xs in the result. num_xs = result_bits.count("X") max_count_x = max(max_count_x, num_xs) for bit_combo in it.product("01", repeat=num_xs): # Compute the actual memory location to store the value to by going # through all of the bits in the result and if it's an X, using the # corresponding value in the bit_combo instead of X. real_loc = "" combo_idx = 0 for b in result_bits: if b in "01": real_loc += b else: real_loc += bit_combo[combo_idx] combo_idx += 1 # Interpret it as binary mem[real_loc] = val print( "Max number of Xs in any given string: {} so only ever {} tuples out of\n" " the call to `it.product`".format(max_count_x, 2 ** max_count_x) ) return sum(mem.values()) ans_part2 = part2() print(ans_part2) # Store the attempts that failed here. tries2 = [] print("Tries Part 2:", tries2) assert ans_part2 not in tries2, "Same as an incorrect answer!" # Regression Test assert test or ans_part2 == 4288986482164
[ "re.fullmatch", "itertools.product" ]
[((310, 339), 're.fullmatch', 're.fullmatch', (['pattern', 'string'], {}), '(pattern, string)\n', (322, 339), False, 'import re\n'), ((5258, 5289), 'itertools.product', 'it.product', (['"""01"""'], {'repeat': 'num_xs'}), "('01', repeat=num_xs)\n", (5268, 5289), True, 'import itertools as it\n')]
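The andmask/ormask trick in part1 is easiest to see on the sample mask from the puzzle statement (a '1' forces a bit on via the ormask, a '0' forces it off via the andmask, and 'X' leaves it alone). The snippet below is my own distillation, not code from the file above; the helper name masks_from is made up, and the sample values (11 becomes 73) are taken from the puzzle description.

def masks_from(mask: str):
    andmask, ormask = 0, 0
    for ch in mask:
        # bool subclasses int, so OR-ing the comparison appends a 0/1 bit.
        andmask = (andmask << 1) | (ch != "0")
        ormask = (ormask << 1) | (ch == "1")
    return andmask, ormask

mask = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X"
andmask, ormask = masks_from(mask)
# Bit 6 is forced to 1 (+64) and bit 1 is forced to 0 (-2): 11 -> 73, 0 -> 64.
assert 11 & andmask | ormask == 73
assert 0 & andmask | ormask == 64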
from setuptools import setup, find_packages from os import path here = path.abspath(path.dirname(__file__)) with open(path.join(here, "README.md"), encoding = "utf-8") as f: long_description = f.read() setup( name = "synpp", version = "1.5.0", description = "Synthetic population pipeline package for eqasim", long_description = long_description, long_description_content_type = "text/markdown", url = "https://github.com/eqasim-org/synpp", author = "<NAME>", author_email = "<EMAIL>", keywords = "pipeline automation synthetic population dependency management transport", package_dir = { "": "src" }, packages = find_packages(where = "src"), python_requires='>=3.0', install_requires = ["networkx>=2.4", "PyYAML>=5.1.2", "pyzmq>=18.1.0"], extras_require = { "test": ["pytest>=5.3.1"], "example": ["pandas>=0.25.3"] }, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering", ], )
[ "os.path.dirname", "os.path.join", "setuptools.find_packages" ]
[((85, 107), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (97, 107), False, 'from os import path\n'), ((120, 148), 'os.path.join', 'path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (129, 148), False, 'from os import path\n'), ((664, 690), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (677, 690), False, 'from setuptools import setup, find_packages\n')]
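For reference, the extras_require block above maps onto install commands roughly as follows; the commands run in a shell, so they are shown here as comments, and the exact behaviour depends on the published package metadata.

# pip install synpp              # core only: networkx, PyYAML, pyzmq
# pip install "synpp[test]"      # adds pytest for running the test suite
# pip install "synpp[example]"   # adds pandas for the bundled example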
###############################################################
# pytest -v --capture=no tests/test_storage_azure.py
# pytest -v tests/test_storage_azure.py
# pytest -v --capture=no tests/test_storage_azure.py::TestAzureStorage::<METHODNAME>
###############################################################
import os
import sys
from pathlib import Path
from pprint import pprint

import pytest

from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.common.Shell import Shell
from cloudmesh.common.StopWatch import StopWatch
from cloudmesh.common.util import HEADING
from cloudmesh.common.util import path_expand
from cloudmesh.common.util import writefile
# NOTE: the original file used the following names without importing them;
# these import paths are assumed from cloudmesh-4 conventions.
from cloudmesh.common.variables import Variables
from cloudmesh.common.debug import VERBOSE
from cloudmesh.configuration.Config import Config
from cloudmesh.storage.Provider import Provider

print("this seems to be the same as in test_05_storage")
print("I suggest deleting all tests that are covered "
      "by 05 and only including tests here that are unique")
print("we exit now to make sure this gets your attention")
sys.exit()

# cms set storage=parallelaws3

Benchmark.debug()

user = Config()["cloudmesh.profile.user"]

variables = Variables()
VERBOSE(variables.dict())

service = variables.parameter('storage')

print(f"Test run for {service}")

if service is None:
    raise ValueError("storage is not set")

provider = Provider(service=service)
print('provider:', provider, provider.kind)


@pytest.mark.incremental
class TestStorageParallelaws3(object):

    def create_local_file(self, location, content):
        Shell.mkdir(os.path.dirname(path_expand(location)))
        writefile(location, content)

    #
    # BUG: Please use location /tmp just like in 03 test
    #
    def test_create_local_source(self):
        HEADING()
        StopWatch.start("create source")
        self.sourcedir = path_expand("~/.cloudmesh/storage/test/")
        self.create_local_file("~/.cloudmesh/storage/test/a/a.txt", "content of a")
        self.create_local_file("~/.cloudmesh/storage/test/a/b/b.txt", "content of b")
        self.create_local_file("~/.cloudmesh/storage/test/a/b/c/c.txt", "content of c")
        StopWatch.stop("create source")

        # test if the files are ok
        assert True

    def test_put(self):
        HEADING()
        # root="~/.cloudmesh"
        # src = "storage/test/a/a.txt"
        # src = f"local:{src}"
        # dst = f"aws:{src}"
        # test_file = self.p.put(src, dst)
        # src = "storage_a:test/a/a.txt"
        src = "~/.cloudmesh/storage/test/"
        dst = '/'
        StopWatch.start("put")
        test_file = provider.put(src, dst)
        StopWatch.stop("put")
        pprint(test_file)

        assert test_file is not None

    def test_put_recursive(self):
        HEADING()
        # root="~/.cloudmesh"
        # src = "storage/test/a/a.txt"
        # source = f"local:{src}"
        # destination = f"aws:{src}"
        # test_file = self.p.put(src, dst)
        # src = "storage_a:test/a/a.txt"
        src = "~/.cloudmesh/storage/test/"
        dst = '/'
        StopWatch.start("put")
        test_file = provider.put(src, dst, True)
        StopWatch.stop("put")
        pprint(test_file)

        assert test_file is not None

    def test_get(self):
        HEADING()
        src = "/a.txt"
        dst = "~/.cloudmesh/storage/test"
        StopWatch.start("get")
        file = provider.get(src, dst)
        StopWatch.stop("get")
        pprint(file)

        assert file is not None

    def test_list(self):
        HEADING()
        src = '/'
        StopWatch.start("list")
        contents = provider.list(src)
        StopWatch.stop("list")
        for c in contents:
            pprint(c)

        assert len(contents) > 0

    def test_list_dir_only(self):
        HEADING()
        src = '/'
        dir = "a"
        StopWatch.start("list")
        contents = provider.list(src, dir, True)
        StopWatch.stop("list")
        for c in contents:
            pprint(c)

        assert len(contents) > 0

    def test_search(self):
        HEADING()
        src = '/'
        filename = "a.txt"
        StopWatch.start("search")
        search_files = provider.search(src, filename, True)
        StopWatch.stop("search")
        pprint(search_files)

        assert len(search_files) > 0
        # assert filename in search_files[0]['cm']["name"]

    def test_create_dir(self):
        HEADING()
        src = 'created_dir'
        StopWatch.start("create dir")
        directory = provider.create_dir(src)
        StopWatch.stop("create dir")
        pprint(directory)

        assert directory is not None

    def test_delete(self):
        HEADING()
        src = '/created_dir'
        StopWatch.start("delete")
        provider.delete(src)
        StopWatch.stop("delete")

    def test_benchmark(self):
        Benchmark.print(sysinfo=False, csv=True, tag=service)
[ "cloudmesh.common.util.HEADING", "cloudmesh.common.StopWatch.StopWatch.stop", "cloudmesh.common.util.path_expand", "cloudmesh.common.Benchmark.Benchmark.print", "cloudmesh.common.util.writefile", "cloudmesh.common.StopWatch.StopWatch.start", "pprint.pprint", "cloudmesh.common.Benchmark.Benchmark.debug" ]
[((922, 939), 'cloudmesh.common.Benchmark.Benchmark.debug', 'Benchmark.debug', ([], {}), '()\n', (937, 939), False, 'from cloudmesh.common.Benchmark import Benchmark\n'), ((1431, 1459), 'cloudmesh.common.util.writefile', 'writefile', (['location', 'content'], {}), '(location, content)\n', (1440, 1459), False, 'from cloudmesh.common.util import writefile\n'), ((1578, 1587), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (1585, 1587), False, 'from cloudmesh.common.util import HEADING\n'), ((1596, 1628), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""create source"""'], {}), "('create source')\n", (1611, 1628), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((1654, 1695), 'cloudmesh.common.util.path_expand', 'path_expand', (['"""~/.cloudmesh/storage/test/"""'], {}), "('~/.cloudmesh/storage/test/')\n", (1665, 1695), False, 'from cloudmesh.common.util import path_expand\n'), ((2055, 2086), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""create source"""'], {}), "('create source')\n", (2069, 2086), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((2176, 2185), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (2183, 2185), False, 'from cloudmesh.common.util import HEADING\n'), ((2472, 2494), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""put"""'], {}), "('put')\n", (2487, 2494), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((2546, 2567), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""put"""'], {}), "('put')\n", (2560, 2567), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((2577, 2594), 'pprint.pprint', 'pprint', (['test_file'], {}), '(test_file)\n', (2583, 2594), False, 'from pprint import pprint\n'), ((2676, 2685), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (2683, 2685), False, 'from cloudmesh.common.util import HEADING\n'), ((2983, 3005), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""put"""'], {}), "('put')\n", (2998, 3005), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3063, 3084), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""put"""'], {}), "('put')\n", (3077, 3084), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3094, 3111), 'pprint.pprint', 'pprint', (['test_file'], {}), '(test_file)\n', (3100, 3111), False, 'from pprint import pprint\n'), ((3183, 3192), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (3190, 3192), False, 'from cloudmesh.common.util import HEADING\n'), ((3266, 3288), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""get"""'], {}), "('get')\n", (3281, 3288), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3335, 3356), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""get"""'], {}), "('get')\n", (3349, 3356), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3365, 3377), 'pprint.pprint', 'pprint', (['file'], {}), '(file)\n', (3371, 3377), False, 'from pprint import pprint\n'), ((3445, 3454), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (3452, 3454), False, 'from cloudmesh.common.util import HEADING\n'), ((3481, 3504), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""list"""'], {}), "('list')\n", (3496, 3504), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3551, 3573), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""list"""'], {}), 
"('list')\n", (3565, 3573), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3700, 3709), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (3707, 3709), False, 'from cloudmesh.common.util import HEADING\n'), ((3754, 3777), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""list"""'], {}), "('list')\n", (3769, 3777), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3835, 3857), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""list"""'], {}), "('list')\n", (3849, 3857), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((3977, 3986), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (3984, 3986), False, 'from cloudmesh.common.util import HEADING\n'), ((4040, 4065), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""search"""'], {}), "('search')\n", (4055, 4065), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((4134, 4158), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""search"""'], {}), "('search')\n", (4148, 4158), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((4167, 4187), 'pprint.pprint', 'pprint', (['search_files'], {}), '(search_files)\n', (4173, 4187), False, 'from pprint import pprint\n'), ((4324, 4333), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (4331, 4333), False, 'from cloudmesh.common.util import HEADING\n'), ((4370, 4399), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""create dir"""'], {}), "('create dir')\n", (4385, 4399), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((4453, 4481), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""create dir"""'], {}), "('create dir')\n", (4467, 4481), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((4491, 4508), 'pprint.pprint', 'pprint', (['directory'], {}), '(directory)\n', (4497, 4508), False, 'from pprint import pprint\n'), ((4583, 4592), 'cloudmesh.common.util.HEADING', 'HEADING', ([], {}), '()\n', (4590, 4592), False, 'from cloudmesh.common.util import HEADING\n'), ((4630, 4655), 'cloudmesh.common.StopWatch.StopWatch.start', 'StopWatch.start', (['"""delete"""'], {}), "('delete')\n", (4645, 4655), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((4693, 4717), 'cloudmesh.common.StopWatch.StopWatch.stop', 'StopWatch.stop', (['"""delete"""'], {}), "('delete')\n", (4707, 4717), False, 'from cloudmesh.common.StopWatch import StopWatch\n'), ((4757, 4810), 'cloudmesh.common.Benchmark.Benchmark.print', 'Benchmark.print', ([], {'sysinfo': '(False)', 'csv': '(True)', 'tag': 'service'}), '(sysinfo=False, csv=True, tag=service)\n', (4772, 4810), False, 'from cloudmesh.common.Benchmark import Benchmark\n'), ((3613, 3622), 'pprint.pprint', 'pprint', (['c'], {}), '(c)\n', (3619, 3622), False, 'from pprint import pprint\n'), ((3897, 3906), 'pprint.pprint', 'pprint', (['c'], {}), '(c)\n', (3903, 3906), False, 'from pprint import pprint\n'), ((1399, 1420), 'cloudmesh.common.util.path_expand', 'path_expand', (['location'], {}), '(location)\n', (1410, 1420), False, 'from cloudmesh.common.util import path_expand\n')]
import pytest from aiohttp import web from pytest_toolbox import mktree from aiohttp_devtools.exceptions import AiohttpDevConfigError from aiohttp_devtools.runserver.config import Config from .conftest import SIMPLE_APP, get_if_boxed if_boxed = get_if_boxed(pytest) async def test_load_simple_app(tmpworkdir): mktree(tmpworkdir, SIMPLE_APP) Config(app_path='app.py') async def test_create_app_wrong_name(tmpworkdir, loop): mktree(tmpworkdir, SIMPLE_APP) config = Config(app_path='app.py', app_factory_name='missing') with pytest.raises(AiohttpDevConfigError) as excinfo: config.import_app_factory() assert excinfo.value.args[0] == 'Module "app.py" does not define a "missing" attribute/class' @if_boxed async def test_no_loop_coroutine(tmpworkdir): mktree(tmpworkdir, { 'app.py': """\ from aiohttp import web async def hello(request): return web.Response(text='<h1>hello world</h1>', content_type='text/html') async def app_factory(): a = web.Application() a.router.add_get('/', hello) return a """ }) config = Config(app_path='app.py') app = await config.load_app() assert isinstance(app, web.Application) @if_boxed async def test_not_app(tmpworkdir): mktree(tmpworkdir, { 'app.py': """\ def app_factory(): return 123 """ }) config = Config(app_path='app.py') with pytest.raises(AiohttpDevConfigError): await config.load_app()
[ "pytest.raises", "aiohttp_devtools.runserver.config.Config", "pytest_toolbox.mktree" ]
[((319, 349), 'pytest_toolbox.mktree', 'mktree', (['tmpworkdir', 'SIMPLE_APP'], {}), '(tmpworkdir, SIMPLE_APP)\n', (325, 349), False, 'from pytest_toolbox import mktree\n'), ((354, 379), 'aiohttp_devtools.runserver.config.Config', 'Config', ([], {'app_path': '"""app.py"""'}), "(app_path='app.py')\n", (360, 379), False, 'from aiohttp_devtools.runserver.config import Config\n'), ((442, 472), 'pytest_toolbox.mktree', 'mktree', (['tmpworkdir', 'SIMPLE_APP'], {}), '(tmpworkdir, SIMPLE_APP)\n', (448, 472), False, 'from pytest_toolbox import mktree\n'), ((486, 539), 'aiohttp_devtools.runserver.config.Config', 'Config', ([], {'app_path': '"""app.py"""', 'app_factory_name': '"""missing"""'}), "(app_path='app.py', app_factory_name='missing')\n", (492, 539), False, 'from aiohttp_devtools.runserver.config import Config\n'), ((794, 1069), 'pytest_toolbox.mktree', 'mktree', (['tmpworkdir', '{\'app.py\':\n """from aiohttp import web\n\nasync def hello(request):\n return web.Response(text=\'<h1>hello world</h1>\', content_type=\'text/html\')\n\nasync def app_factory():\n a = web.Application()\n a.router.add_get(\'/\', hello)\n return a\n"""\n }'], {}), '(tmpworkdir, {\'app.py\':\n """from aiohttp import web\n\nasync def hello(request):\n return web.Response(text=\'<h1>hello world</h1>\', content_type=\'text/html\')\n\nasync def app_factory():\n a = web.Application()\n a.router.add_get(\'/\', hello)\n return a\n"""\n })\n', (800, 1069), False, 'from pytest_toolbox import mktree\n'), ((1090, 1115), 'aiohttp_devtools.runserver.config.Config', 'Config', ([], {'app_path': '"""app.py"""'}), "(app_path='app.py')\n", (1096, 1115), False, 'from aiohttp_devtools.runserver.config import Config\n'), ((1246, 1318), 'pytest_toolbox.mktree', 'mktree', (['tmpworkdir', '{\'app.py\': """def app_factory():\n return 123\n"""}'], {}), '(tmpworkdir, {\'app.py\': """def app_factory():\n return 123\n"""})\n', (1252, 1318), False, 'from pytest_toolbox import mktree\n'), ((1348, 1373), 'aiohttp_devtools.runserver.config.Config', 'Config', ([], {'app_path': '"""app.py"""'}), "(app_path='app.py')\n", (1354, 1373), False, 'from aiohttp_devtools.runserver.config import Config\n'), ((549, 585), 'pytest.raises', 'pytest.raises', (['AiohttpDevConfigError'], {}), '(AiohttpDevConfigError)\n', (562, 585), False, 'import pytest\n'), ((1383, 1419), 'pytest.raises', 'pytest.raises', (['AiohttpDevConfigError'], {}), '(AiohttpDevConfigError)\n', (1396, 1419), False, 'import pytest\n')]
# simple example for saving to multiedgelists from py3plex.core import multinet multilayer_network = multinet.multi_layer_network().load_network( "../datasets/goslim_mirna.gpickle", directed=False, input_type="gpickle_biomine") # save to string-based representation multilayer_network.save_network("../datasets/mirna_multiedgelist.list", output_type="multiedgelist") # encode each node-layer pair with an int multilayer_network.save_network("../datasets/mirna_edgelist.list", output_type="edgelist") # save to string-based representation multilayer_network.save_network("../datasets/mirna_multiedgelist_encoded.list", output_type="multiedgelist_encoded") # mappings are saved into the main object! # print(multilayer_network.node_map) # print(multilayer_network.layer_map)
[ "py3plex.core.multinet.multi_layer_network" ]
[((102, 132), 'py3plex.core.multinet.multi_layer_network', 'multinet.multi_layer_network', ([], {}), '()\n', (130, 132), False, 'from py3plex.core import multinet\n')]
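A hedged round-trip check: py3plex's examples document "multiedgelist" as a valid input_type, so the string-based file written above should load back with the call below. Treat this as a sketch rather than a verified invocation; basic_stats() is assumed from py3plex's example scripts.

from py3plex.core import multinet

reloaded = multinet.multi_layer_network().load_network(
    "../datasets/mirna_multiedgelist.list",
    directed=False,
    input_type="multiedgelist")
# Print summary statistics to confirm the network round-tripped.
reloaded.basic_stats()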
from tests import BaseTestCase, authenticated_user from redash import redis_connection from redash.models import User, db from redash.utils import dt_from_timestamp from redash.models.users import sync_last_active_at, update_user_active_at, LAST_ACTIVE_KEY class TestUserUpdateGroupAssignments(BaseTestCase): def test_default_group_always_added(self): user = self.factory.create_user() user.update_group_assignments(["g_unknown"]) db.session.refresh(user) self.assertCountEqual([user.org.default_group.id], user.group_ids) def test_update_group_assignments(self): user = self.factory.user new_group = self.factory.create_group(name="g1") user.update_group_assignments(["g1"]) db.session.refresh(user) self.assertCountEqual([user.org.default_group.id, new_group.id], user.group_ids) class TestUserFindByEmail(BaseTestCase): def test_finds_users(self): user = self.factory.create_user(email='<EMAIL>') user2 = self.factory.create_user(email='<EMAIL>', org=self.factory.create_org()) users = User.find_by_email(user.email) self.assertIn(user, users) self.assertIn(user2, users) def test_finds_users_case_insensitive(self): user = self.factory.create_user(email='<EMAIL>') users = User.find_by_email('<EMAIL>') self.assertIn(user, users) class TestUserGetByEmailAndOrg(BaseTestCase): def test_get_user_by_email_and_org(self): user = self.factory.create_user(email='<EMAIL>') found_user = User.get_by_email_and_org(user.email, user.org) self.assertEqual(user, found_user) def test_get_user_by_email_and_org_case_insensitive(self): user = self.factory.create_user(email='<EMAIL>') found_user = User.get_by_email_and_org("<EMAIL>", user.org) self.assertEqual(user, found_user) class TestUserSearch(BaseTestCase): def test_non_unicode_search_string(self): user = self.factory.create_user(name='אריק') assert user in User.search(User.all(user.org), term='א') class TestUserRegenerateApiKey(BaseTestCase): def test_regenerate_api_key(self): user = self.factory.user before_api_key = user.api_key user.regenerate_api_key() # check committed by research user = User.query.get(user.id) self.assertNotEqual(user.api_key, before_api_key) class TestUserDetail(BaseTestCase): # def setUp(self): # super(TestUserDetail, self).setUp() # # redis_connection.flushdb() def test_userdetail_db_default(self): with authenticated_user(self.client) as user: self.assertEqual(user.details, {}) self.assertIsNone(user.active_at) def test_userdetail_db_default_save(self): with authenticated_user(self.client) as user: user.details['test'] = 1 db.session.commit() user_reloaded = User.query.filter_by(id=user.id).first() self.assertEqual(user.details['test'], 1) self.assertEqual( user_reloaded, User.query.filter( User.details['test'].astext.cast(db.Integer) == 1 ).first() ) def test_sync(self): with authenticated_user(self.client) as user: rv = self.client.get('/default/') timestamp = dt_from_timestamp(redis_connection.hget(LAST_ACTIVE_KEY, user.id)) sync_last_active_at() user_reloaded = User.query.filter(User.id==user.id).first() self.assertIn('active_at', user_reloaded.details) self.assertEqual(user_reloaded.active_at, timestamp)
[ "redash.models.User.query.get", "redash.models.User.find_by_email", "redash.models.User.query.filter_by", "tests.authenticated_user", "redash.models.User.get_by_email_and_org", "redash.models.db.session.refresh", "redash.models.db.session.commit", "redash.models.users.sync_last_active_at", "redash.models.User.query.filter", "redash.models.User.all", "redash.redis_connection.hget" ]
[((462, 486), 'redash.models.db.session.refresh', 'db.session.refresh', (['user'], {}), '(user)\n', (480, 486), False, 'from redash.models import User, db\n'), ((754, 778), 'redash.models.db.session.refresh', 'db.session.refresh', (['user'], {}), '(user)\n', (772, 778), False, 'from redash.models import User, db\n'), ((1107, 1137), 'redash.models.User.find_by_email', 'User.find_by_email', (['user.email'], {}), '(user.email)\n', (1125, 1137), False, 'from redash.models import User, db\n'), ((1333, 1362), 'redash.models.User.find_by_email', 'User.find_by_email', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (1351, 1362), False, 'from redash.models import User, db\n'), ((1571, 1618), 'redash.models.User.get_by_email_and_org', 'User.get_by_email_and_org', (['user.email', 'user.org'], {}), '(user.email, user.org)\n', (1596, 1618), False, 'from redash.models import User, db\n'), ((1805, 1851), 'redash.models.User.get_by_email_and_org', 'User.get_by_email_and_org', (['"""<EMAIL>"""', 'user.org'], {}), "('<EMAIL>', user.org)\n", (1830, 1851), False, 'from redash.models import User, db\n'), ((2344, 2367), 'redash.models.User.query.get', 'User.query.get', (['user.id'], {}), '(user.id)\n', (2358, 2367), False, 'from redash.models import User, db\n'), ((2628, 2659), 'tests.authenticated_user', 'authenticated_user', (['self.client'], {}), '(self.client)\n', (2646, 2659), False, 'from tests import BaseTestCase, authenticated_user\n'), ((2823, 2854), 'tests.authenticated_user', 'authenticated_user', (['self.client'], {}), '(self.client)\n', (2841, 2854), False, 'from tests import BaseTestCase, authenticated_user\n'), ((2913, 2932), 'redash.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2930, 2932), False, 'from redash.models import User, db\n'), ((3302, 3333), 'tests.authenticated_user', 'authenticated_user', (['self.client'], {}), '(self.client)\n', (3320, 3333), False, 'from tests import BaseTestCase, authenticated_user\n'), ((3492, 3513), 'redash.models.users.sync_last_active_at', 'sync_last_active_at', ([], {}), '()\n', (3511, 3513), False, 'from redash.models.users import sync_last_active_at, update_user_active_at, LAST_ACTIVE_KEY\n'), ((2068, 2086), 'redash.models.User.all', 'User.all', (['user.org'], {}), '(user.org)\n', (2076, 2086), False, 'from redash.models import User, db\n'), ((3431, 3478), 'redash.redis_connection.hget', 'redis_connection.hget', (['LAST_ACTIVE_KEY', 'user.id'], {}), '(LAST_ACTIVE_KEY, user.id)\n', (3452, 3478), False, 'from redash import redis_connection\n'), ((2962, 2994), 'redash.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'user.id'}), '(id=user.id)\n', (2982, 2994), False, 'from redash.models import User, db\n'), ((3543, 3580), 'redash.models.User.query.filter', 'User.query.filter', (['(User.id == user.id)'], {}), '(User.id == user.id)\n', (3560, 3580), False, 'from redash.models import User, db\n')]
import time import json import boto3 # import pandas as pd from io import StringIO from webdriver_wrapper import WebDriverWrapper from selenium.webdriver.common.keys import Keys from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import TimeoutException, WebDriverException from selenium.webdriver.common.action_chains import ActionChains import requests import re from datetime import datetime from multiprocessing import Process, Pipe from multiprocessing.pool import ThreadPool import threading # import pandas as pd # threadLocal = threading.local() # Global array # big = [] # Global response responses = [] # WebDriver browser = WebDriverWrapper() driver = browser._driver def s3_handler(full_path, data): s3 = boto3.client('s3') bucket = 'freshket-marketprice' # csv_buffer = StringIO() # df.to_csv(csv_buffer) uploadByteStream = json.dumps(data, indent=4, sort_keys=True, default=str) # uploadByteStream = bytes(json.dumps(data).encode('UTF-8'))efault).encode('UTF-8') # response = s3.put_object(Bucket=bucket, Key=fileName, Body=csv_buffer.getvalue()) response = s3.put_object(Bucket=bucket, Key=full_path, Body=uploadByteStream) return response def parsing(category_name): list_product_id = getElements(driver, "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div[3]/div/div[2]/div[1]/div[2]/div/div/div/div/div/div[1]") list_product_name = getElements(driver, "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div[3]/div/div[2]/div[1]/div[2]/div/div/div/div/div/a/div") list_product_unit_price = getElements(driver, "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div[3]/div/div[2]/div[1]/div[2]/div/div/div/div/div/div[3]/div") list_product_image = driver.find_elements_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div[3]/div/div[2]/div[1]/div[2]/div/div/div/div/a/div/div/div/img") list_product_price = driver.find_elements_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div[3]/div/div[2]/div[1]/div[2]/div/div/div/div/div/div[2]/div[1]/div[2]") data = [] for raw_product_id, raw_product_name, raw_product_price, raw_product_unit_price, raw_product_image in zip(list_product_id, list_product_name, list_product_price, list_product_unit_price, list_product_image): product_id = raw_product_id.text.split(' ')[-1] product_name = raw_product_name.text product_price = raw_product_price.text.split(' ')[0].strip() product_unit_price = raw_product_unit_price.text.split(' ')[2].strip() product_image = raw_product_image.get_attribute("src") now = datetime.now() collect_date = now.strftime("%Y-%m-%d %H:%M:%S") data.append({"category_name_th": category_name, "product_id": product_id,"product_name": product_name, "product_price": product_price, "unit_price": product_unit_price, "product_image": product_image, "collect_date": collect_date}) # print(product_id, product_name, product_price, product_unit_price) # print(data) # Return data from each page return data def parsing_next(category_name): list_product_id = getElements(driver, "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[1]/div[2]/div/div/div/div/div/div[1]") list_product_name = getElements(driver, 
"/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[1]/div[2]/div/div/div/div/div/a/div") list_product_unit_price = getElements(driver, "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[1]/div[2]/div/div/div/div/div/div[3]/div") list_product_image = driver.find_elements_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[1]/div[2]/div/div/div/div/a/div/div/div/img") list_product_price = driver.find_elements_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[1]/div[2]") data = [] for raw_product_id, raw_product_name, raw_product_price, raw_product_unit_price, raw_product_image in zip(list_product_id, list_product_name, list_product_price, list_product_unit_price, list_product_image): product_id = raw_product_id.text.split(' ')[-1] product_name = raw_product_name.text product_price = raw_product_price.text.split(' ')[0].strip() product_unit_price = raw_product_unit_price.text.split(' ')[2].strip() product_image = raw_product_image.get_attribute("src") data.append({"category_name_th": category_name, "makroClick_id": product_id,"product_name": product_name, "product_price": product_price, "unit_price": product_unit_price, "product_image": product_image, "collect_date": datetime.now()}) # print(product_id, product_name, product_price, product_unit_price) # print(data) # Return data from each page return data def getElements(driver, XPATH): try: delay = 3 # seconds # Explicit wait with waiting until all elements in XPATH is located elements = WebDriverWait(driver, delay).until(EC.presence_of_all_elements_located((By.XPATH, XPATH))) # print("Page is ready!") # print(elements) return elements except TimeoutException: # print("Loading took too much time!") # Return empty array return [None]*10 def scrollDown(driver): driver.maximize_window() y = 500 for timer in range(0,10): driver.execute_script("window.scrollTo(0, "+str(y)+")") y += 500 time.sleep(1) def getPages(is_firstPage): try: delay = 3 # Get all page links XPATH_PAGE_MAIN = "" if(is_firstPage): XPATH_PAGE_MAIN = "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div[3]/div/div[2]/div[2]/div/div" else: XPATH_PAGE_MAIN = "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[2]/div/div" pages = WebDriverWait(driver, delay).until(EC.presence_of_all_elements_located((By.XPATH, XPATH_PAGE_MAIN))) return pages except TimeoutException: print("Cannot load all pages") # Return -1 return [None]*10 def getNumberOfLastPage(): # Get elements of pages # page = driver.find_elements_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div/div[3]/div/div[2]/div/div") # print(page[-1].text) # print(driver.current_url) try: delay = 3 pages = getPages(True) size = len(pages) # print(size) XPATH_PAGE_MAIN = "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div[3]/div/div[2]/div[2]/div/div" XPATH_LAST_PAGE = XPATH_PAGE_MAIN + "[{0}]".format(size) # print(XPATH_LAST_PAGE) last_page = WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.XPATH, XPATH_LAST_PAGE))) # last_page = driver.find_element_by_xpath(XPATH_LAST_PAGE) # Click on the last icon to go to the last page driver.execute_script("arguments[0].click()", last_page) time.sleep(delay) # Go to the last page by selecting the last elements # print(driver.current_url) res = re.split('[?&]', 
driver.current_url) # Get the number of the last page (page=XX) numberOfLastPage = int(res[1].split("page=")[-1]) # print(numberOfLastPage) # Get elements of the last pages return numberOfLastPage except TimeoutException: print("Cannot load last page") # Return -1 return -1 def getNumberOfCategories(driver): category_main = driver.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div[1]/div/div[2]/div/div/div[2]/div[2]/div/div/div[2]/div[1]/div") hover = ActionChains(driver).move_to_element(category_main) hover.perform() XPATH = "/html/body/div[1]/div/div/div[1]/div[1]/div/div[2]/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div/div/p" category_elements = driver.find_elements_by_xpath(XPATH) return len(category_elements) def findPossiblePage(is_firstPage): arr = [] pages = getPages(is_firstPage) for page in pages: if(page.text.isdigit()): arr.append(page.text) return arr def getCurrentPage(driver): try: XPATH_PAGE_MAIN = "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div/div/div[3]/div/div[2]/div/div[7]" current_page = WebDriverWait(driver, 1).until(EC.element_to_be_clickable((By.XPATH, XPATH_PAGE_MAIN))) return current_page except TimeoutException: return def extractData(category_url): try: # Set up driver # driver = get_driver() # Go to the main page for each category driver.get(category_url) # Get the total pages for each category total_page = getNumberOfLastPage() # Go back to the main page driver.back() # print("======Total Page======") # print(total_page) # Get the category name category_name = getCategoryName(category_url) temp = [] # Scroll down # element = driver.find_element_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]") # driver.execute_script("arguments[0].scrollIntoView();", element) # Collect data from the first page data = parsing(category_name) temp.append(data) # print(driver.current_url) for i in range(total_page-1): if(i==0): XPATH_NEXT_PAGE = "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div[3]/div/div[2]/div[2]/div/div[6]" else: XPATH_NEXT_PAGE = "/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[2]/div[1]/div/div[3]/div/div[2]/div/div[7]" next_button = WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.XPATH, XPATH_NEXT_PAGE))) # print(seocond_page) driver.execute_script("arguments[0].click()", next_button) time.sleep(1) # print(driver.current_url) data_second = parsing_next(category_name) temp.append(data_second) # print(big) # df = pd.DataFrame(temp) # print(df) # df.to_csv("src/csv/makroClick/makroClick_{0}.csv".format(category_name), index=False) # now = datetime.now() # dt_tring = now.strftime("%Y_%m_%d_%H_%M_S") fileName = "{}.json".format(category_name) # full_path = "csv/makroClick/{}".format(fileName) # Upload data as .csv file into S3 response = s3_handler(fileName, temp) responses.append(response) except Exception as e: print("Extract Error:"+str(e)) # Skip that category when any errors occur return def getCategoryLink(): driver.get("https://www.makroclick.com/th/category/vegetable-and-fruit?menuFlagId=8&flag=true") elements = driver.find_elements_by_xpath("/html/body/div[1]/div/div/div[3]/div/div/div[1]/div[2]/div[1]/div/div[2]/div/a") links = [] for e in elements: url = e.get_attribute("href") links.append(url) return links # def get_driver(): # driver = getattr(threadLocal, 'driver', None) # if driver is None: # browser = WebDriverWrapper() # driver = browser._driver # setattr(threadLocal, 'driver', driver) # return driver def getCategoryName(url): category_name = 
url.split("https://www.makroclick.com/th/category/")[-1] return category_name def run(): links = getCategoryLink() # responses = [] # print(links) # extractData(links) # for url in links: # response = extractData(driver, url) # responses.append(response) for url in links: extractData(url) def lambda_handler(event, context): run() # print(big) # print(driver.title) # run(driver) # data = {url: page_title} # data = {} # data['url'] = url # data['page_title'] = page_title # print(data) # df = pd.DataFrame(data.items(), columns=['url','page_title']) # print(df) # fileType = '.json' # fileName = "data" + fileType # # Upload data as .csv file into S3 # response = s3_handler(fileName, data) return responses
[ "webdriver_wrapper.WebDriverWrapper", "re.split", "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "boto3.client", "selenium.webdriver.common.action_chains.ActionChains", "selenium.webdriver.support.expected_conditions.presence_of_all_elements_located", "json.dumps", "time.sleep", "datetime.datetime.now", "selenium.webdriver.support.ui.WebDriverWait" ]
[((875, 893), 'webdriver_wrapper.WebDriverWrapper', 'WebDriverWrapper', ([], {}), '()\n', (891, 893), False, 'from webdriver_wrapper import WebDriverWrapper\n'), ((962, 980), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (974, 980), False, 'import boto3\n'), ((1099, 1154), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)', 'sort_keys': '(True)', 'default': 'str'}), '(data, indent=4, sort_keys=True, default=str)\n', (1109, 1154), False, 'import json\n'), ((2925, 2939), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2937, 2939), False, 'from datetime import datetime\n'), ((5909, 5922), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5919, 5922), False, 'import time\n'), ((7502, 7519), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (7512, 7519), False, 'import time\n'), ((7640, 7676), 're.split', 're.split', (['"""[?&]"""', 'driver.current_url'], {}), "('[?&]', driver.current_url)\n", (7648, 7676), False, 'import re\n'), ((5454, 5508), 'selenium.webdriver.support.expected_conditions.presence_of_all_elements_located', 'EC.presence_of_all_elements_located', (['(By.XPATH, XPATH)'], {}), '((By.XPATH, XPATH))\n', (5489, 5508), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((6393, 6457), 'selenium.webdriver.support.expected_conditions.presence_of_all_elements_located', 'EC.presence_of_all_elements_located', (['(By.XPATH, XPATH_PAGE_MAIN)'], {}), '((By.XPATH, XPATH_PAGE_MAIN))\n', (6428, 6457), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((7239, 7294), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, XPATH_LAST_PAGE)'], {}), '((By.XPATH, XPATH_LAST_PAGE))\n', (7265, 7294), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((8201, 8221), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (8213, 8221), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((8896, 8951), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, XPATH_PAGE_MAIN)'], {}), '((By.XPATH, XPATH_PAGE_MAIN))\n', (8922, 8951), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((10489, 10502), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10499, 10502), False, 'import time\n'), ((5092, 5106), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5104, 5106), False, 'from datetime import datetime\n'), ((5419, 5447), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'delay'], {}), '(driver, delay)\n', (5432, 5447), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((6358, 6386), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'delay'], {}), '(driver, delay)\n', (6371, 6386), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((7204, 7232), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'delay'], {}), '(driver, delay)\n', (7217, 7232), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((8865, 8889), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(1)'], {}), '(driver, 1)\n', (8878, 8889), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((10315, 10370), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', 
(['(By.XPATH, XPATH_NEXT_PAGE)'], {}), '((By.XPATH, XPATH_NEXT_PAGE))\n', (10341, 10370), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((10284, 10308), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(3)'], {}), '(driver, 3)\n', (10297, 10308), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')]
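The scraper above repeats one pattern throughout: an explicit WebDriverWait for an element, followed by a JavaScript click (which works even when another element overlaps the target). The helper below distills that pattern into one reusable function; the function name and timeout default are mine, not part of the original script, and it assumes an already-constructed Selenium driver.

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

def click_when_ready(driver, xpath, timeout=3):
    """Wait until the element is clickable, then click it via JavaScript."""
    try:
        element = WebDriverWait(driver, timeout).until(
            EC.element_to_be_clickable((By.XPATH, xpath)))
    except TimeoutException:
        return False
    driver.execute_script("arguments[0].click()", element)
    return True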
'''
Heap sort
'''

# 1. Description
'''
Heapsort is a sorting algorithm designed around the heap data structure and is
a kind of selection sort. It exploits the array layout to locate an element at
a given index in constant time. Heaps, which are near-complete binary trees,
come in two kinds:
Max-heap: every node's value is greater than or equal to those of its left and
right children; used by heapsort for ascending order.
Min-heap: every node's value is less than or equal to those of its left and
right children; used by heapsort for descending order.
A max-heap requires that no node's value exceeds its parent's, i.e.
A[PARENT[i]] >= A[i]. Sorting an array into non-descending order uses the
max-heap, because by its definition the maximum value always sits at the top
of the heap.
The average time complexity of heapsort is O(nlogn).
'''

# 2. Algorithm steps
'''
(1) Build a heap H[0..n-1];
(2) Swap the head of the heap (the maximum) with its tail;
(3) Shrink the heap size by 1 and call shift_down(0) to sift the new top
    element of the array down to its proper position;
(4) Repeat step 2 until the heap size is 1.
'''

# 3. Implementation

# [Method 1]
# Idea: (1) build a max-heap from the sequence to be sorted;
#       (2) take the top node of the max-heap (the maximum), swap it with the
#           bottom-right element of the heap, then rebuild a max-heap from the
#           remaining elements (repeat step 1);
#       (3) repeat step 2 until the max-heap has length 1, at which point the
#           sort is complete.

# Initialise the unsorted array as a deque and prepend one auxiliary slot "0"
def arr_deque(arr):
    from collections import deque
    L = deque(arr)
    L.appendleft(0)
    return L

# Heap sort
def heap_sort(arr):
    arr_length = len(arr)
    L = arr_deque(arr)
    first_sort_count = arr_length // 2
    for i in range(first_sort_count):
        heap_adjust(L, first_sort_count - i, arr_length)
    for i in range(arr_length - 1):
        swap_parameter(L, 1, arr_length - i)
        heap_adjust(L, 1, arr_length - i - 1)
    return [L[i] for i in range(1, len(L))]

# Swap two elements
def swap_parameter(arr, i, j):
    arr[i], arr[j] = arr[j], arr[i]

# Adjust the heap (sift down)
def heap_adjust(arr, start, end):
    temp = arr[start]
    i = start
    j = 2 * i
    while j <= end:
        if j < end and arr[j] < arr[j + 1]:
            j += 1
        if temp < arr[j]:
            arr[i] = arr[j]
            i = j
            j = 2 * i
        else:
            break
    arr[i] = temp

# [Method 2]
'''
# Build a max-heap
def build_max_Heap(arr):
    import math
    for i in range(math.floor(len(arr)/2), -1, -1):
        heapify(arr, i)

# Adjust the heap
def heapify(arr, i):
    left = 2 * i + 1
    right = 2 * i + 2
    largest = i
    if left < arrLen and arr[left] > arr[largest]:
        largest = left
    if right < arrLen and arr[right] > arr[largest]:
        largest = right
    if largest != i:
        swap_parameter(arr, i, largest)
        heapify(arr, largest)

# Swap two elements
def swap_parameter(arr, i, j):
    arr[i], arr[j] = arr[j], arr[i]

# Heap sort
def heap_sort(arr):
    global arrLen
    arrLen = len(arr)
    build_max_Heap(arr)
    for i in range(len(arr)-1, 0, -1):
        swap_parameter(arr, 0, i)
        arrLen -= 1
        heapify(arr, 0)
    return arr
'''

# [Method 3]  PS: contains errors, not fixed yet
'''
# Adjust the heap
def adjust_heap(arr, i, count):
    lChild = 2 * i + 1
    rChild = 2 * i + 2
    max = i
    gap = count / 2
    if i < gap:
        if lChild < count and arr[lChild] > arr[max]:
            max = lChild
        if rChild < count and arr[rChild] > arr[max]:
            max = rChild
        if max != i:
            arr[max], arr[i] = arr[i], arr[max]
            adjust_heap(arr, max, count)

# Build the heap
def build_heap(arr, count):
    gap = count / 2
    # while gap > 0:
    for i in range(0, gap)[::-1]:
        # [::-1] reads the elements from back to front (reversed); [-1] reads
        # the last element; [:-1] reads everything except the last element;
        # [2::-1] reads backwards starting from index 2.
        adjust_heap(arr, i, count)

# Heap sort
def heap_sort(arr):
    count = len(arr)
    build_heap(arr, count)
    for i in range(0, count)[::-1]:
        arr[0], arr[i] = arr[i], arr[0]
        adjust_heap(arr, 0, i)
'''

# Example
arr = [10, 23, 6, 8, 2, 16, 0, 9]
print(heap_sort(arr))

# Output: [0, 2, 6, 8, 9, 10, 16, 23]
[ "collections.deque" ]
[((711, 721), 'collections.deque', 'deque', (['arr'], {}), '(arr)\n', (716, 721), False, 'from collections import deque\n')]
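For comparison, the standard library reaches the same result with a min-heap: heapq.heapify builds the heap in O(n) and popping repeatedly yields the elements in ascending order, so no max-heap is needed. This cross-check is my addition, not part of the file above; the helper name heapq_sort is made up.

import heapq

def heapq_sort(values):
    heap = list(values)
    heapq.heapify(heap)  # builds a min-heap in place, O(n)
    # range(len(heap)) is evaluated once, before any pops shrink the heap.
    return [heapq.heappop(heap) for _ in range(len(heap))]

assert heapq_sort([10, 23, 6, 8, 2, 16, 0, 9]) == [0, 2, 6, 8, 9, 10, 16, 23]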
from datamodel import DB if __name__ == "__main__": DB.bind(provider="sqlite", filename="db_model.sqlite", create_db=True) DB.generate_mapping(create_tables=True)
[ "datamodel.DB.generate_mapping", "datamodel.DB.bind" ]
[((57, 127), 'datamodel.DB.bind', 'DB.bind', ([], {'provider': '"""sqlite"""', 'filename': '"""db_model.sqlite"""', 'create_db': '(True)'}), "(provider='sqlite', filename='db_model.sqlite', create_db=True)\n", (64, 127), False, 'from datamodel import DB\n'), ((132, 171), 'datamodel.DB.generate_mapping', 'DB.generate_mapping', ([], {'create_tables': '(True)'}), '(create_tables=True)\n', (151, 171), False, 'from datamodel import DB\n')]
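The bind/generate_mapping calls above match Pony ORM's Database API, so datamodel.DB is presumably a pony.orm.Database instance. A minimal sketch of what datamodel.py might contain follows; the entity name and field are hypothetical, chosen only to make the sketch self-contained.

from pony.orm import Database, Required

DB = Database()

class Item(DB.Entity):
    # Hypothetical entity; generate_mapping(create_tables=True) in the script
    # above would create the corresponding table in db_model.sqlite.
    name = Required(str)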
# Generated by Django 2.0.2 on 2018-03-30 14:33 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('student', '0012_auto_20180330_0607'), ] operations = [ migrations.CreateModel( name='Time', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('firstname', models.CharField(default='Null', max_length=20)), ], ), migrations.AlterField( model_name='teacherreg', name='subject', field=models.CharField(default='NULL', max_length=20), ), ]
[ "django.db.models.CharField", "django.db.models.AutoField" ]
[((640, 687), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""NULL"""', 'max_length': '(20)'}), "(default='NULL', max_length=20)\n", (656, 687), False, 'from django.db import migrations, models\n'), ((328, 421), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (344, 421), False, 'from django.db import migrations, models\n'), ((450, 497), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Null"""', 'max_length': '(20)'}), "(default='Null', max_length=20)\n", (466, 497), False, 'from django.db import migrations, models\n')]
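For reference, the operations above correspond roughly to the following model state after migrating. The field definitions are taken from the migration itself (Django adds the implicit id field automatically); the class names and any other fields on the models are assumptions, since only this migration is visible.

from django.db import models

class Time(models.Model):
    firstname = models.CharField(max_length=20, default='Null')

class TeacherReg(models.Model):
    subject = models.CharField(max_length=20, default='NULL')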
import importlib import os import pkgutil from setuptools import find_packages from django_spring.utils.logger import get_logger log = get_logger("[PRELOAD]") ROOT_DIR = os.getcwd() def preload_views(): """ File watchers only watch files that have been loaded Calling `preload_all_modules` loads all the modules so that changes are detected properly """ log("Starting `preload_views`") modules = set() for pkg in find_packages(ROOT_DIR): pkgpath = ROOT_DIR + "/" + pkg.replace(".", "/") for info in pkgutil.iter_modules([pkgpath]): if info.ispkg: continue if info.name != "views": continue modules.add(pkg + "." + info.name) for module in modules: try: importlib.import_module(module) except Exception as e: # pylint: disable=broad-except log("{} failed to load: {}".format(module, e)) log("Done `preload_views`")
[ "importlib.import_module", "os.getcwd", "pkgutil.iter_modules", "django_spring.utils.logger.get_logger", "setuptools.find_packages" ]
[((138, 161), 'django_spring.utils.logger.get_logger', 'get_logger', (['"""[PRELOAD]"""'], {}), "('[PRELOAD]')\n", (148, 161), False, 'from django_spring.utils.logger import get_logger\n'), ((173, 184), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (182, 184), False, 'import os\n'), ((451, 474), 'setuptools.find_packages', 'find_packages', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (464, 474), False, 'from setuptools import find_packages\n'), ((553, 584), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['[pkgpath]'], {}), '([pkgpath])\n', (573, 584), False, 'import pkgutil\n'), ((799, 830), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (822, 830), False, 'import importlib\n')]
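A hedged usage sketch: preload_views only helps if it runs once at startup, before the file watcher begins, for example from a Django AppConfig.ready hook. The app label "myapp" is hypothetical, and the exact module path of preload_views inside django_spring is assumed, since the file above does not show its own location.

from django.apps import AppConfig

class MyAppConfig(AppConfig):
    name = "myapp"  # hypothetical app label

    def ready(self):
        # Module path assumed; adjust to wherever this file lives in django_spring.
        from django_spring.utils.preload import preload_views
        preload_views()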
"""Handles all messages coming from trading channel""" # pylint: disable=no-member import json import logging from bpprosdk.websockets.account.account_state import AccountState from bpprosdk.websockets.trading.trading_channel_events import Fill, Booked, Done, Tracked, Triggered LOG = logging.getLogger(__name__) LOG.addHandler(logging.StreamHandler()) class TradingMessageHandler: """Handles trading channel messages""" def __init__(self, state: AccountState = None): self.state = state def handle_message(self, json_message: json): """Updates the state depending on the received message""" message = json.dumps(json_message) if json_message["type"] == "BOOKED": booked = Booked.from_json(message) self.state.update_open_orders(booked) elif json_message["type"] == "FILL": fill = Fill.from_json(message) self.state.fill_open_order(fill) elif json_message["type"] == "DONE": done = Done.from_json(message) if done.status == "CANCELLED" or done.status == "FILLED_FULLY": self.state.order_done(done) elif json_message["type"] == "TRACKED": tracked = Tracked.from_json(message) LOG.debug("Stop Order tracked: %s", tracked) self.state.update_open_orders_on_stop_order(tracked) elif json_message["type"] == "TRIGGERED": triggered = Triggered.from_json(message) LOG.debug("Stop Order triggered %s", triggered) else: LOG.warning("Unhandled event from TRADING channel %s", message)
[ "bpprosdk.websockets.trading.trading_channel_events.Fill.from_json", "bpprosdk.websockets.trading.trading_channel_events.Done.from_json", "bpprosdk.websockets.trading.trading_channel_events.Booked.from_json", "logging.StreamHandler", "json.dumps", "bpprosdk.websockets.trading.trading_channel_events.Triggered.from_json", "bpprosdk.websockets.trading.trading_channel_events.Tracked.from_json", "logging.getLogger" ]
[((287, 314), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (304, 314), False, 'import logging\n'), ((330, 353), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (351, 353), False, 'import logging\n'), ((644, 668), 'json.dumps', 'json.dumps', (['json_message'], {}), '(json_message)\n', (654, 668), False, 'import json\n'), ((735, 760), 'bpprosdk.websockets.trading.trading_channel_events.Booked.from_json', 'Booked.from_json', (['message'], {}), '(message)\n', (751, 760), False, 'from bpprosdk.websockets.trading.trading_channel_events import Fill, Booked, Done, Tracked, Triggered\n'), ((875, 898), 'bpprosdk.websockets.trading.trading_channel_events.Fill.from_json', 'Fill.from_json', (['message'], {}), '(message)\n', (889, 898), False, 'from bpprosdk.websockets.trading.trading_channel_events import Fill, Booked, Done, Tracked, Triggered\n'), ((1008, 1031), 'bpprosdk.websockets.trading.trading_channel_events.Done.from_json', 'Done.from_json', (['message'], {}), '(message)\n', (1022, 1031), False, 'from bpprosdk.websockets.trading.trading_channel_events import Fill, Booked, Done, Tracked, Triggered\n'), ((1222, 1248), 'bpprosdk.websockets.trading.trading_channel_events.Tracked.from_json', 'Tracked.from_json', (['message'], {}), '(message)\n', (1239, 1248), False, 'from bpprosdk.websockets.trading.trading_channel_events import Fill, Booked, Done, Tracked, Triggered\n'), ((1445, 1473), 'bpprosdk.websockets.trading.trading_channel_events.Triggered.from_json', 'Triggered.from_json', (['message'], {}), '(message)\n', (1464, 1473), False, 'from bpprosdk.websockets.trading.trading_channel_events import Fill, Booked, Done, Tracked, Triggered\n')]
# Raspberry Pi Physical Dashboard LED Backpack Widget Tests # Author: <NAME> # # The MIT License (MIT) # # Copyright (c) 2016 Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import time import unittest import led_backpacks class TestSevenSegmentWidget(unittest.TestCase): def tearDown(self): time.sleep(2.0) # 2 second delay between tests to see results on hardware. def test_colon_on(self): widget = led_backpacks.SevenSegmentWidget(address='0x76') widget.set_value('colon_on') def test_colon_off(self): widget = led_backpacks.SevenSegmentWidget(address='0x76') widget.set_value('colon_off') def test_number(self): widget = led_backpacks.SevenSegmentWidget(address='0x76') widget.set_value('-10.25') def test_decimal_digits(self): widget = led_backpacks.SevenSegmentWidget(address='0x76', decimal_digits='1') widget.set_value('-10.25') def test_justify_left(self): widget = led_backpacks.SevenSegmentWidget(address='0x76', justify_right='False') widget.set_value('-10.25') def test_invert(self): widget = led_backpacks.SevenSegmentWidget(address='0x76', invert='True') widget.set_value('-10.25') def test_brightness(self): widget = led_backpacks.SevenSegmentWidget(address='0x76', brightness='1') widget.set_value('-10.25') def test_invalid_value_fails(self): widget = led_backpacks.SevenSegmentWidget(address='0x76') with self.assertRaises(ValueError): widget.set_value('foo') class TestAlphaNum4Widget(unittest.TestCase): def tearDown(self): time.sleep(2.0) # 2 second delay between tests to see results on hardware. def test_number(self): widget = led_backpacks.AlphaNum4Widget(address='0x73') widget.set_value('-10.25') def test_small_string(self): widget = led_backpacks.AlphaNum4Widget(address='0x73') widget.set_value('foo') def test_long_string(self): widget = led_backpacks.AlphaNum4Widget(address='0x73') widget.set_value('foobar') def test_justify_left(self): widget = led_backpacks.AlphaNum4Widget(address='0x73', justify_right='False') widget.set_value('foo') def test_brightness(self): widget = led_backpacks.AlphaNum4Widget(address='0x73', brightness='1') widget.set_value('-10.25') class TestBicolorBargraph24Widget(unittest.TestCase): def tearDown(self): time.sleep(2.0) # 2 second delay between tests to see results on hardware. 
def test_0(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') widget.set_value('0.0') def test_25(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') widget.set_value('25.0') def test_50(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') widget.set_value('50.0') def test_100(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') widget.set_value('100.0') def test_brightness(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72', brightness='1') widget.set_value('100.0') def test_yellow(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') widget.set_value('250.0') def test_red(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') widget.set_value('450.0') def test_justify_left(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72', justify_right='False') widget.set_value('50.0') def test_invalid_value(self): widget = led_backpacks.BicolorBargraph24Widget(address='0x72') with self.assertRaises(ValueError): widget.set_value('foo')
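# Usage note (added): these tests drive real I2C hardware at the addresses
# given above, so they are meant to run on a Raspberry Pi with the displays
# attached, e.g. via `python -m unittest <module>` (module name depends on
# how this file is saved). The 2 second tearDown delay exists only so each
# result stays visible on the hardware between tests.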
[ "led_backpacks.BicolorBargraph24Widget", "led_backpacks.SevenSegmentWidget", "led_backpacks.AlphaNum4Widget", "time.sleep" ]
[((1336, 1351), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1346, 1351), False, 'import time\n'), ((1459, 1507), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""'}), "(address='0x76')\n", (1491, 1507), False, 'import led_backpacks\n'), ((1593, 1641), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""'}), "(address='0x76')\n", (1625, 1641), False, 'import led_backpacks\n'), ((1725, 1773), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""'}), "(address='0x76')\n", (1757, 1773), False, 'import led_backpacks\n'), ((1862, 1930), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""', 'decimal_digits': '"""1"""'}), "(address='0x76', decimal_digits='1')\n", (1894, 1930), False, 'import led_backpacks\n'), ((2017, 2088), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""', 'justify_right': '"""False"""'}), "(address='0x76', justify_right='False')\n", (2049, 2088), False, 'import led_backpacks\n'), ((2169, 2232), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""', 'invert': '"""True"""'}), "(address='0x76', invert='True')\n", (2201, 2232), False, 'import led_backpacks\n'), ((2317, 2381), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""', 'brightness': '"""1"""'}), "(address='0x76', brightness='1')\n", (2349, 2381), False, 'import led_backpacks\n'), ((2475, 2523), 'led_backpacks.SevenSegmentWidget', 'led_backpacks.SevenSegmentWidget', ([], {'address': '"""0x76"""'}), "(address='0x76')\n", (2507, 2523), False, 'import led_backpacks\n'), ((2683, 2698), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (2693, 2698), False, 'import time\n'), ((2804, 2849), 'led_backpacks.AlphaNum4Widget', 'led_backpacks.AlphaNum4Widget', ([], {'address': '"""0x73"""'}), "(address='0x73')\n", (2833, 2849), False, 'import led_backpacks\n'), ((2936, 2981), 'led_backpacks.AlphaNum4Widget', 'led_backpacks.AlphaNum4Widget', ([], {'address': '"""0x73"""'}), "(address='0x73')\n", (2965, 2981), False, 'import led_backpacks\n'), ((3064, 3109), 'led_backpacks.AlphaNum4Widget', 'led_backpacks.AlphaNum4Widget', ([], {'address': '"""0x73"""'}), "(address='0x73')\n", (3093, 3109), False, 'import led_backpacks\n'), ((3196, 3264), 'led_backpacks.AlphaNum4Widget', 'led_backpacks.AlphaNum4Widget', ([], {'address': '"""0x73"""', 'justify_right': '"""False"""'}), "(address='0x73', justify_right='False')\n", (3225, 3264), False, 'import led_backpacks\n'), ((3346, 3407), 'led_backpacks.AlphaNum4Widget', 'led_backpacks.AlphaNum4Widget', ([], {'address': '"""0x73"""', 'brightness': '"""1"""'}), "(address='0x73', brightness='1')\n", (3375, 3407), False, 'import led_backpacks\n'), ((3530, 3545), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (3540, 3545), False, 'import time\n'), ((3646, 3699), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""'}), "(address='0x72')\n", (3683, 3699), False, 'import led_backpacks\n'), ((3773, 3826), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""'}), "(address='0x72')\n", (3810, 3826), False, 'import led_backpacks\n'), ((3901, 3954), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], 
{'address': '"""0x72"""'}), "(address='0x72')\n", (3938, 3954), False, 'import led_backpacks\n'), ((4030, 4083), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""'}), "(address='0x72')\n", (4067, 4083), False, 'import led_backpacks\n'), ((4167, 4236), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""', 'brightness': '"""1"""'}), "(address='0x72', brightness='1')\n", (4204, 4236), False, 'import led_backpacks\n'), ((4316, 4369), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""'}), "(address='0x72')\n", (4353, 4369), False, 'import led_backpacks\n'), ((4446, 4499), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""'}), "(address='0x72')\n", (4483, 4499), False, 'import led_backpacks\n'), ((4585, 4661), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""', 'justify_right': '"""False"""'}), "(address='0x72', justify_right='False')\n", (4622, 4661), False, 'import led_backpacks\n'), ((4747, 4800), 'led_backpacks.BicolorBargraph24Widget', 'led_backpacks.BicolorBargraph24Widget', ([], {'address': '"""0x72"""'}), "(address='0x72')\n", (4784, 4800), False, 'import led_backpacks\n')]
#!/usr/bin/env python3
from lelantos import tomographic_objects
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Cut the QSO catalog')
parser.add_argument('-i', '--input', help='Input QSO catalog',required=True)
parser.add_argument('-o', '--output', help='Output QSO catalog',required=True)
parser.add_argument('-zname', '--redshift-name', help='Redshift column name', default="Z",required=False)
parser.add_argument('-ramin', help='min RA coord to cut', default=-np.inf,required=False)
parser.add_argument('-ramax', help='max RA coord to cut', default=np.inf,required=False)
parser.add_argument('-decmin', help='min DEC coord to cut', default=-np.inf,required=False)
parser.add_argument('-decmax', help='max DEC coord to cut', default=np.inf,required=False)
parser.add_argument('-zmin', help='min z coord to cut', default=-np.inf,required=False)
parser.add_argument('-zmax', help='max z coord to cut', default=np.inf,required=False)
args = vars(parser.parse_args())
cat = tomographic_objects.QSOCatalog.init_from_fits(args["input"],redshift_name=args["redshift_name"])
print("Number of quasars before cut", cat.coord.shape[0])
mask = cat.cut_catalog(coord_min=(float(args["ramin"]),float(args["decmin"]),float(args["zmin"])),
                       coord_max=(float(args["ramax"]),float(args["decmax"]),float(args["zmax"])),
                       center_x_coord=True)
cat.apply_mask(mask)
print("Number of quasars after cut", cat.coord.shape[0])
cat.name = args["output"]
cat.write(convert=False)
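# Example invocation (added sketch; the script and file names below are
# placeholders — only the flags come from the parser above). Any coordinate
# left at its +/- inf default is simply not cut on:
#
#   python cut_qso_catalog.py -i qso_cat.fits -o qso_cat_cut.fits \
#       -ramin 120 -ramax 240 -zmin 2.1 -zmax 3.5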
[ "lelantos.tomographic_objects.QSOCatalog.init_from_fits", "argparse.ArgumentParser" ]
[((109, 167), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Cut the QSO catalog"""'}), "(description='Cut the QSO catalog')\n", (132, 167), False, 'import argparse\n'), ((1065, 1167), 'lelantos.tomographic_objects.QSOCatalog.init_from_fits', 'tomographic_objects.QSOCatalog.init_from_fits', (["args['input']"], {'redshift_name': "args['redshift_name']"}), "(args['input'], redshift_name=\n args['redshift_name'])\n", (1110, 1167), False, 'from lelantos import tomographic_objects\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.SimpleMockModel import SimpleMockModel class ComplextMockModel(object): def __init__(self): self._biz_model = None self._biz_num = None self._biz_type = None @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): if isinstance(value, SimpleMockModel): self._biz_model = value else: self._biz_model = SimpleMockModel.from_alipay_dict(value) @property def biz_num(self): return self._biz_num @biz_num.setter def biz_num(self, value): self._biz_num = value @property def biz_type(self): return self._biz_type @biz_type.setter def biz_type(self, value): self._biz_type = value def to_alipay_dict(self): params = dict() if self.biz_model: if hasattr(self.biz_model, 'to_alipay_dict'): params['biz_model'] = self.biz_model.to_alipay_dict() else: params['biz_model'] = self.biz_model if self.biz_num: if hasattr(self.biz_num, 'to_alipay_dict'): params['biz_num'] = self.biz_num.to_alipay_dict() else: params['biz_num'] = self.biz_num if self.biz_type: if hasattr(self.biz_type, 'to_alipay_dict'): params['biz_type'] = self.biz_type.to_alipay_dict() else: params['biz_type'] = self.biz_type return params @staticmethod def from_alipay_dict(d): if not d: return None o = ComplextMockModel() if 'biz_model' in d: o.biz_model = d['biz_model'] if 'biz_num' in d: o.biz_num = d['biz_num'] if 'biz_type' in d: o.biz_type = d['biz_type'] return o
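# Round-trip sketch (added; field values are arbitrary examples, and the real
# fields of SimpleMockModel may differ — this only illustrates how the setter
# coerces dicts and how to_alipay_dict/from_alipay_dict mirror each other):
#
#   model = ComplextMockModel()
#   model.biz_num = 42
#   model.biz_type = 'demo'
#   model.biz_model = {'value': 1}   # coerced via SimpleMockModel.from_alipay_dict
#   restored = ComplextMockModel.from_alipay_dict(model.to_alipay_dict())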
[ "alipay.aop.api.domain.SimpleMockModel.SimpleMockModel.from_alipay_dict" ]
[((581, 620), 'alipay.aop.api.domain.SimpleMockModel.SimpleMockModel.from_alipay_dict', 'SimpleMockModel.from_alipay_dict', (['value'], {}), '(value)\n', (613, 620), False, 'from alipay.aop.api.domain.SimpleMockModel import SimpleMockModel\n')]
""" Sync method tests. """ import pytest from aiosmtplib.sync import async_to_sync def test_sendmail_sync( event_loop, smtp_client_threaded, sender_str, recipient_str, message_str ): errors, response = smtp_client_threaded.sendmail_sync( sender_str, [recipient_str], message_str ) assert not errors assert isinstance(errors, dict) assert response != "" def test_sendmail_sync_when_connected( event_loop, smtp_client_threaded, sender_str, recipient_str, message_str ): event_loop.run_until_complete(smtp_client_threaded.connect()) errors, response = smtp_client_threaded.sendmail_sync( sender_str, [recipient_str], message_str ) assert not errors assert isinstance(errors, dict) assert response != "" def test_send_message_sync(event_loop, smtp_client_threaded, message): errors, response = smtp_client_threaded.send_message_sync(message) assert not errors assert isinstance(errors, dict) assert response != "" def test_send_message_sync_when_connected(event_loop, smtp_client_threaded, message): event_loop.run_until_complete(smtp_client_threaded.connect()) errors, response = smtp_client_threaded.send_message_sync(message) assert not errors assert isinstance(errors, dict) assert response != "" def test_async_to_sync_without_loop(event_loop): async def test_func(): return 7 result = async_to_sync(test_func()) assert result == 7 def test_async_to_sync_with_exception(event_loop): async def test_func(): raise ZeroDivisionError with pytest.raises(ZeroDivisionError): async_to_sync(test_func(), loop=event_loop) @pytest.mark.asyncio async def test_async_to_sync_with_running_loop(event_loop): with pytest.raises(RuntimeError): async_to_sync(None)
[ "pytest.raises", "aiosmtplib.sync.async_to_sync" ]
[((1598, 1630), 'pytest.raises', 'pytest.raises', (['ZeroDivisionError'], {}), '(ZeroDivisionError)\n', (1611, 1630), False, 'import pytest\n'), ((1776, 1803), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1789, 1803), False, 'import pytest\n'), ((1813, 1832), 'aiosmtplib.sync.async_to_sync', 'async_to_sync', (['None'], {}), '(None)\n', (1826, 1832), False, 'from aiosmtplib.sync import async_to_sync\n')]
import biostats as bs import pandas as pd # --------------------------------------------------------------- #Basic # Numeral data = pd.read_csv("biostats/dataset/numeral.csv") r1, r2, r3, r4 = bs.numeral(data, ["Fish", "Crab", "Temperature"]) print(r1) print(r2) print(r3) print(r4)
[ "pandas.read_csv", "biostats.numeral" ]
[((134, 177), 'pandas.read_csv', 'pd.read_csv', (['"""biostats/dataset/numeral.csv"""'], {}), "('biostats/dataset/numeral.csv')\n", (145, 177), True, 'import pandas as pd\n'), ((195, 244), 'biostats.numeral', 'bs.numeral', (['data', "['Fish', 'Crab', 'Temperature']"], {}), "(data, ['Fish', 'Crab', 'Temperature'])\n", (205, 244), True, 'import biostats as bs\n')]
import os

PRODUCTS = []

# product class
class Product:
    def __init__(self, id, name, quantity, price):
        self.id = id
        self.name = name
        self.quantity = quantity
        self.price = price

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_quantity(self, quantity):
        self.quantity = quantity

    def get_quantity(self):
        return self.quantity

    def set_price(self, price):
        self.price = price

    def get_price(self):
        return self.price

    # magic method repr for representing objects
    def __repr__(self):
        return f"('{self.id}','{self.name}','{self.quantity}','{self.price}')"

# required functions
# func to list all products
def list_products():
    products = []
    products_list = []
    with open('product.txt', 'r') as reader:
        for line in reader.readlines():
            products.append(line)
    # remove \n
    for prod in products:
        products_list.append(prod.replace('\n', ''))
    print(products_list)

# function to add a product
# fields(product_id(unique),product_name,amount,price)
def add_product():
    # open the file in append mode (add to file, we don't wish to overwrite)
    fo = open('product.txt', 'a+', newline='')
    product_id = input("Enter Product id : ")
    # check for unique id
    with open("product.txt", 'r') as fo_r:
        for line in fo_r.readlines():
            if product_id in line:
                print()
                print("Product id already exists!! Please enter a unique id!!")
                print()
                return add_product()
    product_name = input("Enter Product name: ")
    amount = int(input("Enter product amount: "))
    assert amount >= 0, f"{amount} is not greater or equal to zero!!"
    price = float(input("Enter the product price: "))
    assert price >= 0, f"Price {price} is not greater or equal to zero!!"
    fo.write(product_id + ',' + product_name + ',' + str(amount) + ',' + str(price) + "\n")
    fo.close()
    if fo:
        print("Product added successfully!!!")
        print()
    output = {"id": product_id, "name": product_name, "amount": amount, "price": price}
    return output

# file handling
def handle_file():
    with open('product.txt', 'a+', newline='') as fo:
        # write every product, not just the first one
        for p in PRODUCTS:
            print(p.id + ',' + p.name + ',' + str(p.quantity) + ',' + str(p.price), file=fo)

# function to edit a product with product id
def edit_product():
    file = open('product.txt', 'r')
    temp = open('temp.txt', 'w')
    id = input("Enter Product id to change: ")
    s = ' '
    while s:
        s = file.readline()
        L = s.split(',')
        if len(s) > 0:
            if L[0] == id:
                name = input("Enter Product name: ")
                # convert before comparing, otherwise str >= int raises TypeError
                amount = int(input("Enter product amount : "))
                assert amount >= 0, f"{amount} is not greater or equal to zero!!"
                price = float(input("Enter the product price : "))
                assert price >= 0, f"Price {price} is not greater or equal to zero!!"
                temp.write(str(id) + ',' + name + ',' + str(amount) + ',' + str(price) + "\n")
            else:
                temp.write(s)
    temp.close()
    file.close()
    os.remove('product.txt')
    os.rename('temp.txt', 'product.txt')
    print("Product updated successfully!!")
    list_products()

# function to delete a product with product id
def delete_product():
    product = open("product.txt", 'r')
    temp = open("temp.txt", 'w')
    id = input("Enter product id to delete: ")
    s = ' '
    while s:
        s = product.readline()
        L = s.split(',')
        if len(s) > 0:
            if L[0] != id:
                temp.write(s)
    product.close()
    temp.close()
    os.remove('product.txt')
    os.rename('temp.txt', 'product.txt')
    print("Product deleted successfully!!")
    list_products()

# function to search for a product with product id
def search_product():
    product = open('product.txt', 'r')
    id = input("Enter product id to search: ")
    print()
    found = False
    s = ' '
    while s:
        s = product.readline()
        L = s.split(",")
        if len(s) > 0:
            if L[0] == id:
                found = True
                print("Product details")
                print("------------------------------")
                print("Product id: ", L[0])
                print("Product Name: ", L[1])
                print("Product amount: ", L[2])
                print("Product price: ", L[3])
                break
    product.close()
    if not found:
        print("Product not available!!")

# load products
def load_products():
    product = open("product.txt", "r")
    for p in product:
        try:
            prod = p.split(',')
            id = prod[0]
            name = prod[1]
            quantity = int(prod[2])
            price = float(prod[3])
            output = Product(id, name, quantity, price)
            PRODUCTS.append(output)
        except (IndexError, ValueError):
            # skip malformed lines instead of printing blank output
            continue

# product menu
def display_product_menu():
    # creating options
    while True:
        print("""
        Product menu
        1. List all Products
        2. Add Product
        3. Edit Product
        4. Delete Product
        5. Search for a product
        6. Quit
        """)
        try:
            choice2 = int(input("Select Product option:"))
        except ValueError:
            print()
            print('Oops! Incorrect choice. Please try again! ')
            continue
        # choice 1
        if choice2 == 1:
            print()
            list_products()
        elif choice2 == 2:
            print()
            add_product()
        elif choice2 == 3:
            print()
            edit_product()
        elif choice2 == 4:
            print()
            delete_product()
        elif choice2 == 5:
            print()
            search_product()
        elif choice2 == 6:
            print()
            break
        else:
            print()
            print('Oops! Incorrect choice. Please try again! ')

if __name__ == "__main__":
    display_product_menu()
[ "os.rename", "os.remove" ]
[((3356, 3380), 'os.remove', 'os.remove', (['"""product.txt"""'], {}), "('product.txt')\n", (3365, 3380), False, 'import os\n'), ((3385, 3421), 'os.rename', 'os.rename', (['"""temp.txt"""', '"""product.txt"""'], {}), "('temp.txt', 'product.txt')\n", (3394, 3421), False, 'import os\n'), ((3877, 3901), 'os.remove', 'os.remove', (['"""product.txt"""'], {}), "('product.txt')\n", (3886, 3901), False, 'import os\n'), ((3906, 3942), 'os.rename', 'os.rename', (['"""temp.txt"""', '"""product.txt"""'], {}), "('temp.txt', 'product.txt')\n", (3915, 3942), False, 'import os\n')]
from unittest.mock import patch from .conftest import file_contents from civic_scraper.base.cache import Cache def test_default_cache_dir(monkeypatch): target = "civic_scraper.utils.expanduser" with patch(target) as mock_method: mock_method.return_value = "/Users/you" cache = Cache() assert cache.path == "/Users/you/.civic-scraper" def test_custom_cache_path(tmpdir): from civic_scraper.base.cache import Cache cache = Cache(tmpdir) assert tmpdir == cache.path def test_write(tmpdir): from civic_scraper.base.cache import Cache cache = Cache(tmpdir) content = "<h1>some content</h1>" file_path = "html/search_results_page.html" outfile = cache.write(file_path, content) scrape_dir = tmpdir.join("html") files = [f.basename for f in scrape_dir.listdir()] assert "search_results_page.html" in files actual_contents = file_contents(outfile) assert actual_contents == content
[ "unittest.mock.patch", "civic_scraper.base.cache.Cache" ]
[((468, 481), 'civic_scraper.base.cache.Cache', 'Cache', (['tmpdir'], {}), '(tmpdir)\n', (473, 481), False, 'from civic_scraper.base.cache import Cache\n'), ((600, 613), 'civic_scraper.base.cache.Cache', 'Cache', (['tmpdir'], {}), '(tmpdir)\n', (605, 613), False, 'from civic_scraper.base.cache import Cache\n'), ((211, 224), 'unittest.mock.patch', 'patch', (['target'], {}), '(target)\n', (216, 224), False, 'from unittest.mock import patch\n'), ((305, 312), 'civic_scraper.base.cache.Cache', 'Cache', ([], {}), '()\n', (310, 312), False, 'from civic_scraper.base.cache import Cache\n')]
#!/usr/bin/python3 import os import sqlite3 def get_connection(): connection = sqlite3.connect('db.sqlite') return connection def read_albums(filepath): albums = [] with open(filepath, "r") as f: for line in f: artist, title, year = line.strip().split("|") albums.append( { "artiste": artist, "titre": title, "annee": year, } ) return albums def get_artist_by_name(connection, artist_name): cursor = connection.cursor() cursor.execute( "select " "id, nom, est_solo, nombre_individus " "from artiste " "where nom = ?", ( artist_name, ), ) row = cursor.fetchone() artist = None if row: artist = { "id": row[0], "nom": row[1], "est_solo": row[2], "nombre_individus": row[3], } return artist def create_artist(connection, nom, est_solo, nombre_individus): cursor = connection.cursor() cursor.execute( "insert into artiste " "(nom, est_solo, nombre_individus) " "VALUES " "(?, ?, ?);", ( nom, str(est_solo).upper(), str(nombre_individus), ), ) connection.commit() return get_artist_by_name(connection, nom) def get_or_create_artist(connection, artist_name): existing_artist = get_artist_by_name(connection, artist_name) if existing_artist: return existing_artist created_artist = create_artist( connection, artist_name, True, 1, ) return created_artist def create_album(connection, album): print( "Creating album {titre}...".format( titre=album["titre"], ) ) artist = get_or_create_artist(connection, album["artiste"]) cursor = connection.cursor() cursor.execute( "insert into album " "(titre, annee, artiste_id, maison_disque_id) " "VALUES " "(?, ?, ?, ?);", ( album["titre"], str(album["annee"]), str(artist["id"]), "1", ), ) connection.commit() def main(): current_directory = os.path.dirname(os.path.realpath(__file__)) input_filepath = os.path.join(current_directory, "input.txt") albums = read_albums(input_filepath) connection = get_connection() for album in albums: create_album(connection, album) if __name__ == "__main__": main()
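# Input format (added for clarity): read_albums() expects one pipe-separated
# album per line of input.txt, i.e. artist|title|year, for example:
#
#   Daft Punk|Discovery|2001
#
# Note that create_album() currently hard-codes maison_disque_id to "1".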
[ "os.path.realpath", "sqlite3.connect", "os.path.join" ]
[((86, 114), 'sqlite3.connect', 'sqlite3.connect', (['"""db.sqlite"""'], {}), "('db.sqlite')\n", (101, 114), False, 'import sqlite3\n'), ((2398, 2442), 'os.path.join', 'os.path.join', (['current_directory', '"""input.txt"""'], {}), "(current_directory, 'input.txt')\n", (2410, 2442), False, 'import os\n'), ((2349, 2375), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2365, 2375), False, 'import os\n')]
# Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file long_description = "see https://github.com/JoinVerse/vid for more info." setup( name='vid', # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html version='1.0.0', description='Python Vid Implementation', long_description=long_description, # The project's main homepage. url='https://github.com/JoinVerse/vid', # Author details author='JoinVerse', author_email='', # Choose your license license='MIT', py_modules=['vid', 'base32hex'], download_url='', )
[ "os.path.dirname", "setuptools.setup" ]
[((330, 595), 'setuptools.setup', 'setup', ([], {'name': '"""vid"""', 'version': '"""1.0.0"""', 'description': '"""Python Vid Implementation"""', 'long_description': 'long_description', 'url': '"""https://github.com/JoinVerse/vid"""', 'author': '"""JoinVerse"""', 'author_email': '""""""', 'license': '"""MIT"""', 'py_modules': "['vid', 'base32hex']", 'download_url': '""""""'}), "(name='vid', version='1.0.0', description='Python Vid Implementation',\n long_description=long_description, url=\n 'https://github.com/JoinVerse/vid', author='JoinVerse', author_email='',\n license='MIT', py_modules=['vid', 'base32hex'], download_url='')\n", (335, 595), False, 'from setuptools import setup, find_packages\n'), ((183, 205), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (195, 205), False, 'from os import path\n')]
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of the Frechet Inception Distance.

Implemented as a wrapper around the tensorflow_gan library. The details
can be found in "GANs Trained by a Two Time-Scale Update Rule Converge
to a Local Nash Equilibrium", Heusel et al.
[https://arxiv.org/abs/1706.08500].
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import logging
from compare_gan.metrics import eval_task

import tensorflow as tf
import tensorflow_gan as tfgan

# Special value returned when FID code returned exception.
FID_CODE_FAILED = 4242.0


class FIDScoreTask(eval_task.EvalTask):
  """Evaluation task for the FID score."""

  _LABEL = "fid_score"

  def run_after_session(self, fake_dset, real_dset):
    logging.info("Calculating FID.")
    with tf.Graph().as_default():
      fake_activations = tf.convert_to_tensor(fake_dset.activations)
      real_activations = tf.convert_to_tensor(real_dset.activations)
      fid = tfgan.eval.frechet_classifier_distance_from_activations(
          real_activations=real_activations,
          generated_activations=fake_activations)
      with self._create_session() as sess:
        fid = sess.run(fid)
      logging.info("Frechet Inception Distance: %.3f.", fid)
      return {self._LABEL: fid}


def compute_fid_from_activations(fake_activations, real_activations):
  """Returns the FID based on activations.

  Args:
    fake_activations: NumPy array with fake activations.
    real_activations: NumPy array with real activations.

  Returns:
    A float, the Frechet Inception Distance.
  """
  logging.info("Computing FID score.")
  assert fake_activations.shape == real_activations.shape
  with tf.Session(graph=tf.Graph()) as sess:
    fake_activations = tf.convert_to_tensor(fake_activations)
    real_activations = tf.convert_to_tensor(real_activations)
    fid = tfgan.eval.frechet_classifier_distance_from_activations(
        real_activations=real_activations,
        generated_activations=fake_activations)
    return sess.run(fid)
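# Usage sketch (added; the random arrays below are stand-ins for real
# Inception activations, which must have identical shapes):
#
#   import numpy as np
#   fake = np.random.randn(100, 2048).astype(np.float32)
#   real = np.random.randn(100, 2048).astype(np.float32)
#   print(compute_fid_from_activations(fake, real))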
[ "absl.logging.info", "tensorflow.convert_to_tensor", "tensorflow_gan.eval.frechet_classifier_distance_from_activations", "tensorflow.Graph" ]
[((2204, 2240), 'absl.logging.info', 'logging.info', (['"""Computing FID score."""'], {}), "('Computing FID score.')\n", (2216, 2240), False, 'from absl import logging\n'), ((1369, 1401), 'absl.logging.info', 'logging.info', (['"""Calculating FID."""'], {}), "('Calculating FID.')\n", (1381, 1401), False, 'from absl import logging\n'), ((2367, 2405), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['fake_activations'], {}), '(fake_activations)\n', (2387, 2405), True, 'import tensorflow as tf\n'), ((2429, 2467), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['real_activations'], {}), '(real_activations)\n', (2449, 2467), True, 'import tensorflow as tf\n'), ((2478, 2613), 'tensorflow_gan.eval.frechet_classifier_distance_from_activations', 'tfgan.eval.frechet_classifier_distance_from_activations', ([], {'real_activations': 'real_activations', 'generated_activations': 'fake_activations'}), '(real_activations=\n real_activations, generated_activations=fake_activations)\n', (2533, 2613), True, 'import tensorflow_gan as tfgan\n'), ((1461, 1504), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['fake_dset.activations'], {}), '(fake_dset.activations)\n', (1481, 1504), True, 'import tensorflow as tf\n'), ((1530, 1573), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['real_dset.activations'], {}), '(real_dset.activations)\n', (1550, 1573), True, 'import tensorflow as tf\n'), ((1586, 1721), 'tensorflow_gan.eval.frechet_classifier_distance_from_activations', 'tfgan.eval.frechet_classifier_distance_from_activations', ([], {'real_activations': 'real_activations', 'generated_activations': 'fake_activations'}), '(real_activations=\n real_activations, generated_activations=fake_activations)\n', (1641, 1721), True, 'import tensorflow_gan as tfgan\n'), ((1815, 1869), 'absl.logging.info', 'logging.info', (['"""Frechet Inception Distance: %.3f."""', 'fid'], {}), "('Frechet Inception Distance: %.3f.', fid)\n", (1827, 1869), False, 'from absl import logging\n'), ((2323, 2333), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2331, 2333), True, 'import tensorflow as tf\n'), ((1411, 1421), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1419, 1421), True, 'import tensorflow as tf\n')]
#!/usr/bin/env python import copy import itertools import pytest import math TEST_DATA = [ 1, 0, 0, 3, 1, 1, 2, 3, 1, 3, 4, 3, 1, 5, 0, 3, 2, 13, 1, 19, 1, 6, 19, 23, 2, 6, 23, 27, 1, 5, 27, 31, 2, 31, 9, 35, 1, 35, 5, 39, 1, 39, 5, 43, 1, 43, 10, 47, 2, 6, 47, 51, 1, 51, 5, 55, 2, 55, 6, 59, 1, 5, 59, 63, 2, 63, 6, 67, 1, 5, 67, 71, 1, 71, 6, 75, 2, 75, 10, 79, 1, 79, 5, 83, 2, 83, 6, 87, 1, 87, 5, 91, 2, 9, 91, 95, 1, 95, 6, 99, 2, 9, 99, 103, 2, 9, 103, 107, 1, 5, 107, 111, 1, 111, 5, 115, 1, 115, 13, 119, 1, 13, 119, 123, 2, 6, 123, 127, 1, 5, 127, 131, 1, 9, 131, 135, 1, 135, 9, 139, 2, 139, 6, 143, 1, 143, 5, 147, 2, 147, 6, 151, 1, 5, 151, 155, 2, 6, 155, 159, 1, 159, 2, 163, 1, 9, 163, 0, 99, 2, 0, 14, 0, ] def solve1(arr): max_iters = math.ceil(len(arr) / 4) # print(f"{max_iters}") for i in range(max_iters): op_code = arr[i * 4] if op_code == 99: break operand1 = arr[arr[i * 4 + 1]] operand2 = arr[arr[i * 4 + 2]] dest = arr[i * 4 + 3] if op_code == 1: arr[dest] = operand1 + operand2 elif op_code == 2: arr[dest] = operand1 * operand2 else: print(f"Invalid op_code found. {op_code}") break return arr def solve2(data_im): for noun, verb in itertools.product(range(100), range(100)): data = copy.copy(data_im) data[1] = noun data[2] = verb if 19690720 == solve1(data)[0]: print(f"Sol2=(noun={noun}, verb={verb}) ✔") break del data else: print("Solution not found. 😐") def apply_before_steps(data): data[1] = 12 data[2] = 2 def main(input_data): data = copy.copy(input_data) apply_before_steps(data) print("Sol1=" + str(solve1(data)[0])) solve2(input_data) @pytest.mark.parametrize( "x,y", [ ([1, 0, 0, 0, 99], [2, 0, 0, 0, 99]), ([2, 3, 0, 3, 99], [2, 3, 0, 6, 99]), ([2, 4, 4, 5, 99, 0], [2, 4, 4, 5, 99, 9801]), ([1, 1, 1, 4, 99, 5, 6, 0, 99], [30, 1, 1, 4, 2, 5, 6, 0, 99]), ], ) def test_solve(x, y): assert y == solve1(x) if __name__ == "__main__": main(TEST_DATA)
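# Added note on the program layout: memory is consumed in 4-cell instructions
# (opcode, src1 address, src2 address, dest address); opcode 1 adds, 2
# multiplies, and 99 halts. solve2 brute-forces the two inputs at positions 1
# and 2 ("noun" and "verb") until position 0 equals 19690720.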
[ "pytest.mark.parametrize", "copy.copy" ]
[((2527, 2755), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x,y"""', '[([1, 0, 0, 0, 99], [2, 0, 0, 0, 99]), ([2, 3, 0, 3, 99], [2, 3, 0, 6, 99]),\n ([2, 4, 4, 5, 99, 0], [2, 4, 4, 5, 99, 9801]), ([1, 1, 1, 4, 99, 5, 6, \n 0, 99], [30, 1, 1, 4, 2, 5, 6, 0, 99])]'], {}), "('x,y', [([1, 0, 0, 0, 99], [2, 0, 0, 0, 99]), ([2, \n 3, 0, 3, 99], [2, 3, 0, 6, 99]), ([2, 4, 4, 5, 99, 0], [2, 4, 4, 5, 99,\n 9801]), ([1, 1, 1, 4, 99, 5, 6, 0, 99], [30, 1, 1, 4, 2, 5, 6, 0, 99])])\n", (2550, 2755), False, 'import pytest\n'), ((2408, 2429), 'copy.copy', 'copy.copy', (['input_data'], {}), '(input_data)\n', (2417, 2429), False, 'import copy\n'), ((2063, 2081), 'copy.copy', 'copy.copy', (['data_im'], {}), '(data_im)\n', (2072, 2081), False, 'import copy\n')]
"""StyleGAN. This module implements teh Generative Adversarial Network described in: A Style-Based Generator Architecture for Generative Adversarial Networks <NAME> (NVIDIA), <NAME> (NVIDIA), <NAME> (NVIDIA) http://stylegan.xyz/paper Code derived from: https://github.com/SsnL/stylegan """ import collections import os import re import numpy as np import torch import torch.nn as nn import torch.nn.functional as F DIM_Z = collections.defaultdict(lambda: 512) FULL_RESOLUTIONS = { 'lsun_car': (512, 384), 'ff_hq': (1024, 1024), 'celeba_hq': (1024, 1024), 'lsun_bedroom': (256, 256), 'lsun_cat': (256, 256), } RESOLUTIONS = { 'ff_hq': 1024, 'celeba_hq': 1024, 'lsun_bedroom': 256, 'lsun_car': 512, 'lsun_cat': 256, } ROOT_DIR = os.path.abspath(os.path.dirname(__file__)) root_url = 'http://pretorched-x.csail.mit.edu/gans/StyleGAN' model_urls = { 'celeba_hq': { 1024: { 'G': os.path.join(root_url, 'celeba_hq_1024x1024_G-c8acef81.pth'), }, }, 'ff_hq': { 1024: { 'G': os.path.join(root_url, 'ff_hq_1024x1024_G-21a7044d.pth'), } }, 'lsun_bedroom': { 256: { 'G': os.path.join(root_url, 'lsun_bedroom_256x256_G-da907d98.pth'), }, }, 'lsun_car': { 512: { 'G': os.path.join(root_url, 'lsun_car_512x384_G-d2188b0a.pth'), }, }, 'lsun_cat': { 256: { 'G': os.path.join(root_url, 'lsun_cat_256x256_G-384e9e73.pth'), }, }, } def stylegan(pretrained='ff_hq', resolution=None): if pretrained is not None: resolution = RESOLUTIONS.get(pretrained) if resolution is None else resolution url = model_urls[pretrained][resolution]['G'] state_dict = torch.hub.load_state_dict_from_url(url) net = G(out_res=resolution) net.load_state_dict(state_dict) else: assert resolution is not None, 'Must specify pretrained model or resolution!' net = G(out_res=max(resolution)) return net class NonLinearityMeta(type): def __call__(cls, *args, **kwargs): return cls.activate(*args, **kwargs) class NonLinearity(object, metaclass=NonLinearityMeta): gain = NotImplemented activate = NotImplemented class ReLU(NonLinearity): gain = np.sqrt(2) activate = F.relu class LeakyReLU(NonLinearity): gain = np.sqrt(2) @staticmethod def activate(x, inplace=False): return F.leaky_relu(x, negative_slope=0.2, inplace=inplace) class ScaledParamModule(nn.Module): # linear w: [ fan_out, fan_in ] # conv w: [ nc_out, nc_in, k1, k2 ] # convT w: [ nc_in, nc_out, k1, k2 ], but let's ignore this case because # (1) the tf impl doesn't special-case # (2) convT is only used for fusing Upsample & Conv2d, and in that case, the # weight should be done as if it is for a Conv2d. # # NB: in tf code, use_wscale has default value False, but for StyleGAN it is # True everywhere, so I changed it. def scale_weight(self, gain=np.sqrt(2), use_wscale=True, lrmul=1, new_name='_weight'): weight = self.weight assert isinstance(weight, nn.Parameter) fan_in = np.prod(weight.shape[1:]) he_std = gain / np.sqrt(fan_in) # He init # Equalized learning rate and custom learning rate multiplier. if use_wscale: init_std = 1.0 / lrmul runtime_coef = he_std * lrmul else: init_std = he_std / lrmul runtime_coef = lrmul # Init variable using He init. 
weight.data.normal_(0, init_std) # add scale hook self.add_scale_hook('weight', new_name, runtime_coef) def scale_bias(self, lrmul=1, new_name='_bias'): if self.bias is None: assert not hasattr(self, new_name) # do not delete so we don't have to restore in forward # del self.bias self.register_parameter(new_name, None) return bias = self.bias assert isinstance(bias, nn.Parameter) # zero out bias.data.zero_() # add scale hook self.add_scale_hook('bias', new_name, lrmul) def add_scale_hook(self, name, new_name, coef): param = getattr(self, name) assert isinstance(param, nn.Parameter) assert not hasattr(self, new_name) delattr(self, name) self.register_parameter(new_name, param) # Note that the following line uses `m` rather than `self`, and thus # doesn't maintaing the reference and allows for deep copying. self.register_forward_pre_hook(lambda m, inp: setattr(m, name, getattr(m, new_name) * coef)) class ScaledParamLinear(nn.Linear, ScaledParamModule): def __init__(self, *args, gain=np.sqrt(2), use_wscale=True, lrmul=1, **kwargs): super().__init__(*args, **kwargs) self.scale_weight(gain, use_wscale, lrmul) self.scale_bias(lrmul) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format( self.in_features, self.out_features, self._bias is not None # use the _real param ) class ScaledParamConv2d(nn.Conv2d, ScaledParamModule): def __init__(self, *args, gain=np.sqrt(2), use_wscale=True, lrmul=1, **kwargs): super().__init__(*args, **kwargs) self.scale_weight(gain, use_wscale, lrmul) self.scale_bias(lrmul) def extra_repr(self): s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}' ', stride={stride}') if self.padding != (0,) * len(self.padding): s += ', padding={padding}' if self.dilation != (1,) * len(self.dilation): s += ', dilation={dilation}' if self.output_padding != (0,) * len(self.output_padding): s += ', output_padding={output_padding}' if self.groups != 1: s += ', groups={groups}' if self._bias is None: # use the _real param s += ', bias=False' return s.format(**self.__dict__) class UpConv2d(ScaledParamConv2d): # Fuse Upsample 2x and Conv2d if desirable. # NOTE [ Fusing Nearest Neighbor Upsampling and Conv2d to a ConvTranspose2d ] # # For exact match, we should flip the kernel along the spatial # dimensions, e.g., with a `.flip(2, 3)`. # This is because we will calculate the sum combinations in kernel # and then apply convT with stride so that each input pixel hits the # exact same kernel values as it would with upsample + conv, but # now summed as a single value. In ConvT, kernel and input are in # reversed space, in the sense that the top-left input pixel sees # top-left kernel region in conv, but bottom-right in convT. # However, the tf code doesn't do this and this is also a problem in # tf, so to keep trained weights compatibility, we don't flip. 
def __init__(self, in_res, in_channels, out_channels, kernel_size, padding=0, bias=True, gain=np.sqrt(2), use_wscale=True, lrmul=1): super().__init__(in_channels, out_channels, kernel_size, padding=padding, bias=bias, gain=gain, use_wscale=use_wscale, lrmul=lrmul) self.do_fuse = in_res * 2 >= 128 def forward(self, x): if self.do_fuse: w = F.pad(self.weight.transpose(0, 1), (1, 1, 1, 1)) w = w[:, :, 1:, 1:] + w[:, :, 1:, :-1] + w[:, :, :-1, 1:] + w[:, :, :-1, :-1] return F.conv_transpose2d(x, w, self.bias, stride=(2, 2), padding=self.padding) else: return nn.Conv2d.forward(self, F.interpolate(x, scale_factor=2)) class Blur2d(nn.Module): def __init__(self, kernel=[1, 2, 1], padding=1, normalize=True, flip=False, stride=1): super().__init__() self.stride = stride self.padding = padding # build kernel kernel = torch.as_tensor(kernel, dtype=torch.get_default_dtype()) if kernel.dim() == 1: kernel = kernel[:, None] * kernel assert kernel.dim() == 2 if normalize: kernel /= kernel.sum() if flip: kernel = kernel.flip(0, 1) if kernel.numel() == 1 and kernel[0, 0].item() == 1: # No-op => early exit. self.no_conv = True else: self.no_conv = False # prepare for conv2d # use single channel (merge nc to batch dimension) self.register_buffer('kernel', kernel.expand(1, 1, -1, -1).contiguous()) def forward(self, x): if self.no_conv: if self.stride == 1: return x else: return x[:, :, ::self.stride, ::self.stride] else: b, nc, h, w = x.size() y = F.conv2d(x.reshape(-1, 1, h, w), self.kernel, bias=None, stride=self.stride, padding=self.padding) return y.view(b, nc, y.size(2), y.size(3)) class MappingG(nn.Module): def __init__(self, z_dim=512, w_dim=512, dim_latent=512, n_layers=8, nonlinearity=LeakyReLU, use_wscale=True, use_class_labels=False, nlabels=1, embed_size=0, lrmul=0.01): super().__init__() self.z_dim = z_dim self.w_dim = w_dim self.dim_latent = dim_latent assert n_layers >= 1 self.n_layers = n_layers self.act = nonlinearity scale_param_opt = dict(gain=self.act.gain, lrmul=lrmul, use_wscale=use_wscale) self.use_class_labels = use_class_labels if self.use_class_labels: self.embedding = nn.Embedding(nlabels, embed_size) dim = z_dim + embed_size if use_class_labels else z_dim self.fcs = nn.ModuleList() for i in range(n_layers): self.fcs.append( ScaledParamLinear(dim, dim_latent if i < (n_layers - 1) else w_dim, **scale_param_opt), ) dim = dim_latent def forward(self, z, y=None): if self.use_class_labels: yembed = self.embedding(y) yembed = yembed / torch.norm(yembed, p=2, dim=1, keepdim=True) z = torch.cat([z, yembed], dim=1) # NB: this is not z.norm(p=2, dim=1, keepdim=True)!!!! 
z = z * z.pow(2).mean(dim=1, keepdim=True).add_(1e-8).rsqrt() for fc in self.fcs: z = self.act(fc(z)) return z def __repr__(self): return '{}(z_dim={}, w_dim={}, dim_latent={}, n_layers={}, ...)'.format( self.__class__.__name__, self.z_dim, self.w_dim, self.dim_latent, self.n_layers) class SynthesisG(nn.Module): class AddNoise(ScaledParamModule): # `B` block + Noise in Fig1 def __init__(self, nc, res): super().__init__() self.res = res self.weight = nn.Parameter(torch.zeros(nc, 1, 1, requires_grad=True)) self.bias = nn.Parameter(torch.zeros(nc, 1, 1, requires_grad=True)) self.scale_bias(lrmul=1) def forward(self, x, noise=None): if noise is None: noise = torch.randn(x.size(0), 1, self.res, self.res, device=x.device, dtype=x.dtype) return x + noise * self.weight + self.bias class AffineStyle(nn.Module): # `A` block + AdaIN in Fig1 def __init__(self, w_dim, nc): super().__init__() self.nc = nc self.fc = ScaledParamLinear(w_dim, nc * 2, gain=1) def forward(self, x, w): normalized = F.instance_norm(x, weight=None, bias=None, eps=1e-8) affine_params = self.fc(w).view(-1, 2, self.nc, 1, 1) return normalized * (affine_params[:, 0].add_(1)) + affine_params[:, 1] class Block(nn.Module): def __init__(self, w_dim, in_res, in_nc, out_res, out_nc, blur_filter=[1, 2, 1], nonlinearity=LeakyReLU, use_wscale=True, lrmul=1, skip_first_layer=False): # skip_first_layer skips the upsample & first conv, used for first block super().__init__() self.skip_first_layer = skip_first_layer self.act = nonlinearity scale_param_opt = dict(gain=self.act.gain, lrmul=lrmul, use_wscale=use_wscale) # NB: the following (up)conv* layers have bias=False, because we # assume that we are always using noise, and the bias is applied # in noise* layers. This is still consistent with official tf # code. if not self.skip_first_layer: self.upconv1 = UpConv2d(in_res, in_nc, out_nc, 3, padding=1, bias=False, **scale_param_opt) assert len(blur_filter) % 2 == 1 self.blur1 = Blur2d(blur_filter, padding=(len(blur_filter) >> 1)) self.noise1 = SynthesisG.AddNoise(out_nc, out_res) self.style1 = SynthesisG.AffineStyle(w_dim, out_nc) self.conv2 = ScaledParamConv2d(out_nc, out_nc, 3, padding=1, bias=False, **scale_param_opt) self.noise2 = SynthesisG.AddNoise(out_nc, out_res) self.style2 = SynthesisG.AffineStyle(w_dim, out_nc) def forward(self, x, ws, noises=(None, None)): if not self.skip_first_layer: x = self.blur1(self.upconv1(x)) x = self.noise1(x, noises[0]) x = self.act(x) x = self.style1(x, ws[0]) x = self.conv2(x) x = self.noise2(x, noises[1]) x = self.act(x) x = self.style2(x, ws[1]) return x def __init__(self, w_dim=512, image_out_nc=3, image_out_res=1024, nc_base=8192, nc_decay=1.0, nc_max=512, nonlinearity=LeakyReLU, use_wscale=True, lrmul=1): super().__init__() self.out_res = image_out_res log_image_out_res = int(np.log2(image_out_res)) assert image_out_res == 2 ** log_image_out_res and image_out_res >= 4 # output nc of a block. # # log_res refers to the input to the block, which is immediately # upsampled. # # In the first block, there is no upsample, and input is directly 4x4, # but you should still treat as if it is upsampled from 2x2 and use # log_res=1. 
def get_out_nc(log_res): return min(int(nc_base / 2 ** (log_res * nc_decay)), nc_max) self.const = nn.Parameter(torch.ones(1, get_out_nc(1), 4, 4, requires_grad=True)) # start at 4x4 in_res = 2 in_nc = None # first shouldn't matter for in_log_res in range(1, log_image_out_res): out_res = in_res * 2 out_nc = get_out_nc(in_log_res) b = SynthesisG.Block( w_dim, in_res, in_nc, out_res, out_nc, skip_first_layer=(in_log_res == 1), nonlinearity=nonlinearity, use_wscale=use_wscale, lrmul=lrmul, ) self.add_module('{res}x{res}'.format(res=out_res), b) to_rgb = ScaledParamConv2d(out_nc, image_out_nc, 1, gain=1, use_wscale=use_wscale, lrmul=lrmul) out_log_res = in_log_res + 1 self.add_module('{res}x{res}_to_rgb_lod{lod}'.format( res=out_res, lod=(log_image_out_res - out_log_res)), to_rgb) in_res = out_res in_nc = out_nc assert in_res == image_out_res self.num_blocks = len(self.blocks) self.num_layers = self.num_blocks * 2 @property def blocks(self): blocks = [] children_dict = {} for name, module in self.named_children(): children_dict[name] = module log_out_res = int(np.log2(self.out_res)) out_res = 4 for _ in range(1, log_out_res): name = '{res}x{res}'.format(res=out_res) module = children_dict[name] blocks.append(module) out_res = out_res * 2 return blocks @property def rgb_convs(self): rgb_convs = [] children_dict = {} for name, module in self.named_children(): children_dict[name] = module log_out_res = int(np.log2(self.out_res)) out_res = 4 for in_log_res in range(1, log_out_res): out_log_res = in_log_res + 1 name = '{res}x{res}_to_rgb_lod{lod}'.format(res=out_res, lod=(log_out_res - out_log_res)) module = children_dict[name] rgb_convs.append(module) out_res = out_res * 2 return rgb_convs # allow taking in a list of W for style mixing def forward(self, ws, lod=0, alpha=1, noises=None): blocks = self.blocks rgb_convs = self.rgb_convs assert 0 <= lod < len(blocks) stop_after = len(blocks) - lod - 1 num_layers = (stop_after + 1) * 2 if isinstance(ws, torch.Tensor) and ws.dim() == 3: ws = ws.unbind(dim=1) # assuming its [batch x num_layer x w] if not isinstance(ws, collections.abc.Sequence): ws = [ws for _ in range(num_layers)] if not isinstance(noises, collections.abc.Sequence): noises = [noises for _ in range(num_layers)] x = self.const.expand(ws[0].size(0), -1, -1, -1) for i, b in enumerate(blocks): block_extra_inp_indices = slice(i * 2, i * 2 + 2) x = b(x, ws[block_extra_inp_indices], noises=noises[block_extra_inp_indices]) if i == stop_after - 1: y = F.interpolate(x, scale_factor=2) y = rgb_convs[stop_after - 1](y) if i == stop_after: x = rgb_convs[i](x) return x if stop_after == 0 else (1 - alpha) * y + alpha * x class G(nn.Module): def __init__(self, z_dim=512, w_dim=512, out_nc=3, out_res=1024, use_class_labels=False, w_avg_beta=0.995, # find moving average of w, in training style_mixing_prob=0.995, # prob of applying style mixing, in training truncation_psi=0.7, # mixing rate to w_avg for truncation, in eval truncation_cutoff=8, # layer cutoff index index for truncation, in eval nlabels=1, # number of classes (if using class labels) embed_size=256, # embedding size for encoding class label information nonlinearity=LeakyReLU, use_wscale=True, **kwargs): super().__init__() self.register_buffer('w_avg', torch.zeros(w_dim)) self.z_dim = z_dim self.w_dim = w_dim self.out_nc = out_nc self.out_res = out_res self.w_avg_beta = w_avg_beta self.style_mixing_prob = style_mixing_prob self.truncation_psi = truncation_psi self.truncation_cutoff = truncation_cutoff self.mapping = MappingG(z_dim, w_dim, 
nonlinearity=nonlinearity, use_wscale=use_wscale, use_class_labels=use_class_labels, nlabels=nlabels, embed_size=embed_size) self.synthesis = SynthesisG(w_dim, out_nc, out_res, nonlinearity=nonlinearity, use_wscale=use_wscale) def forward(self, z, y=None, lod=0, alpha=1, w=None, noises=None, get_w=False, w_avg_beta=None, style_mixing_prob=None, truncation_psi=None, truncation_cutoff=None): # really is the total number of layers for the synthesis network total_num_layers = self.synthesis.num_layers forward_num_layers = total_num_layers - lod * 2 if w is None: assert z is not None and z.dim() == 2 and z.size(1) == self.z_dim w = self.mapping(z, y) else: assert w.dim() == 2 and w.size(1) == self.w_dim if get_w: mapping_output_w = w.clone() ws = [w for _ in range(total_num_layers)] if self.training: # update moving average if w_avg_beta is None: w_avg_beta = self.w_avg_beta if w_avg_beta != 1: with torch.no_grad(): torch.lerp(w.mean(0), self.w_avg, self.w_avg_beta, out=self.w_avg) # style mixing if style_mixing_prob is None: style_mixing_prob = self.style_mixing_prob if style_mixing_prob > 0 and torch.rand((), device='cpu').item() < style_mixing_prob: w2 = self.mapping(torch.randn_like(z), y) cutoff = int(torch.randint(low=1, high=forward_num_layers, size=(), device='cpu').item()) # w for < cutoff; w2 for >= cutoff ws = ws[:cutoff] + [w2 for _ in range(forward_num_layers - cutoff)] else: # truncation if truncation_psi is None: truncation_psi = self.truncation_psi if truncation_cutoff is None: truncation_cutoff = self.truncation_cutoff # truncate for < cutoff if truncation_cutoff > 0 and truncation_psi != 1: expanded_avg_w = self.w_avg.expand_as(ws[0]) # in eval part, current code implies that ws is a list of the same w # tensor repeated many times since there is no style mixing, but # let's be general and detect before optimizing for that. 
if all(_w is w for _w in ws): truncate_w = torch.lerp(expanded_avg_w, w, truncation_psi) ws = [truncate_w for _ in range(truncation_cutoff)] + ws[:(forward_num_layers - truncation_cutoff)] else: for i in range(truncation_cutoff): # use out-of-place because these ws may be references to the # same tensor ws[i] = torch.lerp(expanded_avg_w, w[i], truncation_psi) ims = self.synthesis(ws, lod=lod, noises=noises, alpha=alpha) if get_w: return mapping_output_w, ims else: return ims def __repr__(self): return '{}(z_dim={}, w_dim={}, out_nc={}, out_res={}, ...)'.format( self.__class__.__name__, self.z_dim, self.w_dim, self.out_nc, self.out_res) def convert_tf_weights_to_state_dict(self, tf_net, device=torch.device('cpu')): # full of hacks state_dict = {} def raise_unexpected(tf_k, v): raise RuntimeError("Unexpected key '{}' with shape: {}".format(tf_k, v.size())) def transform_conv_w(v): # tf: [ k1, k2, nc_in, nc_out ] # pt: [ nc_out, nc_in, k1, k2 ] return v.permute(3, 2, 0, 1) def transform_fc_w(v): # tf: [ fan_in, fan_out ] # pt: [ fan_out, fan_in ] return v.t() def sub_synthesis(k, tf_k, v): k = k.replace('G_synthesis.', 'synthesis.') def replace_tail_if_match(pattern, new): nonlocal k if k.endswith(pattern): k = k[:-len(pattern)] + new return True return False # deal with the first block if k == 'synthesis.4x4.Const.const': return 'synthesis.const', v elif k.startswith('synthesis.4x4.'): if 'synthesis.4x4.Const' in k: k = k.replace('synthesis.4x4.Const', 'synthesis.4x4.Conv0_up') elif 'synthesis.4x4.Conv' in k: k = k.replace('synthesis.4x4.Conv', 'synthesis.4x4.Conv1') else: raise_unexpected(tf_k, v) if replace_tail_if_match('.Conv0_up.weight', '.upconv1._weight'): # noqa: E241, E202 return k, transform_conv_w(v) if replace_tail_if_match('.Conv0_up.Noise.weight', '.noise1.weight'): # noqa: E241, E202 return k, v.view(-1, 1, 1) if replace_tail_if_match('.Conv0_up.bias', '.noise1._bias'): # noqa: E241, E202 return k, v.view(-1, 1, 1) if replace_tail_if_match('.Conv0_up.StyleMod.weight', '.style1.fc._weight'): # noqa: E241, E202 return k, transform_fc_w(v) if replace_tail_if_match('.Conv0_up.StyleMod.bias', '.style1.fc._bias'): # noqa: E241, E202 return k, v if 'Conv0_up' in k: raise_unexpected(tf_k, v) if replace_tail_if_match('.Conv1.weight', '.conv2._weight'): # noqa: E241, E202 return k, transform_conv_w(v) if replace_tail_if_match('.Conv1.Noise.weight', '.noise2.weight'): # noqa: E241, E202 return k, v.view(-1, 1, 1) if replace_tail_if_match('.Conv1.bias', '.noise2._bias'): # noqa: E241, E202 return k, v.view(-1, 1, 1) if replace_tail_if_match('.Conv1.StyleMod.weight', '.style2.fc._weight'): # noqa: E241, E202 return k, transform_fc_w(v) if replace_tail_if_match('.Conv1.StyleMod.bias', '.style2.fc._bias'): # noqa: E241, E202 return k, v if 'Conv1' in k: raise_unexpected(tf_k, v) m = re.match(r'^synthesis\.ToRGB_lod(\d+)\.(weight|bias)$', k) if m: lod = int(m.group(1)) k = 'synthesis.{res}x{res}_to_rgb_lod{lod}._{name}'.format( res=int(self.out_res / 2 ** lod), lod=lod, name=m.group(2)) if m.group(2) == 'weight': v = transform_conv_w(v) return k, v raise_unexpected(tf_k, v) for tf_k, tf_v in tf_net.vars.items(): assert '.' 
not in tf_k k = tf_k.replace('/', '.') v = torch.as_tensor(tf_v.eval()) if k in {'lod', 'G_synthesis.lod'} or k.startswith('G_synthesis.noise'): # no input buffer continue elif k == 'dlatent_avg': k = 'w_avg' elif k.startswith('G_synthesis.'): k, v = sub_synthesis(k, tf_k, v) elif k.startswith('G_mapping.'): m = re.match(r'^G_mapping\.Dense(\d+)\.(weight|bias)$', k) if not m: raise_unexpected(tf_k, v) k = 'mapping.fcs.{}._{}'.format(m.group(1), m.group(2)) if m.group(2) == 'weight': v = transform_fc_w(v) else: raise_unexpected(tf_k, v) state_dict[k] = v # tf state dict doesn't have the blur kernels, but pytorch wants to see # them for k, v in self.state_dict().items(): if re.match(r'^synthesis\.\d+x\d+\.blur1\.kernel$', k): state_dict[k] = v.detach() # device & contiguity for k in state_dict: state_dict[k] = state_dict[k].to(device).contiguous() return state_dict @property def dim_z(self): return self.z_dim class D(nn.Module): # Discriminator from https://arxiv.org/pdf/1710.10196.pdf class MinibatchStddev(nn.Module): def __init__(self): super().__init__() def forward(self, x, group_size=4): ''' Implements the MinibatchStddevLayer from https://arxiv.org/pdf/1710.10196.pdf group size: int, must divide the batch size in: BS x C x H x W out: BS x (C + 1) x H x W ''' s = x.size() group_size = min(group_size, s[0]) y = x.view(group_size, -1, s[1], s[2], s[3]) y = y - torch.mean(y, 0, keepdim=True) y = (y**2).mean(0, keepdim=False) y = torch.sqrt(y + 10**-8) # y = y.mean((1, 2, 3), keepdim=True).expand_as(x) y = y.mean((1, 2, 3), keepdim=True).repeat(group_size, 1, s[2], s[3]) return torch.cat([x, y], 1) class ConvBlock(nn.Module): def __init__(self, in_nc, out_nc, last_layer=False, nonlinearity=LeakyReLU, use_wscale=True, lrmul=1.0): super().__init__() self.act = nonlinearity self.last_layer = last_layer scale_param_opt = dict(gain=self.act.gain, lrmul=lrmul, use_wscale=use_wscale) if not self.last_layer: self.blur = Blur2d(kernel=[1, 2, 1], normalize=True, stride=1, padding=1) self.pool = Blur2d(kernel=[0.5, 0.5], normalize=False, stride=2, padding=0) self.conv1 = ScaledParamConv2d(in_nc, in_nc, 3, padding=1, bias=True, **scale_param_opt) self.conv2 = ScaledParamConv2d(in_nc, out_nc, 3, padding=1, bias=True, **scale_param_opt) else: self.minibatch_stddev = D.MinibatchStddev() self.conv1 = ScaledParamConv2d(in_nc + 1, in_nc, 3, padding=1, bias=True, **scale_param_opt) self.conv2 = ScaledParamLinear(in_nc * 16, out_nc, bias=True, **scale_param_opt) def forward(self, x): if self.last_layer: x = self.minibatch_stddev(x) out = self.act(self.conv1(x)) if self.last_layer: out = out.view(out.size(0), -1) else: out = self.blur(out) # out = self.act(self.conv2(out)) #possible change in order out = self.conv2(out) if not self.last_layer: out = self.pool(out) return out def __init__(self, image_in_nc=3, out_res=1024, nc_base=16, nc_decay=1.0, nc_max=512, nonlinearity=LeakyReLU, use_wscale=True, use_class_labels=False, nlabels=None, lrmul=1, **kwargs): super().__init__() self.out_res = out_res log_out_res = int(np.log2(out_res)) assert out_res == 2 ** log_out_res and out_res >= 4 # output nc of a block. # # log_res refers to the input to the block, which is immediately # upsampled. # # In the first block, there is no upsample, and input is directly 4x4, # but you should still treat as if it is upsampled from 2x2 and use # log_res=1. 
def get_in_nc(log_res): return min(int(nc_base * 2**(log_res - 1)), nc_max) def get_out_nc(log_res): return min(int(nc_base * 2**log_res), nc_max) # plain list # we will register them using more meaningful names, mainly to be easier # loading tf weights, which are stored in namespaces alike 4x4, 16x16, # etc. # start at 4x4 in_res = 2 # first shouldn't matter for in_log_res in reversed(range(1, log_out_res)): out_res = in_res * 2 in_nc = get_in_nc(in_log_res) out_nc = get_out_nc(in_log_res) from_rgb = ScaledParamConv2d(image_in_nc, in_nc, kernel_size=1, gain=nonlinearity.gain, bias=True, use_wscale=use_wscale, lrmul=lrmul) b = D.ConvBlock(in_nc, out_nc, last_layer=(in_log_res == log_out_res - 1), nonlinearity=nonlinearity, use_wscale=use_wscale, lrmul=lrmul) self.add_module('{res}x{res}'.format(res=out_res), b) out_log_res = in_log_res + 1 self.add_module('{res}x{res}_from_rgb_lod{lod}'.format( res=out_res, lod=(out_log_res - 2)), from_rgb) in_res = out_res assert in_res == out_res self.num_blocks = len(self.blocks) self.num_layers = self.num_blocks * 2 self.use_class_labels = use_class_labels self.avgpool = Blur2d(kernel=[0.5, 0.5], normalize=False, stride=2, padding=0) if self.use_class_labels: self.fc = ScaledParamLinear(min(get_out_nc(log_out_res - 1), nc_max), nlabels, gain=1.0, bias=True, use_wscale=True, lrmul=lrmul) else: self.fc = ScaledParamLinear(min(get_out_nc(log_out_res - 1), nc_max), 1, gain=1.0, bias=True, use_wscale=True, lrmul=lrmul) @property def blocks(self): blocks = [] children_dict = {} for name, module in self.named_children(): children_dict[name] = module log_out_res = int(np.log2(self.out_res)) out_res = 4 for _ in reversed(range(1, log_out_res)): name = '{res}x{res}'.format(res=out_res) module = children_dict[name] blocks.append(module) out_res = out_res * 2 return blocks @property def rgb_convs(self): rgb_convs = [] children_dict = {} for name, module in self.named_children(): children_dict[name] = module log_out_res = int(np.log2(self.out_res)) out_res = 4 for in_log_res in reversed(range(1, log_out_res)): out_log_res = in_log_res + 1 name = '{res}x{res}_from_rgb_lod{lod}'.format(res=out_res, lod=(out_log_res - 2)) module = children_dict[name] rgb_convs.append(module) out_res = out_res * 2 return rgb_convs def forward(self, x, labels=None, lod=0, alpha=1): blocks = self.blocks rgb_convs = self.rgb_convs assert 0 <= lod < len(blocks) stop_after = len(blocks) - lod - 1 if stop_after != 0: y = self.avgpool(x) y = rgb_convs[stop_after - 1](y) x = rgb_convs[stop_after](x) x = blocks[stop_after](x) # x = alpha * x + (1 - alpha) * y x = torch.lerp(y, x, alpha) stop_after -= 1 else: x = rgb_convs[stop_after](x) for i, b in reversed(list(enumerate(blocks))): if i <= stop_after: x = b(x) x = x.view(x.size(0), -1) out = self.fc(x) if self.use_class_labels: if labels.dim() != 2: labels = labels.unsqueeze(1) out = out.gather(1, labels) # index = Variable(torch.LongTensor(range(out.size(0)))) # if labels.is_cuda: # index = index.cuda() # out = out[index, labels] return out def convert_tf_weights_to_state_dict(self, tf_net, device=torch.device('cpu')): # full of hacks state_dict = {} def raise_unexpected(tf_k, v): raise RuntimeError("Unexpected key '{}' with shape: {}".format(tf_k, v.size())) def transform_conv_w(v): # tf: [ k1, k2, nc_in, nc_out ] # pt: [ nc_out, nc_in, k1, k2 ] return v.permute(3, 2, 0, 1) def transform_fc_w(v): # tf: [ fan_in, fan_out ] # pt: [ fan_out, fan_in ] return v.t() def from_rgb(k, tf_k, v): m = re.match(r'^FromRGB_lod(\d+)\.(weight|bias)$', k) if 
m: lod = int(m.group(1)) k = '{res}x{res}_from_rgb_lod{lod}._{name}'.format( res=int(self.out_res / 2 ** lod), lod=lod, name=m.group(2)) if m.group(2) == 'weight': v = transform_conv_w(v) else: v = v.view(-1) return k, v def sub_synthesis(k, tf_k, v): def replace_tail_if_match(pattern, new): nonlocal k if k.endswith(pattern): k = k[:-len(pattern)] + new return True return False if k.startswith('4x4.'): if '4x4.Conv' in k: k = k.replace('4x4.Conv', '4x4.Conv0') elif '4x4.Dense0' in k: if k == '4x4.Dense0.weight': return '4x4.conv2._weight', transform_fc_w(v) if k == '4x4.Dense0.bias': return '4x4.conv2._bias', v.view(-1) raise_unexpected(tf_k, v) elif '4x4.Dense1' in k: k = k.replace('4x4.Dense1', 'fc') if k.endswith('weight'): k = k.replace('weight', '_weight') return k, transform_fc_w(v) if k.endswith('bias'): k = k.replace('bias', '_bias') return k, v raise_unexpected(tf_k, v) else: raise_unexpected(tf_k, v) if replace_tail_if_match('.Conv0.weight', '.conv1._weight'): # noqa: E241, E202 return k, transform_conv_w(v) if replace_tail_if_match('.Conv0.bias', '.conv1._bias'): # noqa: E241, E202 return k, v.view(-1) if 'Conv0' in k: raise_unexpected(tf_k, v) if replace_tail_if_match('.Conv1_down.weight', '.conv2._weight'): # noqa: E241, E202 return k, transform_conv_w(v) if replace_tail_if_match('.Conv1_down.bias', '.conv2._bias'): # noqa: E241, E202 return k, v.view(-1) if 'Conv1' in k: raise_unexpected(tf_k, v) raise_unexpected(tf_k, v) for tf_k, tf_v in tf_net.vars.items(): assert '.' not in tf_k k = tf_k.replace('/', '.') v = torch.as_tensor(tf_v.eval()) if k in {'lod'}: # no input buffer continue elif k.startswith('FromRGB'): k, v = from_rgb(k, tf_k, v) elif 'weight' in k or 'bias' in k: k, v = sub_synthesis(k, tf_k, v) else: raise_unexpected(tf_k, v) state_dict[k] = v # tf state dict doesn't have the blur kernels, but pytorch wants to see # them for k, v in self.state_dict().items(): if re.match(r'\d+x\d+\.blur\.kernel$', k): state_dict[k] = v.detach() if re.match(r'\d+x\d+\.pool\.kernel$', k): state_dict[k] = v.detach() if k == 'avgpool.kernel': state_dict[k] = v.detach() # device & contiguity for k in state_dict: state_dict[k] = state_dict[k].to(device).contiguous() return state_dict
[ "torch.sqrt", "torch.nn.Embedding", "torch.cat", "collections.defaultdict", "torch.nn.functional.leaky_relu", "torch.device", "torch.no_grad", "os.path.join", "numpy.prod", "os.path.dirname", "torch.lerp", "torch.zeros", "torch.hub.load_state_dict_from_url", "torch.mean", "torch.randint", "torch.randn_like", "torch.nn.ModuleList", "numpy.log2", "torch.norm", "re.match", "torch.rand", "torch.nn.functional.instance_norm", "torch.nn.functional.conv_transpose2d", "torch.nn.functional.interpolate", "torch.get_default_dtype", "numpy.sqrt" ]
[((428, 465), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 512)'], {}), '(lambda : 512)\n', (451, 465), False, 'import collections\n'), ((790, 815), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (805, 815), False, 'import os\n'), ((2324, 2334), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2331, 2334), True, 'import numpy as np\n'), ((2401, 2411), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2408, 2411), True, 'import numpy as np\n'), ((1786, 1825), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', (['url'], {}), '(url)\n', (1820, 1825), False, 'import torch\n'), ((2482, 2534), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x'], {'negative_slope': '(0.2)', 'inplace': 'inplace'}), '(x, negative_slope=0.2, inplace=inplace)\n', (2494, 2534), True, 'import torch.nn.functional as F\n'), ((3087, 3097), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3094, 3097), True, 'import numpy as np\n'), ((3241, 3266), 'numpy.prod', 'np.prod', (['weight.shape[1:]'], {}), '(weight.shape[1:])\n', (3248, 3266), True, 'import numpy as np\n'), ((4836, 4846), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4843, 4846), True, 'import numpy as np\n'), ((5299, 5309), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5306, 5309), True, 'import numpy as np\n'), ((7067, 7077), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7074, 7077), True, 'import numpy as np\n'), ((9766, 9781), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (9779, 9781), True, 'import torch.nn as nn\n'), ((22423, 22442), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (22435, 22442), False, 'import torch\n'), ((34470, 34489), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (34482, 34489), False, 'import torch\n'), ((946, 1006), 'os.path.join', 'os.path.join', (['root_url', '"""celeba_hq_1024x1024_G-c8acef81.pth"""'], {}), "(root_url, 'celeba_hq_1024x1024_G-c8acef81.pth')\n", (958, 1006), False, 'import os\n'), ((1074, 1130), 'os.path.join', 'os.path.join', (['root_url', '"""ff_hq_1024x1024_G-21a7044d.pth"""'], {}), "(root_url, 'ff_hq_1024x1024_G-21a7044d.pth')\n", (1086, 1130), False, 'import os\n'), ((1203, 1264), 'os.path.join', 'os.path.join', (['root_url', '"""lsun_bedroom_256x256_G-da907d98.pth"""'], {}), "(root_url, 'lsun_bedroom_256x256_G-da907d98.pth')\n", (1215, 1264), False, 'import os\n'), ((1334, 1391), 'os.path.join', 'os.path.join', (['root_url', '"""lsun_car_512x384_G-d2188b0a.pth"""'], {}), "(root_url, 'lsun_car_512x384_G-d2188b0a.pth')\n", (1346, 1391), False, 'import os\n'), ((1461, 1518), 'os.path.join', 'os.path.join', (['root_url', '"""lsun_cat_256x256_G-384e9e73.pth"""'], {}), "(root_url, 'lsun_cat_256x256_G-384e9e73.pth')\n", (1473, 1518), False, 'import os\n'), ((3291, 3306), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (3298, 3306), True, 'import numpy as np\n'), ((7538, 7610), 'torch.nn.functional.conv_transpose2d', 'F.conv_transpose2d', (['x', 'w', 'self.bias'], {'stride': '(2, 2)', 'padding': 'self.padding'}), '(x, w, self.bias, stride=(2, 2), padding=self.padding)\n', (7556, 7610), True, 'import torch.nn.functional as F\n'), ((9647, 9680), 'torch.nn.Embedding', 'nn.Embedding', (['nlabels', 'embed_size'], {}), '(nlabels, embed_size)\n', (9659, 9680), True, 'import torch.nn as nn\n'), ((10191, 10220), 'torch.cat', 'torch.cat', (['[z, yembed]'], {'dim': '(1)'}), '([z, yembed], dim=1)\n', (10200, 10220), False, 'import torch\n'), ((11548, 11601), 
'torch.nn.functional.instance_norm', 'F.instance_norm', (['x'], {'weight': 'None', 'bias': 'None', 'eps': '(1e-08)'}), '(x, weight=None, bias=None, eps=1e-08)\n', (11563, 11601), True, 'import torch.nn.functional as F\n'), ((13904, 13926), 'numpy.log2', 'np.log2', (['image_out_res'], {}), '(image_out_res)\n', (13911, 13926), True, 'import numpy as np\n'), ((15731, 15752), 'numpy.log2', 'np.log2', (['self.out_res'], {}), '(self.out_res)\n', (15738, 15752), True, 'import numpy as np\n'), ((16208, 16229), 'numpy.log2', 'np.log2', (['self.out_res'], {}), '(self.out_res)\n', (16215, 16229), True, 'import numpy as np\n'), ((18615, 18633), 'torch.zeros', 'torch.zeros', (['w_dim'], {}), '(w_dim)\n', (18626, 18633), False, 'import torch\n'), ((25326, 25386), 're.match', 're.match', (['"""^synthesis\\\\.ToRGB_lod(\\\\d+)\\\\.(weight|bias)$"""', 'k'], {}), "('^synthesis\\\\.ToRGB_lod(\\\\d+)\\\\.(weight|bias)$', k)\n", (25334, 25386), False, 'import re\n'), ((26828, 26883), 're.match', 're.match', (['"""^synthesis\\\\.\\\\d+x\\\\d+\\\\.blur1\\\\.kernel$"""', 'k'], {}), "('^synthesis\\\\.\\\\d+x\\\\d+\\\\.blur1\\\\.kernel$', k)\n", (26836, 26883), False, 'import re\n'), ((27855, 27879), 'torch.sqrt', 'torch.sqrt', (['(y + 10 ** -8)'], {}), '(y + 10 ** -8)\n', (27865, 27879), False, 'import torch\n'), ((28042, 28062), 'torch.cat', 'torch.cat', (['[x, y]', '(1)'], {}), '([x, y], 1)\n', (28051, 28062), False, 'import torch\n'), ((29917, 29933), 'numpy.log2', 'np.log2', (['out_res'], {}), '(out_res)\n', (29924, 29933), True, 'import numpy as np\n'), ((32466, 32487), 'numpy.log2', 'np.log2', (['self.out_res'], {}), '(self.out_res)\n', (32473, 32487), True, 'import numpy as np\n'), ((32954, 32975), 'numpy.log2', 'np.log2', (['self.out_res'], {}), '(self.out_res)\n', (32961, 32975), True, 'import numpy as np\n'), ((33778, 33801), 'torch.lerp', 'torch.lerp', (['y', 'x', 'alpha'], {}), '(y, x, alpha)\n', (33788, 33801), False, 'import torch\n'), ((35019, 35069), 're.match', 're.match', (['"""^FromRGB_lod(\\\\d+)\\\\.(weight|bias)$"""', 'k'], {}), "('^FromRGB_lod(\\\\d+)\\\\.(weight|bias)$', k)\n", (35027, 35069), False, 'import re\n'), ((38074, 38115), 're.match', 're.match', (['"""\\\\d+x\\\\d+\\\\.blur\\\\.kernel$"""', 'k'], {}), "('\\\\d+x\\\\d+\\\\.blur\\\\.kernel$', k)\n", (38082, 38115), False, 'import re\n'), ((38172, 38213), 're.match', 're.match', (['"""\\\\d+x\\\\d+\\\\.pool\\\\.kernel$"""', 'k'], {}), "('\\\\d+x\\\\d+\\\\.pool\\\\.kernel$', k)\n", (38180, 38213), False, 'import re\n'), ((7668, 7700), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)'}), '(x, scale_factor=2)\n', (7681, 7700), True, 'import torch.nn.functional as F\n'), ((7979, 8004), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (8002, 8004), False, 'import torch\n'), ((10130, 10174), 'torch.norm', 'torch.norm', (['yembed'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(yembed, p=2, dim=1, keepdim=True)\n', (10140, 10174), False, 'import torch\n'), ((10870, 10911), 'torch.zeros', 'torch.zeros', (['nc', '(1)', '(1)'], {'requires_grad': '(True)'}), '(nc, 1, 1, requires_grad=True)\n', (10881, 10911), False, 'import torch\n'), ((10950, 10991), 'torch.zeros', 'torch.zeros', (['nc', '(1)', '(1)'], {'requires_grad': '(True)'}), '(nc, 1, 1, requires_grad=True)\n', (10961, 10991), False, 'import torch\n'), ((17543, 17575), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)'}), '(x, scale_factor=2)\n', (17556, 17575), True, 'import 
torch.nn.functional as F\n'), ((27762, 27792), 'torch.mean', 'torch.mean', (['y', '(0)'], {'keepdim': '(True)'}), '(y, 0, keepdim=True)\n', (27772, 27792), False, 'import torch\n'), ((20213, 20228), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20226, 20228), False, 'import torch\n'), ((20578, 20597), 'torch.randn_like', 'torch.randn_like', (['z'], {}), '(z)\n', (20594, 20597), False, 'import torch\n'), ((21556, 21601), 'torch.lerp', 'torch.lerp', (['expanded_avg_w', 'w', 'truncation_psi'], {}), '(expanded_avg_w, w, truncation_psi)\n', (21566, 21601), False, 'import torch\n'), ((21954, 22002), 'torch.lerp', 'torch.lerp', (['expanded_avg_w', 'w[i]', 'truncation_psi'], {}), '(expanded_avg_w, w[i], truncation_psi)\n', (21964, 22002), False, 'import torch\n'), ((20487, 20515), 'torch.rand', 'torch.rand', (['()'], {'device': '"""cpu"""'}), "((), device='cpu')\n", (20497, 20515), False, 'import torch\n'), ((20631, 20699), 'torch.randint', 'torch.randint', ([], {'low': '(1)', 'high': 'forward_num_layers', 'size': '()', 'device': '"""cpu"""'}), "(low=1, high=forward_num_layers, size=(), device='cpu')\n", (20644, 20699), False, 'import torch\n'), ((26293, 26349), 're.match', 're.match', (['"""^G_mapping\\\\.Dense(\\\\d+)\\\\.(weight|bias)$"""', 'k'], {}), "('^G_mapping\\\\.Dense(\\\\d+)\\\\.(weight|bias)$', k)\n", (26301, 26349), False, 'import re\n')]
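The discriminator above fades in each newly added high-resolution block by blending its output with a downsampled shortcut path via torch.lerp. A minimal sketch of that blend, using made-up tensor shapes (not taken from the model above):

import torch

# Toy stand-ins (hypothetical shapes) for the two discriminator paths:
# x = features produced by the newly added high-resolution block,
# y = features from the downsample-then-from_rgb shortcut.
x = torch.randn(2, 64, 16, 16)
y = torch.randn(2, 64, 16, 16)

for alpha in (0.0, 0.5, 1.0):
    blended = torch.lerp(y, x, alpha)  # computes y + alpha * (x - y)
    # alpha == 0 keeps only the shortcut; alpha == 1 keeps only the new block.
    assert torch.allclose(blended, y + alpha * (x - y))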
"""Install packages as defined in this file into the Python environment.""" from setuptools import setup, find_namespace_packages import ps2mqtt with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setup( name="ps2mqtt", version=ps2mqtt.__version__, author="<NAME>", author_email="<EMAIL>", url="https://github.com/dgomes/ps2mqtt", description="Python daemon that gets information from psutil to an mqtt broker for integration with Home Assistant.", long_description=long_description, long_description_content_type="text/markdown", packages=["ps2mqtt"], install_requires=[ "setuptools==45.0", "paho-mqtt>=1.6.1", "python-slugify>=6.1.1", "psutil==5.9.0", "PyYAML==6.0", ], entry_points={ "console_scripts": [ "ps2mqtt=ps2mqtt.daemon:main", ] }, classifiers=[ "Programming Language :: Python :: 3.0", "Topic :: Utilities", "Environment :: No Input/Output (Daemon)", "Operating System :: POSIX", "Intended Audience :: System Administrators", ], )
[ "setuptools.setup" ]
[((233, 1015), 'setuptools.setup', 'setup', ([], {'name': '"""ps2mqtt"""', 'version': 'ps2mqtt.__version__', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/dgomes/ps2mqtt"""', 'description': '"""Python daemon that gets information from psutil to an mqtt broker for integration with Home Assistant."""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'packages': "['ps2mqtt']", 'install_requires': "['setuptools==45.0', 'paho-mqtt>=1.6.1', 'python-slugify>=6.1.1',\n 'psutil==5.9.0', 'PyYAML==6.0']", 'entry_points': "{'console_scripts': ['ps2mqtt=ps2mqtt.daemon:main']}", 'classifiers': "['Programming Language :: Python :: 3.0', 'Topic :: Utilities',\n 'Environment :: No Input/Output (Daemon)', 'Operating System :: POSIX',\n 'Intended Audience :: System Administrators']"}), "(name='ps2mqtt', version=ps2mqtt.__version__, author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/dgomes/ps2mqtt',\n description=\n 'Python daemon that gets information from psutil to an mqtt broker for integration with Home Assistant.'\n , long_description=long_description, long_description_content_type=\n 'text/markdown', packages=['ps2mqtt'], install_requires=[\n 'setuptools==45.0', 'paho-mqtt>=1.6.1', 'python-slugify>=6.1.1',\n 'psutil==5.9.0', 'PyYAML==6.0'], entry_points={'console_scripts': [\n 'ps2mqtt=ps2mqtt.daemon:main']}, classifiers=[\n 'Programming Language :: Python :: 3.0', 'Topic :: Utilities',\n 'Environment :: No Input/Output (Daemon)', 'Operating System :: POSIX',\n 'Intended Audience :: System Administrators'])\n", (238, 1015), False, 'from setuptools import setup, find_namespace_packages\n')]
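The console_scripts entry point in this setup.py makes pip generate a ps2mqtt executable that imports ps2mqtt.daemon and calls its main(). The real daemon module is not shown here; the skeleton below is only an assumed illustration of the contract the entry point relies on:

# ps2mqtt/daemon.py -- hypothetical skeleton, not the project's actual implementation
import argparse


def main():
    # The generated `ps2mqtt` console script resolves to this function.
    parser = argparse.ArgumentParser(description="psutil -> MQTT bridge")
    parser.add_argument("--config", default="ps2mqtt.yaml",
                        help="assumed config-file option, for illustration only")
    args = parser.parse_args()
    print("would publish system metrics using", args.config)


if __name__ == "__main__":
    main()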
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for supcon.classification_head.""" from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from supcon import classification_head class ClassificationHeadTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ('rank_1', 1), ('rank_4', 4), ('rank_8', 8), ) def testIncorrectRank(self, rank): inputs = tf.compat.v1.placeholder(tf.float32, shape=[10] * rank) with self.assertRaisesRegex(ValueError, 'is expected to have rank 2'): classifier = classification_head.ClassificationHead(num_classes=10) classifier(inputs) @parameterized.named_parameters( ('float32', tf.float32), ('float64', tf.float64), ('float16', tf.float16), ) def testConstructClassificationHead(self, dtype): batch_size = 3 num_classes = 10 input_shape = [batch_size, 4] expected_output_shape = [batch_size, num_classes] inputs = tf.random.uniform(input_shape, seed=1, dtype=dtype) classifier = classification_head.ClassificationHead(num_classes=num_classes) output = classifier(inputs) self.assertListEqual(expected_output_shape, output.shape.as_list()) self.assertEqual(inputs.dtype, output.dtype) def testGradient(self): inputs = tf.random.uniform((3, 4), dtype=tf.float64, seed=1) classifier = classification_head.ClassificationHead(num_classes=10) output = classifier(inputs) gradient = tf.gradients(output, inputs) self.assertIsNotNone(gradient) def testCreateVariables(self): inputs = tf.random.uniform((3, 4), dtype=tf.float64, seed=1) classifier = classification_head.ClassificationHead(num_classes=10) classifier(inputs) self.assertLen( [var for var in tf.trainable_variables() if 'kernel' in var.name], 1) self.assertLen( [var for var in tf.trainable_variables() if 'bias' in var.name], 1) def testInputOutput(self): batch_size = 3 num_classes = 10 expected_output_shape = (batch_size, num_classes) inputs = tf.random.uniform((batch_size, 4), dtype=tf.float64, seed=1) classifier = classification_head.ClassificationHead(num_classes=num_classes) output_tensor = classifier(inputs) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) outputs = sess.run(output_tensor) # Make sure that there are no NaNs self.assertFalse(np.isnan(outputs).any()) self.assertEqual(outputs.shape, expected_output_shape) if __name__ == '__main__': tf.test.main()
[ "supcon.classification_head.ClassificationHead", "tensorflow.compat.v1.random.uniform", "numpy.isnan", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.gradients", "absl.testing.parameterized.named_parameters", "tensorflow.compat.v1.compat.v1.global_variables_initializer", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.compat.v1.placeholder" ]
[((882, 957), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('rank_1', 1)", "('rank_4', 4)", "('rank_8', 8)"], {}), "(('rank_1', 1), ('rank_4', 4), ('rank_8', 8))\n", (912, 957), False, 'from absl.testing import parameterized\n'), ((1265, 1375), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('float32', tf.float32)", "('float64', tf.float64)", "('float16', tf.float16)"], {}), "(('float32', tf.float32), ('float64', tf.\n float64), ('float16', tf.float16))\n", (1295, 1375), False, 'from absl.testing import parameterized\n'), ((3175, 3189), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (3187, 3189), True, 'import tensorflow.compat.v1 as tf\n'), ((1031, 1086), 'tensorflow.compat.v1.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '([10] * rank)'}), '(tf.float32, shape=[10] * rank)\n', (1055, 1086), True, 'import tensorflow.compat.v1 as tf\n'), ((1587, 1638), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['input_shape'], {'seed': '(1)', 'dtype': 'dtype'}), '(input_shape, seed=1, dtype=dtype)\n', (1604, 1638), True, 'import tensorflow.compat.v1 as tf\n'), ((1656, 1719), 'supcon.classification_head.ClassificationHead', 'classification_head.ClassificationHead', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (1694, 1719), False, 'from supcon import classification_head\n'), ((1913, 1964), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['(3, 4)'], {'dtype': 'tf.float64', 'seed': '(1)'}), '((3, 4), dtype=tf.float64, seed=1)\n', (1930, 1964), True, 'import tensorflow.compat.v1 as tf\n'), ((1982, 2036), 'supcon.classification_head.ClassificationHead', 'classification_head.ClassificationHead', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (2020, 2036), False, 'from supcon import classification_head\n'), ((2084, 2112), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['output', 'inputs'], {}), '(output, inputs)\n', (2096, 2112), True, 'import tensorflow.compat.v1 as tf\n'), ((2195, 2246), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['(3, 4)'], {'dtype': 'tf.float64', 'seed': '(1)'}), '((3, 4), dtype=tf.float64, seed=1)\n', (2212, 2246), True, 'import tensorflow.compat.v1 as tf\n'), ((2264, 2318), 'supcon.classification_head.ClassificationHead', 'classification_head.ClassificationHead', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (2302, 2318), False, 'from supcon import classification_head\n'), ((2673, 2733), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['(batch_size, 4)'], {'dtype': 'tf.float64', 'seed': '(1)'}), '((batch_size, 4), dtype=tf.float64, seed=1)\n', (2690, 2733), True, 'import tensorflow.compat.v1 as tf\n'), ((2751, 2814), 'supcon.classification_head.ClassificationHead', 'classification_head.ClassificationHead', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (2789, 2814), False, 'from supcon import classification_head\n'), ((1181, 1235), 'supcon.classification_head.ClassificationHead', 'classification_head.ClassificationHead', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (1219, 1235), False, 'from supcon import classification_head\n'), ((2909, 2952), 'tensorflow.compat.v1.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2950, 2952), True, 'import tensorflow.compat.v1 as tf\n'), ((2386, 2410), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', 
(2408, 2410), True, 'import tensorflow.compat.v1 as tf\n'), ((2484, 2508), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2506, 2508), True, 'import tensorflow.compat.v1 as tf\n'), ((3058, 3075), 'numpy.isnan', 'np.isnan', (['outputs'], {}), '(outputs)\n', (3066, 3075), True, 'import numpy as np\n')]
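parameterized.named_parameters, used throughout the test above, turns each tuple into a separately named test case whose trailing elements become the test method's arguments. A self-contained illustration, independent of the TensorFlow-specific fixtures:

from absl.testing import absltest, parameterized


class SquareTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ('two', 2, 4),    # expands into a separate test case named after 'two'
      ('three', 3, 9),  # and another one for 'three'
  )
  def testSquare(self, x, expected):
    self.assertEqual(x * x, expected)


if __name__ == '__main__':
  absltest.main()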
from enum import IntFlag from enum import unique from misc.utils import escape_enum from misc.utils import pymysql_encode __all__ = ("ClientFlags",) @unique @pymysql_encode(escape_enum) class ClientFlags(IntFlag): # NOTE: many of these flags are quite outdated and/or # broken and are even known to false positive quite often. # they can be helpful; just take them with a grain of salt. """osu! anticheat <= 2016 (unsure of age)""" CLEAN = 0 # no flags sent # flags for timing errors or desync. SPEED_HACK_DETECTED = 1 << 1 # this is to be ignored by server implementations. osu! team trolling hard INCORRECT_MOD_VALUE = 1 << 2 MULTIPLE_OSU_CLIENTS = 1 << 3 CHECKSUM_FAILURE = 1 << 4 FLASHLIGHT_CHECKSUM_INCORRECT = 1 << 5 # these are only used on the osu!bancho official server. OSU_EXECUTABLE_CHECKSUM = 1 << 6 MISSING_PROCESSES_IN_LIST = 1 << 7 # also deprecated as of 2018 # flags for either: # 1. pixels that should be outside the visible radius # (and thus black) being brighter than they should be. # 2. from an internal alpha value being incorrect. FLASHLIGHT_IMAGE_HACK = 1 << 8 SPINNER_HACK = 1 << 9 TRANSPARENT_WINDOW = 1 << 10 # (mania) flags for consistently low press intervals. FAST_PRESS = 1 << 11 # from my experience, pretty decent # for detecting autobotted scores. RAW_MOUSE_DISCREPANCY = 1 << 12 RAW_KEYBOARD_DISCREPANCY = 1 << 13 """osu! anticheat 2019""" # XXX: the aqn flags were fixed within hours of the osu! # update, and vanilla hq is not so widely used anymore. RUN_WITH_LD_FLAG = 1 << 14 CONSOLE_OPEN = 1 << 15 EXTRA_THREADS = 1 << 16 HQ_ASSEMBLY = 1 << 17 HQ_FILE = 1 << 18 REGISTRY_EDITS = 1 << 19 SDL2_LIBRARY = 1 << 20 OPENSSL_LIBRARY = 1 << 21 AQN_MENU_SAMPLE = 1 << 22
[ "misc.utils.pymysql_encode" ]
[((162, 189), 'misc.utils.pymysql_encode', 'pymysql_encode', (['escape_enum'], {}), '(escape_enum)\n', (176, 189), False, 'from misc.utils import pymysql_encode\n')]
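Because ClientFlags is an IntFlag, the individual detections above can be OR-ed into one integer and tested for membership, which is how a server would typically consume the value a client reports. A small usage sketch (the particular combination is made up):

flags = ClientFlags.CONSOLE_OPEN | ClientFlags.EXTRA_THREADS

assert ClientFlags.CONSOLE_OPEN in flags
assert ClientFlags.SPEED_HACK_DETECTED not in flags
assert int(flags) == (1 << 15) | (1 << 16)

# Round-trip from the raw integer a client would send alongside a score:
assert ClientFlags((1 << 15) | (1 << 16)) == flags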
from django.http import HttpRequest
from rest_framework.views import APIView
from rest_framework.response import Response
from genie.services import NotebookJobServices, Connections, NotebookTemplateService
from rest_framework.decorators import api_view


class NotebookOperationsView(APIView):
    """
    Class to clone and delete notebooks on the zeppelin server
    """

    def post(self, request, notebookId):
        res = NotebookJobServices.cloneNotebook(notebookId, request.data)
        return Response(res.json())

    def delete(self, request, notebookId):
        res = NotebookJobServices.deleteNotebook(notebookId)
        return Response(res.json())


class NotebookActionsView(APIView):
    """
    Class to run, stop and clear notebook jobs on the zeppelin server
    """

    def post(self, request, notebookId):
        res = NotebookJobServices.runNotebookJob(notebookId)
        return Response(res.json())

    def delete(self, request, notebookId):
        res = NotebookJobServices.stopNotebookJob(notebookId)
        return Response(res.json())

    def put(self, request, notebookId):
        res = NotebookJobServices.clearNotebookResults(notebookId)
        return Response(res.json())


class NotebooksLight(APIView):
    """
    Get concise notebook data
    """

    def get(self, request):
        res = NotebookJobServices.getNotebooksLight()
        return Response(res.json())


class NotebookView(APIView):
    """
    Class to get and add notebooks on the zeppelin server
    """

    def get(self, request, offset: int):
        res = NotebookJobServices.getNotebooks(offset)
        return Response(res.json())

    def post(self, request):
        res = NotebookJobServices.addNotebook(request.data)
        return Response(res.json())


class NotebookJobView(APIView):
    """
    Class to get, add and update NotebookJob details
    The put and post methods only require request body and not path parameters
    The get method requires the notebookJobId as the path parameter
    """

    def get(self, request, notebookId=None):
        offset = int(request.GET.get("offset", 0))
        res = NotebookJobServices.getNotebookJobDetails(notebookId=notebookId, runStatusOffset=offset)
        return Response(res.json())

    def post(self, request):
        notebookId = request.data["notebookId"]
        scheduleId = request.data["scheduleId"]
        res = NotebookJobServices.addNotebookJob(notebookId=notebookId, scheduleId=scheduleId)
        return Response(res.json())

    def put(self, request):
        notebookId = request.data["notebookId"]
        if "scheduleId" in request.data:
            scheduleId = request.data["scheduleId"]
            res = NotebookJobServices.updateNotebookJob(notebookId=notebookId, scheduleId=scheduleId)
        elif "enabled" in request.data:
            enabled = request.data["enabled"]
            res = NotebookJobServices.toggleNotebookJob(notebookId=notebookId, enabled=enabled)
        return Response(res.json())

    def delete(self, request, notebookId=None):
        res = NotebookJobServices.deleteNotebookJob(notebookId=notebookId)
        return Response(res.json())


class ScheduleView(APIView):
    """
    Class to get and add available crontab schedules
    """

    def get(self, request):
        res = NotebookJobServices.getSchedules()
        return Response(res.json())

    def post(self, request):
        name = request.data["name"]
        cron = request.data["crontab"]
        timezone = request.data["timezone"]
        res = NotebookJobServices.addSchedule(cron=cron, timezone=timezone, name=name)
        return Response(res.json())

    def put(self, request):
        id = request.data["id"]
        name = request.data["name"]
        cron = request.data["crontab"]
        timezone = request.data["timezone"]
        res = NotebookJobServices.updateSchedule(id=id, cron=cron, timezone=timezone, name=name)
        return Response(res.json())


@api_view(["GET", "PUT", "DELETE"])
def schedule(request: HttpRequest, scheduleId: int) -> Response:
    """
    Method for crud operations on a single schedule
    :param request: HttpRequest
    :param scheduleId: Schedule Id
    """
    if request.method == "GET":
        res = NotebookJobServices.getSingleSchedule(scheduleId)
        return Response(res.json())
    if request.method == "DELETE":
        res = NotebookJobServices.deleteSchedule(scheduleId)
        return Response(res.json())


class TimzoneView(APIView):
    """
    Class to get standard pytz timezones
    """

    def get(self, request):
        res = NotebookJobServices.getTimezones()
        return Response(res.json())


# TODO
# Change connection views to class
@api_view(["GET", "POST"])
def connections(request: HttpRequest) -> Response:
    """
    Method to get or add connection
    :param request: HttpRequest
    """
    if request.method == "GET":
        res = Connections.getConnections()
        return Response(res.json())
    elif request.method == "POST":
        res = Connections.addConnection(request.data)
        return Response(res.json())


@api_view(["GET", "PUT", "DELETE"])
def connection(request: HttpRequest, connection_id: int) -> Response:
    """
    Method for crud operations on a single connection
    :param request: HttpRequest
    :param connection_id: Connection Id
    """
    if request.method == "GET":
        res = Connections.getConnection(connection_id)
        return Response(res.json())
    elif request.method == "DELETE":
        res = Connections.removeConnection(connection_id)
        return Response(res.json())
    elif request.method == "PUT":
        res = Connections.updateConnection(connection_id, request.data)
        return Response(res.json())


@api_view(["GET", "POST"])
def connectionTypes(request: HttpRequest) -> Response:
    """
    Method to get all connection types
    :param request: HttpRequest
    """
    if request.method == "GET":
        res = Connections.getConnectionTypes()
        return Response(res.json())


@api_view(["POST"])
def datasetDetails(request: HttpRequest) -> Response:
    """
    Method to get dataset details from s3 location
    :param request: HttpRequest
    """
    datasetLocation = request.data.get("datasetLocation")
    datasourceName = request.data.get("datasourceName")
    res = NotebookTemplateService.getDatasetDetails(datasetLocation, datasourceName)
    return Response(res.json())


class NotebookTemplateView(APIView):
    def get(self, request):
        res = NotebookTemplateService.getNotebookTemplates()
        return Response(res.json())
[ "genie.services.NotebookJobServices.getNotebookJobDetails", "genie.services.NotebookJobServices.updateSchedule", "genie.services.NotebookJobServices.deleteSchedule", "genie.services.NotebookJobServices.cloneNotebook", "genie.services.NotebookJobServices.stopNotebookJob", "genie.services.NotebookJobServices.deleteNotebook", "genie.services.NotebookTemplateService.getDatasetDetails", "genie.services.NotebookJobServices.getTimezones", "genie.services.NotebookJobServices.addSchedule", "genie.services.NotebookJobServices.getNotebooksLight", "genie.services.Connections.getConnectionTypes", "genie.services.NotebookJobServices.addNotebook", "rest_framework.decorators.api_view", "genie.services.NotebookJobServices.deleteNotebookJob", "genie.services.NotebookTemplateService.getNotebookTemplates", "genie.services.NotebookJobServices.getSingleSchedule", "genie.services.Connections.getConnection", "genie.services.Connections.removeConnection", "genie.services.Connections.addConnection", "genie.services.NotebookJobServices.toggleNotebookJob", "genie.services.NotebookJobServices.addNotebookJob", "genie.services.Connections.getConnections", "genie.services.Connections.updateConnection", "genie.services.NotebookJobServices.updateNotebookJob", "genie.services.NotebookJobServices.getSchedules", "genie.services.NotebookJobServices.getNotebooks", "genie.services.NotebookJobServices.runNotebookJob", "genie.services.NotebookJobServices.clearNotebookResults" ]
[((3938, 3972), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'PUT', 'DELETE']"], {}), "(['GET', 'PUT', 'DELETE'])\n", (3946, 3972), False, 'from rest_framework.decorators import api_view\n'), ((4688, 4713), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (4696, 4713), False, 'from rest_framework.decorators import api_view\n'), ((5088, 5122), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'PUT', 'DELETE']"], {}), "(['GET', 'PUT', 'DELETE'])\n", (5096, 5122), False, 'from rest_framework.decorators import api_view\n'), ((5734, 5759), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (5742, 5759), False, 'from rest_framework.decorators import api_view\n'), ((6020, 6038), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (6028, 6038), False, 'from rest_framework.decorators import api_view\n'), ((6316, 6390), 'genie.services.NotebookTemplateService.getDatasetDetails', 'NotebookTemplateService.getDatasetDetails', (['datasetLocation', 'datasourceName'], {}), '(datasetLocation, datasourceName)\n', (6357, 6390), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((413, 472), 'genie.services.NotebookJobServices.cloneNotebook', 'NotebookJobServices.cloneNotebook', (['notebookId', 'request.data'], {}), '(notebookId, request.data)\n', (446, 472), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((567, 613), 'genie.services.NotebookJobServices.deleteNotebook', 'NotebookJobServices.deleteNotebook', (['notebookId'], {}), '(notebookId)\n', (601, 613), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((807, 853), 'genie.services.NotebookJobServices.runNotebookJob', 'NotebookJobServices.runNotebookJob', (['notebookId'], {}), '(notebookId)\n', (841, 853), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((948, 995), 'genie.services.NotebookJobServices.stopNotebookJob', 'NotebookJobServices.stopNotebookJob', (['notebookId'], {}), '(notebookId)\n', (983, 995), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((1087, 1139), 'genie.services.NotebookJobServices.clearNotebookResults', 'NotebookJobServices.clearNotebookResults', (['notebookId'], {}), '(notebookId)\n', (1127, 1139), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((1305, 1344), 'genie.services.NotebookJobServices.getNotebooksLight', 'NotebookJobServices.getNotebooksLight', ([], {}), '()\n', (1342, 1344), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((1531, 1571), 'genie.services.NotebookJobServices.getNotebooks', 'NotebookJobServices.getNotebooks', (['offset'], {}), '(offset)\n', (1563, 1571), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((1652, 1697), 'genie.services.NotebookJobServices.addNotebook', 'NotebookJobServices.addNotebook', (['request.data'], {}), '(request.data)\n', (1683, 1697), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((2096, 2188), 'genie.services.NotebookJobServices.getNotebookJobDetails', 'NotebookJobServices.getNotebookJobDetails', ([], {'notebookId': 'notebookId', 'runStatusOffset': 'offset'}), 
'(notebookId=notebookId,\n runStatusOffset=offset)\n', (2137, 2188), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((2365, 2450), 'genie.services.NotebookJobServices.addNotebookJob', 'NotebookJobServices.addNotebookJob', ([], {'notebookId': 'notebookId', 'scheduleId': 'scheduleId'}), '(notebookId=notebookId, scheduleId=scheduleId\n )\n', (2399, 2450), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((3039, 3099), 'genie.services.NotebookJobServices.deleteNotebookJob', 'NotebookJobServices.deleteNotebookJob', ([], {'notebookId': 'notebookId'}), '(notebookId=notebookId)\n', (3076, 3099), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((3277, 3311), 'genie.services.NotebookJobServices.getSchedules', 'NotebookJobServices.getSchedules', ([], {}), '()\n', (3309, 3311), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((3511, 3583), 'genie.services.NotebookJobServices.addSchedule', 'NotebookJobServices.addSchedule', ([], {'cron': 'cron', 'timezone': 'timezone', 'name': 'name'}), '(cron=cron, timezone=timezone, name=name)\n', (3542, 3583), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((3817, 3903), 'genie.services.NotebookJobServices.updateSchedule', 'NotebookJobServices.updateSchedule', ([], {'id': 'id', 'cron': 'cron', 'timezone': 'timezone', 'name': 'name'}), '(id=id, cron=cron, timezone=timezone,\n name=name)\n', (3851, 3903), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((4226, 4275), 'genie.services.NotebookJobServices.getSingleSchedule', 'NotebookJobServices.getSingleSchedule', (['scheduleId'], {}), '(scheduleId)\n', (4263, 4275), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((4361, 4407), 'genie.services.NotebookJobServices.deleteSchedule', 'NotebookJobServices.deleteSchedule', (['scheduleId'], {}), '(scheduleId)\n', (4395, 4407), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((4571, 4605), 'genie.services.NotebookJobServices.getTimezones', 'NotebookJobServices.getTimezones', ([], {}), '()\n', (4603, 4605), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((4895, 4923), 'genie.services.Connections.getConnections', 'Connections.getConnections', ([], {}), '()\n', (4921, 4923), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((5381, 5421), 'genie.services.Connections.getConnection', 'Connections.getConnection', (['connection_id'], {}), '(connection_id)\n', (5406, 5421), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((5948, 5980), 'genie.services.Connections.getConnectionTypes', 'Connections.getConnectionTypes', ([], {}), '()\n', (5978, 5980), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((6505, 6551), 'genie.services.NotebookTemplateService.getNotebookTemplates', 'NotebookTemplateService.getNotebookTemplates', ([], {}), '()\n', (6549, 6551), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((2674, 2762), 'genie.services.NotebookJobServices.updateNotebookJob', 'NotebookJobServices.updateNotebookJob', ([], {'notebookId': 'notebookId', 
'scheduleId': 'scheduleId'}), '(notebookId=notebookId, scheduleId=\n scheduleId)\n', (2711, 2762), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((5009, 5048), 'genie.services.Connections.addConnection', 'Connections.addConnection', (['request.data'], {}), '(request.data)\n', (5034, 5048), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((5509, 5552), 'genie.services.Connections.removeConnection', 'Connections.removeConnection', (['connection_id'], {}), '(connection_id)\n', (5537, 5552), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((2862, 2939), 'genie.services.NotebookJobServices.toggleNotebookJob', 'NotebookJobServices.toggleNotebookJob', ([], {'notebookId': 'notebookId', 'enabled': 'enabled'}), '(notebookId=notebookId, enabled=enabled)\n', (2899, 2939), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n'), ((5637, 5694), 'genie.services.Connections.updateConnection', 'Connections.updateConnection', (['connection_id', 'request.data'], {}), '(connection_id, request.data)\n', (5665, 5694), False, 'from genie.services import NotebookJobServices, Connections, NotebookTemplateService\n')]
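These views only become reachable once mapped to routes; the project's urls.py is not part of this file, so the wiring below is an assumed sketch (route paths are hypothetical):

# urls.py -- hypothetical routing for the views above
from django.urls import path

urlpatterns = [
    path("notebooks/<int:offset>", NotebookView.as_view()),
    path("notebook/<int:notebookId>", NotebookOperationsView.as_view()),
    path("schedules", ScheduleView.as_view()),
    path("schedule/<int:scheduleId>", schedule),      # function-based @api_view
    path("connections", connections),
    path("connection/<int:connection_id>", connection),
]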
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>> # # SPDX-License-Identifier: MIT import cdstoolbox as ct def get_daily_mean_for_year_and_month(year, month): print('get temperature', year, month) temperature = ct.catalogue.retrieve( 'reanalysis-era5-single-levels', { 'product_type': 'reanalysis', 'variable': '2m_temperature', 'year': str(year), 'month': str(month), 'day': [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', ], 'time': [ '00:00', '01:00', '02:00', '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00', '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00', '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00', ], } ) print('got') daily_mean = ct.climate.daily_mean(temperature) print('daily_mean', year, month) return daily_mean def get_gdd_for_year(year): by_month = [ get_daily_mean_for_year_and_month(year, month) for month in range(1, 12+1) ] daily_mean = ct.cube.concat(by_month, dim='time') print('daily_mean') gdd_min_temperature_celsius = 10 gdd = ct.cdstools.heuristics.growing_degree_days( daily_mean, gdd_min_temperature_celsius ) print('gdd') return gdd @ct.application() @ct.output.download() def gdd_app(): by_year = [get_gdd_for_year(year) for year in range(2003, 2020+1)] combined = ct.cube.concat(by_year, dim='time') print('combined', ct.cdm.get_coordinates(combined)) return combined
[ "cdstoolbox.output.download", "cdstoolbox.cdm.get_coordinates", "cdstoolbox.application", "cdstoolbox.cdstools.heuristics.growing_degree_days", "cdstoolbox.cube.concat", "cdstoolbox.climate.daily_mean" ]
[((1776, 1792), 'cdstoolbox.application', 'ct.application', ([], {}), '()\n', (1790, 1792), True, 'import cdstoolbox as ct\n'), ((1794, 1814), 'cdstoolbox.output.download', 'ct.output.download', ([], {}), '()\n', (1812, 1814), True, 'import cdstoolbox as ct\n'), ((1270, 1304), 'cdstoolbox.climate.daily_mean', 'ct.climate.daily_mean', (['temperature'], {}), '(temperature)\n', (1291, 1304), True, 'import cdstoolbox as ct\n'), ((1528, 1564), 'cdstoolbox.cube.concat', 'ct.cube.concat', (['by_month'], {'dim': '"""time"""'}), "(by_month, dim='time')\n", (1542, 1564), True, 'import cdstoolbox as ct\n'), ((1636, 1723), 'cdstoolbox.cdstools.heuristics.growing_degree_days', 'ct.cdstools.heuristics.growing_degree_days', (['daily_mean', 'gdd_min_temperature_celsius'], {}), '(daily_mean,\n gdd_min_temperature_celsius)\n', (1678, 1723), True, 'import cdstoolbox as ct\n'), ((1916, 1951), 'cdstoolbox.cube.concat', 'ct.cube.concat', (['by_year'], {'dim': '"""time"""'}), "(by_year, dim='time')\n", (1930, 1951), True, 'import cdstoolbox as ct\n'), ((1974, 2006), 'cdstoolbox.cdm.get_coordinates', 'ct.cdm.get_coordinates', (['combined'], {}), '(combined)\n', (1996, 2006), True, 'import cdstoolbox as ct\n')]
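growing_degree_days is a cdstoolbox heuristic whose internals are not shown here; the standard agronomic definition it presumably implements accumulates daily mean temperature above a base threshold: GDD = sum over days of max(0, T_mean - T_base). A plain-Python sketch of that formula, assuming temperatures in degrees Celsius:

def growing_degree_days_sketch(daily_means_celsius, base_celsius=10.0):
    # Only the portion of each daily mean above the base temperature counts.
    return sum(max(0.0, t - base_celsius) for t in daily_means_celsius)


# Three example days: below, at, and above the 10 degC threshold used above.
assert growing_degree_days_sketch([8.0, 10.0, 14.5]) == 4.5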
# coding: utf-8

import os
import sys
import re

import pycparser.c_generator


def parse_constant(node):
    if isinstance(node, pycparser.c_ast.Constant):
        return node.value
    elif isinstance(node, pycparser.c_ast.UnaryOp) and node.op == '-':
        return '-' + parse_constant(node.expr)
    else:
        raise TypeError(node)


class PrintEnumsVisitor(pycparser.c_ast.NodeVisitor):
    def visit_Enum(self, node):
        value = 0
        for enumerator in node.values.enumerators:
            if enumerator.value is not None:
                value_string = parse_constant(enumerator.value)
                value = int(value_string, 0)
            else:
                value_string = str(value)
            assert enumerator.name.startswith('CAIRO_')
            # len('CAIRO_') == 6
            print('%s = %s' % (enumerator.name[6:], value_string))
            value += 1
        print('')


def read_cairo_header(cairo_git_dir, suffix):
    filename = os.path.join(cairo_git_dir, 'src', 'cairo%s.h' % suffix)
    source = open(filename).read()
    # Strip C comments, CAIRO_BEGIN/END_DECLS, 'cairo_public' and preprocessor lines.
    source = re.sub(
        r'/\*.*?\*/'
        '|CAIRO_(BEGIN|END)_DECLS'
        '|cairo_public '
        r'|^\s*#.*?[^\\]\n',
        '', source, flags=re.DOTALL | re.MULTILINE)
    source = re.sub('\n{3,}', '\n\n', source)
    return source


def generate(cairo_git_dir):
    # Remove comments, preprocessor instructions and macros.
    source = read_cairo_header(cairo_git_dir, '')
    source += read_cairo_header(cairo_git_dir, '-pdf')
    source += read_cairo_header(cairo_git_dir, '-ps')
    source += read_cairo_header(cairo_git_dir, '-svg')
    source += '''
typedef void* HDC;
typedef void* HFONT;
typedef void LOGFONTW;
'''
    source += read_cairo_header(cairo_git_dir, '-win32')
    source += '''
typedef void* CGContextRef;
typedef void* CGFontRef;
typedef void* ATSUFontID;
'''
    source += read_cairo_header(cairo_git_dir, '-quartz')
    ast = pycparser.CParser().parse(source)
    print('# *** Do not edit this file ***')
    print('# Generated by utils/mkconstants.py\n')
    PrintEnumsVisitor().visit(ast)
    print('_CAIRO_HEADERS = r"""%s"""' % source)

    source = read_cairo_header(cairo_git_dir, '-xcb')
    print('_CAIRO_XCB_HEADERS = r"""%s"""\n' % source)


if __name__ == '__main__':
    if len(sys.argv) >= 2:
        generate(sys.argv[1])
    else:
        print('Usage: %s path/to/cairo_source.git' % sys.argv[0])
[ "os.path.join", "re.sub" ]
[((991, 1047), 'os.path.join', 'os.path.join', (['cairo_git_dir', '"""src"""', "('cairo%s.h' % suffix)"], {}), "(cairo_git_dir, 'src', 'cairo%s.h' % suffix)\n", (1003, 1047), False, 'import os\n'), ((1098, 1225), 're.sub', 're.sub', (['"""/\\\\*.*?\\\\*/|CAIRO_(BEGIN|END)_DECLS|cairo_public |^\\\\s*#.*?[^\\\\\\\\]\\\\n"""', '""""""', 'source'], {'flags': '(re.DOTALL | re.MULTILINE)'}), "('/\\\\*.*?\\\\*/|CAIRO_(BEGIN|END)_DECLS|cairo_public |^\\\\s*#.*?[^\\\\\\\\]\\\\n',\n '', source, flags=re.DOTALL | re.MULTILINE)\n", (1104, 1225), False, 'import re\n'), ((1304, 1336), 're.sub', 're.sub', (['"""\n{3,}"""', '"""\n\n"""', 'source'], {}), "('\\n{3,}', '\\n\\n', source)\n", (1310, 1336), False, 'import re\n')]
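To see what PrintEnumsVisitor emits, it can be exercised on a tiny made-up enum instead of the real cairo headers:

import pycparser

demo_source = """
typedef enum _demo_status {
    CAIRO_STATUS_SUCCESS = 0,
    CAIRO_STATUS_NO_MEMORY
} demo_status_t;
"""

demo_ast = pycparser.CParser().parse(demo_source)
PrintEnumsVisitor().visit(demo_ast)
# Prints, with the CAIRO_ prefix stripped:
#   STATUS_SUCCESS = 0
#   STATUS_NO_MEMORY = 1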
import pytest from bottery.message import Message from bottery.telegram import reply from bottery.telegram.engine import TelegramChat, TelegramEngine, TelegramUser @pytest.fixture def engine(): return TelegramEngine @pytest.fixture def user(): return TelegramUser @pytest.fixture def chat(): return TelegramChat @pytest.fixture() def message(): return Message( id=1, platform='telegram', text='', user=user, chat=chat, timestamp='', raw='', ) @pytest.fixture def message_data(): return { 'message': { 'chat': { 'first_name': 'John', 'id': 12345678, 'last_name': 'Snow', 'type': 'private', 'username': 'johnsnow' }, 'date': 1516787847, 'from': { 'first_name': 'John', 'id': 12345678, 'is_bot': False, 'language_code': 'en-US', 'last_name': 'Snow', 'username': 'johnsnow' }, 'message_id': 2, 'text': 'Hi bot, how are you?' }, 'update_id': 987456321 } @pytest.fixture def edited_message_data(message_data): return {'edited_message': message_data['message']} @pytest.mark.parametrize('chat_type,id_expected', [ ('group', 456), ('private', 123), ]) def test_platform_telegram_engine_get_chat_id(chat_type, id_expected, engine, message): setattr(message.chat, 'id', id_expected) setattr(message.chat, 'type', chat_type) setattr(message.user, 'id', id_expected) assert engine.get_chat_id(engine, message) == id_expected @pytest.mark.parametrize('message_input,message_key,message_edited', [ (pytest.lazy_fixture('message_data'), 'message', False), (pytest.lazy_fixture('edited_message_data'), 'edited_message', True) ]) def test_build_message(engine, message_input, message_key, message_edited): message = engine.build_message(engine, message_input) assert message.id == message_input[message_key]['message_id'] assert message.text == message_input[message_key]['text'] assert message.timestamp == message_input[message_key]['date'] assert message.raw == message_input assert message.edited == message_edited def test_build_message_without_text(message_data, engine): ''' Telegram can send a message without text. For example, when a bot is added to a group. ''' message_data_without_text = message_data del message_data_without_text['message']['text'] message = engine.build_message(engine, message_data_without_text) assert message.id == message_data_without_text['message']['message_id'] assert message.text is not None assert message.text == '' assert message.timestamp == message_data_without_text['message']['date'] assert message.raw == message_data def test_reply_decorator(message): @reply() def view(message): return '' view(message) assert message._request_payload['reply_to_message_id'] == message.id def test_reply_decorator_to_previous_message(message): @reply(to=lambda message: message.id - 2) def view(message): return '' view(message) assert message._request_payload['reply_to_message_id'] == message.id - 2
[ "bottery.message.Message", "bottery.telegram.reply", "pytest.lazy_fixture", "pytest.fixture", "pytest.mark.parametrize" ]
[((334, 350), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (348, 350), False, 'import pytest\n'), ((1343, 1432), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chat_type,id_expected"""', "[('group', 456), ('private', 123)]"], {}), "('chat_type,id_expected', [('group', 456), (\n 'private', 123)])\n", (1366, 1432), False, 'import pytest\n'), ((377, 469), 'bottery.message.Message', 'Message', ([], {'id': '(1)', 'platform': '"""telegram"""', 'text': '""""""', 'user': 'user', 'chat': 'chat', 'timestamp': '""""""', 'raw': '""""""'}), "(id=1, platform='telegram', text='', user=user, chat=chat, timestamp\n ='', raw='')\n", (384, 469), False, 'from bottery.message import Message\n'), ((3036, 3043), 'bottery.telegram.reply', 'reply', ([], {}), '()\n', (3041, 3043), False, 'from bottery.telegram import reply\n'), ((3239, 3279), 'bottery.telegram.reply', 'reply', ([], {'to': '(lambda message: message.id - 2)'}), '(to=lambda message: message.id - 2)\n', (3244, 3279), False, 'from bottery.telegram import reply\n'), ((1848, 1883), 'pytest.lazy_fixture', 'pytest.lazy_fixture', (['"""message_data"""'], {}), "('message_data')\n", (1867, 1883), False, 'import pytest\n'), ((1909, 1951), 'pytest.lazy_fixture', 'pytest.lazy_fixture', (['"""edited_message_data"""'], {}), "('edited_message_data')\n", (1928, 1951), False, 'import pytest\n')]
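bottery's reply decorator itself is not included in this file; judging purely from what the tests assert, a minimal reduction of the behaviour they exercise could look like the sketch below (hypothetical, not the library's actual source):

def reply_sketch(to=lambda message: message.id):
    # Record which message id the outgoing response should reply to.
    def decorator(view):
        def wrapper(message):
            response = view(message)
            message._request_payload['reply_to_message_id'] = to(message)
            return response
        return wrapper
    return decorator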
""" This is a temporary module, used during (and for a while after) the transition to Python 3. This code is planned to be kept in place until the least version of Python supported no longer requires it (and of course until all callers no longer need it). This code should run as-is in 2.x and also run unedited after 2to3 in 3.x. $Id$ """ from __future__ import division # confidence high import os, sys PY3K = sys.version_info[0] > 2 def ndarr2str(arr, encoding='ascii'): """ This is used to ensure that the return value of arr.tostring() is actually a string. This will prevent lots of if-checks in calling code. As of numpy v1.6.1 (in Python 3.2.3), the tostring() function still returns type 'bytes', not 'str' as it advertises. """ # be fast, don't check - just assume 'arr' is a numpy array - the tostring # call will fail anyway if not retval = arr.tostring() # would rather check "if isinstance(retval, bytes)", but support 2.5. # could rm the if PY3K check, but it makes this faster on 2.x. if PY3K and not isinstance(retval, str): return retval.decode(encoding) else: # is str return retval def ndarr2bytes(arr, encoding='ascii'): """ This is used to ensure that the return value of arr.tostring() is actually a *bytes* array in PY3K. See notes in ndarr2str above. Even though we consider it a bug that numpy's tostring() function returns a bytes array in PY3K, there are actually many instances where that is what we want - bytes, not unicode. So we use this function in those instances to ensure that when/if this numpy "bug" is "fixed", that our calling code still gets bytes where it needs/expects them. """ # be fast, don't check - just assume 'arr' is a numpy array - the tostring # call will fail anyway if not retval = arr.tostring() # would rather check "if not isinstance(retval, bytes)", but support 2.5. if PY3K and isinstance(retval, str): # Take note if this ever gets used. If this ever occurs, it # is likely wildly inefficient since numpy.tostring() is now # returning unicode and numpy surely has a tobytes() func by now. # If so, add a code path to call its tobytes() func at our start. return retval.encode(encoding) else: # is str==bytes in 2.x return retval def tobytes(s, encoding='ascii'): """ Convert string s to the 'bytes' type, in all Pythons, even back before Python 2.6. What 'str' means varies by PY3K or not. In Pythons before 3.0, this is technically the same as the str type in terms of the character data in memory. """ # NOTE: after we abandon 2.5, we might simply instead use "bytes(s)" # NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b' if PY3K: if isinstance(s, bytes): return s else: return s.encode(encoding) else: # for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes # but handle if unicode is passed if isinstance(s, unicode): return s.encode(encoding) else: return s def tostr(s, encoding='ascii'): """ Convert string-like-thing s to the 'str' type, in all Pythons, even back before Python 2.6. What 'str' means varies by PY3K or not. In Pythons before 3.0, str and bytes are the same type. In Python 3+, this may require a decoding step. 
""" if PY3K: if isinstance(s, str): # str == unicode in PY3K return s else: # s is type bytes return s.decode(encoding) else: # for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes # but handle if unicode is passed if isinstance(s, unicode): return s.encode(encoding) else: return s try: BNULLSTR = tobytes('') # after dropping 2.5, change to: b'' BNEWLINE = tobytes('\n') # after dropping 2.5, change to: b'\n' except: BNULLSTR = '' BNEWLINE = '\n' def bytes_read(fd, sz): """ Perform an os.read in a way that can handle both Python2 and Python3 IO. Assume we are always piping only ASCII characters (since that is all we have ever done with IRAF). Either way, return the data as bytes. """ # return tobytes(os.read(fd, sz)) return os.read(fd, sz) # already returns str in Py2.x and bytes in PY3K def bytes_write(fd, bufstr): """ Perform an os.write in a way that can handle both Python2 and Python3 IO. Assume we are always piping only ASCII characters (since that is all we have ever done with IRAF). Either way, write the binary data to fd. """ return os.write(fd, tobytes(bufstr))
[ "os.read" ]
[((4349, 4364), 'os.read', 'os.read', (['fd', 'sz'], {}), '(fd, sz)\n', (4356, 4364), False, 'import os, sys\n')]
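The contract of the helpers above is easiest to check as a round trip: under Python 3, tobytes always yields bytes and tostr always yields str, whichever type comes in. A quick sanity sketch:

assert tobytes('abc') == b'abc'    # str -> bytes
assert tobytes(b'abc') == b'abc'   # bytes pass through unchanged
assert tostr(b'abc') == 'abc'      # bytes -> str
assert tostr('abc') == 'abc'       # str passes through unchanged

# Non-ASCII survives a round trip when an explicit encoding is given.
assert tostr(tobytes('\u00e9', 'utf-8'), 'utf-8') == '\u00e9'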
#!/usr/bin/env python """ Copyright (c) 2019 CIIRC, CTU in Prague All rights reserved. This source code is licensed under the BSD-3-Clause license found in the LICENSE file in the root directory of this source tree. @author: <NAME> """ import logging from typing import Any, List from nltk import ParentedTree from nlp_crow.modules.CrowModule import CrowModule import nltk from nlp_crow.structures.tagging.MorphCategory import POS from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken from nlp_crow.structures.tagging.Tag import Tag #from scripts.test_grammar import get_parse_tree class NLTK: def tag(self, text : str) -> TaggedText: """ Tags a text. Parameters ---------- text an input text as string Returns ------- a tagged text object """ tagged_text = TaggedText() tokens = nltk.word_tokenize(text) for pair in nltk.pos_tag(tokens): tag = Tag() tag.pos = POS(value=pair[1]) tagged_text.add_tagged_token(token=pair[0], tag=tag) return tagged_text class GrammarParser(CrowModule): """ Tags and parses the text """ def __init__(self): self.logger = logging.getLogger(__name__) self.nltk_tagger = NLTK() def parse(self, sentence : str) -> ParsedText: """ Currently used for dummy text parsing. After the text is tagged, it is split on "and" and "." tokens into sentences. Each sentence has its tokens hanged under an "S" node. TODO: swap with the parse() method which relies on a grammar Parameters ---------- sentence an input sentence as a string Returns ------- parsed text """ # use NLTK for tagging tagged_text = self.nltk_tagger.tag(sentence) # create a new object for parsed text parsed_text = ParsedText() # save the original text parsed_text.orig_text = sentence # create the root of the tree root = ParseTreeNode(label="T") parsed_text.parse_tree = root # create a parent node for the first sentence sentence_node = ParseTreeNode(label="S") # sequentially process the tagged tokens for tagged_token in tagged_text.get_tokens_with_tags(): if tagged_token.token in ["and", "."]: # in case there is a previous sentence if sentence_node.subnodes: # append the previous sentence node under the root node root.subnodes.append(sentence_node) # and start a new sentence sentence_node = ParseTreeNode(label="S") # append the separating token under the root node root.subnodes.append(tagged_token) else: # append the token to the current sentence sentence_node.subnodes.append(tagged_token) if sentence_node.subnodes: # finalize the last sentence root.subnodes.append(sentence_node) self.logger.debug(f"Parsed text: {parsed_text}") return parsed_text # TODO this method should be used in the future, relies on a grammar # def parse(self, sentence : str) -> ParsedText: # tree = get_parse_tree(sentence) # # tokens = nltk.word_tokenize(sentence) # # root, _ = self.transform_recursive(tree, tokens) # # parsed_text = ParsedText() # parsed_text.orig_text = sentence # parsed_text.parse_tree = root # # self.logger.debug(f"Parsed text: {parsed_text}") # # return parsed_text def transform_recursive(self, node : Any, tokens : List): """ Recursively transforms the tree from the format of the grammar parser to the format used in the NL processing. 
Parameters ---------- node a node to be processed - can be either a ParentedTree object or a string (for the first call this should be the tree root) tokens a list of tokens (not provided in the tree from the grammar parser) Returns ------- the recursively transformed node, the list of remaining tokens """ if type(node) == ParentedTree: return self.transform_node(node, tokens) elif type(node) == str: return self.transform_tag(node, tokens[0]), tokens[1:] def transform_node(self, node, tokens): """ Transforms a node by recursively calling transform_recursive() on its subnodes. """ label = node._label parse_tree_node = ParseTreeNode(label=label) for subnode in node: parse_tree_subnode, tokens = self.transform_recursive(subnode, tokens) parse_tree_node.subnodes.append(parse_tree_subnode) return parse_tree_node, tokens def transform_tag(self, node, token): """ Transforms a single token and its tag (in the string form) into a tagged token. """ tagged_token = TaggedToken() tagged_token.token = token tagged_token.tag = Tag(pos=POS(node)) return tagged_token
[ "nlp_crow.structures.tagging.Tag.Tag", "nlp_crow.structures.tagging.ParsedText.ParsedText", "nlp_crow.structures.tagging.ParsedText.TaggedText", "nlp_crow.structures.tagging.ParsedText.ParseTreeNode", "nlp_crow.structures.tagging.MorphCategory.POS", "nlp_crow.structures.tagging.ParsedText.TaggedToken", "nltk.pos_tag", "nltk.word_tokenize", "logging.getLogger" ]
[((903, 915), 'nlp_crow.structures.tagging.ParsedText.TaggedText', 'TaggedText', ([], {}), '()\n', (913, 915), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n'), ((933, 957), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (951, 957), False, 'import nltk\n'), ((979, 999), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (991, 999), False, 'import nltk\n'), ((1285, 1312), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1302, 1312), False, 'import logging\n'), ((1977, 1989), 'nlp_crow.structures.tagging.ParsedText.ParsedText', 'ParsedText', ([], {}), '()\n', (1987, 1989), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n'), ((2119, 2143), 'nlp_crow.structures.tagging.ParsedText.ParseTreeNode', 'ParseTreeNode', ([], {'label': '"""T"""'}), "(label='T')\n", (2132, 2143), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n'), ((2261, 2285), 'nlp_crow.structures.tagging.ParsedText.ParseTreeNode', 'ParseTreeNode', ([], {'label': '"""S"""'}), "(label='S')\n", (2274, 2285), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n'), ((4762, 4788), 'nlp_crow.structures.tagging.ParsedText.ParseTreeNode', 'ParseTreeNode', ([], {'label': 'label'}), '(label=label)\n', (4775, 4788), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n'), ((5185, 5198), 'nlp_crow.structures.tagging.ParsedText.TaggedToken', 'TaggedToken', ([], {}), '()\n', (5196, 5198), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n'), ((1019, 1024), 'nlp_crow.structures.tagging.Tag.Tag', 'Tag', ([], {}), '()\n', (1022, 1024), False, 'from nlp_crow.structures.tagging.Tag import Tag\n'), ((1047, 1065), 'nlp_crow.structures.tagging.MorphCategory.POS', 'POS', ([], {'value': 'pair[1]'}), '(value=pair[1])\n', (1050, 1065), False, 'from nlp_crow.structures.tagging.MorphCategory import POS\n'), ((5270, 5279), 'nlp_crow.structures.tagging.MorphCategory.POS', 'POS', (['node'], {}), '(node)\n', (5273, 5279), False, 'from nlp_crow.structures.tagging.MorphCategory import POS\n'), ((2764, 2788), 'nlp_crow.structures.tagging.ParsedText.ParseTreeNode', 'ParseTreeNode', ([], {'label': '"""S"""'}), "(label='S')\n", (2777, 2788), False, 'from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken\n')]
import urllib.request as ur
import urllib

"""
2017 - 4 - 10
neko34
Fetch the required data from the network and call the corresponding API.
"""


def openUrl(urlString):
    html = ur.urlopen(urlString).read()
    return html
[ "urllib.request.urlopen" ]
[((138, 159), 'urllib.request.urlopen', 'ur.urlopen', (['urlString'], {}), '(urlString)\n', (148, 159), True, 'import urllib.request as ur\n')]
# Author: Fayas (https://github.com/FayasNoushad) (@FayasNoushad) from .admin import * from pyrogram import Client, filters from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton START_TEXT = """Hello {} 😌 I am a link shortner telegram bot. >> `I can short any type of link` Made by @FayasNoushad""" HELP_TEXT = """**Hey, Follow these steps:** ➠ Just send a link for shorting. ➠ I will send the shorted links. **Available Commands** /start - Checking Bot Online /help - For more help /about - For more about me /status - For bot status /settings - For bot settings /reset - For reset bot settings Made by @FayasNoushad""" ABOUT_TEXT = """--**About Me 😎**-- 🤖 **Name :** [Link shortner](https://telegram.me/{}) 👨‍💻 **Developer :** [Fayas](https://github.com/FayasNoushad) 📢 **Channel :** [Fayas Noushad](https://telegram.me/FayasNoushad) 👥 **Group :** [Developer Team](https://telegram.me/TheDeveloperTeam) 🌐 **Source :** [👉 Click here](https://github.com/FayasNoushad/URL-Shortner) 📝 **Language :** [Python3](https://python.org) 🧰 **Framework :** [Pyrogram](https://pyrogram.org) 📡 **Server :** [Heroku](https://heroku.com)""" SETTINGS_TEXT = "**Settings**" RESET_TEXT = "**Are you sure for reset.**" START_BUTTONS = InlineKeyboardMarkup( [[ InlineKeyboardButton('⚙ Help', callback_data='help'), InlineKeyboardButton('About 🔰', callback_data='about'), InlineKeyboardButton('Close ⛔️', callback_data='close') ]] ) HELP_BUTTONS = InlineKeyboardMarkup( [[ InlineKeyboardButton('🏘 Home', callback_data='home'), InlineKeyboardButton('About 🔰', callback_data='about') ],[ InlineKeyboardButton('⚒ Settings', callback_data='settings'), InlineKeyboardButton('Close ⛔️', callback_data='close') ]] ) ABOUT_BUTTONS = InlineKeyboardMarkup( [[ InlineKeyboardButton('🏘 Home', callback_data='home'), InlineKeyboardButton('Help ⚙', callback_data='help') ],[ InlineKeyboardButton('Close ⛔️', callback_data='close') ]] ) SETTINGS_BUTTONS = [ [ InlineKeyboardButton('🏘 Home', callback_data='home'), InlineKeyboardButton('Help ⚙', callback_data='help') ], [ InlineKeyboardButton('🔄 Reset', callback_data='reset'), InlineKeyboardButton('Close ⛔️', callback_data='close') ] ] RESET_BUTTONS = InlineKeyboardMarkup( [[ InlineKeyboardButton(text="Yes ✅", callback_data="confirm_reset"), InlineKeyboardButton(text="No ❌", callback_data="cancel_reset") ]] ) @Client.on_message(filters.private & filters.command(["start"])) async def start(bot, update): if not await db.is_user_exist(update.from_user.id): await db.add_user(update.from_user.id) await update.reply_text( text=START_TEXT.format(update.from_user.mention), reply_markup=START_BUTTONS, disable_web_page_preview=True, quote=True ) @Client.on_message(filters.private & filters.command(["help"])) async def help(bot, update): if not await db.is_user_exist(update.from_user.id): await db.add_user(update.from_user.id) await update.reply_text( text=HELP_TEXT, disable_web_page_preview=True, reply_markup=HELP_BUTTONS, quote=True ) @Client.on_message(filters.private & filters.command(["about"])) async def about(bot, update): if not await db.is_user_exist(update.from_user.id): await db.add_user(update.from_user.id) await update.reply_text( text=ABOUT_TEXT.format((await bot.get_me()).username), disable_web_page_preview=True, reply_markup=ABOUT_BUTTONS, quote=True ) @Client.on_message(filters.private & filters.command(["reset"])) async def reset(bot, update): if not await db.is_user_exist(update.from_user.id): await db.add_user(update.from_user.id) await update.reply_text( text=RESET_TEXT, 
        disable_web_page_preview=True,
        reply_markup=RESET_BUTTONS,
        quote=True
    )


@Client.on_message(filters.private & filters.command(["status"]))
async def status(bot, update):
    if not await db.is_user_exist(update.from_user.id):
        await db.add_user(update.from_user.id)
    total_users = await db.total_users_count()
    text = "**Bot Status**\n"
    text += f"\n**Total Users:** `{total_users}`"
    await update.reply_text(
        text=text,
        quote=True,
        disable_web_page_preview=True
    )


@Client.on_message(filters.private & filters.command(["settings"]))
async def settings(bot, update):
    if not await db.is_user_exist(update.from_user.id):
        await db.add_user(update.from_user.id)
    await display_settings(bot, update, db)


async def display_settings(bot, update, db, cb=False):
    chat_id = update.from_user.id
    text = SETTINGS_TEXT
    buttons = []
    if await db.allow_domain(chat_id, domain="gplinks.in"):
        buttons.append([InlineKeyboardButton(text="Gplinks.in ✅", callback_data="set+gplinks.in")])
    else:
        buttons.append([InlineKeyboardButton(text="Gplinks.in ❌", callback_data="set+gplinks.in")])
    if await db.allow_domain(chat_id, domain="bit.ly"):
        buttons.append([InlineKeyboardButton(text="Bit.ly ✅", callback_data="set+bit.ly")])
    else:
        buttons.append([InlineKeyboardButton(text="Bit.ly ❌", callback_data="set+bit.ly")])
    if await db.allow_domain(chat_id, domain="chilp.it"):
        buttons.append([InlineKeyboardButton(text="Chilp.it ✅", callback_data="set+chilp.it")])
    else:
        buttons.append([InlineKeyboardButton(text="Chilp.it ❌", callback_data="set+chilp.it")])
    if await db.allow_domain(chat_id, domain="click.ru"):
        buttons.append([InlineKeyboardButton(text="Click.ru ✅", callback_data="set+click.ru")])
    else:
        buttons.append([InlineKeyboardButton(text="Click.ru ❌", callback_data="set+click.ru")])
    if await db.allow_domain(chat_id, domain="cutt.ly"):
        buttons.append([InlineKeyboardButton(text="Cutt.ly ✅", callback_data="set+cutt.ly")])
    else:
        buttons.append([InlineKeyboardButton(text="Cutt.ly ❌", callback_data="set+cutt.ly")])
    if await db.allow_domain(chat_id, domain="da.gd"):
        buttons.append([InlineKeyboardButton(text="Da.gd ✅", callback_data="set+da.gd")])
    else:
        buttons.append([InlineKeyboardButton(text="Da.gd ❌", callback_data="set+da.gd")])
    if await db.allow_domain(chat_id, domain="git.io"):
        buttons.append([InlineKeyboardButton(text="Git.io ✅", callback_data="set+git.io")])
    else:
        buttons.append([InlineKeyboardButton(text="Git.io ❌", callback_data="set+git.io")])
    if await db.allow_domain(chat_id, domain="is.gd"):
        buttons.append([InlineKeyboardButton(text="Is.gd ✅", callback_data="set+is.gd")])
    else:
        buttons.append([InlineKeyboardButton(text="Is.gd ❌", callback_data="set+is.gd")])
    if await db.allow_domain(chat_id, domain="osdb.link"):
        buttons.append([InlineKeyboardButton(text="Osdb.link ✅", callback_data="set+osdb.link")])
    else:
        buttons.append([InlineKeyboardButton(text="Osdb.link ❌", callback_data="set+osdb.link")])
    if await db.allow_domain(chat_id, domain="ow.ly"):
        buttons.append([InlineKeyboardButton(text="Ow.ly ✅", callback_data="set+ow.ly")])
    else:
        buttons.append([InlineKeyboardButton(text="Ow.ly ❌", callback_data="set+ow.ly")])
    if await db.allow_domain(chat_id, domain="po.st"):
        buttons.append([InlineKeyboardButton(text="Po.st ✅", callback_data="set+po.st")])
    else:
        buttons.append([InlineKeyboardButton(text="Po.st ❌", callback_data="set+po.st")])
    if await db.allow_domain(chat_id, domain="qps.ru"):
        buttons.append([InlineKeyboardButton(text="Qps.ru ✅", callback_data="set+qps.ru")])
    else:
        buttons.append([InlineKeyboardButton(text="Qps.ru ❌", callback_data="set+qps.ru")])
    if await db.allow_domain(chat_id, domain="short.cm"):
        buttons.append([InlineKeyboardButton(text="Short.cm ✅", callback_data="set+short.cm")])
    else:
        buttons.append([InlineKeyboardButton(text="Short.cm ❌", callback_data="set+short.cm")])
    if await db.allow_domain(chat_id, domain="tinyurl.com"):
        buttons.append([InlineKeyboardButton(text="Tinyurl.com ✅", callback_data="set+tinyurl.com")])
    else:
        buttons.append([InlineKeyboardButton(text="Tinyurl.com ❌", callback_data="set+tinyurl.com")])
    if await db.allow_domain(chat_id, domain="0x0.st"):
        buttons.append([InlineKeyboardButton(text="0x0.st ✅", callback_data="set+0x0.st")])
    else:
        buttons.append([InlineKeyboardButton(text="0x0.st ❌", callback_data="set+0x0.st")])
    if await db.allow_domain(chat_id, domain="ttm.sh"):
        buttons.append([InlineKeyboardButton(text="ttm.sh ✅", callback_data="set+ttm.sh")])
    else:
        buttons.append([InlineKeyboardButton(text="ttm.sh ❌", callback_data="set+ttm.sh")])
    keyboard = []
    for line in buttons:
        for button in line:
            if len(keyboard) == 0 or len(keyboard[-1]) >= 2:
                keyboard.append([button])
            else:
                keyboard[-1].append(button)
    for setting_button in SETTINGS_BUTTONS:
        keyboard.append(setting_button)
    if cb:
        await update.message.edit_text(
            text=text,
            reply_markup=InlineKeyboardMarkup(keyboard),
            disable_web_page_preview=True
        )
    else:
        await update.reply_text(
            text=text,
            reply_markup=InlineKeyboardMarkup(keyboard),
            disable_web_page_preview=True,
            quote=True
        )
[ "pyrogram.types.InlineKeyboardButton", "pyrogram.types.InlineKeyboardMarkup", "pyrogram.filters.command" ]
[((2130, 2182), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""🏘 Home"""'], {'callback_data': '"""home"""'}), "('🏘 Home', callback_data='home')\n", (2150, 2182), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2192, 2244), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Help ⚙"""'], {'callback_data': '"""help"""'}), "('Help ⚙', callback_data='help')\n", (2212, 2244), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2266, 2320), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""🔄 Reset"""'], {'callback_data': '"""reset"""'}), "('🔄 Reset', callback_data='reset')\n", (2286, 2320), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2330, 2385), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Close ⛔️"""'], {'callback_data': '"""close"""'}), "('Close ⛔️', callback_data='close')\n", (2350, 2385), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2645, 2671), 'pyrogram.filters.command', 'filters.command', (["['start']"], {}), "(['start'])\n", (2660, 2671), False, 'from pyrogram import Client, filters\n'), ((3031, 3056), 'pyrogram.filters.command', 'filters.command', (["['help']"], {}), "(['help'])\n", (3046, 3056), False, 'from pyrogram import Client, filters\n'), ((3380, 3406), 'pyrogram.filters.command', 'filters.command', (["['about']"], {}), "(['about'])\n", (3395, 3406), False, 'from pyrogram import Client, filters\n'), ((3771, 3797), 'pyrogram.filters.command', 'filters.command', (["['reset']"], {}), "(['reset'])\n", (3786, 3797), False, 'from pyrogram import Client, filters\n'), ((4124, 4151), 'pyrogram.filters.command', 'filters.command', (["['status']"], {}), "(['status'])\n", (4139, 4151), False, 'from pyrogram import Client, filters\n'), ((4564, 4593), 'pyrogram.filters.command', 'filters.command', (["['settings']"], {}), "(['settings'])\n", (4579, 4593), False, 'from pyrogram import Client, filters\n'), ((1295, 1347), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""⚙ Help"""'], {'callback_data': '"""help"""'}), "('⚙ Help', callback_data='help')\n", (1315, 1347), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1357, 1411), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""About 🔰"""'], {'callback_data': '"""about"""'}), "('About 🔰', callback_data='about')\n", (1377, 1411), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1421, 1476), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Close ⛔️"""'], {'callback_data': '"""close"""'}), "('Close ⛔️', callback_data='close')\n", (1441, 1476), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1550, 1602), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""🏘 Home"""'], {'callback_data': '"""home"""'}), "('🏘 Home', callback_data='home')\n", (1570, 1602), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1612, 1666), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""About 🔰"""'], {'callback_data': '"""about"""'}), "('About 🔰', callback_data='about')\n", (1632, 1666), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1687, 1747), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""⚒ Settings"""'], {'callback_data': '"""settings"""'}), "('⚒ Settings', callback_data='settings')\n", (1707, 1747), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1757, 1812), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Close ⛔️"""'], {'callback_data': '"""close"""'}), "('Close ⛔️', callback_data='close')\n", (1777, 1812), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1887, 1939), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""🏘 Home"""'], {'callback_data': '"""home"""'}), "('🏘 Home', callback_data='home')\n", (1907, 1939), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1949, 2001), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Help ⚙"""'], {'callback_data': '"""help"""'}), "('Help ⚙', callback_data='help')\n", (1969, 2001), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2022, 2077), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Close ⛔️"""'], {'callback_data': '"""close"""'}), "('Close ⛔️', callback_data='close')\n", (2042, 2077), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2451, 2516), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Yes ✅"""', 'callback_data': '"""confirm_reset"""'}), "(text='Yes ✅', callback_data='confirm_reset')\n", (2471, 2516), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((2526, 2589), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""No ❌"""', 'callback_data': '"""cancel_reset"""'}), "(text='No ❌', callback_data='cancel_reset')\n", (2546, 2589), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((4991, 5064), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Gplinks.in ✅"""', 'callback_data': '"""set+gplinks.in"""'}), "(text='Gplinks.in ✅', callback_data='set+gplinks.in')\n", (5011, 5064), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5101, 5174), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Gplinks.in ❌"""', 'callback_data': '"""set+gplinks.in"""'}), "(text='Gplinks.in ❌', callback_data='set+gplinks.in')\n", (5121, 5174), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5257, 5322), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Bit.ly ✅"""', 'callback_data': '"""set+bit.ly"""'}), "(text='Bit.ly ✅', callback_data='set+bit.ly')\n", (5277, 5322), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5359, 5424), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Bit.ly ❌"""', 'callback_data': '"""set+bit.ly"""'}), "(text='Bit.ly ❌', callback_data='set+bit.ly')\n", (5379, 5424), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5509, 5578), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Chilp.it ✅"""', 'callback_data': '"""set+chilp.it"""'}), "(text='Chilp.it ✅', callback_data='set+chilp.it')\n", (5529, 5578), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5615, 5684), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Chilp.it ❌"""', 'callback_data': '"""set+chilp.it"""'}), "(text='Chilp.it ❌', callback_data='set+chilp.it')\n", (5635, 5684), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5769, 5838), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Click.ru ✅"""', 'callback_data': '"""set+click.ru"""'}), "(text='Click.ru ✅', callback_data='set+click.ru')\n", (5789, 5838), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((5875, 5944), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Click.ru ❌"""', 'callback_data': '"""set+click.ru"""'}), "(text='Click.ru ❌', callback_data='set+click.ru')\n", (5895, 5944), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6028, 6095), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Cutt.ly ✅"""', 'callback_data': '"""set+cutt.ly"""'}), "(text='Cutt.ly ✅', callback_data='set+cutt.ly')\n", (6048, 6095), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6132, 6199), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Cutt.ly ❌"""', 'callback_data': '"""set+cutt.ly"""'}), "(text='Cutt.ly ❌', callback_data='set+cutt.ly')\n", (6152, 6199), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6281, 6344), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Da.gd ✅"""', 'callback_data': '"""set+da.gd"""'}), "(text='Da.gd ✅', callback_data='set+da.gd')\n", (6301, 6344), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6381, 6444), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Da.gd ❌"""', 'callback_data': '"""set+da.gd"""'}), "(text='Da.gd ❌', callback_data='set+da.gd')\n", (6401, 6444), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6527, 6592), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Git.io ✅"""', 'callback_data': '"""set+git.io"""'}), "(text='Git.io ✅', callback_data='set+git.io')\n", (6547, 6592), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6629, 6694), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Git.io ❌"""', 'callback_data': '"""set+git.io"""'}), "(text='Git.io ❌', callback_data='set+git.io')\n", (6649, 6694), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6776, 6839), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Is.gd ✅"""', 'callback_data': '"""set+is.gd"""'}), "(text='Is.gd ✅', callback_data='set+is.gd')\n", (6796, 6839), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((6876, 6939), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Is.gd ❌"""', 'callback_data': '"""set+is.gd"""'}), "(text='Is.gd ❌', callback_data='set+is.gd')\n", (6896, 6939), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7025, 7096), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Osdb.link ✅"""', 'callback_data': '"""set+osdb.link"""'}), "(text='Osdb.link ✅', callback_data='set+osdb.link')\n", (7045, 7096), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7133, 7204), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Osdb.link ❌"""', 'callback_data': '"""set+osdb.link"""'}), "(text='Osdb.link ❌', callback_data='set+osdb.link')\n", (7153, 7204), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7286, 7349), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Ow.ly ✅"""', 'callback_data': '"""set+ow.ly"""'}), "(text='Ow.ly ✅', callback_data='set+ow.ly')\n", (7306, 7349), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7386, 7449), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Ow.ly ❌"""', 'callback_data': '"""set+ow.ly"""'}), "(text='Ow.ly ❌', callback_data='set+ow.ly')\n", (7406, 7449), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7531, 7594), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Po.st ✅"""', 'callback_data': '"""set+po.st"""'}), "(text='Po.st ✅', callback_data='set+po.st')\n", (7551, 7594), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7631, 7694), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Po.st ❌"""', 'callback_data': '"""set+po.st"""'}), "(text='Po.st ❌', callback_data='set+po.st')\n", (7651, 7694), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7777, 7842), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Qps.ru ✅"""', 'callback_data': '"""set+qps.ru"""'}), "(text='Qps.ru ✅', callback_data='set+qps.ru')\n", (7797, 7842), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((7879, 7944), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Qps.ru ❌"""', 'callback_data': '"""set+qps.ru"""'}), "(text='Qps.ru ❌', callback_data='set+qps.ru')\n", (7899, 7944), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8029, 8098), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Short.cm ✅"""', 'callback_data': '"""set+short.cm"""'}), "(text='Short.cm ✅', callback_data='set+short.cm')\n", (8049, 8098), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8135, 8204), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Short.cm ❌"""', 'callback_data': '"""set+short.cm"""'}), "(text='Short.cm ❌', callback_data='set+short.cm')\n", (8155, 8204), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8292, 8367), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Tinyurl.com ✅"""', 'callback_data': '"""set+tinyurl.com"""'}), "(text='Tinyurl.com ✅', callback_data='set+tinyurl.com')\n", (8312, 8367), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8404, 8479), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Tinyurl.com ❌"""', 'callback_data': '"""set+tinyurl.com"""'}), "(text='Tinyurl.com ❌', callback_data='set+tinyurl.com')\n", (8424, 8479), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8562, 8627), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""0x0.st ✅"""', 'callback_data': '"""set+0x0.st"""'}), "(text='0x0.st ✅', callback_data='set+0x0.st')\n", (8582, 8627), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8664, 8729), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""0x0.st ❌"""', 'callback_data': '"""set+0x0.st"""'}), "(text='0x0.st ❌', callback_data='set+0x0.st')\n", (8684, 8729), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8812, 8877), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""ttm.sh ✅"""', 'callback_data': '"""set+ttm.sh"""'}), "(text='ttm.sh ✅', callback_data='set+ttm.sh')\n", (8832, 8877), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((8914, 8979), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""ttm.sh ❌"""', 'callback_data': '"""set+ttm.sh"""'}), "(text='ttm.sh ❌', callback_data='set+ttm.sh')\n", (8934, 8979), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((9401, 9431), 'pyrogram.types.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (9421, 9431), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((9576, 9606), 'pyrogram.types.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (9596, 9606), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n')]
from pylsl import StreamInlet from PyQt5 import QtCore, QtWidgets import pyqtgraph as pg import pylslhandler timestamp_arr, TP9_arr, AF7_arr, AF8_arr, TP10_arr, AUX_arr = ([] for i in range(6)) tickInterval = 1 #milliseconds yRange = 1700 #microVolts xRange = 500 #milliseconds of readings class LiveEEGViewer(pg.GraphicsWindow): def __init__(self, parent=None): super().__init__(parent=parent) self.mainLayout = QtWidgets.QVBoxLayout() self.setLayout(self.mainLayout) self.timer = QtCore.QTimer(self) self.timer.setInterval(tickInterval) self.timer.start() self.timer.timeout.connect(self.pullData) self.plt = self.addPlot(title="Muse Raw EEG Stream (red: TP9, green: AF7, blue: AF8, pink: TP10)") self.plt.setLabel("left", "Potential (uV)", color="grey") self.plt.setYRange(-yRange, yRange) self.curve_TP9 = self.plt.plot(pen=pg.mkPen(color=(255, 0, 0))) self.curve_AF7 = self.plt.plot(pen=pg.mkPen(color=(0, 255, 0))) self.curve_AF8 = self.plt.plot(pen=pg.mkPen(color=(0, 0, 255))) self.curve_TP10 = self.plt.plot(pen=pg.mkPen(color=(255, 0, 255))) #self.curve_AUX = self.plt.plot(pen=pg.mkPen(color=(0, 255, 255))) def setData(self, x, yTP9, yAF7, yAF8, yTP10, yAUX): self.curve_TP9.setData(x, yTP9) self.curve_AF7.setData(x, yAF7) self.curve_AF8.setData(x, yAF8) self.curve_TP10.setData(x, yTP10) #self.curve_AUX.setData(x, yAUX) def pullData(self): sample, timestamp = inlet.pull_sample() if len(TP9_arr) >= xRange: TP9_arr.pop(0) AF7_arr.pop(0) AF8_arr.pop(0) TP10_arr.pop(0) #AUX_arr.pop(0) timestamp_arr.pop(0) #convert relative values to electrical potential (uV) #range=1000, voltage=3.3, gain of AFE=1961 TP9_arr.append((sample[0]/1000)*3.3*(1/1961)*1000000) AF7_arr.append((sample[1]/1000)*3.3*(1/1961)*1000000) AF8_arr.append((sample[2]/1000)*3.3*(1/1961)*1000000) TP10_arr.append((sample[3]/1000)*3.3*(1/1961)*1000000) #AUX_arr.append((sample[4]/1000)*3.3*(1/1961)*1000000) timestamp_arr.append(timestamp) self.setData(timestamp_arr, TP9_arr, AF7_arr, AF8_arr, TP10_arr, AUX_arr) def main(): app = QtWidgets.QApplication([]) window = LiveEEGViewer() window.show() window.resize(800,600) window.setWindowTitle('Muse Raw EEG Stream') window.raise_() app.exec_() if __name__ == "__main__": #first resolve an EEG stream on the lab network streams = pylslhandler.resolve_conn() print("Connection established") #create a new inlet to read from the stream inlet = StreamInlet(streams[0]) main()
[ "PyQt5.QtCore.QTimer", "pylsl.StreamInlet", "PyQt5.QtWidgets.QVBoxLayout", "PyQt5.QtWidgets.QApplication", "pylslhandler.resolve_conn", "pyqtgraph.mkPen" ]
[((2398, 2424), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (2420, 2424), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2678, 2705), 'pylslhandler.resolve_conn', 'pylslhandler.resolve_conn', ([], {}), '()\n', (2703, 2705), False, 'import pylslhandler\n'), ((2803, 2826), 'pylsl.StreamInlet', 'StreamInlet', (['streams[0]'], {}), '(streams[0])\n', (2814, 2826), False, 'from pylsl import StreamInlet\n'), ((439, 462), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (460, 462), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((533, 552), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', (['self'], {}), '(self)\n', (546, 552), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((955, 982), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(255, 0, 0)'}), '(color=(255, 0, 0))\n', (963, 982), True, 'import pyqtgraph as pg\n'), ((1027, 1054), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(0, 255, 0)'}), '(color=(0, 255, 0))\n', (1035, 1054), True, 'import pyqtgraph as pg\n'), ((1099, 1126), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(0, 0, 255)'}), '(color=(0, 0, 255))\n', (1107, 1126), True, 'import pyqtgraph as pg\n'), ((1172, 1201), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(255, 0, 255)'}), '(color=(255, 0, 255))\n', (1180, 1201), True, 'import pyqtgraph as pg\n')]
# ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- """ Supported analyzer types. """ import os import re from codechecker_analyzer import env from codechecker_common.logger import get_logger from .. import host_check from .clangtidy.analyzer import ClangTidy from .clangsa.analyzer import ClangSA LOG = get_logger('analyzer') supported_analyzers = {ClangSA.ANALYZER_NAME: ClangSA, ClangTidy.ANALYZER_NAME: ClangTidy} def is_ctu_capable(context): """ Detects if the current clang is CTU compatible. """ enabled_analyzers, _ = \ check_supported_analyzers([ClangSA.ANALYZER_NAME], context) if not enabled_analyzers: return False clangsa_cfg = ClangSA.construct_config_handler([], context) return clangsa_cfg.ctu_capability.is_ctu_capable def is_ctu_on_demand_available(context): """ Detects if the current clang is capable of on-demand AST loading. """ enabled_analyzers, _ = \ check_supported_analyzers([ClangSA.ANALYZER_NAME], context) if not enabled_analyzers: return False clangsa_cfg = ClangSA.construct_config_handler([], context) return clangsa_cfg.ctu_capability.is_on_demand_ctu_available def is_statistics_capable(context): """ Detects if the current clang is Statistics compatible. """ # Resolve potentially missing binaries. enabled_analyzers, _ = \ check_supported_analyzers([ClangSA.ANALYZER_NAME], context) if not enabled_analyzers: return False clangsa_cfg = ClangSA.construct_config_handler([], context) check_env = env.extend(context.path_env_extra, context.ld_lib_path_extra) checkers = ClangSA.get_analyzer_checkers(clangsa_cfg, check_env) stat_checkers_pattern = re.compile(r'.+statisticscollector.+') for checker_name, _ in checkers: if stat_checkers_pattern.match(checker_name): return True return False def is_z3_capable(context): """ Detects if the current clang is Z3 compatible. """ enabled_analyzers, _ = \ check_supported_analyzers([ClangSA.ANALYZER_NAME], context) if not enabled_analyzers: return False analyzer_binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME) analyzer_env = env.extend(context.path_env_extra, context.ld_lib_path_extra) return host_check.has_analyzer_option(analyzer_binary, ['-Xclang', '-analyzer-constraints=z3'], analyzer_env) def is_z3_refutation_capable(context): """ Detects if the current clang is Z3 refutation compatible. """ # This function basically checks whether the corresponding analyzer config # option exists i.e. it is visible on analyzer config option help page. # However, it doesn't mean that Clang itself is compiled with Z3. if not is_z3_capable(context): return False check_supported_analyzers([ClangSA.ANALYZER_NAME], context) analyzer_binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME) analyzer_env = env.extend(context.path_env_extra, context.ld_lib_path_extra) return host_check.has_analyzer_config_option(analyzer_binary, 'crosscheck-with-z3', analyzer_env) def check_supported_analyzers(analyzers, context): """ Checks the given analyzers in the current context for their executability and support in CodeChecker. This method also updates the given context.analyzer_binaries if the context's configuration is bogus but had been resolved. :return: (enabled, failed) where enabled is a list of analyzer names and failed is a list of (analyzer, reason) tuple. 
""" check_env = env.extend(context.path_env_extra, context.ld_lib_path_extra) analyzer_binaries = context.analyzer_binaries enabled_analyzers = set() failed_analyzers = set() for analyzer_name in analyzers: if analyzer_name not in supported_analyzers: failed_analyzers.add((analyzer_name, "Analyzer unsupported by CodeChecker.")) continue # Get the compiler binary to check if it can run. available_analyzer = True analyzer_bin = analyzer_binaries.get(analyzer_name) if not analyzer_bin: failed_analyzers.add((analyzer_name, "Failed to detect analyzer binary.")) available_analyzer = False elif not os.path.isabs(analyzer_bin): # If the analyzer is not in an absolute path, try to find it... found_bin = supported_analyzers[analyzer_name].\ resolve_missing_binary(analyzer_bin, check_env) # found_bin is an absolute path, an executable in one of the # PATH folders. # If found_bin is the same as the original binary, ie., normally # calling the binary without any search would have resulted in # the same binary being called, it's NOT a "not found". if found_bin and os.path.basename(found_bin) != analyzer_bin: LOG.debug("Configured binary '%s' for analyzer '%s' was " "not found, but environment PATH contains '%s'.", analyzer_bin, analyzer_name, found_bin) context.analyzer_binaries[analyzer_name] = \ os.path.realpath(found_bin) analyzer_bin = found_bin if not analyzer_bin or \ not host_check.check_clang(analyzer_bin, check_env): # Analyzers unavailable under absolute paths are deliberately a # configuration problem. failed_analyzers.add((analyzer_name, "Cannot execute analyzer binary.")) available_analyzer = False if available_analyzer: enabled_analyzers.add(analyzer_name) return enabled_analyzers, failed_analyzers def construct_analyzer(buildaction, analyzer_config): try: analyzer_type = buildaction.analyzer_type LOG.debug_analyzer('Constructing %s analyzer.', analyzer_type) if analyzer_type in supported_analyzers: analyzer = supported_analyzers[analyzer_type](analyzer_config, buildaction) else: analyzer = None LOG.error('Unsupported analyzer type: %s', analyzer_type) return analyzer except Exception as ex: LOG.debug_analyzer(ex) return None def build_config_handlers(args, context, enabled_analyzers): """ Handle config from command line or from config file if no command line config is given. Supported command line config format is in JSON tidy supports YAML also but no standard lib for yaml parsing is available in python. """ analyzer_config_map = {} for ea in enabled_analyzers: config_handler = supported_analyzers[ea].\ construct_config_handler(args, context) analyzer_config_map[ea] = config_handler return analyzer_config_map
[ "os.path.isabs", "os.path.basename", "os.path.realpath", "codechecker_analyzer.env.extend", "codechecker_common.logger.get_logger", "re.compile" ]
[((602, 624), 'codechecker_common.logger.get_logger', 'get_logger', (['"""analyzer"""'], {}), "('analyzer')\n", (612, 624), False, 'from codechecker_common.logger import get_logger\n'), ((1877, 1938), 'codechecker_analyzer.env.extend', 'env.extend', (['context.path_env_extra', 'context.ld_lib_path_extra'], {}), '(context.path_env_extra, context.ld_lib_path_extra)\n', (1887, 1938), False, 'from codechecker_analyzer import env\n'), ((2065, 2102), 're.compile', 're.compile', (['""".+statisticscollector.+"""'], {}), "('.+statisticscollector.+')\n", (2075, 2102), False, 'import re\n'), ((2571, 2632), 'codechecker_analyzer.env.extend', 'env.extend', (['context.path_env_extra', 'context.ld_lib_path_extra'], {}), '(context.path_env_extra, context.ld_lib_path_extra)\n', (2581, 2632), False, 'from codechecker_analyzer import env\n'), ((3458, 3519), 'codechecker_analyzer.env.extend', 'env.extend', (['context.path_env_extra', 'context.ld_lib_path_extra'], {}), '(context.path_env_extra, context.ld_lib_path_extra)\n', (3468, 3519), False, 'from codechecker_analyzer import env\n'), ((4209, 4270), 'codechecker_analyzer.env.extend', 'env.extend', (['context.path_env_extra', 'context.ld_lib_path_extra'], {}), '(context.path_env_extra, context.ld_lib_path_extra)\n', (4219, 4270), False, 'from codechecker_analyzer import env\n'), ((5003, 5030), 'os.path.isabs', 'os.path.isabs', (['analyzer_bin'], {}), '(analyzer_bin)\n', (5016, 5030), False, 'import os\n'), ((5926, 5953), 'os.path.realpath', 'os.path.realpath', (['found_bin'], {}), '(found_bin)\n', (5942, 5953), False, 'import os\n'), ((5584, 5611), 'os.path.basename', 'os.path.basename', (['found_bin'], {}), '(found_bin)\n', (5600, 5611), False, 'import os\n')]
from audioop import add
from numpy import mat
import tensorflow as tf

# Define a 2-D array
list_of_list = [[10, 20], [30, 40]]

# Convert to a tensor - pass the 2-D array to the constant function
mat1 = tf.constant(list_of_list)

# Check the rank
print("rank:", tf.rank(mat1))

# Print the tensor
print("mat1:", mat1)

# Define 1-D vectors
vec1 = tf.constant([1, 0])
vec2 = tf.constant([-1, 2])

# Convert to a tensor - stack the 1-D arrays vertically with the stack function
mat2 = tf.stack([vec1, vec2])

# Check the rank
print("rank:", tf.rank(mat2))

# Print the tensor
print("mat2:", mat2)

# Element-by-element operation
element_mul = tf.math.multiply(mat1, mat2)
print("result:", element_mul)
print("rank:", tf.rank(element_mul))

# Broadcasting operation
element_bc = tf.math.multiply(mat1, 3)
print("result:", element_bc)
print("rank:", tf.rank(element_bc))

# Matrix multiplication
mat_mul = tf.matmul(mat1, mat2)
print("result:", mat_mul)
print("rank:", tf.rank(mat_mul))

# Addition
add1 = tf.math.add(mat1, mat2)
print("result:", add1)
print("rank:", tf.rank(add1))

# Addition (Python operator)
add2 = mat1 + mat2
print("result:", add2)
print("rank:", tf.rank(add2))

# Convert the tensor to a NumPy array
np_arr = mat_mul.numpy()
print(type(np_arr))
print(np_arr)
[ "tensorflow.rank", "tensorflow.math.add", "tensorflow.constant", "tensorflow.stack", "tensorflow.matmul", "tensorflow.math.multiply" ]
[((160, 185), 'tensorflow.constant', 'tf.constant', (['list_of_list'], {}), '(list_of_list)\n', (171, 185), True, 'import tensorflow as tf\n'), ((275, 294), 'tensorflow.constant', 'tf.constant', (['[1, 0]'], {}), '([1, 0])\n', (286, 294), True, 'import tensorflow as tf\n'), ((302, 322), 'tensorflow.constant', 'tf.constant', (['[-1, 2]'], {}), '([-1, 2])\n', (313, 322), True, 'import tensorflow as tf\n'), ((367, 389), 'tensorflow.stack', 'tf.stack', (['[vec1, vec2]'], {}), '([vec1, vec2])\n', (375, 389), True, 'import tensorflow as tf\n'), ((500, 528), 'tensorflow.math.multiply', 'tf.math.multiply', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (516, 528), True, 'import tensorflow as tf\n'), ((622, 647), 'tensorflow.math.multiply', 'tf.math.multiply', (['mat1', '(3)'], {}), '(mat1, 3)\n', (638, 647), True, 'import tensorflow as tf\n'), ((733, 754), 'tensorflow.matmul', 'tf.matmul', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (742, 754), True, 'import tensorflow as tf\n'), ((830, 853), 'tensorflow.math.add', 'tf.math.add', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (841, 853), True, 'import tensorflow as tf\n'), ((210, 223), 'tensorflow.rank', 'tf.rank', (['mat1'], {}), '(mat1)\n', (217, 223), True, 'import tensorflow as tf\n'), ((414, 427), 'tensorflow.rank', 'tf.rank', (['mat2'], {}), '(mat2)\n', (421, 427), True, 'import tensorflow as tf\n'), ((574, 594), 'tensorflow.rank', 'tf.rank', (['element_mul'], {}), '(element_mul)\n', (581, 594), True, 'import tensorflow as tf\n'), ((692, 711), 'tensorflow.rank', 'tf.rank', (['element_bc'], {}), '(element_bc)\n', (699, 711), True, 'import tensorflow as tf\n'), ((796, 812), 'tensorflow.rank', 'tf.rank', (['mat_mul'], {}), '(mat_mul)\n', (803, 812), True, 'import tensorflow as tf\n'), ((892, 905), 'tensorflow.rank', 'tf.rank', (['add1'], {}), '(add1)\n', (899, 905), True, 'import tensorflow as tf\n'), ((978, 991), 'tensorflow.rank', 'tf.rank', (['add2'], {}), '(add2)\n', (985, 991), True, 'import tensorflow as tf\n')]
#!/usr/bin/env python3 import shutil import tempfile import unittest from collections import Counter, defaultdict from os import path from pytorch_translate.research.test import morphology_test_utils as morph_utils from pytorch_translate.research.unsupervised_morphology.ibm_model1 import IBMModel1 class TestIBMModel1(unittest.TestCase): def test_str2int(self): ibm_model = IBMModel1() # Calling multiple times to make sure we get the same value. assert ibm_model.str2int("hello") == 1 assert ibm_model.str2int("bye") == 2 assert ibm_model.str2int("hello") == 1 assert ibm_model.str2int("bye") == 2 assert len(ibm_model._str2int) == 3 assert len(ibm_model._int2str) == 3 assert ibm_model._int2str == [ibm_model.null_str, "hello", "bye"] assert ibm_model.int2str(2) == "bye" def test_morph_init(self): ibm_model = IBMModel1() tmp_dir, f1, f2 = morph_utils.get_two_same_tmp_files() ibm_model.initialize_translation_probs(f1, f2) assert len(ibm_model.translation_prob) == 10 assert ( len(ibm_model.translation_prob[ibm_model.str2int(ibm_model.null_str)]) == 9 ) assert len(ibm_model.translation_prob[ibm_model.str2int("345")]) == 6 assert ( ibm_model.translation_prob[ibm_model.str2int("122")][ ibm_model.str2int("123") ] == 1.0 / 4 ) shutil.rmtree(tmp_dir) def test_expectation_for_one_sentence(self): ibm_model = IBMModel1() tmp_dir, f1, f2 = morph_utils.get_two_same_tmp_files() ibm_model.initialize_translation_probs(f1, f2) translation_counts = defaultdict(lambda: defaultdict(float)) ibm_model.expectation_for_one_sentence( Counter( ibm_model.str2int(w) for w in ["123", "124", "234", "345", ibm_model.null_str] ), Counter(ibm_model.str2int(w) for w in ["123", "124", "234", "345"]), translation_counts, ) assert ( round( translation_counts[ibm_model.str2int("123")][ibm_model.str2int("345")], 3, ) == 0.176 ) shutil.rmtree(tmp_dir) def test_ibm_train(self): ibm_model = IBMModel1() tmp_dir, f1, f2 = morph_utils.get_two_same_tmp_files() ibm_model.learn_ibm_parameters(src_path=f1, dst_path=f2, num_iters=3) assert ( ibm_model.translation_prob[ibm_model.str2int("456789")][ ibm_model.str2int("345") ] == 0 ) assert ( ibm_model.translation_prob[ibm_model.str2int("456789")][ ibm_model.str2int("456789") ] == 0.5 ) shutil.rmtree(tmp_dir)
[ "collections.defaultdict", "shutil.rmtree", "pytorch_translate.research.test.morphology_test_utils.get_two_same_tmp_files", "pytorch_translate.research.unsupervised_morphology.ibm_model1.IBMModel1" ]
[((391, 402), 'pytorch_translate.research.unsupervised_morphology.ibm_model1.IBMModel1', 'IBMModel1', ([], {}), '()\n', (400, 402), False, 'from pytorch_translate.research.unsupervised_morphology.ibm_model1 import IBMModel1\n'), ((915, 926), 'pytorch_translate.research.unsupervised_morphology.ibm_model1.IBMModel1', 'IBMModel1', ([], {}), '()\n', (924, 926), False, 'from pytorch_translate.research.unsupervised_morphology.ibm_model1 import IBMModel1\n'), ((954, 990), 'pytorch_translate.research.test.morphology_test_utils.get_two_same_tmp_files', 'morph_utils.get_two_same_tmp_files', ([], {}), '()\n', (988, 990), True, 'from pytorch_translate.research.test import morphology_test_utils as morph_utils\n'), ((1471, 1493), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (1484, 1493), False, 'import shutil\n'), ((1564, 1575), 'pytorch_translate.research.unsupervised_morphology.ibm_model1.IBMModel1', 'IBMModel1', ([], {}), '()\n', (1573, 1575), False, 'from pytorch_translate.research.unsupervised_morphology.ibm_model1 import IBMModel1\n'), ((1603, 1639), 'pytorch_translate.research.test.morphology_test_utils.get_two_same_tmp_files', 'morph_utils.get_two_same_tmp_files', ([], {}), '()\n', (1637, 1639), True, 'from pytorch_translate.research.test import morphology_test_utils as morph_utils\n'), ((2279, 2301), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (2292, 2301), False, 'import shutil\n'), ((2353, 2364), 'pytorch_translate.research.unsupervised_morphology.ibm_model1.IBMModel1', 'IBMModel1', ([], {}), '()\n', (2362, 2364), False, 'from pytorch_translate.research.unsupervised_morphology.ibm_model1 import IBMModel1\n'), ((2392, 2428), 'pytorch_translate.research.test.morphology_test_utils.get_two_same_tmp_files', 'morph_utils.get_two_same_tmp_files', ([], {}), '()\n', (2426, 2428), True, 'from pytorch_translate.research.test import morphology_test_utils as morph_utils\n'), ((2857, 2879), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (2870, 2879), False, 'import shutil\n'), ((1744, 1762), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (1755, 1762), False, 'from collections import Counter, defaultdict\n')]
# -*- coding: utf-8 -*- # Copyright (C) 1999-2015, <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import, division, print_function, unicode_literals from contextlib import contextmanager import os import sys import subprocess @contextmanager def cd(path): old_path = os.getcwd() os.chdir(path) try: yield finally: os.chdir(old_path) def puts(*args): sys.stdout.write(''.join([str(arg) for arg in args])) sys.stdout.write('\n') sys.stdout.flush() def system(*args, **kwargs): env = kwargs.pop('env', None) return subprocess.call(list(args), env=env) def mkdir(config): """ create a directory """ os.system("""mkdir -p "%(dest)s/%(date)s/" """ % { 'date': config.tm, 'dest': config.dest, })
[ "sys.stdout.write", "os.getcwd", "os.system", "sys.stdout.flush", "os.chdir" ]
[((1335, 1346), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1344, 1346), False, 'import os\n'), ((1351, 1365), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1359, 1365), False, 'import os\n'), ((1510, 1532), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (1526, 1532), False, 'import sys\n'), ((1537, 1555), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1553, 1555), False, 'import sys\n'), ((1733, 1823), 'os.system', 'os.system', (['(\'mkdir -p "%(dest)s/%(date)s/" \' % {\'date\': config.tm, \'dest\': config.dest})'], {}), '(\'mkdir -p "%(dest)s/%(date)s/" \' % {\'date\': config.tm, \'dest\':\n config.dest})\n', (1742, 1823), False, 'import os\n'), ((1410, 1428), 'os.chdir', 'os.chdir', (['old_path'], {}), '(old_path)\n', (1418, 1428), False, 'import os\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- """ --- This file is part of pygalle.core.env Copyright (c) 2018 SAS 9 Février. Distributed under the MIT License (license terms are at http://opensource.org/licenses/MIT). --- """ import unittest import sys, os sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'src')) def test_suite(): test_loader = unittest.TestLoader() test_suite = test_loader.discover('test', pattern='test_*.py') print(test_suite) return test_suite
[ "os.path.abspath", "unittest.TestLoader" ]
[((388, 409), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (407, 409), False, 'import unittest\n'), ((309, 334), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (324, 334), False, 'import sys, os\n')]
from gym.envs.registration import register # gridnav: square ############################################################## register( id='GridNav_2-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'gridnav_2_v0.yaml'} ) register( id='GridNav_2-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'gridnav_2_v1.yaml'} ) register( id='GridNav_3-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'gridnav_3_v0.yaml'} ) register( id='GridNav_3-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'gridnav_3_v1.yaml'} ) # nchain modified ############################################################## # gym.error.Error: Cannot re-register id: NChain-v0 register( id='NChain_mod-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'nchain_mod_v0.yaml'} ) register( id='NChain_mod-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'nchain_mod_v1.yaml'} ) # tor ########################################################################## register( id='Tor_20201121a-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'tor_20201121a.yaml'} ) register( id='Tor_20201121a-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'tor_20201121a_v1.yaml'} ) register( id='hordijk_example-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'hordijk_example_v0.yaml'} ) register( id='Hordijk_example-v3', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'hordijk_example_v3.yaml'} ) register( id='Hordijk_example-v4', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'hordijk_example_v4.yaml'} ) register( id='Tor_20210306-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'tor_20210306_v0.yaml'} ) register( id='Tor_20210306-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'tor_20210306_v1.yaml'} ) register( id='Tor_20210307-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'tor_20210307_v0.yaml'} ) register( id='Tor_20210307-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'tor_20210307_v1.yaml'} ) # feinberg_2002_hmdp ########################################################### register( id='Example_3_1-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_3_1.yaml'} ) register( id='Example_3_3-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_3_3.yaml'} ) register( id='Example_8_1-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_8_1.yaml'} ) # puterman_1994_mdp ############################################################ register( id='Example_10_1_1-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_10_1_1.yaml'} ) register( id='Example_10_1_2-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_10_1_2.yaml'} ) register( id='Example_10_1_2-v1', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_10_1_2_v1.yaml'} ) register( id='Example_10_2_2-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'example_10_2_2.yaml'} ) register( id='Problem_10_7-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'problem_10_7.yaml'} ) register( 
id='Problem_10_9-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'problem_10_9.yaml'} ) register( id='Problem_6_64-v0', entry_point='gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname': 'problem_6_64.yaml'} )
[ "gym.envs.registration.register" ]
[((125, 258), 'gym.envs.registration.register', 'register', ([], {'id': '"""GridNav_2-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'gridnav_2_v0.yaml'}"}), "(id='GridNav_2-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'gridnav_2_v0.yaml'})\n", (133, 258), False, 'from gym.envs.registration import register\n'), ((265, 398), 'gym.envs.registration.register', 'register', ([], {'id': '"""GridNav_2-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'gridnav_2_v1.yaml'}"}), "(id='GridNav_2-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'gridnav_2_v1.yaml'})\n", (273, 398), False, 'from gym.envs.registration import register\n'), ((405, 538), 'gym.envs.registration.register', 'register', ([], {'id': '"""GridNav_3-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'gridnav_3_v0.yaml'}"}), "(id='GridNav_3-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'gridnav_3_v0.yaml'})\n", (413, 538), False, 'from gym.envs.registration import register\n'), ((545, 678), 'gym.envs.registration.register', 'register', ([], {'id': '"""GridNav_3-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'gridnav_3_v1.yaml'}"}), "(id='GridNav_3-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'gridnav_3_v1.yaml'})\n", (553, 678), False, 'from gym.envs.registration import register\n'), ((818, 953), 'gym.envs.registration.register', 'register', ([], {'id': '"""NChain_mod-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'nchain_mod_v0.yaml'}"}), "(id='NChain_mod-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'nchain_mod_v0.yaml'})\n", (826, 953), False, 'from gym.envs.registration import register\n'), ((960, 1095), 'gym.envs.registration.register', 'register', ([], {'id': '"""NChain_mod-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'nchain_mod_v1.yaml'}"}), "(id='NChain_mod-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'nchain_mod_v1.yaml'})\n", (968, 1095), False, 'from gym.envs.registration import register\n'), ((1183, 1321), 'gym.envs.registration.register', 'register', ([], {'id': '"""Tor_20201121a-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'tor_20201121a.yaml'}"}), "(id='Tor_20201121a-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'tor_20201121a.yaml'})\n", (1191, 1321), False, 'from gym.envs.registration import register\n'), ((1328, 1469), 'gym.envs.registration.register', 'register', ([], {'id': '"""Tor_20201121a-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'tor_20201121a_v1.yaml'}"}), "(id='Tor_20201121a-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'tor_20201121a_v1.yaml'})\n", (1336, 1469), False, 'from gym.envs.registration import register\n'), ((1476, 1621), 'gym.envs.registration.register', 'register', ([], {'id': '"""hordijk_example-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'hordijk_example_v0.yaml'}"}), "(id='hordijk_example-v0', entry_point=\n 
'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'hordijk_example_v0.yaml'})\n", (1484, 1621), False, 'from gym.envs.registration import register\n'), ((1628, 1773), 'gym.envs.registration.register', 'register', ([], {'id': '"""Hordijk_example-v3"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'hordijk_example_v3.yaml'}"}), "(id='Hordijk_example-v3', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'hordijk_example_v3.yaml'})\n", (1636, 1773), False, 'from gym.envs.registration import register\n'), ((1780, 1925), 'gym.envs.registration.register', 'register', ([], {'id': '"""Hordijk_example-v4"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'hordijk_example_v4.yaml'}"}), "(id='Hordijk_example-v4', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'hordijk_example_v4.yaml'})\n", (1788, 1925), False, 'from gym.envs.registration import register\n'), ((1932, 2071), 'gym.envs.registration.register', 'register', ([], {'id': '"""Tor_20210306-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'tor_20210306_v0.yaml'}"}), "(id='Tor_20210306-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'tor_20210306_v0.yaml'})\n", (1940, 2071), False, 'from gym.envs.registration import register\n'), ((2078, 2217), 'gym.envs.registration.register', 'register', ([], {'id': '"""Tor_20210306-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'tor_20210306_v1.yaml'}"}), "(id='Tor_20210306-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'tor_20210306_v1.yaml'})\n", (2086, 2217), False, 'from gym.envs.registration import register\n'), ((2224, 2363), 'gym.envs.registration.register', 'register', ([], {'id': '"""Tor_20210307-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'tor_20210307_v0.yaml'}"}), "(id='Tor_20210307-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'tor_20210307_v0.yaml'})\n", (2232, 2363), False, 'from gym.envs.registration import register\n'), ((2370, 2509), 'gym.envs.registration.register', 'register', ([], {'id': '"""Tor_20210307-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'tor_20210307_v1.yaml'}"}), "(id='Tor_20210307-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'tor_20210307_v1.yaml'})\n", (2378, 2509), False, 'from gym.envs.registration import register\n'), ((2597, 2731), 'gym.envs.registration.register', 'register', ([], {'id': '"""Example_3_1-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_3_1.yaml'}"}), "(id='Example_3_1-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_3_1.yaml'})\n", (2605, 2731), False, 'from gym.envs.registration import register\n'), ((2738, 2872), 'gym.envs.registration.register', 'register', ([], {'id': '"""Example_3_3-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_3_3.yaml'}"}), "(id='Example_3_3-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_3_3.yaml'})\n", (2746, 2872), False, 'from gym.envs.registration import register\n'), ((2879, 3013), 
'gym.envs.registration.register', 'register', ([], {'id': '"""Example_8_1-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_8_1.yaml'}"}), "(id='Example_8_1-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_8_1.yaml'})\n", (2887, 3013), False, 'from gym.envs.registration import register\n'), ((3101, 3241), 'gym.envs.registration.register', 'register', ([], {'id': '"""Example_10_1_1-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_10_1_1.yaml'}"}), "(id='Example_10_1_1-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_10_1_1.yaml'})\n", (3109, 3241), False, 'from gym.envs.registration import register\n'), ((3248, 3388), 'gym.envs.registration.register', 'register', ([], {'id': '"""Example_10_1_2-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_10_1_2.yaml'}"}), "(id='Example_10_1_2-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_10_1_2.yaml'})\n", (3256, 3388), False, 'from gym.envs.registration import register\n'), ((3395, 3538), 'gym.envs.registration.register', 'register', ([], {'id': '"""Example_10_1_2-v1"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_10_1_2_v1.yaml'}"}), "(id='Example_10_1_2-v1', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_10_1_2_v1.yaml'})\n", (3403, 3538), False, 'from gym.envs.registration import register\n'), ((3545, 3685), 'gym.envs.registration.register', 'register', ([], {'id': '"""Example_10_2_2-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'example_10_2_2.yaml'}"}), "(id='Example_10_2_2-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'example_10_2_2.yaml'})\n", (3553, 3685), False, 'from gym.envs.registration import register\n'), ((3692, 3828), 'gym.envs.registration.register', 'register', ([], {'id': '"""Problem_10_7-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'problem_10_7.yaml'}"}), "(id='Problem_10_7-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'problem_10_7.yaml'})\n", (3700, 3828), False, 'from gym.envs.registration import register\n'), ((3835, 3971), 'gym.envs.registration.register', 'register', ([], {'id': '"""Problem_10_9-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'problem_10_9.yaml'}"}), "(id='Problem_10_9-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'problem_10_9.yaml'})\n", (3843, 3971), False, 'from gym.envs.registration import register\n'), ((3978, 4114), 'gym.envs.registration.register', 'register', ([], {'id': '"""Problem_6_64-v0"""', 'entry_point': '"""gym_symbol.envs:SymbolicRepresentation"""', 'kwargs': "{'cfg_fname': 'problem_6_64.yaml'}"}), "(id='Problem_6_64-v0', entry_point=\n 'gym_symbol.envs:SymbolicRepresentation', kwargs={'cfg_fname':\n 'problem_6_64.yaml'})\n", (3986, 4114), False, 'from gym.envs.registration import register\n')]
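A quick sanity check for the registrations above — a minimal sketch assuming the gym_symbol package and its YAML config files are installed, that the register() calls run at import time (the conventional layout), and the classic gym step API:

import gym
import gym_symbol  # noqa: F401 -- importing triggers the register() calls above

env = gym.make('GridNav_2-v0')  # any of the ids registered above
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())  # one random step
env.close()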
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Source' db.create_table(u'thumbnails_source', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), )) db.send_create_signal(u'thumbnails', ['Source']) # Adding model 'ThumbnailMeta' db.create_table(u'thumbnails_thumbnailmeta', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='thumbnails', to=orm['thumbnails.Source'])), ('size', self.gf('django.db.models.fields.CharField')(max_length=64)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), )) db.send_create_signal(u'thumbnails', ['ThumbnailMeta']) # Adding unique constraint on 'ThumbnailMeta', fields ['source', 'size'] db.create_unique(u'thumbnails_thumbnailmeta', ['source_id', 'size']) def backwards(self, orm): # Removing unique constraint on 'ThumbnailMeta', fields ['source', 'size'] db.delete_unique(u'thumbnails_thumbnailmeta', ['source_id', 'size']) # Deleting model 'Source' db.delete_table(u'thumbnails_source') # Deleting model 'ThumbnailMeta' db.delete_table(u'thumbnails_thumbnailmeta') models = { u'thumbnails.source': { 'Meta': {'object_name': 'Source'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'thumbnails.thumbnailmeta': { 'Meta': {'unique_together': "(('source', 'size'),)", 'object_name': 'ThumbnailMeta'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'size': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thumbnails'", 'to': u"orm['thumbnails.Source']"}) } } complete_apps = ['thumbnails']
[ "south.db.db.delete_table", "south.db.db.create_unique", "south.db.db.delete_unique", "south.db.db.send_create_signal" ]
[((512, 560), 'south.db.db.send_create_signal', 'db.send_create_signal', (['u"""thumbnails"""', "['Source']"], {}), "(u'thumbnails', ['Source'])\n", (533, 560), False, 'from south.db import db\n'), ((1075, 1130), 'south.db.db.send_create_signal', 'db.send_create_signal', (['u"""thumbnails"""', "['ThumbnailMeta']"], {}), "(u'thumbnails', ['ThumbnailMeta'])\n", (1096, 1130), False, 'from south.db import db\n'), ((1221, 1289), 'south.db.db.create_unique', 'db.create_unique', (['u"""thumbnails_thumbnailmeta"""', "['source_id', 'size']"], {}), "(u'thumbnails_thumbnailmeta', ['source_id', 'size'])\n", (1237, 1289), False, 'from south.db import db\n'), ((1413, 1481), 'south.db.db.delete_unique', 'db.delete_unique', (['u"""thumbnails_thumbnailmeta"""', "['source_id', 'size']"], {}), "(u'thumbnails_thumbnailmeta', ['source_id', 'size'])\n", (1429, 1481), False, 'from south.db import db\n'), ((1525, 1562), 'south.db.db.delete_table', 'db.delete_table', (['u"""thumbnails_source"""'], {}), "(u'thumbnails_source')\n", (1540, 1562), False, 'from south.db import db\n'), ((1613, 1657), 'south.db.db.delete_table', 'db.delete_table', (['u"""thumbnails_thumbnailmeta"""'], {}), "(u'thumbnails_thumbnailmeta')\n", (1628, 1657), False, 'from south.db import db\n')]
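For reference, the frozen model definitions in the migration correspond roughly to the following Django models — a sketch in the South-era style (no on_delete argument, which older Django defaulted to CASCADE):

from django.db import models

class Source(models.Model):
    name = models.CharField(max_length=255, unique=True)

class ThumbnailMeta(models.Model):
    source = models.ForeignKey(Source, related_name='thumbnails')
    size = models.CharField(max_length=64)
    name = models.CharField(max_length=255, unique=True)

    class Meta:
        unique_together = (('source', 'size'),)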
import subprocess subprocess.Popen(['python','hear_auto.py']) subprocess.Popen(['python','hear_auto.py']) subprocess.Popen(['python','hear_auto.py']) #subprocess.Popen(['python','hear_auto.py']) #subprocess.Popen(['python','hear_auto.py'])
[ "subprocess.Popen" ]
[((19, 63), 'subprocess.Popen', 'subprocess.Popen', (["['python', 'hear_auto.py']"], {}), "(['python', 'hear_auto.py'])\n", (35, 63), False, 'import subprocess\n'), ((63, 107), 'subprocess.Popen', 'subprocess.Popen', (["['python', 'hear_auto.py']"], {}), "(['python', 'hear_auto.py'])\n", (79, 107), False, 'import subprocess\n'), ((107, 151), 'subprocess.Popen', 'subprocess.Popen', (["['python', 'hear_auto.py']"], {}), "(['python', 'hear_auto.py'])\n", (123, 151), False, 'import subprocess\n')]
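If the intent is to fan out several copies of hear_auto.py and then wait for them, a small variation keeps handles to the children — a sketch, assuming blocking until all children exit is the desired behaviour:

import subprocess

procs = [subprocess.Popen(['python', 'hear_auto.py']) for _ in range(3)]
for p in procs:
    p.wait()  # block until each child process exits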
import hashlib, json, requests
from time import time
from uuid import uuid4
from flask import Flask, jsonify, request
from urllib.parse import urlparse

# block = {
#     'index': 1,
#     'timestamp': 1506057125.900785,
#     'transactions': [
#         {
#             'sender': "8527147fe1f5426f9dd545de4b27ee00",
#             'recipient': "a<PASSWORD>",
#             'amount': 5,
#         }
#     ],
#     'proof': 324984774000,
#     'previous_hash': "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
# }

class Blockchain(object):
    def __init__(self):
        self.chain = []
        self.current_transactions = []
        self.nodes = set()

        # The genesis block
        self.newBlock(previous_hash=1, proof=100)

    def newBlock(self, proof, previous_hash=None):
        block = {
            'index': len(self.chain) + 1,
            'timestamp': time(),
            'transactions': self.current_transactions,
            'proof': proof,
            'previous_hash': previous_hash or self.hash(self.chain[-1])
        }

        self.current_transactions = []  # Reset the list of pending transactions

        self.chain.append(block)  # Append the block to the blockchain
        return block

    def newTransaction(self, sender, recipient, amount):
        self.current_transactions.append({
            'sender': sender,
            'recipient': recipient,
            'amount': amount
        })  # Add the transaction to the pending list

        return self.lastBlock['index'] + 1  # Return the index of the block that will hold the transaction

    @staticmethod
    def hash(block):
        blockString = json.dumps(block, sort_keys=True).encode()  # The dict is sorted by key to prevent inconsistent hashes
        return hashlib.sha256(blockString).hexdigest()  # Hash it and return the hex string

    def proofOfWork(self, last_proof):
        # Generates the block's PoW
        # Algorithm:
        # - Find a number p' such that the hash of p' together with the previous
        #   block's solution starts with four zeroes.
        proof = 0
        while self.validProof(last_proof, proof) is False:
            proof += 1

        return proof

    @staticmethod
    def validProof(last_proof, proof):
        # Validates the PoW
        guess = f'{last_proof}{proof}'.encode()
        guess_hash = hashlib.sha256(guess).hexdigest()
        return guess_hash[:4] == "0000"

    def registerNode(self, address):
        parsed_url = urlparse(address)
        self.nodes.add(parsed_url.netloc)

    def validChain(self, chain):
        lastBlock = chain[0]
        currentIndex = 1

        while currentIndex < len(chain):
            block = chain[currentIndex]
            print(f'{lastBlock}')
            print(f'{block}')
            print(f'\n-----------\n')

            # Check that the block's hash is correct
            if block['previous_hash'] != self.hash(lastBlock):
                return False

            # Check that the PoW is correct
            if not self.validProof(lastBlock['proof'], block['proof']):
                return False

            lastBlock = block
            currentIndex += 1

        return True

    def resolveConflicts(self):
        # This method implements the Consensus Algorithm, which resolves conflicts
        # between nodes by replacing the current chain with the longest valid one
        # on the network.
        neighbours = self.nodes
        newChain = None

        maxLength = len(self.chain)

        for node in neighbours:
            response = requests.get(f"http://{node}/chain")

            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']

                # Check whether the chain is longer and whether it is valid
                if length > maxLength and self.validChain(chain):
                    maxLength = length
                    newChain = chain

        # Replace our chain if a new valid one was discovered
        if newChain:
            self.chain = newChain
            return True

        return False

    @property
    def lastBlock(self):
        return self.chain[-1]

# Node
app = Flask(__name__)

# Generate a globally unique (random) address for this node
nodeId = str(uuid4()).replace('-', '')

# Instantiate the blockchain
blockchain = Blockchain()

# API endpoints
@app.route('/mine', methods=['GET'])
def mine():
    # To-do
    # - Calculate the PoW
    # - Reward the miner by adding a transaction granting 1 coin
    # - Forge the new block and add it to the blockchain

    # Calculate the PoW
    lastBlock = blockchain.lastBlock
    lastProof = lastBlock['proof']
    proof = blockchain.proofOfWork(lastProof)

    # Reward the miner by adding a transaction granting 1 coin; the sender is "0"
    # to signal that this node mined a new coin
    blockchain.newTransaction(
        sender="0",
        recipient=nodeId,
        amount=1
    )

    # Forge the new block and add it to the blockchain
    previousHash = blockchain.hash(lastBlock)
    block = blockchain.newBlock(proof, previousHash)

    # API endpoint response
    response = {
        'message': "New block created",
        'index': block['index'],
        'transactions': block['transactions'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash']
    }
    return jsonify(response), 200

@app.route('/transactions/new', methods=['POST'])
def newTransaction():
    values = request.get_json()

    # Check that all required fields are present
    required = ['sender', 'recipient', 'amount']
    if not all(k in values for k in required):
        return 'Missing values', 400

    # Create the transaction
    index = blockchain.newTransaction(values['sender'], values['recipient'], values['amount'])

    response = {'message': f'The transaction will be added to block {index}'}  # Endpoint response message
    return jsonify(response), 201

@app.route('/chain', methods=['GET'])
def fullChain():
    response = {
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
    }
    return jsonify(response), 200

@app.route('/nodes/register', methods=['POST'])
def registerNodes():
    values = request.get_json()

    nodes = values.get('nodes')
    if nodes is None:
        return "Error: a valid list of nodes must be supplied", 400

    for node in nodes:
        blockchain.registerNode(node)

    response = {
        'message': 'New nodes have been added',
        'total_nodes': list(blockchain.nodes)
    }
    return jsonify(response), 201

@app.route('/nodes/resolve', methods=['GET'])
def consensus():
    replaced = blockchain.resolveConflicts()

    if replaced:
        response = {
            'message': 'The chain was replaced',
            'new_chain': blockchain.chain
        }
    else:
        response = {
            'message': 'The chain remains the same',
            'chain': blockchain.chain
        }

    return jsonify(response), 200

# Run the node
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
[ "uuid.uuid4", "flask.Flask", "json.dumps", "time.time", "hashlib.sha256", "flask.jsonify", "requests.get", "flask.request.get_json", "urllib.parse.urlparse" ]
[((4167, 4182), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (4172, 4182), False, 'from flask import Flask, jsonify, request\n'), ((5555, 5573), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (5571, 5573), False, 'from flask import Flask, jsonify, request\n'), ((6304, 6322), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6320, 6322), False, 'from flask import Flask, jsonify, request\n'), ((2459, 2476), 'urllib.parse.urlparse', 'urlparse', (['address'], {}), '(address)\n', (2467, 2476), False, 'from urllib.parse import urlparse\n'), ((5446, 5463), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (5453, 5463), False, 'from flask import Flask, jsonify, request\n'), ((6009, 6026), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (6016, 6026), False, 'from flask import Flask, jsonify, request\n'), ((6198, 6215), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (6205, 6215), False, 'from flask import Flask, jsonify, request\n'), ((6661, 6678), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (6668, 6678), False, 'from flask import Flask, jsonify, request\n'), ((7075, 7092), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (7082, 7092), False, 'from flask import Flask, jsonify, request\n'), ((913, 919), 'time.time', 'time', ([], {}), '()\n', (917, 919), False, 'from time import time\n'), ((3512, 3548), 'requests.get', 'requests.get', (['f"""http://{node}/chain"""'], {}), "(f'http://{node}/chain')\n", (3524, 3548), False, 'import hashlib, json, requests\n'), ((4249, 4256), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4254, 4256), False, 'from uuid import uuid4\n'), ((1633, 1666), 'json.dumps', 'json.dumps', (['block'], {'sort_keys': '(True)'}), '(block, sort_keys=True)\n', (1643, 1666), False, 'import hashlib, json, requests\n'), ((1766, 1793), 'hashlib.sha256', 'hashlib.sha256', (['blockString'], {}), '(blockString)\n', (1780, 1793), False, 'import hashlib, json, requests\n'), ((2326, 2347), 'hashlib.sha256', 'hashlib.sha256', (['guess'], {}), '(guess)\n', (2340, 2347), False, 'import hashlib, json, requests\n')]
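A minimal client for the node above, exercising its endpoints over HTTP — the app listens on 0.0.0.0:5000 per the code, while the transaction field values here are hypothetical:

import requests

base = 'http://localhost:5000'
print(requests.get(f'{base}/mine').json())
tx = {'sender': 'abc123', 'recipient': 'def456', 'amount': 5}
print(requests.post(f'{base}/transactions/new', json=tx).json())
print(requests.get(f'{base}/chain').json())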
""" SHERPA is a Python library for hyperparameter tuning of machine learning models. Copyright (C) 2018 <NAME>, <NAME>, and <NAME>. This file is part of SHERPA. SHERPA is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. SHERPA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with SHERPA. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import absolute_import from .core import * from . import database from .database import Client from . import algorithms import logging __version__ = '1.0.6' logging.basicConfig(level=logging.INFO)
[ "logging.basicConfig" ]
[((937, 976), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (956, 976), False, 'import logging\n')]
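Because the package calls logging.basicConfig(level=logging.INFO) at import time, downstream code that wants quieter output can raise the root logger's threshold afterwards — a sketch, assuming SHERPA and its dependencies are importable:

import logging
import sherpa  # noqa: F401 -- configures the root logger on import

logging.getLogger().setLevel(logging.WARNING)  # silence the INFO chatter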
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Union

import onnx
import tensorrt as trt
import torch

from .preprocess import preprocess_onnx


def onnx2trt(onnx_model: Union[str, onnx.ModelProto],
             opt_shape_dict: dict,
             log_level: trt.ILogger.Severity = trt.Logger.ERROR,
             fp16_mode: bool = False,
             max_workspace_size: int = 0,
             device_id: int = 0) -> trt.ICudaEngine:
    """Convert onnx model to tensorrt engine.

    Arguments:
        onnx_model (str or onnx.ModelProto): the onnx model to convert from
        opt_shape_dict (dict): the min/opt/max shape of each input
        log_level (TensorRT log level): the log level of TensorRT
        fp16_mode (bool): enable fp16 mode
        max_workspace_size (int): set max workspace size of TensorRT engine.
            some tactic and layers need large workspace.
        device_id (int): choice the device to create engine.

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine created from onnx_model

    Example:
        >>> engine = onnx2trt(
        >>>             "onnx_model.onnx",
        >>>             {'input': [[1, 3, 160, 160],
        >>>                        [1, 3, 320, 320],
        >>>                        [1, 3, 640, 640]]},
        >>>             log_level=trt.Logger.WARNING,
        >>>             fp16_mode=True,
        >>>             max_workspace_size=1 << 30,
        >>>             device_id=0)
        >>>             })
    """
    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    device = torch.device(f'cuda:{device_id}')
    # create builder and network
    logger = trt.Logger(log_level)
    builder = trt.Builder(logger)
    EXPLICIT_BATCH = 1 << (int)(
        trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(EXPLICIT_BATCH)

    # parse onnx
    parser = trt.OnnxParser(network, logger)

    if isinstance(onnx_model, str):
        onnx_model = onnx.load(onnx_model)

    onnx_model = preprocess_onnx(onnx_model)

    if not parser.parse(onnx_model.SerializeToString()):
        error_msgs = ''
        for error in range(parser.num_errors):
            error_msgs += f'{parser.get_error(error)}\n'
        raise RuntimeError(f'parse onnx failed:\n{error_msgs}')

    # config builder
    builder.max_workspace_size = max_workspace_size

    config = builder.create_builder_config()
    config.max_workspace_size = max_workspace_size
    profile = builder.create_optimization_profile()

    for input_name, param in opt_shape_dict.items():
        min_shape = tuple(param[0][:])
        opt_shape = tuple(param[1][:])
        max_shape = tuple(param[2][:])
        profile.set_shape(input_name, min_shape, opt_shape, max_shape)
    config.add_optimization_profile(profile)

    if fp16_mode:
        builder.fp16_mode = fp16_mode
        config.set_flag(trt.BuilderFlag.FP16)

    # create engine
    with torch.cuda.device(device):
        engine = builder.build_engine(network, config)

    return engine


def save_trt_engine(engine: trt.ICudaEngine, path: str) -> None:
    """Serialize TensorRT engine to disk.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to serialize
        path (str): disk path to write the engine
    """
    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    with open(path, mode='wb') as f:
        f.write(bytearray(engine.serialize()))


def load_trt_engine(path: str) -> trt.ICudaEngine:
    """Deserialize TensorRT engine from disk.

    Arguments:
        path (str): disk path to read the engine

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine loaded from disk
    """
    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    with trt.Logger() as logger, trt.Runtime(logger) as runtime:
        with open(path, mode='rb') as f:
            engine_bytes = f.read()
        engine = runtime.deserialize_cuda_engine(engine_bytes)
        return engine


def torch_dtype_from_trt(dtype: trt.DataType) -> Union[torch.dtype, TypeError]:
    """Convert pytorch dtype to TensorRT dtype."""
    if dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int8:
        return torch.int8
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError('%s is not supported by torch' % dtype)


def torch_device_from_trt(
        device: trt.TensorLocation) -> Union[torch.device, TypeError]:
    """Convert pytorch device to TensorRT device."""
    if device == trt.TensorLocation.DEVICE:
        return torch.device('cuda')
    elif device == trt.TensorLocation.HOST:
        return torch.device('cpu')
    else:
        return TypeError('%s is not supported by torch' % device)


class TRTWrapper(torch.nn.Module):
    """TensorRT engine Wrapper.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to wrap
        input_names (list[str]): names of each inputs
        output_names (list[str]): names of each outputs

    Note:
        If the engine is converted from onnx model. The input_names and
        output_names should be the same as onnx model.
    """

    def __init__(self, engine, input_names=None, output_names=None):
        # Following strings of text style are from colorama package
        bright_style, reset_style = '\x1b[1m', '\x1b[0m'
        red_text, blue_text = '\x1b[31m', '\x1b[34m'
        white_background = '\x1b[107m'

        msg = white_background + bright_style + red_text
        msg += 'DeprecationWarning: This tool will be deprecated in future. '
        msg += blue_text + \
            'Welcome to use the unified model deployment toolbox '
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        super().__init__()
        self.engine = engine
        if isinstance(self.engine, str):
            self.engine = load_trt_engine(engine)

        if not isinstance(self.engine, trt.ICudaEngine):
            raise TypeError('engine should be str or trt.ICudaEngine')

        self._register_state_dict_hook(TRTWrapper._on_state_dict)
        self.context = self.engine.create_execution_context()

        # get input and output names from engine
        if input_names is None or output_names is None:
            names = [_ for _ in self.engine]
            input_names = list(filter(self.engine.binding_is_input, names))
            output_names = list(set(names) - set(input_names))
        self.input_names = input_names
        self.output_names = output_names

    def _on_state_dict(self, state_dict, prefix, local_metadata):
        state_dict[prefix + 'engine'] = bytearray(self.engine.serialize())
        state_dict[prefix + 'input_names'] = self.input_names
        state_dict[prefix + 'output_names'] = self.output_names

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        engine_bytes = state_dict[prefix + 'engine']

        with trt.Logger() as logger, trt.Runtime(logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(engine_bytes)
            self.context = self.engine.create_execution_context()

        self.input_names = state_dict[prefix + 'input_names']
        self.output_names = state_dict[prefix + 'output_names']

    def forward(self, inputs):
        """
        Arguments:
            inputs (dict): dict of input name-tensors pair

        Return:
            dict: dict of output name-tensors pair
        """
        assert self.input_names is not None
        assert self.output_names is not None
        bindings = [None] * (len(self.input_names) + len(self.output_names))

        for input_name, input_tensor in inputs.items():
            idx = self.engine.get_binding_index(input_name)

            if input_tensor.dtype == torch.long:
                input_tensor = input_tensor.int()
            self.context.set_binding_shape(idx, tuple(input_tensor.shape))
            bindings[idx] = input_tensor.contiguous().data_ptr()

        # create output tensors
        outputs = {}
        for i, output_name in enumerate(self.output_names):
            idx = self.engine.get_binding_index(output_name)
            dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
            shape = tuple(self.context.get_binding_shape(idx))

            device = torch_device_from_trt(self.engine.get_location(idx))
            output = torch.empty(size=shape, dtype=dtype, device=device)
            outputs[output_name] = output
            bindings[idx] = output.data_ptr()
        self.context.execute_async_v2(bindings,
                                      torch.cuda.current_stream().cuda_stream)

        return outputs


class TRTWraper(TRTWrapper):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'TRTWraper will be deprecated in'
            ' future. Please use TRTWrapper instead',
            DeprecationWarning)
[ "tensorrt.Logger", "tensorrt.OnnxParser", "torch.empty", "tensorrt.Builder", "tensorrt.Runtime", "torch.device", "torch.cuda.current_stream", "warnings.warn", "torch.cuda.device", "onnx.load" ]
[((2033, 2051), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (2046, 2051), False, 'import warnings\n'), ((2066, 2099), 'torch.device', 'torch.device', (['f"""cuda:{device_id}"""'], {}), "(f'cuda:{device_id}')\n", (2078, 2099), False, 'import torch\n'), ((2146, 2167), 'tensorrt.Logger', 'trt.Logger', (['log_level'], {}), '(log_level)\n', (2156, 2167), True, 'import tensorrt as trt\n'), ((2182, 2201), 'tensorrt.Builder', 'trt.Builder', (['logger'], {}), '(logger)\n', (2193, 2201), True, 'import tensorrt as trt\n'), ((2377, 2408), 'tensorrt.OnnxParser', 'trt.OnnxParser', (['network', 'logger'], {}), '(network, logger)\n', (2391, 2408), True, 'import tensorrt as trt\n'), ((4281, 4299), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (4294, 4299), False, 'import warnings\n'), ((5139, 5157), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (5152, 5157), False, 'import warnings\n'), ((2467, 2488), 'onnx.load', 'onnx.load', (['onnx_model'], {}), '(onnx_model)\n', (2476, 2488), False, 'import onnx\n'), ((3428, 3453), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (3445, 3453), False, 'import torch\n'), ((5168, 5180), 'tensorrt.Logger', 'trt.Logger', ([], {}), '()\n', (5178, 5180), True, 'import tensorrt as trt\n'), ((5192, 5211), 'tensorrt.Runtime', 'trt.Runtime', (['logger'], {}), '(logger)\n', (5203, 5211), True, 'import tensorrt as trt\n'), ((6087, 6107), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6099, 6107), False, 'import torch\n'), ((7288, 7306), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (7301, 7306), False, 'import warnings\n'), ((10445, 10560), 'warnings.warn', 'warnings.warn', (['"""TRTWraper will be deprecated in future. Please use TRTWrapper instead"""', 'DeprecationWarning'], {}), "(\n 'TRTWraper will be deprecated in future. Please use TRTWrapper instead',\n DeprecationWarning)\n", (10458, 10560), False, 'import warnings\n'), ((6167, 6186), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6179, 6186), False, 'import torch\n'), ((8573, 8585), 'tensorrt.Logger', 'trt.Logger', ([], {}), '()\n', (8583, 8585), True, 'import tensorrt as trt\n'), ((8597, 8616), 'tensorrt.Runtime', 'trt.Runtime', (['logger'], {}), '(logger)\n', (8608, 8616), True, 'import tensorrt as trt\n'), ((10030, 10081), 'torch.empty', 'torch.empty', ([], {'size': 'shape', 'dtype': 'dtype', 'device': 'device'}), '(size=shape, dtype=dtype, device=device)\n', (10041, 10081), False, 'import torch\n'), ((10257, 10284), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (10282, 10284), False, 'import torch\n')]
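End-to-end, the three pieces above compose as follows — a sketch with hypothetical file and binding names ('model.onnx', 'model.trt', 'input', 'output' must match the actual ONNX graph), requiring a CUDA device:

import torch

engine = onnx2trt(
    'model.onnx',
    {'input': [[1, 3, 320, 320], [1, 3, 320, 320], [1, 3, 320, 320]]},
    fp16_mode=True,
    max_workspace_size=1 << 30)
save_trt_engine(engine, 'model.trt')

wrapper = TRTWrapper('model.trt', input_names=['input'], output_names=['output'])
with torch.no_grad():
    out = wrapper({'input': torch.randn(1, 3, 320, 320).cuda()})['output']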
from django.db import models
from django.contrib.auth.models import User


class Recipe(models.Model):
    recipe_name = models.CharField(max_length=50, null=False, unique=True, blank=False, primary_key=True)
    # ForeignKey takes the model class itself, not an instance
    author = models.ForeignKey(User, on_delete=models.CASCADE, blank=False)

    def __str__(self):
        return self.recipe_name


class Step(models.Model):
    step = models.TextField(max_length=300)
    recipe = models.ForeignKey(Recipe, related_name='steps', on_delete=models.CASCADE)

    def __str__(self):
        return self.step


class Ingredient(models.Model):
    ingredient = models.CharField(max_length=20)
    recipe = models.ForeignKey(Recipe, related_name='ingredients', on_delete=models.CASCADE)

    def __str__(self):
        return self.ingredient
[ "django.db.models.CharField", "django.db.models.TextField", "django.contrib.auth.models.User", "django.db.models.ForeignKey" ]
[((119, 210), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(False)', 'unique': '(True)', 'blank': '(False)', 'primary_key': '(True)'}), '(max_length=50, null=False, unique=True, blank=False,\n primary_key=True)\n', (135, 210), False, 'from django.db import models\n'), ((379, 411), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (395, 411), False, 'from django.db import models\n'), ((425, 498), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Recipe'], {'related_name': '"""steps"""', 'on_delete': 'models.CASCADE'}), "(Recipe, related_name='steps', on_delete=models.CASCADE)\n", (442, 498), False, 'from django.db import models\n'), ((598, 629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (614, 629), False, 'from django.db import models\n'), ((643, 722), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Recipe'], {'related_name': '"""ingredients"""', 'on_delete': 'models.CASCADE'}), "(Recipe, related_name='ingredients', on_delete=models.CASCADE)\n", (660, 722), False, 'from django.db import models\n'), ((238, 244), 'django.contrib.auth.models.User', 'User', ([], {}), '()\n', (242, 244), False, 'from django.contrib.auth.models import User\n')]
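Typical use of the related managers exposed by related_name — a sketch with hypothetical data, run inside a configured Django project:

from django.contrib.auth.models import User

author = User.objects.create_user('alice')  # hypothetical username
recipe = Recipe.objects.create(recipe_name='Pancakes', author=author)
recipe.steps.create(step='Mix the batter and fry.')
recipe.ingredients.create(ingredient='Flour')
print(recipe.steps.all(), recipe.ingredients.all())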
"""Copied and inspired from `unittest._log`, added in Python 3.4+""" import collections import contextlib from io import StringIO import logging LoggingWatcher = collections.namedtuple( "LoggingWatcher", ["records", "output"] ) class CapturingHandler(logging.Handler): def __init__(self): super(CapturingHandler, self).__init__() self.watcher = LoggingWatcher([], []) def flush(self): pass def emit(self, record): self.watcher.records.append(record) msg = self.format(record) self.watcher.output.append(msg) LOGGING_FORMAT = "%(levelname)s | %(message)s" @contextlib.contextmanager def capture_logs(logger, level=logging.DEBUG, fmt=LOGGING_FORMAT): formatter = logging.Formatter(fmt=fmt) handler = CapturingHandler() handler.setFormatter(formatter) old_handlers = logger.handlers[:] old_level = logger.level old_propagate = logger.propagate logger.handlers = [handler] logger.setLevel(level) logger.propagate = False try: yield handler.watcher finally: logger.handlers = old_handlers logger.setLevel(old_level) logger.propagate = old_propagate def check_captured_logs(watcher, expected_records): if not expected_records: assert watcher.records == [], \ "Expected no logs, got:\n{}".format( '\n'.join(msg for msg in watcher.output) ) else: assert len(watcher.records) == len(expected_records), \ "Expected {} log lines, got {}. Received:\n{}".format( len(expected_records), len(watcher.records), '\n'.join(msg for msg in watcher.output) ) for expected, actual in zip(expected_records, watcher.records): assert expected['level'] == actual.levelname, \ "Invalid log level, got '{}', expected '{}'\n{}".format( actual.levelname, expected['level'], actual.message ) assert expected['contains'] in actual.message, \ "Log message '{}' does not contain '{}'".format( actual.message, expected['contains'] )
[ "logging.Formatter", "collections.namedtuple" ]
[((164, 227), 'collections.namedtuple', 'collections.namedtuple', (['"""LoggingWatcher"""', "['records', 'output']"], {}), "('LoggingWatcher', ['records', 'output'])\n", (186, 227), False, 'import collections\n'), ((739, 765), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': 'fmt'}), '(fmt=fmt)\n', (756, 765), False, 'import logging\n')]
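Putting the two helpers together in a test — a sketch, assuming the default LOGGING_FORMAT:

import logging

logger = logging.getLogger('demo')  # hypothetical logger name

with capture_logs(logger) as watcher:
    logger.info('processing started')

check_captured_logs(watcher, [{'level': 'INFO', 'contains': 'processing'}])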
import collections
from random import random
from time import time

start = time()

# defaultdict(set) gives every new key an empty set; note that plain
# assignment, as below, simply overwrites that default with the value.
mydic = collections.defaultdict(set)
mydic['one'] = 1
mydic['one'] = 2  # overwrites the previous value
# set-style usage would instead be: mydic['one'].add(('fname1', 3))

print(list(mydic))
print(len(mydic))
print(mydic['one'])

# plain dict used as a hit counter: key -> coverage count
cdic = dict()
cdic['1'] = 0
cdic['2'] = 0
cdic['1'] = cdic['1'] + 1
cdic['1'] = cdic['1'] + 1
cdic['1'] = cdic['1'] + 1
cdic['2'] = cdic['2'] + 1

print("=================")
print(cdic['1'])
print(cdic.keys())
print(cdic.values())

end = time()
t = end - start
print(t)
print(random())

numbers = ['one', 'two', 'three', 'four', 'five']
for n in numbers:
    print(n)

print("========================")


def unique_keys(arr):
    # Collect the set of keys that appear in any dict of the list
    res = list(set(val for dic in arr for val in dic.keys()))
    print(str(res))
    print(len(res))


arr = [{'my': 1}, {'my': 2}, {'is': 1}, {'ria': 2}]
print(arr)
unique_keys(arr)
[ "collections.defaultdict", "random.random", "time.time" ]
[((94, 100), 'time.time', 'time', ([], {}), '()\n', (98, 100), False, 'from time import sleep, time\n'), ((110, 138), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (133, 138), False, 'import collections\n'), ((1003, 1009), 'time.time', 'time', ([], {}), '()\n', (1007, 1009), False, 'from time import sleep, time\n'), ((1054, 1062), 'random.random', 'random', ([], {}), '()\n', (1060, 1062), False, 'from random import random\n')]
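For contrast with the scratch code above, defaultdict(set) pays off when values are accumulated rather than assigned — a short sketch using the set-style calls the snippet left commented out:

import collections

index = collections.defaultdict(set)
index['one'].add(('fname1', 3))
index['one'].add(('fname1', 2))  # both tuples are kept in the set
index['two'].add(('fname2', 1))
print(index['one'], len(index))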
from nltk.lm import NgramCounter, Vocabulary from nltk.lm.preprocessing import padded_everygram_pipeline import pickle model_dir = '../../data/ngrams/' with open(f'{model_dir}tokenized_text.pickle', 'rb') as file: tokenized_text = pickle.load(file) training_ngrams, padded_sents = padded_everygram_pipeline(3, tokenized_text) counter = NgramCounter(training_ngrams) vocabulary = Vocabulary(padded_sents, unk_cutoff=10) with open(f'{model_dir}counter.pickle', 'wb') as file: pickle.dump(counter, file) with open(f'{model_dir}vocabulary.pickle', 'wb') as file: pickle.dump(vocabulary, file)
[ "pickle.dump", "nltk.lm.preprocessing.padded_everygram_pipeline", "pickle.load", "nltk.lm.Vocabulary", "nltk.lm.NgramCounter" ]
[((292, 336), 'nltk.lm.preprocessing.padded_everygram_pipeline', 'padded_everygram_pipeline', (['(3)', 'tokenized_text'], {}), '(3, tokenized_text)\n', (317, 336), False, 'from nltk.lm.preprocessing import padded_everygram_pipeline\n'), ((347, 376), 'nltk.lm.NgramCounter', 'NgramCounter', (['training_ngrams'], {}), '(training_ngrams)\n', (359, 376), False, 'from nltk.lm import NgramCounter, Vocabulary\n'), ((390, 429), 'nltk.lm.Vocabulary', 'Vocabulary', (['padded_sents'], {'unk_cutoff': '(10)'}), '(padded_sents, unk_cutoff=10)\n', (400, 429), False, 'from nltk.lm import NgramCounter, Vocabulary\n'), ((237, 254), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (248, 254), False, 'import pickle\n'), ((490, 516), 'pickle.dump', 'pickle.dump', (['counter', 'file'], {}), '(counter, file)\n', (501, 516), False, 'import pickle\n'), ((580, 609), 'pickle.dump', 'pickle.dump', (['vocabulary', 'file'], {}), '(vocabulary, file)\n', (591, 609), False, 'import pickle\n')]
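Once the pickles exist, they can be reloaded and queried — a sketch, assuming the paths above; counter[1] is the unigram FreqDist, and lookup() maps words below the cutoff to '<UNK>':

import pickle

model_dir = '../../data/ngrams/'
with open(f'{model_dir}counter.pickle', 'rb') as f:
    counter = pickle.load(f)
with open(f'{model_dir}vocabulary.pickle', 'rb') as f:
    vocabulary = pickle.load(f)

print(counter[1].most_common(5))  # five most frequent unigrams
print(vocabulary.lookup('the'))    # '<UNK>' if 'the' occurs fewer than 10 times
print(len(vocabulary))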
import unittest
from test.helpers.httpretty_extension import httpretty
import six
import datetime
import pandas
from quandl.model.dataset import Dataset
from quandl.model.data import Data
from quandl.model.merged_data_list import MergedDataList
from quandl.model.merged_dataset import MergedDataset
from mock import patch, call
from quandl.errors.quandl_error import ColumnNotFound
from test.helpers.merged_datasets_helper import setupDatasetsTest


class GetMergedDatasetTest(unittest.TestCase):

    @classmethod
    def setUp(self):
        setupDatasetsTest(self, httpretty)

    @classmethod
    def tearDownClass(cls):
        httpretty.disable()
        httpretty.reset()

    @patch('quandl.model.merged_dataset.MergedDataset._build_dataset_object')
    def test_merged_dataset_calls_merged_dataset_get_dataset(self, mock):
        mock.return_value = self.oil_obj
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        md.data_fields()
        expected_calls = [
            call(('NSE/OIL', {'column_index': [1, 2]})),
            call(('WIKI/AAPL', {'column_index': [1]})),
            call('WIKI/MSFT')
        ]
        self.assertEqual(mock.call_count, 3)
        for index, expected in enumerate(expected_calls):
            self.assertEqual(mock.mock_calls[index], expected)

    @patch('quandl.model.merged_dataset.MergedDataset._build_dataset_object')
    def test_removes_column_index_query_param(self, mock):
        self.oil_obj.requested_column_indexes = []
        mock.return_value = self.oil_obj
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]})],
            params={'column_index': 1})
        md.data_fields()
        expected = call(('NSE/OIL', {'column_index': [1, 2]}), params={})
        self.assertEqual(mock.call_args, expected)

    def test_sets_dataset_codes_for_the_datasets(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        self.assertEqual(md._datasets, None)
        six.assertCountEqual(self, [1, 2], md.dataset_codes[0][1]['column_index'])
        six.assertCountEqual(self, [1], md.dataset_codes[1][1]['column_index'])
        self.assertEqual('I', md.dataset_codes[2][1])

    def test_sets_column_index_on_each_dataset(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        md.data_fields()
        six.assertCountEqual(self, [1, 2], md._datasets[0].requested_column_indexes)
        six.assertCountEqual(self, [1], md._datasets[1].requested_column_indexes)
        six.assertCountEqual(self, [], md._datasets[2].requested_column_indexes)

    def test_merged_dataset_column_names(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        expected = [six.u('Date'), six.u('NSE/OIL - column.1'),
                    six.u('NSE/OIL - column.2'), six.u('WIKI/AAPL - column.1'),
                    six.u('WIKI/MSFT - column.1'), six.u('WIKI/MSFT - column.2'),
                    six.u('WIKI/MSFT - column.3')]
        six.assertCountEqual(self, md.column_names, expected)

    def test_merged_dataset_oldest_available_date(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        self.assertEqual(md.oldest_available_date, datetime.date(2013, 1, 1))

    def test_merged_dataset_newest_available_date(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        self.assertEqual(md.newest_available_date, datetime.date(2015, 7, 30))

    def test_merged_dataset_database_codes(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        six.assertCountEqual(self, md.database_code, ['NSE', 'WIKI'])

    def test_merged_dataset_dataset_codes(self):
        md = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')])
        six.assertCountEqual(self, md.dataset_code, ['OIL', 'AAPL', 'MSFT'])

    def test_get_returns_merged_dataset_obj(self):
        md = MergedDataset(['NSE/OIL'])
        self.assertIsInstance(md, MergedDataset)

    def test_raise_error_when_datasets_arg_not_list(self):
        self.assertRaises(ValueError, lambda: MergedDataset('NSE/OIL').data_fields())

    def test_raise_error_when_datasets_arg_list_has_invalid_type(self):
        self.assertRaises(
            ValueError, lambda: MergedDataset(['NSE/OIL', {'blah': [1]}]).data_fields())

    def test_raise_error_when_column_index_specified_and_not_list(self):
        self.assertRaises(ValueError, lambda: MergedDataset(
            [('NSE/OIL', {'column_index': 'foo'})]).data_fields())

    def test_raise_error_when_column_index_greater_than_max(self):
        self.assertRaises(
            ColumnNotFound, lambda: MergedDataset(
                [('NSE/OIL', {'column_index': [1, 10]})]).data())

    def test_raise_error_when_column_index_less_than_one(self):
        self.assertRaises(
            ColumnNotFound, lambda: MergedDataset(
                [('NSE/OIL', {'column_index': [0, 1]})]).data())

    @patch.object(Dataset, 'data')
    def test_when_only_one_column_requested_adds_column_index_query_param(
            self, mock_method):
        mock_method.return_value = self.data_list_obj
        MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')]).data(params={'start_date': '2015-07-01'})
        expected_calls = [
            call(params={'start_date': '2015-07-01'}),
            call(params={'column_index': 1, 'start_date': '2015-07-01'}),
            call(params={'start_date': '2015-07-01'})]
        self.assertEqual(mock_method.mock_calls[0], expected_calls[0])
        self.assertEqual(mock_method.mock_calls[1], expected_calls[1])
        self.assertEqual(mock_method.mock_calls[2], expected_calls[2])

    @patch.object(Dataset, 'data')
    def test_data_forwards_requests_to_datset_data(self, mock_method):
        mock_method.return_value = self.data_list_obj
        MergedDataset(
            ['NSE/OIL', 'WIKI/AAPL',
             'WIKI/MSFT']).data(params={'start_date': '2015-07-01'})
        self.assertEqual(mock_method.call_count, 3)
        for actual in mock_method.mock_calls:
            self.assertEqual(actual, call(params={'start_date': '2015-07-01'}))

    def test_get_merged_dataset_data_returns_correct_types(self):
        data = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')]).data()
        self.assertIsInstance(data, MergedDataList)
        self.assertIsInstance(data[0], Data)

    def test_get_merged_dataset_creates_merged_pandas_dataframe(self):
        data = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('WIKI/AAPL', {'column_index': [1]}),
             ('WIKI/MSFT')]).data()
        self.assertIsInstance(data.to_pandas(), pandas.core.frame.DataFrame)

    def test_get_merged_dataset_data_returns_specified_columns(self):
        data = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('SINGLE/COLUMN', {'column_index': [1]}),
             ('WIKI/MSFT')]).data()
        actual = data.to_pandas().columns.tolist()
        expected = [six.u('NSE/OIL - column.1'), six.u('NSE/OIL - column.2'),
                    six.u('SINGLE/COLUMN - column.1'),
                    six.u('WIKI/MSFT - column.1'), six.u('WIKI/MSFT - column.2'),
                    six.u('WIKI/MSFT - column.3')]
        six.assertCountEqual(self, actual, expected)

    def test_get_merged_dataset_data_to_list(self):
        data = MergedDataset(
            [('NSE/OIL', {'column_index': [1, 2]}),
             ('SINGLE/COLUMN', {'column_index': [1]}),
             'WIKI/MSFT']).data()
        results = data.to_list()
        # NSE/OIL two columns of data
        # SINGLE/COLUMN one column of data
        # WIKI/MSFT all 3 columns of data
        expected = [[datetime.datetime(2015, 7, 11, 0, 0), 444.3, 10, 444.3, 444.3, 10, 3],
                    [datetime.datetime(2015, 7, 13, 0, 0), 433.3, 4, 433.3, 433.3, 4, 3],
                    [datetime.datetime(2015, 7, 14, 0, 0), 437.5, 3, 437.5, 437.5, 3, 3],
                    [datetime.datetime(2015, 7, 15, 0, 0), 440.0, 2, 440.0, 440.0, 2, 3]]
        for index, expected_item in enumerate(expected):
            six.assertCountEqual(self, expected_item, results[index])

    def test_get_merged_dataset_data_is_descending_when_specified_in_params(self):
        data = MergedDataset(['NSE/OIL', 'WIKI/AAPL',
                              'WIKI/MSFT']).data(params={'order': 'desc'})
        results = data.to_list()
        dates = list([x[0] for x in results])
        self.assertTrue(all(dates[i] >= dates[i + 1]
                            for i in range(len(dates) - 1)))
[ "mock.patch.object", "six.assertCountEqual", "test.helpers.httpretty_extension.httpretty.disable", "six.u", "test.helpers.merged_datasets_helper.setupDatasetsTest", "mock.call", "datetime.date", "mock.patch", "datetime.datetime", "test.helpers.httpretty_extension.httpretty.reset", "quandl.model.merged_dataset.MergedDataset" ]
[((685, 757), 'mock.patch', 'patch', (['"""quandl.model.merged_dataset.MergedDataset._build_dataset_object"""'], {}), "('quandl.model.merged_dataset.MergedDataset._build_dataset_object')\n", (690, 757), False, 'from mock import patch, call\n'), ((1411, 1483), 'mock.patch', 'patch', (['"""quandl.model.merged_dataset.MergedDataset._build_dataset_object"""'], {}), "('quandl.model.merged_dataset.MergedDataset._build_dataset_object')\n", (1416, 1483), False, 'from mock import patch, call\n'), ((5709, 5738), 'mock.patch.object', 'patch.object', (['Dataset', '"""data"""'], {}), "(Dataset, 'data')\n", (5721, 5738), False, 'from mock import patch, call\n'), ((6529, 6558), 'mock.patch.object', 'patch.object', (['Dataset', '"""data"""'], {}), "(Dataset, 'data')\n", (6541, 6558), False, 'from mock import patch, call\n'), ((544, 578), 'test.helpers.merged_datasets_helper.setupDatasetsTest', 'setupDatasetsTest', (['self', 'httpretty'], {}), '(self, httpretty)\n', (561, 578), False, 'from test.helpers.merged_datasets_helper import setupDatasetsTest\n'), ((633, 652), 'test.helpers.httpretty_extension.httpretty.disable', 'httpretty.disable', ([], {}), '()\n', (650, 652), False, 'from test.helpers.httpretty_extension import httpretty\n'), ((661, 678), 'test.helpers.httpretty_extension.httpretty.reset', 'httpretty.reset', ([], {}), '()\n', (676, 678), False, 'from test.helpers.httpretty_extension import httpretty\n'), ((886, 996), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (899, 996), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((1648, 1735), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]})]"], {'params': "{'column_index': 1}"}), "([('NSE/OIL', {'column_index': [1, 2]})], params={\n 'column_index': 1})\n", (1661, 1735), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((1788, 1842), 'mock.call', 'call', (["('NSE/OIL', {'column_index': [1, 2]})"], {'params': '{}'}), "(('NSE/OIL', {'column_index': [1, 2]}), params={})\n", (1792, 1842), False, 'from mock import patch, call\n'), ((1964, 2074), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (1977, 2074), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((2164, 2238), 'six.assertCountEqual', 'six.assertCountEqual', (['self', '[1, 2]', "md.dataset_codes[0][1]['column_index']"], {}), "(self, [1, 2], md.dataset_codes[0][1]['column_index'])\n", (2184, 2238), False, 'import six\n'), ((2247, 2318), 'six.assertCountEqual', 'six.assertCountEqual', (['self', '[1]', "md.dataset_codes[1][1]['column_index']"], {}), "(self, [1], md.dataset_codes[1][1]['column_index'])\n", (2267, 2318), False, 'import six\n'), ((2441, 2551), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (2454, 2551), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((2621, 2697), 'six.assertCountEqual', 'six.assertCountEqual', 
(['self', '[1, 2]', 'md._datasets[0].requested_column_indexes'], {}), '(self, [1, 2], md._datasets[0].requested_column_indexes)\n', (2641, 2697), False, 'import six\n'), ((2706, 2779), 'six.assertCountEqual', 'six.assertCountEqual', (['self', '[1]', 'md._datasets[1].requested_column_indexes'], {}), '(self, [1], md._datasets[1].requested_column_indexes)\n', (2726, 2779), False, 'import six\n'), ((2788, 2860), 'six.assertCountEqual', 'six.assertCountEqual', (['self', '[]', 'md._datasets[2].requested_column_indexes'], {}), '(self, [], md._datasets[2].requested_column_indexes)\n', (2808, 2860), False, 'import six\n'), ((2923, 3033), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (2936, 3033), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((3395, 3448), 'six.assertCountEqual', 'six.assertCountEqual', (['self', 'md.column_names', 'expected'], {}), '(self, md.column_names, expected)\n', (3415, 3448), False, 'import six\n'), ((3520, 3630), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (3533, 3630), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((3816, 3926), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (3829, 3926), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((4106, 4216), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (4119, 4216), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((4261, 4322), 'six.assertCountEqual', 'six.assertCountEqual', (['self', 'md.database_code', "['NSE', 'WIKI']"], {}), "(self, md.database_code, ['NSE', 'WIKI'])\n", (4281, 4322), False, 'import six\n'), ((4386, 4496), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (4399, 4496), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((4541, 4609), 'six.assertCountEqual', 'six.assertCountEqual', (['self', 'md.dataset_code', "['OIL', 'AAPL', 'MSFT']"], {}), "(self, md.dataset_code, ['OIL', 'AAPL', 'MSFT'])\n", (4561, 4609), False, 'import six\n'), ((4704, 4730), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["['NSE/OIL']"], {}), "(['NSE/OIL'])\n", (4717, 4730), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((8251, 8295), 'six.assertCountEqual', 'six.assertCountEqual', (['self', 'actual', 'expected'], {}), '(self, actual, expected)\n', (8271, 8295), False, 'import six\n'), ((1098, 1141), 'mock.call', 'call', (["('NSE/OIL', {'column_index': [1, 2]})"], {}), "(('NSE/OIL', {'column_index': [1, 2]}))\n", (1102, 
1141), False, 'from mock import patch, call\n'), ((1155, 1197), 'mock.call', 'call', (["('WIKI/AAPL', {'column_index': [1]})"], {}), "(('WIKI/AAPL', {'column_index': [1]}))\n", (1159, 1197), False, 'from mock import patch, call\n'), ((1211, 1228), 'mock.call', 'call', (['"""WIKI/MSFT"""'], {}), "('WIKI/MSFT')\n", (1215, 1228), False, 'from mock import patch, call\n'), ((3090, 3103), 'six.u', 'six.u', (['"""Date"""'], {}), "('Date')\n", (3095, 3103), False, 'import six\n'), ((3105, 3132), 'six.u', 'six.u', (['"""NSE/OIL - column.1"""'], {}), "('NSE/OIL - column.1')\n", (3110, 3132), False, 'import six\n'), ((3154, 3181), 'six.u', 'six.u', (['"""NSE/OIL - column.2"""'], {}), "('NSE/OIL - column.2')\n", (3159, 3181), False, 'import six\n'), ((3203, 3232), 'six.u', 'six.u', (['"""WIKI/AAPL - column.1"""'], {}), "('WIKI/AAPL - column.1')\n", (3208, 3232), False, 'import six\n'), ((3254, 3283), 'six.u', 'six.u', (['"""WIKI/MSFT - column.1"""'], {}), "('WIKI/MSFT - column.1')\n", (3259, 3283), False, 'import six\n'), ((3305, 3334), 'six.u', 'six.u', (['"""WIKI/MSFT - column.2"""'], {}), "('WIKI/MSFT - column.2')\n", (3310, 3334), False, 'import six\n'), ((3356, 3385), 'six.u', 'six.u', (['"""WIKI/MSFT - column.3"""'], {}), "('WIKI/MSFT - column.3')\n", (3361, 3385), False, 'import six\n'), ((3718, 3743), 'datetime.date', 'datetime.date', (['(2013)', '(1)', '(1)'], {}), '(2013, 1, 1)\n', (3731, 3743), False, 'import datetime\n'), ((4014, 4040), 'datetime.date', 'datetime.date', (['(2015)', '(7)', '(30)'], {}), '(2015, 7, 30)\n', (4027, 4040), False, 'import datetime\n'), ((6110, 6151), 'mock.call', 'call', ([], {'params': "{'start_date': '2015-07-01'}"}), "(params={'start_date': '2015-07-01'})\n", (6114, 6151), False, 'from mock import patch, call\n'), ((6179, 6239), 'mock.call', 'call', ([], {'params': "{'column_index': 1, 'start_date': '2015-07-01'}"}), "(params={'column_index': 1, 'start_date': '2015-07-01'})\n", (6183, 6239), False, 'from mock import patch, call\n'), ((6267, 6308), 'mock.call', 'call', ([], {'params': "{'start_date': '2015-07-01'}"}), "(params={'start_date': '2015-07-01'})\n", (6271, 6308), False, 'from mock import patch, call\n'), ((7957, 7984), 'six.u', 'six.u', (['"""NSE/OIL - column.1"""'], {}), "('NSE/OIL - column.1')\n", (7962, 7984), False, 'import six\n'), ((8006, 8033), 'six.u', 'six.u', (['"""NSE/OIL - column.2"""'], {}), "('NSE/OIL - column.2')\n", (8011, 8033), False, 'import six\n'), ((8055, 8088), 'six.u', 'six.u', (['"""SINGLE/COLUMN - column.1"""'], {}), "('SINGLE/COLUMN - column.1')\n", (8060, 8088), False, 'import six\n'), ((8110, 8139), 'six.u', 'six.u', (['"""WIKI/MSFT - column.1"""'], {}), "('WIKI/MSFT - column.1')\n", (8115, 8139), False, 'import six\n'), ((8161, 8190), 'six.u', 'six.u', (['"""WIKI/MSFT - column.2"""'], {}), "('WIKI/MSFT - column.2')\n", (8166, 8190), False, 'import six\n'), ((8212, 8241), 'six.u', 'six.u', (['"""WIKI/MSFT - column.3"""'], {}), "('WIKI/MSFT - column.3')\n", (8217, 8241), False, 'import six\n'), ((9107, 9164), 'six.assertCountEqual', 'six.assertCountEqual', (['self', 'expected_item', 'results[index]'], {}), '(self, expected_item, results[index])\n', (9127, 9164), False, 'import six\n'), ((5895, 6005), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (5908, 6005), False, 'from quandl.model.merged_dataset 
import MergedDataset\n'), ((6692, 6744), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["['NSE/OIL', 'WIKI/AAPL', 'WIKI/MSFT']"], {}), "(['NSE/OIL', 'WIKI/AAPL', 'WIKI/MSFT'])\n", (6705, 6744), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((6948, 6989), 'mock.call', 'call', ([], {'params': "{'start_date': '2015-07-01'}"}), "(params={'start_date': '2015-07-01'})\n", (6952, 6989), False, 'from mock import patch, call\n'), ((7073, 7183), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (7086, 7183), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((7411, 7521), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {'column_index': [1]}\n ), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('WIKI/AAPL', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (7424, 7521), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((7728, 7842), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('SINGLE/COLUMN', {'column_index':\n [1]}), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('SINGLE/COLUMN', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (7741, 7842), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((8364, 8478), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 2]}), ('SINGLE/COLUMN', {'column_index':\n [1]}), 'WIKI/MSFT']"], {}), "([('NSE/OIL', {'column_index': [1, 2]}), ('SINGLE/COLUMN', {\n 'column_index': [1]}), 'WIKI/MSFT'])\n", (8377, 8478), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((8697, 8733), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(11)', '(0)', '(0)'], {}), '(2015, 7, 11, 0, 0)\n', (8714, 8733), False, 'import datetime\n'), ((8789, 8825), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(13)', '(0)', '(0)'], {}), '(2015, 7, 13, 0, 0)\n', (8806, 8825), False, 'import datetime\n'), ((8879, 8915), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(14)', '(0)', '(0)'], {}), '(2015, 7, 14, 0, 0)\n', (8896, 8915), False, 'import datetime\n'), ((8969, 9005), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(7)', '(15)', '(0)', '(0)'], {}), '(2015, 7, 15, 0, 0)\n', (8986, 9005), False, 'import datetime\n'), ((9264, 9316), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["['NSE/OIL', 'WIKI/AAPL', 'WIKI/MSFT']"], {}), "(['NSE/OIL', 'WIKI/AAPL', 'WIKI/MSFT'])\n", (9277, 9316), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((4886, 4910), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (['"""NSE/OIL"""'], {}), "('NSE/OIL')\n", (4899, 4910), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((5058, 5099), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["['NSE/OIL', {'blah': [1]}]"], {}), "(['NSE/OIL', {'blah': [1]}])\n", (5071, 5099), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((5235, 5288), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': 'foo'})]"], {}), "([('NSE/OIL', {'column_index': 'foo'})])\n", (5248, 5288), False, 'from quandl.model.merged_dataset import 
MergedDataset\n'), ((5448, 5503), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [1, 10]})]"], {}), "([('NSE/OIL', {'column_index': [1, 10]})])\n", (5461, 5503), False, 'from quandl.model.merged_dataset import MergedDataset\n'), ((5640, 5694), 'quandl.model.merged_dataset.MergedDataset', 'MergedDataset', (["[('NSE/OIL', {'column_index': [0, 1]})]"], {}), "([('NSE/OIL', {'column_index': [0, 1]})])\n", (5653, 5694), False, 'from quandl.model.merged_dataset import MergedDataset\n')]
from simulator.car import Car
from simulator.map import Map
from simulator.source import SourceNode
from simulator.street import Street


class Simulator:
    def __init__(self, intersections, adjacencyMatrix, trafficLightsByIntersectionId, sourceNodesRaw):
        streets = []
        streetIdsByIntersectionId = {}
        for (i, adjacents) in enumerate(adjacencyMatrix):
            for adjacent in adjacents:
                streets.append(Street(intersections[i], i, intersections[adjacent], adjacent))
                if i not in streetIdsByIntersectionId.keys():
                    streetIdsByIntersectionId[i] = []
                streetIdsByIntersectionId[i].append(len(streets) - 1)
        sourceNodes = [SourceNode(pos, probability, streetId, self)
                       for (pos, probability, streetId) in sourceNodesRaw]
        self.cars = []
        self.map = Map(streets, intersections, streetIdsByIntersectionId,
                       trafficLightsByIntersectionId, sourceNodes)

    def createCar(self, thinker, streetId):
        self.cars.append(Car(thinker, streetId, self.map.streets[streetId].start, self.map))

    def createCarInPosition(self, thinker, streetId, position):
        self.cars.append(Car(thinker, streetId, position, self.map))

    def step(self, deltaTime):
        for car in self.cars:
            car.step(deltaTime)
        for trafficLight in self.map.trafficLightsByIntersectionId.values():
            trafficLight.step(deltaTime)
        for sourceNode in self.map.sourceNodes:
            sourceNode.step(deltaTime)

    def getCarPositions(self):
        for car in self.cars:
            yield car.pos
[ "simulator.car.Car", "simulator.map.Map", "simulator.street.Street", "simulator.source.SourceNode" ]
[((859, 961), 'simulator.map.Map', 'Map', (['streets', 'intersections', 'streetIdsByIntersectionId', 'trafficLightsByIntersectionId', 'sourceNodes'], {}), '(streets, intersections, streetIdsByIntersectionId,\n trafficLightsByIntersectionId, sourceNodes)\n', (862, 961), False, 'from simulator.map import Map\n'), ((719, 763), 'simulator.source.SourceNode', 'SourceNode', (['pos', 'probability', 'streetId', 'self'], {}), '(pos, probability, streetId, self)\n', (729, 763), False, 'from simulator.source import SourceNode\n'), ((1028, 1094), 'simulator.car.Car', 'Car', (['thinker', 'streetId', 'self.map.streets[streetId].start', 'self.map'], {}), '(thinker, streetId, self.map.streets[streetId].start, self.map)\n', (1031, 1094), False, 'from simulator.car import Car\n'), ((1186, 1228), 'simulator.car.Car', 'Car', (['thinker', 'streetId', 'position', 'self.map'], {}), '(thinker, streetId, position, self.map)\n', (1189, 1228), False, 'from simulator.car import Car\n'), ((447, 509), 'simulator.street.Street', 'Street', (['intersections[i]', 'i', 'intersections[adjacent]', 'adjacent'], {}), '(intersections[i], i, intersections[adjacent], adjacent)\n', (453, 509), False, 'from simulator.street import Street\n')]
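A minimal usage sketch of the class above. Everything here is hypothetical: the import path, the coordinates, and the source-node tuples are guesses consistent with the constructor, and the repository's simulator package must be importable.

# Hypothetical module path and made-up world data; only illustrates the
# constructor signature (intersections, adjacency, traffic lights, sources).
from simulator.simulator import Simulator

intersections = [(0.0, 0.0), (100.0, 0.0), (100.0, 100.0)]
adjacency = [[1], [2], [0]]               # one-way ring: 0 -> 1 -> 2 -> 0
traffic_lights = {}                       # no traffic lights in this sketch
sources = [((0.0, 0.0), 0.5, 0)]          # (pos, spawn probability, street id)

sim = Simulator(intersections, adjacency, traffic_lights, sources)
for _ in range(600):                      # advance the world in 0.1 s steps
    sim.step(0.1)
print(list(sim.getCarPositions()))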
from marshmallow import Schema, fields, post_load, EXCLUDE

from ..resource import Resource
from collections import namedtuple


class PlanGroupPlans(Resource):
    """
    https://dev.chartmogul.com/v1.0/reference#plan_groups
    """
    _path = "/plan_groups{/uuid}/plans"
    _root_key = 'plans'
    _many = namedtuple('PlanGroupPlans', [_root_key, "current_page", "total_pages"])

    class _Schema(Schema):
        uuid = fields.String()
        data_source_uuid = fields.String()
        name = fields.String()
        interval_count = fields.Int()
        interval_unit = fields.String()
        external_id = fields.String()

        @post_load
        def make(self, data, **kwargs):
            return PlanGroupPlans(**data)

    _schema = _Schema(unknown=EXCLUDE)
[ "marshmallow.fields.Int", "collections.namedtuple", "marshmallow.fields.String" ]
[((310, 382), 'collections.namedtuple', 'namedtuple', (['"""PlanGroupPlans"""', "[_root_key, 'current_page', 'total_pages']"], {}), "('PlanGroupPlans', [_root_key, 'current_page', 'total_pages'])\n", (320, 382), False, 'from collections import namedtuple\n'), ((426, 441), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (439, 441), False, 'from marshmallow import Schema, fields, post_load, EXCLUDE\n'), ((469, 484), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (482, 484), False, 'from marshmallow import Schema, fields, post_load, EXCLUDE\n'), ((500, 515), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (513, 515), False, 'from marshmallow import Schema, fields, post_load, EXCLUDE\n'), ((541, 553), 'marshmallow.fields.Int', 'fields.Int', ([], {}), '()\n', (551, 553), False, 'from marshmallow import Schema, fields, post_load, EXCLUDE\n'), ((578, 593), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (591, 593), False, 'from marshmallow import Schema, fields, post_load, EXCLUDE\n'), ((616, 631), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (629, 631), False, 'from marshmallow import Schema, fields, post_load, EXCLUDE\n')]
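A standalone sketch of the marshmallow pattern used above, with no ChartMogul dependency (`Plan` is a stand-in for the `Resource`-backed class): `unknown=EXCLUDE` silently drops unrecognized payload keys, and the `post_load` hook turns the validated dict into a domain object.

from collections import namedtuple
from marshmallow import Schema, fields, post_load, EXCLUDE

Plan = namedtuple("Plan", ["name", "interval_unit"])

class PlanSchema(Schema):
    name = fields.String()
    interval_unit = fields.String()

    @post_load
    def make(self, data, **kwargs):
        # load() returns whatever this hook returns, not the raw dict
        return Plan(**data)

schema = PlanSchema(unknown=EXCLUDE)
plan = schema.load({"name": "Bronze", "interval_unit": "month", "extra": 1})
print(plan.name)   # "Bronze"; the "extra" key was dropped by EXCLUDE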
# -*- coding: utf-8 -*-
"""
==========================
Flask MySQL initialisation
==========================

PROGRAM BY <NAME>, 2019

Coding Rules:
- Snake case for variables.
- Only argument is configuration file.
- No output or print, just log and files.
"""
from flask_mysqldb import MySQL
from flask import current_app as application

try:
    mysql = MySQL(application)
except RuntimeError:
    pass  # RUNTIME ERROR: WORKING OUTSIDE OF APPLICATION CONTEXT. LIKE EXECUTING SPHINX
[ "flask_mysqldb.MySQL" ]
[((357, 375), 'flask_mysqldb.MySQL', 'MySQL', (['application'], {}), '(application)\n', (362, 375), False, 'from flask_mysqldb import MySQL\n')]
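For context, a minimal sketch of the Flask wiring this module expects — here `MySQL` is bound to a concrete app rather than the `current_app` proxy, which sidesteps the `RuntimeError` caught above. Credentials are placeholders and a reachable MySQL server is assumed.

from flask import Flask
from flask_mysqldb import MySQL

app = Flask(__name__)
app.config["MYSQL_HOST"] = "localhost"      # standard flask_mysqldb keys
app.config["MYSQL_USER"] = "user"           # placeholder credentials
app.config["MYSQL_PASSWORD"] = "password"
app.config["MYSQL_DB"] = "mydb"
mysql = MySQL(app)

with app.app_context():
    cur = mysql.connection.cursor()
    cur.execute("SELECT 1")
    print(cur.fetchone())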
# -*- coding: utf-8 -*-

import utils
from tqdm import tqdm
from collections import defaultdict
import src.news_kg.util as news_kg_util
import src.wikidata.query as wiki_query
from qwikidata.linked_data_interface import get_entity_dict_from_api
from typing import List, Tuple


def retrieve_wikidata_neighbors(entities: List[str]) -> List[Tuple[str, str, str]]:
    """
    Retrieves all Wikidata neighbors for the given Wikidata entities set.

    Args:
        entities (:obj:`List[str]`): List of entities for which neighbors should be retrieved.

    Returns:
        :obj:`List[Tuple[str, str, str]]`: Wikidata triples.
    """
    utils.get_logger().debug(f'Wikidata: Retrieving neighbors from Wikidata for {len(entities)} entities.')

    # Load or create the map of Wikidata entities to neighbor entities
    if utils.load_cache('wiki_neighbors_map') is None:
        wiki_neighbors_map = initialize_wiki_neighbors_map()
    else:
        wiki_neighbors_map = utils.load_cache('wiki_neighbors_map')

    # Load map of Wikidata attributes
    wiki_attributes_map = utils.load_cache('wiki_attributes_map')

    # Retrieve neighbors
    triples = set()
    steps = 0
    for entity in tqdm(entities):
        steps += 1
        if entity in wiki_neighbors_map.keys():
            # If entity is already in the map, directly retrieve its stored neighbors
            triples.update(wiki_neighbors_map[entity])
        else:
            # Entity is not in the map, so query Wikidata for its neighbors
            try:
                data = get_entity_dict_from_api(entity.split('/')[-1])
            except Exception:
                # Catches errors thrown for entities that no longer appear in Wikidata
                continue
            claims = data['claims']
            properties = [prop for prop in data['claims']]
            for prop in properties:
                for item in claims[prop]:
                    if (('datavalue' in item['mainsnak'])
                            and (type(item['mainsnak']['datavalue']['value']) == dict)
                            and ('id' in item['mainsnak']['datavalue']['value'])):
                        triple = (entity,
                                  news_kg_util.pid2wikidata_property(prop),
                                  news_kg_util.qid2wikidata_resource(item['mainsnak']['datavalue']['value']['id']))
                        triples.add(triple)
                        wiki_neighbors_map[entity].add(triple)
            # Update the Wiki attributes map
            if entity not in wiki_attributes_map.keys():
                label, aliases = wiki_query.get_entity_attributes(data)
                wiki_attributes_map[entity]['label'] = label
                wiki_attributes_map[entity]['aliases'] = aliases
        # Cache Wiki maps every 1000 steps
        if steps % 1000 == 0:
            utils.update_cache('wiki_neighbors_map', wiki_neighbors_map)
            utils.update_cache('wiki_attributes_map', wiki_attributes_map)

    utils.get_logger().debug(f'Retrieved {len(triples)} neighbors.')
    utils.get_logger().debug(f'Size of Wiki attributes map: {len(wiki_attributes_map)}.')

    # Update caches of Wiki maps
    utils.update_cache('wiki_neighbors_map', wiki_neighbors_map)
    utils.update_cache('wiki_attributes_map', wiki_attributes_map)

    return list(triples)


def initialize_wiki_neighbors_map():
    initializer = lambda: defaultdict(set)
    return utils.load_or_create_cache('wiki_neighbors_map', initializer)
[ "tqdm.tqdm", "utils.update_cache", "utils.get_logger", "src.wikidata.query.get_entity_attributes", "utils.load_or_create_cache", "src.news_kg.util.pid2wikidata_property", "collections.defaultdict", "src.news_kg.util.qid2wikidata_resource", "utils.load_cache" ]
[((1102, 1141), 'utils.load_cache', 'utils.load_cache', (['"""wiki_attributes_map"""'], {}), "('wiki_attributes_map')\n", (1118, 1141), False, 'import utils\n'), ((1220, 1234), 'tqdm.tqdm', 'tqdm', (['entities'], {}), '(entities)\n', (1224, 1234), False, 'from tqdm import tqdm\n'), ((3112, 3172), 'utils.update_cache', 'utils.update_cache', (['"""wiki_neighbors_map"""', 'wiki_neighbors_map'], {}), "('wiki_neighbors_map', wiki_neighbors_map)\n", (3130, 3172), False, 'import utils\n'), ((3177, 3239), 'utils.update_cache', 'utils.update_cache', (['"""wiki_attributes_map"""', 'wiki_attributes_map'], {}), "('wiki_attributes_map', wiki_attributes_map)\n", (3195, 3239), False, 'import utils\n'), ((3359, 3420), 'utils.load_or_create_cache', 'utils.load_or_create_cache', (['"""wiki_neighbors_map"""', 'initializer'], {}), "('wiki_neighbors_map', initializer)\n", (3385, 3420), False, 'import utils\n'), ((850, 888), 'utils.load_cache', 'utils.load_cache', (['"""wiki_neighbors_map"""'], {}), "('wiki_neighbors_map')\n", (866, 888), False, 'import utils\n'), ((998, 1036), 'utils.load_cache', 'utils.load_cache', (['"""wiki_neighbors_map"""'], {}), "('wiki_neighbors_map')\n", (1014, 1036), False, 'import utils\n'), ((3331, 3347), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3342, 3347), False, 'from collections import defaultdict\n'), ((667, 685), 'utils.get_logger', 'utils.get_logger', ([], {}), '()\n', (683, 685), False, 'import utils\n'), ((2779, 2839), 'utils.update_cache', 'utils.update_cache', (['"""wiki_neighbors_map"""', 'wiki_neighbors_map'], {}), "('wiki_neighbors_map', wiki_neighbors_map)\n", (2797, 2839), False, 'import utils\n'), ((2852, 2914), 'utils.update_cache', 'utils.update_cache', (['"""wiki_attributes_map"""', 'wiki_attributes_map'], {}), "('wiki_attributes_map', wiki_attributes_map)\n", (2870, 2914), False, 'import utils\n'), ((2920, 2938), 'utils.get_logger', 'utils.get_logger', ([], {}), '()\n', (2936, 2938), False, 'import utils\n'), ((2989, 3007), 'utils.get_logger', 'utils.get_logger', ([], {}), '()\n', (3005, 3007), False, 'import utils\n'), ((2529, 2567), 'src.wikidata.query.get_entity_attributes', 'wiki_query.get_entity_attributes', (['data'], {}), '(data)\n', (2561, 2567), True, 'import src.wikidata.query as wiki_query\n'), ((2150, 2190), 'src.news_kg.util.pid2wikidata_property', 'news_kg_util.pid2wikidata_property', (['prop'], {}), '(prop)\n', (2184, 2190), True, 'import src.news_kg.util as news_kg_util\n'), ((2192, 2277), 'src.news_kg.util.qid2wikidata_resource', 'news_kg_util.qid2wikidata_resource', (["item['mainsnak']['datavalue']['value']['id']"], {}), "(item['mainsnak']['datavalue']['value']['id']\n )\n", (2226, 2277), True, 'import src.news_kg.util as news_kg_util\n')]
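The core lookup step can be exercised in isolation; this sketch (network access required) fetches one entity dict via qwikidata and walks its claims the same way the loop above does.

from qwikidata.linked_data_interface import get_entity_dict_from_api

data = get_entity_dict_from_api("Q42")          # Douglas Adams
for prop, items in data["claims"].items():
    for item in items:
        snak = item["mainsnak"]
        dv = snak.get("datavalue")               # absent for novalue/somevalue snaks
        if dv and isinstance(dv["value"], dict) and "id" in dv["value"]:
            print(prop, "->", dv["value"]["id"])
            break                                # one neighbor per property is enough here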
from torch.distributed.distributed_c10d import is_initialized
from torch.utils.data import Dataset, DistributedSampler


def get_ddp_sampler(dataset: Dataset, epoch: int):
    """
    This function will create a DistributedSampler if DDP is initialized,
    and will just return None if DDP is not initialized.
    """
    if is_initialized():
        sampler = DistributedSampler(dataset)
        sampler.set_epoch(epoch)
    else:
        sampler = None
    return sampler
[ "torch.distributed.distributed_c10d.is_initialized", "torch.utils.data.DistributedSampler" ]
[((325, 341), 'torch.distributed.distributed_c10d.is_initialized', 'is_initialized', ([], {}), '()\n', (339, 341), False, 'from torch.distributed.distributed_c10d import is_initialized\n'), ((361, 388), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['dataset'], {}), '(dataset)\n', (379, 388), False, 'from torch.utils.data import Dataset, DistributedSampler\n')]
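A usage sketch: because the helper returns `None` outside DDP, callers can key `shuffle` off the sampler's presence; the same code run under `torchrun` picks up a `DistributedSampler` with per-epoch shuffling.

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(100).float())
for epoch in range(3):
    sampler = get_ddp_sampler(dataset, epoch)      # None in a single process
    loader = DataLoader(dataset, batch_size=16,
                        shuffle=(sampler is None), sampler=sampler)
    for (batch,) in loader:
        pass  # training step would go here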
# License: MIT
# ref: https://github.com/thomas-young-2013/open-box/blob/master/openbox/surrogate/skrf.py

import logging
import typing
import numpy as np
from typing import List, Optional, Tuple, Union

from xbbo.surrogate.base import BaseRF
from xbbo.configspace.space import DenseConfigurationSpace
from xbbo.utils.constants import MAXINT
from xbbo.utils.util import get_types

logger = logging.getLogger(__name__)


class RandomForestSurrogate(BaseRF):
    def __init__(
            self,
            configspace: DenseConfigurationSpace,
            ensemble_size: int = 10,
            normalize_y: bool = True,
            instance_features: typing.Optional[np.ndarray] = None,
            pca_components: typing.Optional[int] = None,
            rng: np.random.RandomState = np.random.RandomState(42),
            types=None,
            bounds=None,
            **kwargs
    ):
        self.model_config = dict()
        self.model_config["n_estimators"] = 10
        self.model_config["criterion"] = "mse"
        self.model_config["max_depth"] = 12
        self.model_config["min_samples_split"] = 3
        self.model_config["min_samples_leaf"] = 3
        self.model_config["min_weight_fraction_leaf"] = 0.
        self.model_config["max_features"] = 5. / 6.
        self.model_config["max_leaf_nodes"] = None
        self.model_config["n_jobs"] = -1
        # self.model_config["random_state"] = -1
        # self.model_config["max_samples"] = 1.
        self.ensemble_size = ensemble_size
        self.models = list()
        self.configspace = configspace
        self.rng = rng
        self.random_seeds = self.rng.randint(low=1, high=MAXINT, size=self.ensemble_size)
        if types is None or bounds is None:
            types, bounds = get_types(configspace)
        super().__init__(
            configspace=configspace,
            types=types,
            bounds=bounds,
            instance_features=instance_features,
            pca_components=pca_components,
            **kwargs
        )
        self.normalize_y = normalize_y
        self.is_trained = False

    def _train(self, X: np.ndarray, y: np.ndarray, **kwargs):
        """
        Train a Random Forest Regression model on X and y.

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input data points. The dimensionality of X is (N, D),
            with N as the number of points and D is the number of features.
        y: np.ndarray (N,)
            The corresponding target values.
        """
        from sklearn.ensemble import RandomForestRegressor
        X = self._impute_inactive(X)
        if self.normalize_y:
            y = self._normalize_y(y)

        self.models = list()
        for i in range(self.ensemble_size):
            configs = self.model_config.copy()
            configs["random_state"] = self.random_seeds[i]
            rf_model = RandomForestRegressor(**configs)
            rf_model.fit(X, y)
            self.models.append(rf_model)

        self.is_trained = True
        return self

    def _predict(self, X_test: np.ndarray, **kwargs):
        r"""
        Returns the predictive mean and variance of the objective function.

        Parameters
        ----------
        X_test: np.ndarray (N, D)
            Input test points

        Returns
        ----------
        np.array(N,)
            predictive mean
        np.array(N,)
            predictive variance
        """
        if not self.is_trained:
            raise Exception('Model has to be trained first!')

        X_test = self._impute_inactive(X_test)

        predictions = list()
        for i, model in enumerate(self.models):
            pred = model.predict(X_test)
            predictions.append(pred)

        m = np.mean(predictions, axis=0)
        v = np.var(predictions, axis=0)

        # Clip negative variances and set them to the smallest
        # positive float value
        if v.shape[0] == 1:
            v = np.clip(v, np.finfo(v.dtype).eps, np.inf)
        else:
            v = np.clip(v, np.finfo(v.dtype).eps, np.inf)
            v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0

        if self.normalize_y:
            m, v = self._untransform_y(m, v)

        return m, v

    def _normalize_y(self, y: np.ndarray) -> np.ndarray:
        """Normalize data to zero mean and unit standard deviation."""
        self.mean_y_ = np.mean(y)
        self.std_y_ = np.std(y)
        if self.std_y_ == 0:
            self.std_y_ = 1
        return (y - self.mean_y_) / self.std_y_

    def _untransform_y(
            self,
            y: np.ndarray,
            var: Optional[np.ndarray] = None,
    ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
        """Transform zero-mean, unit-standard-deviation data back into the regular space."""
        y = y * self.std_y_ + self.mean_y_
        if var is not None:
            var = var * self.std_y_ ** 2
            return y, var
        return y

    def _impute_inactive(self, X: np.ndarray) -> np.ndarray:
        X = X.copy()
        X[~np.isfinite(X)] = -1
        return X
[ "numpy.std", "numpy.isfinite", "numpy.random.RandomState", "xbbo.utils.util.get_types", "sklearn.ensemble.RandomForestRegressor", "numpy.finfo", "numpy.mean", "numpy.var", "logging.getLogger" ]
[((390, 417), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (407, 417), False, 'import logging\n'), ((784, 809), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (805, 809), True, 'import numpy as np\n'), ((3751, 3779), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (3758, 3779), True, 'import numpy as np\n'), ((3792, 3819), 'numpy.var', 'np.var', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (3798, 3819), True, 'import numpy as np\n'), ((4414, 4424), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4421, 4424), True, 'import numpy as np\n'), ((4447, 4456), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (4453, 4456), True, 'import numpy as np\n'), ((1738, 1760), 'xbbo.utils.util.get_types', 'get_types', (['configspace'], {}), '(configspace)\n', (1747, 1760), False, 'from xbbo.utils.util import get_types\n'), ((2863, 2895), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '(**configs)\n', (2884, 2895), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((5081, 5095), 'numpy.isfinite', 'np.isfinite', (['X'], {}), '(X)\n', (5092, 5095), True, 'import numpy as np\n'), ((3970, 3987), 'numpy.finfo', 'np.finfo', (['v.dtype'], {}), '(v.dtype)\n', (3978, 3987), True, 'import numpy as np\n'), ((4042, 4059), 'numpy.finfo', 'np.finfo', (['v.dtype'], {}), '(v.dtype)\n', (4050, 4059), True, 'import numpy as np\n'), ((4101, 4118), 'numpy.finfo', 'np.finfo', (['v.dtype'], {}), '(v.dtype)\n', (4109, 4118), True, 'import numpy as np\n'), ((4132, 4149), 'numpy.finfo', 'np.finfo', (['v.dtype'], {}), '(v.dtype)\n', (4140, 4149), True, 'import numpy as np\n')]
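The uncertainty estimate above is simply the spread of differently-seeded forests; here is a standalone sketch of that ensemble-variance trick on synthetic data, independent of the xbbo base classes.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 3)                                      # synthetic inputs
y = X @ np.array([1.0, -2.0, 0.5]) + 0.05 * rng.randn(200)  # noisy linear target

# Train several forests that differ only in their random seed
models = [RandomForestRegressor(n_estimators=10, random_state=seed).fit(X, y)
          for seed in range(5)]
preds = np.stack([m.predict(X[:4]) for m in models])    # (ensemble, points)
mean, var = preds.mean(axis=0), preds.var(axis=0)
print(mean, np.clip(var, np.finfo(var.dtype).eps, np.inf))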
# Copyright (c) 2006, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Utility functions for PDF library.
"""

import sys
from binascii import hexlify

try:
    import __builtin__ as builtins
except ImportError:  # Py3
    import builtins

__author__ = "<NAME>"
__author_email__ = "<EMAIL>"

xrange_fn = getattr(builtins, "xrange", range)
_basestring = getattr(builtins, "basestring", str)
bytes_type = type(bytes())  # Works the same in Python 2.X and 3.X
string_type = getattr(builtins, "unicode", str)
int_types = (int, long) if sys.version_info[0] < 3 else (int,)


# Make basic type tests more consistent
def isString(s):
    """Test if arg is a string. Compatible with Python 2 and 3."""
    return isinstance(s, _basestring)


def isInt(n):
    """Test if arg is an int. Compatible with Python 2 and 3."""
    return isinstance(n, int_types)


def isBytes(b):
    """Test if arg is a bytes instance. Compatible with Python 2 and 3."""
    return isinstance(b, bytes_type)


# custom implementation of warnings.formatwarning
def formatWarning(message, category, filename, lineno, line=None):
    file = filename.replace("/", "\\").rsplit("\\", 1)[1]  # find the file name
    return "%s: %s [%s:%s]\n" % (category.__name__, message, file, lineno)


def readUntilWhitespace(stream, maxchars=None):
    """
    Reads non-whitespace characters and returns them.
    Stops upon encountering whitespace or when maxchars is reached.
    """
    txt = pypdfBytes("")

    while True:
        tok = stream.read(1)

        if tok.isspace() or not tok:
            break

        txt += tok

        if len(txt) == maxchars:
            break

    return txt


def readNonWhitespace(stream):
    """
    Finds and reads the next non-whitespace character (ignores whitespace).

    :param stream: a file-like object.
    """
    tok = WHITESPACES[0]

    while tok in WHITESPACES:
        tok = stream.read(1)

    return tok


def skipOverWhitespace(stream):
    """
    Similar to ``readNonWhitespace()``, but returns a Boolean if more than
    one whitespace character was read.

    :param stream: a file-like object.
    """
    tok = WHITESPACES[0]
    cnt = 0

    while tok in WHITESPACES:
        tok = stream.read(1)
        cnt += 1

    return cnt > 1


def skipOverComment(stream):
    tok = stream.read(1)
    stream.seek(-1, 1)

    if tok == pypdfBytes('%'):
        while tok not in (pypdfBytes('\n'), pypdfBytes('\r')):
            tok = stream.read(1)


def readUntilRegex(stream, regex, ignore_eof=False):
    """
    Reads until the regular expression pattern matched (ignore the match).
    Raise PdfStreamError on premature end-of-file.

    :param bool ignore_eof: If true, ignore end-of-line and return
        immediately.
    """
    name = pypdfBytes('')

    while True:
        tok = stream.read(16)

        if not tok:
            # stream has truncated prematurely
            if ignore_eof == True:
                return name
            else:
                raise PdfStreamError("Stream has ended unexpectedly")
        m = regex.search(tok)

        if m is not None:
            name += tok[:m.start()]
            stream.seek(m.start() - len(tok), 1)
            break

        name += tok

    return name


class ConvertFunctionsToVirtualList(object):
    def __init__(self, lengthFunction, getFunction):
        self.lengthFunction = lengthFunction
        self.getFunction = getFunction

    def __len__(self):
        return self.lengthFunction()

    def __getitem__(self, index):
        if isinstance(index, slice):
            indices = xrange_fn(*index.indices(len(self)))
            cls = type(self)

            return cls(indices.__len__, lambda idx: self[indices[idx]])
        if not isInt(index):
            raise TypeError("sequence indices must be integers")
        len_self = len(self)

        if index < 0:
            # support negative indexes
            index = len_self + index
        if index < 0 or index >= len_self:
            raise IndexError("sequence index out of range")

        return self.getFunction(index)


def RC4Encrypt(key, plaintext):
    S = [i for i in range(256)]
    j = 0

    for i in range(256):
        j = (j + S[i] + pypdfOrd(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]

    i, j = 0, 0
    retval = []

    for x in range(len(plaintext)):
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        t = S[(S[i] + S[j]) % 256]
        retval.append(pypdfBytes(chr(pypdfOrd(plaintext[x]) ^ t)))

    return pypdfBytes("").join(retval)


def matrixMultiply(a, b):
    return [[sum([float(i) * float(j) for i, j in zip(row, col)])
             for col in zip(*b)] for row in a]


def markLocation(stream):
    """Creates text file showing current location in context."""
    # Mainly for debugging
    RADIUS = 5000
    stream.seek(-RADIUS, 1)
    outputDoc = open('PyPDF4_pdfLocation.txt', 'w')
    outputDoc.write(stream.read(RADIUS))
    outputDoc.write('HERE')
    outputDoc.write(stream.read(RADIUS))
    outputDoc.close()
    stream.seek(-RADIUS, 1)


class PyPdfError(Exception):
    pass


class PdfReadError(PyPdfError):
    pass


class PageSizeNotDefinedError(PyPdfError):
    pass


class PdfReadWarning(UserWarning):
    pass


class PdfStreamError(PdfReadError):
    pass


if sys.version_info < (3, 0):
    pypdfBytes = lambda s: s
else:
    def pypdfBytes(s):
        if isinstance(s, bytes):  # In Python 2, bytes is str
            return s
        else:
            return s.encode('LATIN-1')

pypdfBytes.__doc__ = """
Abstracts the conversion from ``str`` to ``bytes`` over versions 2.7.x and
3 of Python.
"""


def pypdfUnicode(s):
    """
    Encodes a string ``s`` according to the Unicode character set (default
    for Python 3).

    :param s: a ``str`` instance.
    :rtype: ``unicode`` for Python 2, ``str`` for Python 3.
    """
    if sys.version_info[0] < 3:
        return unicode(s, 'unicode_escape')
    else:
        return s


def pypdfStr(b):
    """
    Abstracts the conversion from bytes to string over versions 2.7.x and 3
    of Python.
    """
    if sys.version_info[0] < 3:
        return b
    else:
        if isinstance(b, bytes):
            return b.decode("LATIN1")
        else:
            return b


def pypdfOrd(b):
    """
    Abstracts the conversion from a single-character string to the
    corresponding integer value over versions 2.7.x and 3 of Python.
    """
    # In case of bugs, try to look here! Should the condition be brought like
    # it used to be in the comment below?
    # if sys.version_info[0] < 3 or type(b) == str:
    # (``str is bytes == True`` in Python 2)
    if isinstance(b, str):
        return ord(b)
    elif sys.version_info < (3, 0) and isinstance(b, unicode):
        return ord(b)
    # TO-DO The code below should be changed (b could be ANYTHING!) but I have
    # no idea of what (and how much) previous code could be depending on this
    # behavior
    else:
        return b


def pypdfChr(c):
    """
    Abstracts the conversion from a single byte to the corresponding ASCII
    character over versions 2.7.x and 3 of Python.
    """
    if sys.version_info[0] < 3:
        return c
    else:
        return chr(c)


def pypdfBytearray(b):
    """
    Abstracts the conversion from a ``bytes`` variable to a ``bytearray``
    value over versions 2.7.x and 3 of Python.
    """
    if sys.version_info[0] < 3:
        return b
    else:
        return bytearray(b)


def hexEncode(s):
    """
    Abstracts the conversion from a LATIN 1 string to an hex-valued string
    representation of the former over versions 2.7.x and 3 of Python.

    :param str s: a ``str`` to convert from LATIN 1 to an hexadecimal string
        representation.
    :return: a hex-valued string, e.g. ``hexEncode("$A'") == "244127"``.
    :rtype: str
    """
    if sys.version_info < (3, 0):
        return s.encode('hex')
    else:
        if isinstance(s, str):
            s = s.encode("LATIN1")

        # The output is in the set of "0123456789ABCDEF" characters. Using the
        # ASCII decoder is a safeguard against anomalies, albeit unlikely
        return hexlify(s).decode("ASCII")


def hexStr(num):
    return hex(num).replace('L', '')


WHITESPACES = [pypdfBytes(x) for x in [' ', '\n', '\r', '\t', '\x00']]


def paethPredictor(left, up, up_left):
    p = left + up - up_left
    dist_left = abs(p - left)
    dist_up = abs(p - up)
    dist_up_left = abs(p - up_left)

    if dist_left <= dist_up and dist_left <= dist_up_left:
        return left
    elif dist_up <= dist_up_left:
        return up
    else:
        return up_left


def pairs(sequence):
    """
    :param sequence: an indexable sequence value with ``__len__()``.
    :return: an iterable of paired values from ``sequence``.
    """
    if (len(sequence) % 2) != 0:
        raise ValueError("sequence must contain an even number of elements")

    for i in range(0, len(sequence) - 1, 2):
        yield (sequence[i], sequence[i + 1])
[ "binascii.hexlify" ]
[((9608, 9618), 'binascii.hexlify', 'hexlify', (['s'], {}), '(s)\n', (9615, 9618), False, 'from binascii import hexlify\n')]
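A few of the helpers above are easy to sanity-check in isolation — RC4 is its own inverse, and `hexEncode` and `pairs` behave exactly as their docstrings state.

# Assumes the module above has been imported; RC4 applies the same keystream
# XOR both ways, so encrypting twice with one key restores the plaintext.
key = pypdfBytes("secret")
ciphertext = RC4Encrypt(key, pypdfBytes("hello world"))
assert RC4Encrypt(key, ciphertext) == pypdfBytes("hello world")

assert hexEncode("$A'") == "244127"                # matches the docstring example
assert list(pairs([1, 2, 3, 4])) == [(1, 2), (3, 4)]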
# coding: utf-8 """ Domains No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v3 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from hubspot.cms.domains.configuration import Configuration class Domain(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { "portal_id": "int", "id": "int", "created": "int", "updated": "int", "domain": "str", "primary_landing_page": "bool", "primary_email": "bool", "primary_blog": "bool", "primary_blog_post": "bool", "primary_site_page": "bool", "primary_knowledge": "bool", "primary_legacy_page": "bool", "primary_click_tracking": "bool", "full_category_key": "str", "secondary_to_domain": "str", "is_resolving": "bool", "is_dns_correct": "bool", "manually_marked_as_resolving": "bool", "consecutive_non_resolving_count": "int", "ssl_cname": "str", "is_ssl_enabled": "bool", "is_ssl_only": "bool", "certificate_id": "int", "ssl_request_id": "int", "is_used_for_blog_post": "bool", "is_used_for_site_page": "bool", "is_used_for_landing_page": "bool", "is_used_for_email": "bool", "is_used_for_knowledge": "bool", "setup_task_id": "int", "is_setup_complete": "bool", "set_up_language": "str", "team_ids": "list[int]", "actual_cname": "str", "correct_cname": "str", "actual_ip": "str", "apex_resolution_status": "str", "apex_domain": "str", "public_suffix": "str", "apex_ip_addresses": "list[str]", "site_id": "int", "brand_id": "int", "deletable": "bool", "domain_cdn_config": "DomainCdnConfig", "setup_info": "DomainSetupInfo", "derived_brand_name": "str", "created_by_id": "int", "updated_by_id": "int", "label": "str", "is_any_primary": "bool", "is_legacy_domain": "bool", "is_internal_domain": "bool", "is_resolving_internal_property": "bool", "is_resolving_ignoring_manually_marked_as_resolving": "bool", "is_used_for_any_content_type": "bool", "is_legacy": "bool", "author_at": "int", "cos_object_type": "str", "cdn_purge_embargo_time": "int", "is_staging_domain": "bool", } attribute_map = { "portal_id": "portalId", "id": "id", "created": "created", "updated": "updated", "domain": "domain", "primary_landing_page": "primaryLandingPage", "primary_email": "primaryEmail", "primary_blog": "primaryBlog", "primary_blog_post": "primaryBlogPost", "primary_site_page": "primarySitePage", "primary_knowledge": "primaryKnowledge", "primary_legacy_page": "primaryLegacyPage", "primary_click_tracking": "primaryClickTracking", "full_category_key": "fullCategoryKey", "secondary_to_domain": "secondaryToDomain", "is_resolving": "isResolving", "is_dns_correct": "isDnsCorrect", "manually_marked_as_resolving": "manuallyMarkedAsResolving", "consecutive_non_resolving_count": "consecutiveNonResolvingCount", "ssl_cname": "sslCname", "is_ssl_enabled": "isSslEnabled", "is_ssl_only": "isSslOnly", "certificate_id": "certificateId", "ssl_request_id": "sslRequestId", "is_used_for_blog_post": "isUsedForBlogPost", "is_used_for_site_page": "isUsedForSitePage", "is_used_for_landing_page": "isUsedForLandingPage", "is_used_for_email": "isUsedForEmail", "is_used_for_knowledge": "isUsedForKnowledge", "setup_task_id": "setupTaskId", "is_setup_complete": "isSetupComplete", 
"set_up_language": "setUpLanguage", "team_ids": "teamIds", "actual_cname": "actualCname", "correct_cname": "correctCname", "actual_ip": "actualIp", "apex_resolution_status": "apexResolutionStatus", "apex_domain": "apexDomain", "public_suffix": "publicSuffix", "apex_ip_addresses": "apexIpAddresses", "site_id": "siteId", "brand_id": "brandId", "deletable": "deletable", "domain_cdn_config": "domainCdnConfig", "setup_info": "setupInfo", "derived_brand_name": "derivedBrandName", "created_by_id": "createdById", "updated_by_id": "updatedById", "label": "label", "is_any_primary": "isAnyPrimary", "is_legacy_domain": "isLegacyDomain", "is_internal_domain": "isInternalDomain", "is_resolving_internal_property": "isResolvingInternalProperty", "is_resolving_ignoring_manually_marked_as_resolving": "isResolvingIgnoringManuallyMarkedAsResolving", "is_used_for_any_content_type": "isUsedForAnyContentType", "is_legacy": "isLegacy", "author_at": "authorAt", "cos_object_type": "cosObjectType", "cdn_purge_embargo_time": "cdnPurgeEmbargoTime", "is_staging_domain": "isStagingDomain", } def __init__( self, portal_id=None, id=None, created=None, updated=None, domain=None, primary_landing_page=None, primary_email=None, primary_blog=None, primary_blog_post=None, primary_site_page=None, primary_knowledge=None, primary_legacy_page=None, primary_click_tracking=None, full_category_key=None, secondary_to_domain=None, is_resolving=None, is_dns_correct=None, manually_marked_as_resolving=None, consecutive_non_resolving_count=None, ssl_cname=None, is_ssl_enabled=None, is_ssl_only=None, certificate_id=None, ssl_request_id=None, is_used_for_blog_post=None, is_used_for_site_page=None, is_used_for_landing_page=None, is_used_for_email=None, is_used_for_knowledge=None, setup_task_id=None, is_setup_complete=None, set_up_language=None, team_ids=None, actual_cname=None, correct_cname=None, actual_ip=None, apex_resolution_status=None, apex_domain=None, public_suffix=None, apex_ip_addresses=None, site_id=None, brand_id=None, deletable=None, domain_cdn_config=None, setup_info=None, derived_brand_name=None, created_by_id=None, updated_by_id=None, label=None, is_any_primary=None, is_legacy_domain=None, is_internal_domain=None, is_resolving_internal_property=None, is_resolving_ignoring_manually_marked_as_resolving=None, is_used_for_any_content_type=None, is_legacy=None, author_at=None, cos_object_type=None, cdn_purge_embargo_time=None, is_staging_domain=None, local_vars_configuration=None, ): # noqa: E501 """Domain - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._portal_id = None self._id = None self._created = None self._updated = None self._domain = None self._primary_landing_page = None self._primary_email = None self._primary_blog = None self._primary_blog_post = None self._primary_site_page = None self._primary_knowledge = None self._primary_legacy_page = None self._primary_click_tracking = None self._full_category_key = None self._secondary_to_domain = None self._is_resolving = None self._is_dns_correct = None self._manually_marked_as_resolving = None self._consecutive_non_resolving_count = None self._ssl_cname = None self._is_ssl_enabled = None self._is_ssl_only = None self._certificate_id = None self._ssl_request_id = None self._is_used_for_blog_post = None self._is_used_for_site_page = None self._is_used_for_landing_page = None self._is_used_for_email = None self._is_used_for_knowledge = None 
self._setup_task_id = None self._is_setup_complete = None self._set_up_language = None self._team_ids = None self._actual_cname = None self._correct_cname = None self._actual_ip = None self._apex_resolution_status = None self._apex_domain = None self._public_suffix = None self._apex_ip_addresses = None self._site_id = None self._brand_id = None self._deletable = None self._domain_cdn_config = None self._setup_info = None self._derived_brand_name = None self._created_by_id = None self._updated_by_id = None self._label = None self._is_any_primary = None self._is_legacy_domain = None self._is_internal_domain = None self._is_resolving_internal_property = None self._is_resolving_ignoring_manually_marked_as_resolving = None self._is_used_for_any_content_type = None self._is_legacy = None self._author_at = None self._cos_object_type = None self._cdn_purge_embargo_time = None self._is_staging_domain = None self.discriminator = None self.portal_id = portal_id self.id = id self.created = created self.updated = updated self.domain = domain self.primary_landing_page = primary_landing_page self.primary_email = primary_email self.primary_blog = primary_blog self.primary_blog_post = primary_blog_post self.primary_site_page = primary_site_page self.primary_knowledge = primary_knowledge self.primary_legacy_page = primary_legacy_page self.primary_click_tracking = primary_click_tracking self.full_category_key = full_category_key self.secondary_to_domain = secondary_to_domain self.is_resolving = is_resolving self.is_dns_correct = is_dns_correct self.manually_marked_as_resolving = manually_marked_as_resolving self.consecutive_non_resolving_count = consecutive_non_resolving_count self.ssl_cname = ssl_cname self.is_ssl_enabled = is_ssl_enabled self.is_ssl_only = is_ssl_only self.certificate_id = certificate_id self.ssl_request_id = ssl_request_id self.is_used_for_blog_post = is_used_for_blog_post self.is_used_for_site_page = is_used_for_site_page self.is_used_for_landing_page = is_used_for_landing_page self.is_used_for_email = is_used_for_email self.is_used_for_knowledge = is_used_for_knowledge self.setup_task_id = setup_task_id self.is_setup_complete = is_setup_complete self.set_up_language = set_up_language self.team_ids = team_ids self.actual_cname = actual_cname self.correct_cname = correct_cname self.actual_ip = actual_ip self.apex_resolution_status = apex_resolution_status self.apex_domain = apex_domain self.public_suffix = public_suffix self.apex_ip_addresses = apex_ip_addresses self.site_id = site_id self.brand_id = brand_id self.deletable = deletable self.domain_cdn_config = domain_cdn_config self.setup_info = setup_info self.derived_brand_name = derived_brand_name self.created_by_id = created_by_id self.updated_by_id = updated_by_id self.label = label self.is_any_primary = is_any_primary self.is_legacy_domain = is_legacy_domain self.is_internal_domain = is_internal_domain self.is_resolving_internal_property = is_resolving_internal_property self.is_resolving_ignoring_manually_marked_as_resolving = ( is_resolving_ignoring_manually_marked_as_resolving ) self.is_used_for_any_content_type = is_used_for_any_content_type self.is_legacy = is_legacy self.author_at = author_at self.cos_object_type = cos_object_type self.cdn_purge_embargo_time = cdn_purge_embargo_time self.is_staging_domain = is_staging_domain @property def portal_id(self): """Gets the portal_id of this Domain. # noqa: E501 :return: The portal_id of this Domain. 
# noqa: E501 :rtype: int """ return self._portal_id @portal_id.setter def portal_id(self, portal_id): """Sets the portal_id of this Domain. :param portal_id: The portal_id of this Domain. # noqa: E501 :type: int """ if ( self.local_vars_configuration.client_side_validation and portal_id is None ): # noqa: E501 raise ValueError( "Invalid value for `portal_id`, must not be `None`" ) # noqa: E501 self._portal_id = portal_id @property def id(self): """Gets the id of this Domain. # noqa: E501 :return: The id of this Domain. # noqa: E501 :rtype: int """ return self._id @id.setter def id(self, id): """Sets the id of this Domain. :param id: The id of this Domain. # noqa: E501 :type: int """ if ( self.local_vars_configuration.client_side_validation and id is None ): # noqa: E501 raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501 self._id = id @property def created(self): """Gets the created of this Domain. # noqa: E501 :return: The created of this Domain. # noqa: E501 :rtype: int """ return self._created @created.setter def created(self, created): """Sets the created of this Domain. :param created: The created of this Domain. # noqa: E501 :type: int """ if ( self.local_vars_configuration.client_side_validation and created is None ): # noqa: E501 raise ValueError( "Invalid value for `created`, must not be `None`" ) # noqa: E501 self._created = created @property def updated(self): """Gets the updated of this Domain. # noqa: E501 :return: The updated of this Domain. # noqa: E501 :rtype: int """ return self._updated @updated.setter def updated(self, updated): """Sets the updated of this Domain. :param updated: The updated of this Domain. # noqa: E501 :type: int """ if ( self.local_vars_configuration.client_side_validation and updated is None ): # noqa: E501 raise ValueError( "Invalid value for `updated`, must not be `None`" ) # noqa: E501 self._updated = updated @property def domain(self): """Gets the domain of this Domain. # noqa: E501 :return: The domain of this Domain. # noqa: E501 :rtype: str """ return self._domain @domain.setter def domain(self, domain): """Sets the domain of this Domain. :param domain: The domain of this Domain. # noqa: E501 :type: str """ if ( self.local_vars_configuration.client_side_validation and domain is None ): # noqa: E501 raise ValueError( "Invalid value for `domain`, must not be `None`" ) # noqa: E501 self._domain = domain @property def primary_landing_page(self): """Gets the primary_landing_page of this Domain. # noqa: E501 :return: The primary_landing_page of this Domain. # noqa: E501 :rtype: bool """ return self._primary_landing_page @primary_landing_page.setter def primary_landing_page(self, primary_landing_page): """Sets the primary_landing_page of this Domain. :param primary_landing_page: The primary_landing_page of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_landing_page is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_landing_page`, must not be `None`" ) # noqa: E501 self._primary_landing_page = primary_landing_page @property def primary_email(self): """Gets the primary_email of this Domain. # noqa: E501 :return: The primary_email of this Domain. # noqa: E501 :rtype: bool """ return self._primary_email @primary_email.setter def primary_email(self, primary_email): """Sets the primary_email of this Domain. :param primary_email: The primary_email of this Domain. 
# noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_email is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_email`, must not be `None`" ) # noqa: E501 self._primary_email = primary_email @property def primary_blog(self): """Gets the primary_blog of this Domain. # noqa: E501 :return: The primary_blog of this Domain. # noqa: E501 :rtype: bool """ return self._primary_blog @primary_blog.setter def primary_blog(self, primary_blog): """Sets the primary_blog of this Domain. :param primary_blog: The primary_blog of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_blog is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_blog`, must not be `None`" ) # noqa: E501 self._primary_blog = primary_blog @property def primary_blog_post(self): """Gets the primary_blog_post of this Domain. # noqa: E501 :return: The primary_blog_post of this Domain. # noqa: E501 :rtype: bool """ return self._primary_blog_post @primary_blog_post.setter def primary_blog_post(self, primary_blog_post): """Sets the primary_blog_post of this Domain. :param primary_blog_post: The primary_blog_post of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_blog_post is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_blog_post`, must not be `None`" ) # noqa: E501 self._primary_blog_post = primary_blog_post @property def primary_site_page(self): """Gets the primary_site_page of this Domain. # noqa: E501 :return: The primary_site_page of this Domain. # noqa: E501 :rtype: bool """ return self._primary_site_page @primary_site_page.setter def primary_site_page(self, primary_site_page): """Sets the primary_site_page of this Domain. :param primary_site_page: The primary_site_page of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_site_page is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_site_page`, must not be `None`" ) # noqa: E501 self._primary_site_page = primary_site_page @property def primary_knowledge(self): """Gets the primary_knowledge of this Domain. # noqa: E501 :return: The primary_knowledge of this Domain. # noqa: E501 :rtype: bool """ return self._primary_knowledge @primary_knowledge.setter def primary_knowledge(self, primary_knowledge): """Sets the primary_knowledge of this Domain. :param primary_knowledge: The primary_knowledge of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_knowledge is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_knowledge`, must not be `None`" ) # noqa: E501 self._primary_knowledge = primary_knowledge @property def primary_legacy_page(self): """Gets the primary_legacy_page of this Domain. # noqa: E501 :return: The primary_legacy_page of this Domain. # noqa: E501 :rtype: bool """ return self._primary_legacy_page @primary_legacy_page.setter def primary_legacy_page(self, primary_legacy_page): """Sets the primary_legacy_page of this Domain. :param primary_legacy_page: The primary_legacy_page of this Domain. 
# noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_legacy_page is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_legacy_page`, must not be `None`" ) # noqa: E501 self._primary_legacy_page = primary_legacy_page @property def primary_click_tracking(self): """Gets the primary_click_tracking of this Domain. # noqa: E501 :return: The primary_click_tracking of this Domain. # noqa: E501 :rtype: bool """ return self._primary_click_tracking @primary_click_tracking.setter def primary_click_tracking(self, primary_click_tracking): """Sets the primary_click_tracking of this Domain. :param primary_click_tracking: The primary_click_tracking of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and primary_click_tracking is None ): # noqa: E501 raise ValueError( "Invalid value for `primary_click_tracking`, must not be `None`" ) # noqa: E501 self._primary_click_tracking = primary_click_tracking @property def full_category_key(self): """Gets the full_category_key of this Domain. # noqa: E501 :return: The full_category_key of this Domain. # noqa: E501 :rtype: str """ return self._full_category_key @full_category_key.setter def full_category_key(self, full_category_key): """Sets the full_category_key of this Domain. :param full_category_key: The full_category_key of this Domain. # noqa: E501 :type: str """ if ( self.local_vars_configuration.client_side_validation and full_category_key is None ): # noqa: E501 raise ValueError( "Invalid value for `full_category_key`, must not be `None`" ) # noqa: E501 self._full_category_key = full_category_key @property def secondary_to_domain(self): """Gets the secondary_to_domain of this Domain. # noqa: E501 :return: The secondary_to_domain of this Domain. # noqa: E501 :rtype: str """ return self._secondary_to_domain @secondary_to_domain.setter def secondary_to_domain(self, secondary_to_domain): """Sets the secondary_to_domain of this Domain. :param secondary_to_domain: The secondary_to_domain of this Domain. # noqa: E501 :type: str """ if ( self.local_vars_configuration.client_side_validation and secondary_to_domain is None ): # noqa: E501 raise ValueError( "Invalid value for `secondary_to_domain`, must not be `None`" ) # noqa: E501 self._secondary_to_domain = secondary_to_domain @property def is_resolving(self): """Gets the is_resolving of this Domain. # noqa: E501 :return: The is_resolving of this Domain. # noqa: E501 :rtype: bool """ return self._is_resolving @is_resolving.setter def is_resolving(self, is_resolving): """Sets the is_resolving of this Domain. :param is_resolving: The is_resolving of this Domain. # noqa: E501 :type: bool """ if ( self.local_vars_configuration.client_side_validation and is_resolving is None ): # noqa: E501 raise ValueError( "Invalid value for `is_resolving`, must not be `None`" ) # noqa: E501 self._is_resolving = is_resolving @property def is_dns_correct(self): """Gets the is_dns_correct of this Domain. # noqa: E501 :return: The is_dns_correct of this Domain. # noqa: E501 :rtype: bool """ return self._is_dns_correct @is_dns_correct.setter def is_dns_correct(self, is_dns_correct): """Sets the is_dns_correct of this Domain. :param is_dns_correct: The is_dns_correct of this Domain. 
  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_dns_correct is None:  # noqa: E501
            raise ValueError("Invalid value for `is_dns_correct`, must not be `None`")  # noqa: E501
        self._is_dns_correct = is_dns_correct

    @property
    def manually_marked_as_resolving(self):
        """Gets the manually_marked_as_resolving of this Domain.  # noqa: E501

        :return: The manually_marked_as_resolving of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._manually_marked_as_resolving

    @manually_marked_as_resolving.setter
    def manually_marked_as_resolving(self, manually_marked_as_resolving):
        """Sets the manually_marked_as_resolving of this Domain.

        :param manually_marked_as_resolving: The manually_marked_as_resolving of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and manually_marked_as_resolving is None:  # noqa: E501
            raise ValueError("Invalid value for `manually_marked_as_resolving`, must not be `None`")  # noqa: E501
        self._manually_marked_as_resolving = manually_marked_as_resolving

    @property
    def consecutive_non_resolving_count(self):
        """Gets the consecutive_non_resolving_count of this Domain.  # noqa: E501

        :return: The consecutive_non_resolving_count of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._consecutive_non_resolving_count

    @consecutive_non_resolving_count.setter
    def consecutive_non_resolving_count(self, consecutive_non_resolving_count):
        """Sets the consecutive_non_resolving_count of this Domain.

        :param consecutive_non_resolving_count: The consecutive_non_resolving_count of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and consecutive_non_resolving_count is None:  # noqa: E501
            raise ValueError("Invalid value for `consecutive_non_resolving_count`, must not be `None`")  # noqa: E501
        self._consecutive_non_resolving_count = consecutive_non_resolving_count

    @property
    def ssl_cname(self):
        """Gets the ssl_cname of this Domain.  # noqa: E501

        :return: The ssl_cname of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._ssl_cname

    @ssl_cname.setter
    def ssl_cname(self, ssl_cname):
        """Sets the ssl_cname of this Domain.

        :param ssl_cname: The ssl_cname of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and ssl_cname is None:  # noqa: E501
            raise ValueError("Invalid value for `ssl_cname`, must not be `None`")  # noqa: E501
        self._ssl_cname = ssl_cname

    @property
    def is_ssl_enabled(self):
        """Gets the is_ssl_enabled of this Domain.  # noqa: E501

        :return: The is_ssl_enabled of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_ssl_enabled

    @is_ssl_enabled.setter
    def is_ssl_enabled(self, is_ssl_enabled):
        """Sets the is_ssl_enabled of this Domain.

        :param is_ssl_enabled: The is_ssl_enabled of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_ssl_enabled is None:  # noqa: E501
            raise ValueError("Invalid value for `is_ssl_enabled`, must not be `None`")  # noqa: E501
        self._is_ssl_enabled = is_ssl_enabled

    @property
    def is_ssl_only(self):
        """Gets the is_ssl_only of this Domain.  # noqa: E501

        :return: The is_ssl_only of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_ssl_only

    @is_ssl_only.setter
    def is_ssl_only(self, is_ssl_only):
        """Sets the is_ssl_only of this Domain.

        :param is_ssl_only: The is_ssl_only of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_ssl_only is None:  # noqa: E501
            raise ValueError("Invalid value for `is_ssl_only`, must not be `None`")  # noqa: E501
        self._is_ssl_only = is_ssl_only

    @property
    def certificate_id(self):
        """Gets the certificate_id of this Domain.  # noqa: E501

        :return: The certificate_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._certificate_id

    @certificate_id.setter
    def certificate_id(self, certificate_id):
        """Sets the certificate_id of this Domain.

        :param certificate_id: The certificate_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and certificate_id is None:  # noqa: E501
            raise ValueError("Invalid value for `certificate_id`, must not be `None`")  # noqa: E501
        self._certificate_id = certificate_id

    @property
    def ssl_request_id(self):
        """Gets the ssl_request_id of this Domain.  # noqa: E501

        :return: The ssl_request_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._ssl_request_id

    @ssl_request_id.setter
    def ssl_request_id(self, ssl_request_id):
        """Sets the ssl_request_id of this Domain.

        :param ssl_request_id: The ssl_request_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and ssl_request_id is None:  # noqa: E501
            raise ValueError("Invalid value for `ssl_request_id`, must not be `None`")  # noqa: E501
        self._ssl_request_id = ssl_request_id

    @property
    def is_used_for_blog_post(self):
        """Gets the is_used_for_blog_post of this Domain.  # noqa: E501

        :return: The is_used_for_blog_post of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_used_for_blog_post

    @is_used_for_blog_post.setter
    def is_used_for_blog_post(self, is_used_for_blog_post):
        """Sets the is_used_for_blog_post of this Domain.

        :param is_used_for_blog_post: The is_used_for_blog_post of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_used_for_blog_post is None:  # noqa: E501
            raise ValueError("Invalid value for `is_used_for_blog_post`, must not be `None`")  # noqa: E501
        self._is_used_for_blog_post = is_used_for_blog_post

    @property
    def is_used_for_site_page(self):
        """Gets the is_used_for_site_page of this Domain.  # noqa: E501

        :return: The is_used_for_site_page of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_used_for_site_page

    @is_used_for_site_page.setter
    def is_used_for_site_page(self, is_used_for_site_page):
        """Sets the is_used_for_site_page of this Domain.

        :param is_used_for_site_page: The is_used_for_site_page of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_used_for_site_page is None:  # noqa: E501
            raise ValueError("Invalid value for `is_used_for_site_page`, must not be `None`")  # noqa: E501
        self._is_used_for_site_page = is_used_for_site_page

    @property
    def is_used_for_landing_page(self):
        """Gets the is_used_for_landing_page of this Domain.  # noqa: E501

        :return: The is_used_for_landing_page of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_used_for_landing_page

    @is_used_for_landing_page.setter
    def is_used_for_landing_page(self, is_used_for_landing_page):
        """Sets the is_used_for_landing_page of this Domain.

        :param is_used_for_landing_page: The is_used_for_landing_page of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_used_for_landing_page is None:  # noqa: E501
            raise ValueError("Invalid value for `is_used_for_landing_page`, must not be `None`")  # noqa: E501
        self._is_used_for_landing_page = is_used_for_landing_page

    @property
    def is_used_for_email(self):
        """Gets the is_used_for_email of this Domain.  # noqa: E501

        :return: The is_used_for_email of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_used_for_email

    @is_used_for_email.setter
    def is_used_for_email(self, is_used_for_email):
        """Sets the is_used_for_email of this Domain.

        :param is_used_for_email: The is_used_for_email of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_used_for_email is None:  # noqa: E501
            raise ValueError("Invalid value for `is_used_for_email`, must not be `None`")  # noqa: E501
        self._is_used_for_email = is_used_for_email

    @property
    def is_used_for_knowledge(self):
        """Gets the is_used_for_knowledge of this Domain.  # noqa: E501

        :return: The is_used_for_knowledge of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_used_for_knowledge

    @is_used_for_knowledge.setter
    def is_used_for_knowledge(self, is_used_for_knowledge):
        """Sets the is_used_for_knowledge of this Domain.

        :param is_used_for_knowledge: The is_used_for_knowledge of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_used_for_knowledge is None:  # noqa: E501
            raise ValueError("Invalid value for `is_used_for_knowledge`, must not be `None`")  # noqa: E501
        self._is_used_for_knowledge = is_used_for_knowledge

    @property
    def setup_task_id(self):
        """Gets the setup_task_id of this Domain.  # noqa: E501

        :return: The setup_task_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._setup_task_id

    @setup_task_id.setter
    def setup_task_id(self, setup_task_id):
        """Sets the setup_task_id of this Domain.

        :param setup_task_id: The setup_task_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and setup_task_id is None:  # noqa: E501
            raise ValueError("Invalid value for `setup_task_id`, must not be `None`")  # noqa: E501
        self._setup_task_id = setup_task_id

    @property
    def is_setup_complete(self):
        """Gets the is_setup_complete of this Domain.  # noqa: E501

        :return: The is_setup_complete of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_setup_complete

    @is_setup_complete.setter
    def is_setup_complete(self, is_setup_complete):
        """Sets the is_setup_complete of this Domain.

        :param is_setup_complete: The is_setup_complete of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_setup_complete is None:  # noqa: E501
            raise ValueError("Invalid value for `is_setup_complete`, must not be `None`")  # noqa: E501
        self._is_setup_complete = is_setup_complete

    @property
    def set_up_language(self):
        """Gets the set_up_language of this Domain.  # noqa: E501

        :return: The set_up_language of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._set_up_language

    @set_up_language.setter
    def set_up_language(self, set_up_language):
        """Sets the set_up_language of this Domain.

        :param set_up_language: The set_up_language of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and set_up_language is None:  # noqa: E501
            raise ValueError("Invalid value for `set_up_language`, must not be `None`")  # noqa: E501
        self._set_up_language = set_up_language

    @property
    def team_ids(self):
        """Gets the team_ids of this Domain.  # noqa: E501

        :return: The team_ids of this Domain.  # noqa: E501
        :rtype: list[int]
        """
        return self._team_ids

    @team_ids.setter
    def team_ids(self, team_ids):
        """Sets the team_ids of this Domain.

        :param team_ids: The team_ids of this Domain.  # noqa: E501
        :type: list[int]
        """
        if self.local_vars_configuration.client_side_validation and team_ids is None:  # noqa: E501
            raise ValueError("Invalid value for `team_ids`, must not be `None`")  # noqa: E501
        self._team_ids = team_ids

    @property
    def actual_cname(self):
        """Gets the actual_cname of this Domain.  # noqa: E501

        :return: The actual_cname of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._actual_cname

    @actual_cname.setter
    def actual_cname(self, actual_cname):
        """Sets the actual_cname of this Domain.

        :param actual_cname: The actual_cname of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and actual_cname is None:  # noqa: E501
            raise ValueError("Invalid value for `actual_cname`, must not be `None`")  # noqa: E501
        self._actual_cname = actual_cname

    @property
    def correct_cname(self):
        """Gets the correct_cname of this Domain.  # noqa: E501

        :return: The correct_cname of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._correct_cname

    @correct_cname.setter
    def correct_cname(self, correct_cname):
        """Sets the correct_cname of this Domain.

        :param correct_cname: The correct_cname of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and correct_cname is None:  # noqa: E501
            raise ValueError("Invalid value for `correct_cname`, must not be `None`")  # noqa: E501
        self._correct_cname = correct_cname

    @property
    def actual_ip(self):
        """Gets the actual_ip of this Domain.  # noqa: E501

        :return: The actual_ip of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._actual_ip

    @actual_ip.setter
    def actual_ip(self, actual_ip):
        """Sets the actual_ip of this Domain.

        :param actual_ip: The actual_ip of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and actual_ip is None:  # noqa: E501
            raise ValueError("Invalid value for `actual_ip`, must not be `None`")  # noqa: E501
        self._actual_ip = actual_ip

    @property
    def apex_resolution_status(self):
        """Gets the apex_resolution_status of this Domain.  # noqa: E501

        :return: The apex_resolution_status of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._apex_resolution_status

    @apex_resolution_status.setter
    def apex_resolution_status(self, apex_resolution_status):
        """Sets the apex_resolution_status of this Domain.

        :param apex_resolution_status: The apex_resolution_status of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and apex_resolution_status is None:  # noqa: E501
            raise ValueError("Invalid value for `apex_resolution_status`, must not be `None`")  # noqa: E501
        allowed_values = ["INELIGIBLE", "SUGGEST_RESOLVING", "ALREADY_RESOLVING", "ERROR"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and apex_resolution_status not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `apex_resolution_status` ({0}), must be one of {1}".format(  # noqa: E501
                    apex_resolution_status, allowed_values
                )
            )
        self._apex_resolution_status = apex_resolution_status

    @property
    def apex_domain(self):
        """Gets the apex_domain of this Domain.  # noqa: E501

        :return: The apex_domain of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._apex_domain

    @apex_domain.setter
    def apex_domain(self, apex_domain):
        """Sets the apex_domain of this Domain.

        :param apex_domain: The apex_domain of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and apex_domain is None:  # noqa: E501
            raise ValueError("Invalid value for `apex_domain`, must not be `None`")  # noqa: E501
        self._apex_domain = apex_domain

    @property
    def public_suffix(self):
        """Gets the public_suffix of this Domain.  # noqa: E501

        :return: The public_suffix of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._public_suffix

    @public_suffix.setter
    def public_suffix(self, public_suffix):
        """Sets the public_suffix of this Domain.

        :param public_suffix: The public_suffix of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and public_suffix is None:  # noqa: E501
            raise ValueError("Invalid value for `public_suffix`, must not be `None`")  # noqa: E501
        self._public_suffix = public_suffix

    @property
    def apex_ip_addresses(self):
        """Gets the apex_ip_addresses of this Domain.  # noqa: E501

        :return: The apex_ip_addresses of this Domain.  # noqa: E501
        :rtype: list[str]
        """
        return self._apex_ip_addresses

    @apex_ip_addresses.setter
    def apex_ip_addresses(self, apex_ip_addresses):
        """Sets the apex_ip_addresses of this Domain.

        :param apex_ip_addresses: The apex_ip_addresses of this Domain.  # noqa: E501
        :type: list[str]
        """
        if self.local_vars_configuration.client_side_validation and apex_ip_addresses is None:  # noqa: E501
            raise ValueError("Invalid value for `apex_ip_addresses`, must not be `None`")  # noqa: E501
        self._apex_ip_addresses = apex_ip_addresses

    @property
    def site_id(self):
        """Gets the site_id of this Domain.  # noqa: E501

        :return: The site_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._site_id

    @site_id.setter
    def site_id(self, site_id):
        """Sets the site_id of this Domain.

        :param site_id: The site_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and site_id is None:  # noqa: E501
            raise ValueError("Invalid value for `site_id`, must not be `None`")  # noqa: E501
        self._site_id = site_id

    @property
    def brand_id(self):
        """Gets the brand_id of this Domain.  # noqa: E501

        :return: The brand_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._brand_id

    @brand_id.setter
    def brand_id(self, brand_id):
        """Sets the brand_id of this Domain.

        :param brand_id: The brand_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and brand_id is None:  # noqa: E501
            raise ValueError("Invalid value for `brand_id`, must not be `None`")  # noqa: E501
        self._brand_id = brand_id

    @property
    def deletable(self):
        """Gets the deletable of this Domain.  # noqa: E501

        :return: The deletable of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._deletable

    @deletable.setter
    def deletable(self, deletable):
        """Sets the deletable of this Domain.

        :param deletable: The deletable of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and deletable is None:  # noqa: E501
            raise ValueError("Invalid value for `deletable`, must not be `None`")  # noqa: E501
        self._deletable = deletable

    @property
    def domain_cdn_config(self):
        """Gets the domain_cdn_config of this Domain.  # noqa: E501

        :return: The domain_cdn_config of this Domain.  # noqa: E501
        :rtype: DomainCdnConfig
        """
        return self._domain_cdn_config

    @domain_cdn_config.setter
    def domain_cdn_config(self, domain_cdn_config):
        """Sets the domain_cdn_config of this Domain.

        :param domain_cdn_config: The domain_cdn_config of this Domain.  # noqa: E501
        :type: DomainCdnConfig
        """
        if self.local_vars_configuration.client_side_validation and domain_cdn_config is None:  # noqa: E501
            raise ValueError("Invalid value for `domain_cdn_config`, must not be `None`")  # noqa: E501
        self._domain_cdn_config = domain_cdn_config

    @property
    def setup_info(self):
        """Gets the setup_info of this Domain.  # noqa: E501

        :return: The setup_info of this Domain.  # noqa: E501
        :rtype: DomainSetupInfo
        """
        return self._setup_info

    @setup_info.setter
    def setup_info(self, setup_info):
        """Sets the setup_info of this Domain.

        :param setup_info: The setup_info of this Domain.  # noqa: E501
        :type: DomainSetupInfo
        """
        if self.local_vars_configuration.client_side_validation and setup_info is None:  # noqa: E501
            raise ValueError("Invalid value for `setup_info`, must not be `None`")  # noqa: E501
        self._setup_info = setup_info

    @property
    def derived_brand_name(self):
        """Gets the derived_brand_name of this Domain.  # noqa: E501

        :return: The derived_brand_name of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._derived_brand_name

    @derived_brand_name.setter
    def derived_brand_name(self, derived_brand_name):
        """Sets the derived_brand_name of this Domain.

        :param derived_brand_name: The derived_brand_name of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and derived_brand_name is None:  # noqa: E501
            raise ValueError("Invalid value for `derived_brand_name`, must not be `None`")  # noqa: E501
        self._derived_brand_name = derived_brand_name

    @property
    def created_by_id(self):
        """Gets the created_by_id of this Domain.  # noqa: E501

        :return: The created_by_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._created_by_id

    @created_by_id.setter
    def created_by_id(self, created_by_id):
        """Sets the created_by_id of this Domain.

        :param created_by_id: The created_by_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and created_by_id is None:  # noqa: E501
            raise ValueError("Invalid value for `created_by_id`, must not be `None`")  # noqa: E501
        self._created_by_id = created_by_id

    @property
    def updated_by_id(self):
        """Gets the updated_by_id of this Domain.  # noqa: E501

        :return: The updated_by_id of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._updated_by_id

    @updated_by_id.setter
    def updated_by_id(self, updated_by_id):
        """Sets the updated_by_id of this Domain.

        :param updated_by_id: The updated_by_id of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and updated_by_id is None:  # noqa: E501
            raise ValueError("Invalid value for `updated_by_id`, must not be `None`")  # noqa: E501
        self._updated_by_id = updated_by_id

    @property
    def label(self):
        """Gets the label of this Domain.  # noqa: E501

        :return: The label of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Sets the label of this Domain.

        :param label: The label of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and label is None:  # noqa: E501
            raise ValueError("Invalid value for `label`, must not be `None`")  # noqa: E501
        self._label = label

    @property
    def is_any_primary(self):
        """Gets the is_any_primary of this Domain.  # noqa: E501

        :return: The is_any_primary of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_any_primary

    @is_any_primary.setter
    def is_any_primary(self, is_any_primary):
        """Sets the is_any_primary of this Domain.

        :param is_any_primary: The is_any_primary of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_any_primary is None:  # noqa: E501
            raise ValueError("Invalid value for `is_any_primary`, must not be `None`")  # noqa: E501
        self._is_any_primary = is_any_primary

    @property
    def is_legacy_domain(self):
        """Gets the is_legacy_domain of this Domain.  # noqa: E501

        :return: The is_legacy_domain of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_legacy_domain

    @is_legacy_domain.setter
    def is_legacy_domain(self, is_legacy_domain):
        """Sets the is_legacy_domain of this Domain.

        :param is_legacy_domain: The is_legacy_domain of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_legacy_domain is None:  # noqa: E501
            raise ValueError("Invalid value for `is_legacy_domain`, must not be `None`")  # noqa: E501
        self._is_legacy_domain = is_legacy_domain

    @property
    def is_internal_domain(self):
        """Gets the is_internal_domain of this Domain.  # noqa: E501

        :return: The is_internal_domain of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_internal_domain

    @is_internal_domain.setter
    def is_internal_domain(self, is_internal_domain):
        """Sets the is_internal_domain of this Domain.

        :param is_internal_domain: The is_internal_domain of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_internal_domain is None:  # noqa: E501
            raise ValueError("Invalid value for `is_internal_domain`, must not be `None`")  # noqa: E501
        self._is_internal_domain = is_internal_domain

    @property
    def is_resolving_internal_property(self):
        """Gets the is_resolving_internal_property of this Domain.  # noqa: E501

        :return: The is_resolving_internal_property of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_resolving_internal_property

    @is_resolving_internal_property.setter
    def is_resolving_internal_property(self, is_resolving_internal_property):
        """Sets the is_resolving_internal_property of this Domain.

        :param is_resolving_internal_property: The is_resolving_internal_property of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_resolving_internal_property is None:  # noqa: E501
            raise ValueError("Invalid value for `is_resolving_internal_property`, must not be `None`")  # noqa: E501
        self._is_resolving_internal_property = is_resolving_internal_property

    @property
    def is_resolving_ignoring_manually_marked_as_resolving(self):
        """Gets the is_resolving_ignoring_manually_marked_as_resolving of this Domain.  # noqa: E501

        :return: The is_resolving_ignoring_manually_marked_as_resolving of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_resolving_ignoring_manually_marked_as_resolving

    @is_resolving_ignoring_manually_marked_as_resolving.setter
    def is_resolving_ignoring_manually_marked_as_resolving(
        self, is_resolving_ignoring_manually_marked_as_resolving
    ):
        """Sets the is_resolving_ignoring_manually_marked_as_resolving of this Domain.

        :param is_resolving_ignoring_manually_marked_as_resolving: The is_resolving_ignoring_manually_marked_as_resolving of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_resolving_ignoring_manually_marked_as_resolving is None:  # noqa: E501
            raise ValueError("Invalid value for `is_resolving_ignoring_manually_marked_as_resolving`, must not be `None`")  # noqa: E501
        self._is_resolving_ignoring_manually_marked_as_resolving = (
            is_resolving_ignoring_manually_marked_as_resolving
        )

    @property
    def is_used_for_any_content_type(self):
        """Gets the is_used_for_any_content_type of this Domain.  # noqa: E501

        :return: The is_used_for_any_content_type of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_used_for_any_content_type

    @is_used_for_any_content_type.setter
    def is_used_for_any_content_type(self, is_used_for_any_content_type):
        """Sets the is_used_for_any_content_type of this Domain.

        :param is_used_for_any_content_type: The is_used_for_any_content_type of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_used_for_any_content_type is None:  # noqa: E501
            raise ValueError("Invalid value for `is_used_for_any_content_type`, must not be `None`")  # noqa: E501
        self._is_used_for_any_content_type = is_used_for_any_content_type

    @property
    def is_legacy(self):
        """Gets the is_legacy of this Domain.  # noqa: E501

        :return: The is_legacy of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_legacy

    @is_legacy.setter
    def is_legacy(self, is_legacy):
        """Sets the is_legacy of this Domain.

        :param is_legacy: The is_legacy of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_legacy is None:  # noqa: E501
            raise ValueError("Invalid value for `is_legacy`, must not be `None`")  # noqa: E501
        self._is_legacy = is_legacy

    @property
    def author_at(self):
        """Gets the author_at of this Domain.  # noqa: E501

        :return: The author_at of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._author_at

    @author_at.setter
    def author_at(self, author_at):
        """Sets the author_at of this Domain.

        :param author_at: The author_at of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and author_at is None:  # noqa: E501
            raise ValueError("Invalid value for `author_at`, must not be `None`")  # noqa: E501
        self._author_at = author_at

    @property
    def cos_object_type(self):
        """Gets the cos_object_type of this Domain.  # noqa: E501

        :return: The cos_object_type of this Domain.  # noqa: E501
        :rtype: str
        """
        return self._cos_object_type

    @cos_object_type.setter
    def cos_object_type(self, cos_object_type):
        """Sets the cos_object_type of this Domain.

        :param cos_object_type: The cos_object_type of this Domain.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and cos_object_type is None:  # noqa: E501
            raise ValueError("Invalid value for `cos_object_type`, must not be `None`")  # noqa: E501
        allowed_values = [
            "CONTENT", "EXTENSION_RESOURCE", "LAYOUT", "CUSTOM_WIDGET",
            "WIDGET", "FORM", "PLACEMENT", "IMAGE", "DOMAIN_SETTINGS",
            "SITE_SETTINGS", "EMAIL_ADDRESS", "WORKFLOW", "HUBDB_TABLE",
            "REDIRECT_URL", "DESIGN_FOLDER", "SITE_MAP", "DOMAIN", "BLOG",
            "FILE", "FOLDER", "SITE_MENU", "THEME", "CONTENT_GROUP",
            "FOLLOW_ME", "KNOWLEDGE_BASE", "LIST_MEMBERSHIP",
            "CONTACT_MEMBERSHIP", "PASSWORD_PROTECTED", "UNRESTRICTED_ACCESS",
            "MARKETPLACE_LISTING", "LAYOUT_SECTION", "THEME_SETTINGS",
            "VIDEO_PLAYER", "URL_MAPPING", "KNOWLEDGE_CATEGORY",
            "KNOWLEDGE_HOMEPAGE_CATEGORY", "RAW_ASSET", "GLOBAL_CONTENT",
            "HUBDB_TABLE_ROW", "BLOG_AUTHOR", "SERVERLESS_FUNCTION",
            "KNOWLEDGE_CATEGORY_TRANSLATION",
        ]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and cos_object_type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `cos_object_type` ({0}), must be one of {1}".format(  # noqa: E501
                    cos_object_type, allowed_values
                )
            )
        self._cos_object_type = cos_object_type

    @property
    def cdn_purge_embargo_time(self):
        """Gets the cdn_purge_embargo_time of this Domain.  # noqa: E501

        :return: The cdn_purge_embargo_time of this Domain.  # noqa: E501
        :rtype: int
        """
        return self._cdn_purge_embargo_time

    @cdn_purge_embargo_time.setter
    def cdn_purge_embargo_time(self, cdn_purge_embargo_time):
        """Sets the cdn_purge_embargo_time of this Domain.

        :param cdn_purge_embargo_time: The cdn_purge_embargo_time of this Domain.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and cdn_purge_embargo_time is None:  # noqa: E501
            raise ValueError("Invalid value for `cdn_purge_embargo_time`, must not be `None`")  # noqa: E501
        self._cdn_purge_embargo_time = cdn_purge_embargo_time

    @property
    def is_staging_domain(self):
        """Gets the is_staging_domain of this Domain.  # noqa: E501

        :return: The is_staging_domain of this Domain.  # noqa: E501
        :rtype: bool
        """
        return self._is_staging_domain

    @is_staging_domain.setter
    def is_staging_domain(self, is_staging_domain):
        """Sets the is_staging_domain of this Domain.

        :param is_staging_domain: The is_staging_domain of this Domain.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and is_staging_domain is None:  # noqa: E501
            raise ValueError("Invalid value for `is_staging_domain`, must not be `None`")  # noqa: E501
        self._is_staging_domain = is_staging_domain

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Domain):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Domain):
            return True

        return self.to_dict() != other.to_dict()
[ "six.iteritems", "hubspot.cms.domains.configuration.Configuration" ]
[((64842, 64875), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (64855, 64875), False, 'import six\n'), ((7799, 7814), 'hubspot.cms.domains.configuration.Configuration', 'Configuration', ([], {}), '()\n', (7812, 7814), False, 'from hubspot.cms.domains.configuration import Configuration\n')]
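For reference, every getter/setter pair in the generated model above follows one client-side validation pattern. Below is a minimal, self-contained sketch of that pattern — the `Configuration` stand-in and the `DomainSketch` class are illustrative, not the real `hubspot.cms.domains` API:

```python
class Configuration:
    """Stand-in for hubspot.cms.domains.configuration.Configuration (illustrative)."""

    def __init__(self, client_side_validation=True):
        self.client_side_validation = client_side_validation


class DomainSketch:
    """Illustrates the generated getter/setter validation pattern."""

    def __init__(self, local_vars_configuration=None):
        self.local_vars_configuration = local_vars_configuration or Configuration()
        self._is_ssl_enabled = None

    @property
    def is_ssl_enabled(self):
        return self._is_ssl_enabled

    @is_ssl_enabled.setter
    def is_ssl_enabled(self, value):
        # Same check the generated setters perform: reject None while
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and value is None:
            raise ValueError("Invalid value for `is_ssl_enabled`, must not be `None`")
        self._is_ssl_enabled = value


d = DomainSketch()
d.is_ssl_enabled = True          # accepted
try:
    d.is_ssl_enabled = None      # rejected while validation is on
except ValueError as exc:
    print(exc)

relaxed = DomainSketch(Configuration(client_side_validation=False))
relaxed.is_ssl_enabled = None    # allowed once validation is disabled
```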
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url

from clients.views import ClientListView, ClientCreateView, ClientSetupView
from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView
from clients.views import ClientProfileEditIdentificationView, ClientProfileEditContactView, ClientProfileEditCommunicationView, ClientProfileEditReferralView, ClientProfileEditOrderView
from clients.views import ClientRelationshipListView, RelationshipCreateView, RelationshipEditView, RelationshipDeleteView

urlpatterns = patterns(
    'clients.views',
    url(
        regex=r'^$',
        view=ClientListView.as_view(),
        name='client_list'
    ),
    url(
        regex=r'^create/$',
        view=ClientCreateView.as_view(),
        name='client_create'
    ),
    url(
        regex=r'^setup/(?P<pk>\d+)$',
        view=ClientSetupView.as_view(),
        name='client_setup'
    ),
    url(
        regex=r'^setup/resume/(?P<pk>\d+)$',
        view=ClientSetupView.as_view(),
        name='client_setup_resumeo'
    ),
    url(
        regex=r'^profile/id/(?P<pk>\d+)$',
        view=ClientProfileIdentificationView.as_view(),
        name='client_profile_identification'
    ),
    url(
        regex=r'^profile/contact/(?P<pk>\d+)$',
        view=ClientProfileContactView.as_view(),
        name='client_profile_contact'
    ),
    url(
        regex=r'^profile/comm/(?P<pk>\d+)$',
        view=ClientProfileCommunicationView.as_view(),
        name='client_profile_communication'
    ),
    url(
        regex=r'^profile/ref/(?P<pk>\d+)$',
        view=ClientProfileReferralView.as_view(),
        name='client_profile_referral'
    ),
    url(
        regex=r'^profile/rel/(?P<pk>\d+)$',
        view=ClientProfileRelationshipView.as_view(),
        name='client_profile_relationship'
    ),
    url(
        regex=r'^(?P<pk>\d+)/rel$',
        view=ClientRelationshipListView.as_view(),
        name='client_relationship_list'
    ),
    url(
        regex=r'^profile/order/(?P<pk>\d+)$',
        view=ClientProfileOrderView.as_view(),
        name='client_profile_order'
    ),
    url(
        regex=r'^profile/diet/(?P<pk>\d+)$',
        view=ClientProfileDietView.as_view(),
        name='client_profile_diet'
    ),
    url(
        regex=r'^rel/(?P<pk>\d+)/create$',  # pk of client
        view=RelationshipCreateView.as_view(),
        name='relationship_create'
    ),
    url(
        regex=r'^rel/(?P<pk>\d+)/edit$',  # pk of relationship
        view=RelationshipEditView.as_view(),
        name='relationship_edit'
    ),
    url(
        regex=r'^rel/(?P<pk>\d+)/delete$',  # pk of relationship
        view=RelationshipDeleteView.as_view(),
        name='relationship_delete'
    ),
    url(
        regex=r'^profile/id/(?P<pk>\d+)/edit$',
        view=ClientProfileEditIdentificationView.as_view(),
        name='client_profile_identification_edit'
    ),
    url(
        regex=r'^profile/contact/(?P<pk>\d+)/edit$',
        view=ClientProfileEditContactView.as_view(),
        name='client_profile_contact_edit'
    ),
    url(
        regex=r'^profile/comm/(?P<pk>\d+)/edit$',
        view=ClientProfileEditCommunicationView.as_view(),
        name='client_profile_communication_edit'
    ),
    url(
        regex=r'^profile/ref/(?P<pk>\d+)/edit$',
        view=ClientProfileEditReferralView.as_view(),
        name='client_profile_referral_edit'
    ),
    url(
        regex=r'^profile/order/(?P<pk>\d+)/edit$',
        view=ClientProfileEditOrderView.as_view(),
        name='client_profile_order_edit'
    )
)
[ "clients.views.ClientSetupView.as_view", "clients.views.ClientProfileEditOrderView.as_view", "clients.views.ClientProfileReferralView.as_view", "clients.views.ClientProfileOrderView.as_view", "clients.views.ClientProfileEditIdentificationView.as_view", "clients.views.ClientProfileIdentificationView.as_view", "clients.views.RelationshipDeleteView.as_view", "clients.views.ClientProfileContactView.as_view", "clients.views.ClientProfileEditReferralView.as_view", "clients.views.ClientListView.as_view", "clients.views.ClientProfileCommunicationView.as_view", "clients.views.RelationshipCreateView.as_view", "clients.views.ClientProfileEditContactView.as_view", "clients.views.ClientCreateView.as_view", "clients.views.RelationshipEditView.as_view", "clients.views.ClientProfileRelationshipView.as_view", "clients.views.ClientProfileEditCommunicationView.as_view", "clients.views.ClientRelationshipListView.as_view", "clients.views.ClientProfileDietView.as_view" ]
[((761, 785), 'clients.views.ClientListView.as_view', 'ClientListView.as_view', ([], {}), '()\n', (783, 785), False, 'from clients.views import ClientListView, ClientCreateView, ClientSetupView\n'), ((876, 902), 'clients.views.ClientCreateView.as_view', 'ClientCreateView.as_view', ([], {}), '()\n', (900, 902), False, 'from clients.views import ClientListView, ClientCreateView, ClientSetupView\n'), ((1005, 1030), 'clients.views.ClientSetupView.as_view', 'ClientSetupView.as_view', ([], {}), '()\n', (1028, 1030), False, 'from clients.views import ClientListView, ClientCreateView, ClientSetupView\n'), ((1134, 1159), 'clients.views.ClientSetupView.as_view', 'ClientSetupView.as_view', ([], {}), '()\n', (1157, 1159), False, 'from clients.views import ClientListView, ClientCreateView, ClientSetupView\n'), ((1269, 1310), 'clients.views.ClientProfileIdentificationView.as_view', 'ClientProfileIdentificationView.as_view', ([], {}), '()\n', (1308, 1310), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((1434, 1468), 'clients.views.ClientProfileContactView.as_view', 'ClientProfileContactView.as_view', ([], {}), '()\n', (1466, 1468), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((1583, 1623), 'clients.views.ClientProfileCommunicationView.as_view', 'ClientProfileCommunicationView.as_view', ([], {}), '()\n', (1621, 1623), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((1742, 1777), 'clients.views.ClientProfileReferralView.as_view', 'ClientProfileReferralView.as_view', ([], {}), '()\n', (1775, 1777), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((1891, 1930), 'clients.views.ClientProfileRelationshipView.as_view', 'ClientProfileRelationshipView.as_view', ([], {}), '()\n', (1928, 1930), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((2040, 2076), 'clients.views.ClientRelationshipListView.as_view', 'ClientRelationshipListView.as_view', ([], {}), '()\n', (2074, 2076), False, 'from clients.views import ClientRelationshipListView, RelationshipCreateView, RelationshipEditView, RelationshipDeleteView\n'), ((2193, 2225), 'clients.views.ClientProfileOrderView.as_view', 'ClientProfileOrderView.as_view', ([], {}), '()\n', (2223, 2225), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((2337, 2368), 'clients.views.ClientProfileDietView.as_view', 'ClientProfileDietView.as_view', ([], {}), '()\n', (2366, 2368), False, 'from clients.views import ClientProfileIdentificationView, ClientProfileContactView, 
ClientProfileCommunicationView, ClientProfileReferralView, ClientProfileRelationshipView, ClientProfileOrderView, ClientProfileDietView\n'), ((2492, 2524), 'clients.views.RelationshipCreateView.as_view', 'RelationshipCreateView.as_view', ([], {}), '()\n', (2522, 2524), False, 'from clients.views import ClientRelationshipListView, RelationshipCreateView, RelationshipEditView, RelationshipDeleteView\n'), ((2671, 2701), 'clients.views.RelationshipEditView.as_view', 'RelationshipEditView.as_view', ([], {}), '()\n', (2699, 2701), False, 'from clients.views import ClientRelationshipListView, RelationshipCreateView, RelationshipEditView, RelationshipDeleteView\n'), ((2830, 2862), 'clients.views.RelationshipDeleteView.as_view', 'RelationshipDeleteView.as_view', ([], {}), '()\n', (2860, 2862), False, 'from clients.views import ClientRelationshipListView, RelationshipCreateView, RelationshipEditView, RelationshipDeleteView\n'), ((2992, 3037), 'clients.views.ClientProfileEditIdentificationView.as_view', 'ClientProfileEditIdentificationView.as_view', ([], {}), '()\n', (3035, 3037), False, 'from clients.views import ClientProfileEditIdentificationView, ClientProfileEditContactView, ClientProfileEditCommunicationView, ClientProfileEditReferralView, ClientProfileEditOrderView\n'), ((3172, 3210), 'clients.views.ClientProfileEditContactView.as_view', 'ClientProfileEditContactView.as_view', ([], {}), '()\n', (3208, 3210), False, 'from clients.views import ClientProfileEditIdentificationView, ClientProfileEditContactView, ClientProfileEditCommunicationView, ClientProfileEditReferralView, ClientProfileEditOrderView\n'), ((3335, 3379), 'clients.views.ClientProfileEditCommunicationView.as_view', 'ClientProfileEditCommunicationView.as_view', ([], {}), '()\n', (3377, 3379), False, 'from clients.views import ClientProfileEditIdentificationView, ClientProfileEditContactView, ClientProfileEditCommunicationView, ClientProfileEditReferralView, ClientProfileEditOrderView\n'), ((3509, 3548), 'clients.views.ClientProfileEditReferralView.as_view', 'ClientProfileEditReferralView.as_view', ([], {}), '()\n', (3546, 3548), False, 'from clients.views import ClientProfileEditIdentificationView, ClientProfileEditContactView, ClientProfileEditCommunicationView, ClientProfileEditReferralView, ClientProfileEditOrderView\n'), ((3675, 3711), 'clients.views.ClientProfileEditOrderView.as_view', 'ClientProfileEditOrderView.as_view', ([], {}), '()\n', (3709, 3711), False, 'from clients.views import ClientProfileEditIdentificationView, ClientProfileEditContactView, ClientProfileEditCommunicationView, ClientProfileEditReferralView, ClientProfileEditOrderView\n')]
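Since every route in the urlconf above is named, views and templates can build URLs by name. A hedged sketch of doing so with Django's `reverse()` — it assumes this urlconf is included at the project root and that settings are already configured; in the Django versions that still ship `patterns()` (pre-1.8), `reverse` is importable from `django.core.urlresolvers`:

```python
from django.core.urlresolvers import reverse

# Build the client identification profile URL for client pk=42.
profile_url = reverse('client_profile_identification', kwargs={'pk': 42})
# e.g. '/profile/id/42' when this urlconf is included at the root

# Build the edit URL for relationship pk=7.
edit_url = reverse('relationship_edit', kwargs={'pk': 7})
# e.g. '/rel/7/edit'
```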