content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.9.1+dev
#   kernelspec:
#     display_name: Python [conda env:core_acc_env] *
#     language: python
#     name: conda-env-core_acc_env-py
# ---

# # Differential expression analysis
#
# This notebook performs Differential Expression analysis using experiment, PRJNA283002, associated with [this publication](https://pubmed.ncbi.nlm.nih.gov/26078448/). Here they characterized the Anr regulon by comparing WT vs anr mutants.

# +
# %load_ext autoreload
# %load_ext rpy2.ipython
# %autoreload 2
import os

import pandas as pd
from rpy2.robjects import pandas2ri

from core_acc_modules import paths_corr, utils, DE_helper

# Enable automatic pandas <-> R data.frame conversion for the rpy2 cell below.
pandas2ri.activate()
# -

# Load gene expression data (samples x genes; TSV with sample ids as the index)
expression_df = pd.read_csv(paths_corr.PAO1_GE, sep="\t", index_col=0, header=0)

# ## Select expression data for experiment and replace gene ids

# +
# Select expression data associated with PRJNA283002 experiment
# (the metadata file's index lists exactly the sample ids of this experiment)
sample_metadata = pd.read_csv(paths_corr.DE_METADATA, sep="\t", index_col=0, header=0)

select_sample_ids = list(sample_metadata.index)
select_expression_df = expression_df.loc[select_sample_ids]

# +
# Replace gene sequencing ids with PAO1 ids to help us interpret our findings
pao1_fasta_file = paths_corr.PAO1_REF

seq_id_to_gene_id_pao1 = utils.dict_gene_num_to_ids(pao1_fasta_file)

select_expression_df.rename(mapper=seq_id_to_gene_id_pao1, axis="columns", inplace=True)

select_expression_df.head()
# -

# Save selected expression data
select_expression_df.to_csv(paths_corr.SELECT_GE, sep="\t")

# ## DE analysis

# Process data for DESeq (rounds counts / formats the matrix as DESeq expects)
DE_helper.process_samples_for_DESeq(
    paths_corr.SELECT_GE,
    paths_corr.DE_METADATA,
    paths_corr.SELECT_GE_PROCESSED,
)

# Create subdirectory: "<local_dir>/DE_stats/"
os.makedirs(paths_corr.DE_STATS_DIR, exist_ok=True)

# Convert python path objects to plain strings for use by R in the next cell
metadata_filename = str(paths_corr.DE_METADATA)
processed_expression_filename = str(paths_corr.SELECT_GE_PROCESSED)
repo_dir = str(paths_corr.PROJECT_DIR)
out_filename = str(paths_corr.DE_STATS_OUTPUT)

# + magic_args="-i metadata_filename -i processed_expression_filename -i out_filename -i repo_dir" language="R"
# source(paste0(repo_dir, '/core_acc_modules/DE_analysis.R'))
#
# # File created: "<local_dir>/DE_stats/DE_stats_template_data_<project_id>_real.txt"
# get_DE_stats_DESeq(
#     metadata_filename,
#     processed_expression_filename,
#     out_filename
# )
# -

# ## Compare results with publication

# +
# Get top DEGs
# Compare against publication
DE_stats = pd.read_csv(paths_corr.DE_STATS_OUTPUT, sep="\t", header=0, index_col=0)

# Keep genes with |log2 fold change| > 1
selected_DE_stats = DE_stats[(abs(DE_stats["log2FoldChange"]) > 1)]
print(selected_DE_stats.shape)
selected_DE_stats
# -

# Differentially expressed genes reported by the publication (Figure 1)
published_DEGs = [
    "PA1557",
    "PA3928",
    "PA2119",
    "PA3847",
    "PA0515",
    "PA0513",
    "PA0512",
    "PA0510",
    "PA0521",
    "PA0522",
    "PA0525",
    "PA0526",
    "PA2126",
    "PA2127",
    "PA2133",
]

selected_DE_stats.loc[published_DEGs]

# +
# Inspect the raw DESeq input counts for the published genes
input_DESeq_data = pd.read_csv(
    processed_expression_filename, sep="\t", index_col=0, header=0
)

input_DESeq_data[published_DEGs]
# -

# The differential expression results can be found in [Figure 1](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4524035/) of the paper. Spot checking it looks like the genes have consistent direction of logFC.
#
# Note:
# * NaN's occur if the samples are all 0. Need to check why PA3847 is NaN. Setting filtering to False doesn't get rid of NaNs
#
# https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na
nilq/baby-python
python
from rest_framework import viewsets, filters, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import APIView

from profiles_api import permissions
from profiles_api import serializers, models


# noinspection PyMethodMayBeStatic
class HelloAPIViewSets(viewsets.ViewSet):
    """Test API Viewset demonstrating the standard ViewSet actions."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message."""
        # BUG FIX: the original list was missing a comma after the first
        # string, silently concatenating the first two items into one.
        a_viewset = [
            'Uses actions(list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs',
            'More features , less code',
        ]
        return Response({'message': 'Hello', 'an_apiview': a_viewset})

    def create(self, request):
        """Create a new hello message from the validated serializer name."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            # BUG FIX: "Hellow" -> "Hello" in the user-facing message.
            message = f'Hello {name}'
            return Response({'message': message})
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, pk=None):
        """Retrieve a particular object."""
        return Response({'method': 'GET'})

    def update(self, request, pk=None):
        """Update an object."""
        return Response({'method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Update a field in an object."""
        return Response({'method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Delete an object."""
        return Response({'method': 'DELETE'})


# noinspection PyMethodMayBeStatic
class HelloAPIView(APIView):
    """Test API View demonstrating the standard HTTP method handlers."""

    serializer_class = serializers.HelloSerializer

    def get(self, request):
        """Return a list of API view features."""
        # BUG FIX: missing comma after the first string caused implicit
        # concatenation of the first two feature descriptions.
        an_apiview = [
            'Uses HTTP methods as function (get, post, patch, put, delete)',
            'Is similar to a traditional view',
            'Gives you the most control over application',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello', 'an_apiview': an_apiview})

    def post(self, request):
        """Create hello message from a POST request."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            # BUG FIX: "Hellow" -> "Hello" in the user-facing message.
            message = f'Hello {name}'
            return Response({'message': message})
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def put(self, request):
        """Update an object."""
        return Response({'method': 'PUT'})

    def patch(self, request):
        """Partial update of the object."""
        return Response({'method': 'PATCH'})

    def delete(self, request):
        """Delete an object."""
        return Response({'method': 'DELETE'})


# noinspection PyMethodMayBeStatic
class UserViewSet(viewsets.ModelViewSet):
    """Handles creating and updating user profiles.

    Token-authenticated; a user may only update their own profile, and
    profiles are searchable by name and email.
    """

    serializer_class = serializers.UserSerializer
    queryset = models.User.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)


class UserLoginAPIView(ObtainAuthToken):
    """Handles creating user auth tokens (rendered in the browsable API)."""

    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES


class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handles creating, reading and updating profile feed items."""

    authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = models.ProfileFeedItem.objects.all()
    permission_classes = (
        permissions.UpdateOwnStatus,
        IsAuthenticated
    )

    def perform_create(self, serializer):
        """Set the feed item's user profile to the logged-in user."""
        serializer.save(user_profile=self.request.user)
nilq/baby-python
python
import json, yaml
import logging

# Debug toggle: non-zero enables DEBUG-level logging on the root logger.
DEBUG = 0

# Configure the root logger so every module-level logger inherits this setup.
logger = logging.getLogger()
#coloredlogs.install(level='DEBUG')
#coloredlogs.install(level='INFO')
logger.setLevel(logging.DEBUG if DEBUG else logging.INFO)

# Emit records to stderr with timestamp, source location and severity.
strhdlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
strhdlr.setFormatter(formatter)
logger.addHandler(strhdlr)
nilq/baby-python
python
#!/usr/bin/env python3
"""
Makes Maven multi module project.
"""
from argparse import ArgumentParser
from os import makedirs
from os.path import realpath, relpath, dirname, normpath
from sys import argv

import vang.maven.pom as pom

# Skeleton aggregator pom; ###placeholders### are substituted in get_pom.
POM_TEMPLATE = """<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>###group_id###</groupId>
    <artifactId>###artifact_id###</artifactId>
    <version>###version###</version>
    <packaging>pom</packaging>
    <modules>
###modules###
    </modules>
</project>"""


def get_pom(pom_infos, output_dir, group_id, artifact_id, version):
    """
    Returns multi module pom content for pom_infos with paths
    relative to output_dir.

    :param pom_infos: iterable of dicts each holding a 'pom_path' key
    :param output_dir: directory the aggregator pom will live in
    """
    modules = '\n'.join('        <module>{}</module>'.format(
        relpath(realpath(dirname(info['pom_path'])), realpath(output_dir)))
                        for info in pom_infos)
    return POM_TEMPLATE \
        .replace('###group_id###', group_id) \
        .replace('###artifact_id###', artifact_id) \
        .replace('###version###', version) \
        .replace('###modules###', modules)


def make_project(pom_infos, output_dir, group_id, artifact_id, version,
                 **kwargs):
    """
    Makes a Maven multi module project by writing an aggregator pom.xml
    into output_dir. Extra keyword args (e.g. source_dir from the defaults
    dict) are accepted and ignored.
    """
    # FIX: renamed local from 'pom' — it shadowed the imported 'pom' module.
    pom_content = get_pom(pom_infos, output_dir, group_id, artifact_id,
                          version)
    # FIX: tolerate a pre-existing output directory instead of crashing.
    makedirs(output_dir, exist_ok=True)
    with open(
            normpath(f'{output_dir}/pom.xml'),
            'wt',
            encoding='utf-8',
    ) as pom_file:
        pom_file.write(pom_content)


def get_pom_infos(source_dir):
    """Collect pom info dicts for every pom found under source_dir.

    Unreadable poms are reported and skipped (best-effort behaviour).
    """
    pom_infos = []
    for pom_path in pom.get_pom_paths(source_dir):
        try:
            pom_info = pom.get_pom_info(pom_path)
            pom_infos.append(pom_info)
        except Exception as e:  # pragma: no cover
            print(f'Can not add {pom_path}')
            print(e)
    return pom_infos


def parse_args(args):
    """Parse command line args; only flag is -d/--use_defaults."""
    parser = ArgumentParser(description='Create Maven multi module project')
    parser.add_argument(
        '-d',
        '--use_defaults',
        action='store_true',
        help='Create with default values.')
    return parser.parse_args(args)


def main(use_defaults):
    """Build the project, either from defaults or interactive prompts."""
    defaults = {
        'group_id': 'my.group',
        'artifact_id': 'ws',
        'version': '1.0.0-SNAPSHOT',
        'source_dir': '.',
        'output_dir': 'ws'
    }
    if use_defaults:
        pom_infos = get_pom_infos(defaults['source_dir'])
        make_project(pom_infos, **defaults)
    else:
        # FIX: prompts now state the actual defaults ('my.group', 'ws').
        group_id = str(
            input('groupId (default my.group): ') or defaults['group_id'])
        artifact_id = str(
            input('artifactId (default ws): ') or defaults['artifact_id'])
        version = str(
            input('version (default 1.0.0-SNAPSHOT): ') or defaults['version'])
        source_dir = normpath(
            str(input('sourceDir: (default .)') or defaults['source_dir']))
        output_dir = normpath(
            str(input('outputDir: (default ws)') or defaults['output_dir']))
        pom_infos = get_pom_infos(source_dir)
        make_project(pom_infos, output_dir, group_id, artifact_id, version)


if __name__ == '__main__':  # pragma: no cover
    main(**parse_args(argv[1:]).__dict__)
nilq/baby-python
python
# Update a stored SparkPost email template via the SparkPost Python client.
from sparkpost import SparkPost

# NOTE(review): no API key is passed here — presumably the client reads
# credentials from the environment; confirm before running.
sp = SparkPost()

# Update the template identified by 'TEST_ID' with new metadata and HTML body.
response = sp.templates.update(
    'TEST_ID',
    name='Test Template',
    from_email='test@test.com',
    subject='Updated Test email template!',
    html='<b>This is a test email template! Updated!</b>'
)

# Print the raw API response for inspection.
print(response)
nilq/baby-python
python
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Unit tests for solumclient.v1.plan.PlanManager using fake HTTP fixtures."""

from unittest import mock

from solumclient.common.apiclient import client
from solumclient.common.apiclient import fake_client
from solumclient.tests import base
from solumclient.v1 import plan

# Sample list-endpoint payload: two plans, each with one artifact and one
# build service.
plan_list = [
    {
        'name': 'Example plan 1',
        'artifacts': (
            [{'name': 'My python app',
              'artifact_type': 'git_pull',
              'content': {'href': 'git://example.com/project.git'},
              'requirements': [{
                  'requirement_type': 'git_pull',
                  'language_pack': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                  'fulfillment': '1dae5a09ef2b4d8cbf3594b0eb4f6b94'}]}]),
        'services': [{'name': 'Build Service',
                      'id': 'build',
                      'characteristics': ['python_build_service']}],
        'description': 'A plan with no services or artifacts shown'
    },
    {
        'name': 'Example plan 2',
        'artifacts': (
            [{'name': 'My java app',
              'artifact_type': 'git_pull',
              'content': {'href': 'git://example.com/project.git'},
              'requirements': [{
                  'requirement_type': 'git_pull',
                  'language_pack': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                  'fulfillment': '1dae5a09ef2b4d8cbf3594b0eb4f6b94'}]}]),
        'services': [{'name': 'Build Service',
                      'id': 'build',
                      'characteristics': ['python_build_service']}],
        'description': 'A plan with no services or artifacts shown'
    },
]

# Single-plan fixture pieces reused by get/create/update fixtures below.
artifacts = [{'name': 'My python app',
              'artifact_type': 'git_pull',
              'content': {'href': 'git://example.com/project.git'},
              'requirements': [{
                  'requirement_type': 'git_pull',
                  'language_pack': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                  'fulfillment': '1dae5a09ef2b4d8cbf3594b0eb4f6b94'}]}]

services = [{'name': 'Build Service',
             'id': 'build',
             'characteristics': ['python_build_service']}]

plan_fixture = {
    'uri': 'http://example.com/v1/plans/p1',
    'name': 'Example plan',
    'type': 'plan',
    'tags': ['small'],
    'artifacts': artifacts,
    'services': services,
    'project_id': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
    'user_id': '55f41cf46df74320b9486a35f5d28a11',
    'description': 'A plan with no services or artifacts shown'
}

# FakeHTTPClient fixtures: {url: {method: (headers, body)}}.
fixtures_list = {
    '/v1/plans': {
        'GET': (
            {},
            plan_list
        ),
    }
}

fixtures_list_empty = {
    '/v1/plans': {
        'GET': (
            {},
            []
        ),
    }
}

fixtures_get = {
    '/v1/plans/p1': {
        'GET': (
            {},
            plan_fixture
        ),
    }
}

fixtures_create = {
    '/v1/plans': {
        'POST': (
            {},
            plan_fixture
        ),
    }
}

fixtures_put = {
    '/v1/plans/p1': {
        'PUT': (
            {},
            plan_fixture
        ),
    }
}


class PlanManagerTest(base.TestCase):
    """Exercises PlanManager CRUD calls against faked HTTP responses."""

    def assert_plan_obj(self, plan_obj):
        """Shared assertions: returned object matches plan_fixture."""
        self.assertIn('Plan', repr(plan_obj))
        self.assertIn('Artifact', repr(plan_obj.artifacts[0]))
        self.assertIn('ServiceReference', repr(plan_obj.services[0]))
        self.assertEqual(plan_fixture['uri'], plan_obj.uri)
        self.assertEqual(plan_fixture['type'], plan_obj.type)
        self.assertEqual(plan_fixture['project_id'], plan_obj.project_id)
        self.assertEqual(plan_fixture['user_id'], plan_obj.user_id)

    def test_list_all(self):
        """Construct a manager against the list fixture (assertions disabled)."""
        fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_list)
        api_client = client.BaseClient(fake_http_client)
        plan.PlanManager(api_client)
        # NOTE(stannie): will re-enable this test once
        # https://bugs.launchpad.net/solum/+bug/1331093 is committed.
        # FakeHTTPClient doesn't manage YAML properly but since this method
        # will use the json content-type once implemented in the API, this can
        # stay temporary disabled.

    def test_list_empty(self):
        """list() returns [] when the API reports no plans."""
        fake_http_client = fake_client.FakeHTTPClient(
            fixtures=fixtures_list_empty)
        api_client = client.BaseClient(fake_http_client)
        mgr = plan.PlanManager(api_client)
        self.assertEqual([], mgr.list())

    def test_create(self):
        """create() with a YAML plan definition returns a Plan object."""
        fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_create)
        api_client = client.BaseClient(fake_http_client)
        mgr = plan.PlanManager(api_client)
        plan_obj = mgr.create('version: 1\nname: ex_plan1\ndescription: dsc1.')
        self.assert_plan_obj(plan_obj)

    def test_plan_create_post_failure(self):
        """create() surfaces exceptions raised by the underlying POST."""
        api_client = mock.MagicMock()
        api_client.post.side_effect = Exception("Bad data")
        try:
            mgr = plan.PlanManager(api_client)
            mgr.create('version: 1\nname: ex_plan1\ndescription: dsc1.')
        except Exception:
            self.assertTrue(True)

    def test_plan_create_post_success(self):
        """create() parses the YAML body of a successful POST response."""
        api_client = mock.MagicMock()
        dummy_data = 'version: 1\nname: ex_plan1\ndescription: dsc1.'
        response = mock.MagicMock()
        setattr(response, 'content', dummy_data)
        api_client.post.return_value = response
        try:
            mgr = plan.PlanManager(api_client)
            plan_obj = mgr.create(dummy_data)
            assert plan_obj is not None
            assert plan_obj.name == 'ex_plan1'
            assert plan_obj.description == 'dsc1.'
            assert plan_obj.version == 1
        except Exception:
            self.assertFalse(True)

    def test_get(self):
        """get() fetches a single plan by id."""
        fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_get)
        api_client = client.BaseClient(fake_http_client)
        mgr = plan.PlanManager(api_client)
        plan_obj = mgr.get(plan_id='p1')
        self.assert_plan_obj(plan_obj)

    def test_update(self):
        """update() PUTs a new definition for an existing plan."""
        fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_put)
        api_client = client.BaseClient(fake_http_client)
        mgr = plan.PlanManager(api_client)
        plan_obj = mgr.update('version: 1\nname: ex_plan1\ndescription: dsc1.',
                              plan_id='p1')
        self.assert_plan_obj(plan_obj)
nilq/baby-python
python
import json
import logging
import os

import boto3

from get_and_parse_hiscores.lib.hiscores import rs_api

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# Module-level resources are created once and reused across warm invocations.
ddb = boto3.resource("dynamodb")
table = ddb.Table(os.environ["HISCORES_TABLE_NAME"])


def handler(event, context):
    """Call HiScores API, parse response, and save to Dynamo table.

    :param event: SQS-style event whose records carry JSON bodies with a
        "player" key. Only the first player is processed.
    :param context: Lambda context object (unused).
    :returns: the payload dict written to the DynamoDB table.
    :raises ValueError: if no record contains a player name.
    """
    # retrieve player username
    logger.debug("Received event: %s", event)
    records = event["Records"]
    try:
        players = [json.loads(record["body"])["player"] for record in records]
        player = players[0]
    except (KeyError, IndexError):
        raise ValueError(f"Event did not contain player names: {event}")
    if len(players) > 1:
        # FIX: logger.warn is a deprecated alias; use warning(). Lazy %-style
        # args avoid formatting when the level is disabled.
        logger.warning(
            "Received records for multiple players: %s. "
            "Only the first player, '%s', will be processed.",
            players,
            player,
        )
    # Dashes encode spaces in queued usernames.
    player = player.replace("-", " ")

    # retrieve HiScores for `player`
    logger.info("Getting HiScores for %s", player)
    payload = rs_api.process_hiscores_response(
        rs_api.request_hiscores(player=player, timeout=45.0)
    )

    # write result to `table`
    logger.info(
        "Putting payload for player '%s', timestamp '%s'",
        payload["player"],
        payload["timestamp"],
    )
    logger.debug("Putting payload %s", payload)
    table.put_item(Item=payload)
    return payload
nilq/baby-python
python
import sys
import copy
import math


def extra():
    """Part two: a million cups, ten million rounds.

    Represents the circle as a successor map (next_nums[c] is the cup
    clockwise of cup c) so each round is O(1).
    """
    # FIX: use a context manager so the input file is always closed, and
    # strip the line so a trailing newline can't crash int().
    with open("23.input") as fp:
        nums = list(map(int, fp.readline().strip()))

    # Link the initial labelled cups in input order.
    next_nums = {}
    for i in range(len(nums) - 1):
        next_nums[nums[i]] = nums[i + 1]

    # Extend the circle with cups 10..1_000_000 and close the loop.
    MAX_VAL = 1_000_000
    next_nums[nums[-1]] = 10
    for i in range(10, MAX_VAL):
        next_nums[i] = i + 1
    next_nums[MAX_VAL] = nums[0]

    N_ROUND = 10_000_000

    current = nums[0]
    for n_round in range(N_ROUND):
        if n_round != 0:
            current = next_nums[current]

        # Pick up the three cups clockwise of the current cup.
        one = next_nums[current]
        two = next_nums[one]
        three = next_nums[two]
        pickup = (one, two, three)

        # Destination: current label minus one, wrapping past 1 to MAX_VAL,
        # skipping any label that was just picked up.
        destination = current - 1 if current > 1 else MAX_VAL
        while destination in pickup:
            destination = destination - 1 if destination > 1 else MAX_VAL

        # Splice the picked-up cups back in after the destination cup.
        next_nums[current] = next_nums[three]
        next_nums[three] = next_nums[destination]
        next_nums[destination] = one

    # Answer: product of the two cups clockwise of cup 1.
    ans = next_nums[1] * next_nums[next_nums[1]]
    print(ans)


def main():
    """Part one: 9 cups, 100 rounds, straightforward list simulation."""
    with open("23.input") as fp:
        nums = list(map(int, fp.readline().strip()))

    curr_cup_idx = 0
    N_PICKUP = 3
    N_ROUND = 100
    for n_round in range(N_ROUND):
        # curr step
        current_val = nums[curr_cup_idx]
        pickups = [
            nums[(curr_cup_idx + x) % len(nums)] for x in range(1, N_PICKUP + 1)
        ]
        destination = nums[curr_cup_idx] - 1 if nums[curr_cup_idx] > 1 else 9
        while True:
            if destination in pickups:
                destination = destination - 1 if destination > 1 else 9
            else:
                break

        # move to next step: remove the picked-up cups and reinsert them
        # immediately clockwise of the destination cup.
        for pickup in pickups:
            nums.remove(pickup)
        for i, num in enumerate(nums):
            if num == destination:
                for k, pickup in enumerate(pickups):
                    nums.insert(i + k + 1, pickup)
                break
        # Re-locate the current cup (indices shifted) and advance clockwise.
        for i, num in enumerate(nums):
            if num == current_val:
                curr_cup_idx = i
                break
        curr_cup_idx = (curr_cup_idx + 1) % len(nums)

    # Answer: the eight labels clockwise of cup 1, concatenated.
    pos1 = None
    for i, num in enumerate(nums):
        if num == 1:
            pos1 = i
            break
    ans = []
    for i in range(8):
        ans.append(str(nums[(pos1 + i + 1) % len(nums)]))
    ans = ''.join(ans)
    print(ans)


if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] == 'extra':
        extra()
    else:
        main()
nilq/baby-python
python
# coding=utf-8
"""
Common methods for UI code.
"""
from __future__ import absolute_import

from datacube.utils import is_supported_document_type


def get_metadata_path(dataset_path):
    """
    Find a metadata path for a given input/dataset path.

    :type dataset_path: pathlib.Path
    :rtype: Path
    """
    # The path itself may already be a supported metadata document.
    if dataset_path.is_file() and is_supported_document_type(dataset_path):
        return dataset_path

    # Candidate base names, in priority order: a sibling file with the
    # appended suffix '.agdc-md', then — for directories — a contained
    # 'agdc-metadata' file describing all datasets within.
    candidates = [dataset_path.parent.joinpath('{}.agdc-md'.format(dataset_path.name))]
    if dataset_path.is_dir():
        candidates.append(dataset_path.joinpath('agdc-metadata'))

    for candidate in candidates:
        match = _find_any_metadata_suffix(candidate)
        if match:
            return match

    raise ValueError('No metadata found for input %r' % dataset_path)


def _find_any_metadata_suffix(path):
    """
    Find any metadata files that exist with the given file name/path.
    (supported suffixes are tried on the name)

    :type path: pathlib.Path
    """
    matches = [p for p in path.parent.glob(path.name + '*')
               if is_supported_document_type(p)]

    if not matches:
        return None
    if len(matches) > 1:
        raise ValueError('Multiple matched metadata files: {!r}'.format(matches))
    return matches[0]
nilq/baby-python
python
import sys
import json
import collections

# Width of one memory cell: [token, speaker, turn, position] padded to size 4.
MEM_TOKEN_SIZE = 4


def build_vocab(tokens, vocab_count):
    """Add each whitespace token of `tokens` to the `vocab_count` frequency map."""
    # dict.get avoids the double lookup of the original `in keys()` check.
    for tok in tokens.split():
        vocab_count[tok] = vocab_count.get(tok, 0) + 1


def process_one_conversation(text, vocab_count):
    """Convert one JSON conversation line into memory-network training fields.

    :param text: one JSON-encoded conversation with keys goal/knowledge/
        history and optionally response.
    :param vocab_count: frequency dict updated in place for vocab building.
    :returns: dict with context/conversation/KB memory arrays, pointer and
        selector supervision, gold entities and the sketch response.
    """
    # FIX: json.loads no longer accepts an `encoding` argument (removed in
    # Python 3.9); the input `text` is already a decoded str.
    conversation = json.loads(text.strip(),
                              object_pairs_hook=collections.OrderedDict)
    goal = conversation["goal"]
    knowledge = conversation["knowledge"]
    history = conversation["history"] if len(conversation["history"]) > 0 else ["null"]
    response = conversation["response"] if "response" in conversation else "null"

    video_entities, person_entities = [], []
    context_arr, conv_arr, kb_arr = [], [], []
    # Entities (subject + object tokens) grouped by which topic they describe.
    all_entities = {'topic_a': [],
                    'topic_b': []
                    }
    topic_a = goal[0][1]
    topic_b = goal[0][2]

    nid = 0
    for i, triple in enumerate(knowledge):
        [s, p, o] = triple
        triple_str = " ".join(triple)
        build_vocab(triple_str, vocab_count)
        assert s in [topic_a, topic_b]
        o_tokens = o.split()
        if s == topic_a:
            all_entities['topic_a'].append(s)
            for tok in o_tokens:
                all_entities['topic_a'].append(tok)
        else:
            all_entities['topic_b'].append(s)
            for tok in o_tokens:
                all_entities['topic_b'].append(tok)
        # Classify each topic as film vs. person from its "领域" (domain) triple.
        if u"领域" == p:
            if topic_a == s:
                domain_a = o
                if domain_a == u"电影":
                    video_entities.append(topic_a)
                else:
                    person_entities.append(topic_a)
            elif topic_b == s:
                domain_b = o
                if domain_b == u"电影":
                    video_entities.append(topic_b)
                else:
                    person_entities.append(topic_b)
        kb_info = generate_memory(triple, "", str(nid))
        kb_arr += kb_info
        # KB memories are prepended so they precede dialogue memories.
        context_arr = kb_info + context_arr

    for i, utterance in enumerate(history):
        if utterance == 'null':
            gen_m = generate_memory(utterance, "$u", 0)
        elif i % 2 == 0:
            # Even positions are system turns; they advance the turn counter.
            build_vocab(utterance, vocab_count)
            nid += 1
            gen_m = generate_memory(utterance, "$s", str(nid))
        else:
            build_vocab(utterance, vocab_count)
            gen_m = generate_memory(utterance, "$u", str(nid))
        context_arr += gen_m
        conv_arr += gen_m

    build_vocab(response, vocab_count)

    # get gold entity for each response
    response_tokens = response.split()
    gold_ent = []
    for w in response_tokens:
        if w in all_entities['topic_a'] or w in all_entities['topic_b']:
            gold_ent.append(w)

    # get local pointer position for each word in system response
    ptr_index = []
    for key in response_tokens:
        index = [loc for loc, val in enumerate(context_arr)
                 if (val[0] == key and key in gold_ent)]
        if len(index) > 0:
            index = max(index)
        else:
            index = len(context_arr)
        ptr_index.append(index)

    # get global pointer labels for words in system response, the 1 in the
    # end is for the NULL token. PERF: membership sets replace the original
    # per-entry `response.split()` recomputation and list scans.
    gold_ent_set = set(gold_ent)
    response_token_set = set(response_tokens)
    selector_index = [1 if (word_arr[0] in gold_ent_set
                            or word_arr[0] in response_token_set) else 0
                      for word_arr in context_arr] + [1]

    # get sketch response
    topic_entity = [topic_a, topic_b]
    sketch_response = generate_template(topic_entity, response)

    data_detail = {
        'context_arr': list(context_arr + [['$$$$'] * MEM_TOKEN_SIZE]),  # $$$$ is NULL token
        'response': response,
        'sketch_response': sketch_response,
        'ptr_index': ptr_index + [len(context_arr)],
        'selector_index': selector_index,
        'ent_index': gold_ent,
        'conv_arr': list(conv_arr),
        'kb_arr': list(kb_arr)
    }
    return data_detail


def generate_memory(sent, speaker, time):
    """Encode one utterance or KB triple as fixed-width memory cells.

    Dialogue input (speaker "$u"/"$s") yields one cell per token:
    [token, speaker, turn<t>, word<i>, PAD...]. A KB triple yields a single
    reversed-triple cell padded to MEM_TOKEN_SIZE.
    """
    sent_new = []
    if speaker == "$u" or speaker == "$s":  # dialogue memory
        sent_token = sent.split(' ')
        for idx, word in enumerate(sent_token):
            temp = [word, speaker, 'turn' + str(time), 'word' + str(idx)] \
                + ["PAD"] * (MEM_TOKEN_SIZE - 4)
            sent_new.append(temp)
    else:  # knowledge memory: object first so it is the addressable token
        sent_token = sent[::-1] + ["PAD"] * (MEM_TOKEN_SIZE - len(sent))
        sent_new.append(sent_token)
    return sent_new


def generate_template(topic_entity, sentence):
    """
    Based on the system response and the provided entity table,
    the output is the sketch response where topic mentions are replaced
    by @topic_a / @topic_b placeholders.
    """
    sketch_response = []
    for word in sentence.split():
        if word not in topic_entity:
            sketch_response.append(word)
        else:
            if word == topic_entity[0]:
                ent_type = 'topic_a'
            else:
                ent_type = 'topic_b'
            sketch_response.append('@' + ent_type)
    sketch_response = " ".join(sketch_response)
    return sketch_response


def convert_sample_to_json(sample_file, json_file, vocab_file=None):
    """Convert a file of raw conversation lines to processed JSON lines,
    optionally writing a frequency-sorted vocabulary file."""
    print("Reading lines from %s" % sample_file)
    vocab_count = {}
    with open(sample_file, 'r') as fr, open(json_file, 'w') as fw:
        for i, line in enumerate(fr):
            text_dict = process_one_conversation(line, vocab_count)
            text_json = json.dumps(text_dict, ensure_ascii=False)
            fw.write(text_json + "\n")
            if i > 0 and i % 10000 == 0:
                print("line %d done" % i)
    if vocab_file is not None:
        print("Building vocabs...")
        vocab_sorted = sorted(vocab_count.items(), key=lambda tup: tup[1],
                              reverse=True)
        with open(vocab_file, 'w') as fw:
            for word, freq in vocab_sorted:
                fw.write(word + '\t' + str(freq) + '\n')


if __name__ == '__main__':
    try:
        convert_sample_to_json(sys.argv[1], sys.argv[2], sys.argv[3])
    except KeyboardInterrupt:
        # FIX: "ealier" -> "earlier" in the user-facing message.
        print("\nExited from the program earlier!")
nilq/baby-python
python
import os, wifisec, platform
from speedtest import SpeedTest
from hku import fetch_heroku
from rich import print
from rich.table import Table


def display():
    """Render a continuously refreshing terminal dashboard (rich tables)
    showing network ping, connected Wi-Fi devices (with masked MAC/IP),
    and Heroku project status.

    Runs forever; clears the screen each refresh cycle.
    """
    # Pick the screen-clear command for the current OS; stored in a module
    # global so it survives across loop iterations.
    plat = platform.system()
    global clear
    if plat == "Linux":
        clear = lambda: os.system('clear')
    elif plat == "Windows":
        clear = lambda: os.system('cls')
    while True:
        # Outer layout table: one row holding the three sub-tables.
        main_tab = Table(header_style="bold magenta", show_edge=False, show_lines=False, show_header=False)
        main_tab.add_column("Network")
        main_tab.add_column("Projects")

        # Heroku projects and whether each is running.
        projects_tab = Table(header_style="bold magenta", title="Projects")
        projects_tab.add_column("Name")
        projects_tab.add_column("Running")
        # NOTE(review): assumes fetch_heroku() yields (name, running) pairs —
        # confirm against hku.
        for p in fetch_heroku():
            if p[1]:
                projects_tab.add_row(str(p[0]), "[green]True[/green]")
            else:
                projects_tab.add_row(str(p[0]), "[red]False[/red]")

        # Ping measurement; download/upload intentionally disabled (slow).
        try:
            st = SpeedTest()
            ping = "%.2f" % st.ping()
            #download = "%.2f" % st.download()
            #upload = "%.2f" % st.upload()
        except OSError:
            ping = "# Connection Error"
        status_tab = Table(title="Network Status", header_style="bold magenta")
        status_tab.add_column("Stat")
        status_tab.add_column("Data")
        status_tab.add_row("Ping", str(ping))
        #status_tab.add_row("Download", str(download))
        #status_tab.add_row("Upload", str(upload))

        # Devices on the local network, with middle sections of IP and MAC
        # masked with '#' before display.
        devices = wifisec.who()
        connected_tab = Table(title="Connected Devices", header_style="bold magenta")
        connected_tab.add_column("NAME")
        connected_tab.add_column("MAC")
        connected_tab.add_column("IP")
        clear()
        for d in devices:
            # Mask all but the first and last octet of the IP address.
            ip = d[1].split(".")
            for i, sec in enumerate(ip[1:-1]):
                ip[i+1] = "#"*len(sec)
            ip = ".".join(ip)
            # Mask all but the first and last group of the MAC address.
            mac = d[3].split(":")
            for i, sec in enumerate(mac[1:-1]):
                mac[i+1] = "#"*len(sec)
            mac = ":".join(mac)
            # Strip any parenthesised suffix from the device name.
            name = d[-1]
            if "(" in name:
                name = name[:name.find("(")]
            connected_tab.add_row(name, mac, ip)

        main_tab.add_row(status_tab, connected_tab, projects_tab)
        print(main_tab)
        #print("[bold red]No device secured, breach possible.[/bold red]")
nilq/baby-python
python
"""Unit tests for scripttease Command, ItemizedCommand and Sudo."""
from scripttease.library.commands.base import Command, ItemizedCommand, Sudo
from scripttease.library.overlays.common import python_pip


class TestCommand(object):
    """Behaviour of a single shell Command wrapper."""

    def test_getattr(self):
        """Unknown kwargs become readable attributes."""
        c = Command("ls -ls", extra=True)
        assert c.extra is True

    def test_get_statement(self):
        """A fully-optioned command renders cd/sudo/comment/condition/
        register/stop into the emitted shell statement."""
        c = Command(
            "ls -ls",
            comment="kitchen sink",
            condition="$last_command -eq 0",
            cd="/path/to/project",
            prefix="source python/bin/active",
            register="list_success",
            stop=True,
            sudo="deploy"
        )
        statement = c.get_statement(cd=True)
        assert "( cd" in statement
        assert "sudo" in statement
        assert ")" in statement
        assert "# kitchen sink" in statement
        assert "if [[ $last_command" in statement
        assert "list_success=$?" in statement
        assert "if [[ $list_success" in statement

        # stop=True without register falls back to checking $?.
        c = Command(
            "ls -ls",
            stop=True
        )
        statement = c.get_statement()
        assert "if [[ $?" in statement

    def test_has_attribute(self):
        """has_attribute() is False for attributes never set."""
        c = Command("ls -ls")
        assert c.has_attribute("testing") is False

    def test_init(self):
        """sudo may be given as a Sudo instance, user name, bool, or omitted."""
        c = Command("ls -ls", sudo=Sudo(user="deploy"))
        assert isinstance(c.sudo, Sudo)
        assert c.sudo.user == "deploy"

        c = Command("ls -ls", sudo="deploy")
        assert isinstance(c.sudo, Sudo)
        assert c.sudo.user == "deploy"

        c = Command("ls -ls", sudo=True)
        assert isinstance(c.sudo, Sudo)
        assert c.sudo.user == "root"

        # Default: a disabled Sudo for root.
        c = Command("ls -ls")
        assert isinstance(c.sudo, Sudo)
        assert c.sudo.user == "root"
        assert c.sudo.enabled is False

    def test_is_itemized(self):
        """A plain Command is never itemized."""
        c = Command("ls -ls")
        assert c.is_itemized is False

    def test_repr(self):
        """repr includes the comment when one is set."""
        c = Command("ls -ls", comment="listing")
        assert repr(c) == "<Command listing>"

        c = Command("ls -ls")
        assert repr(c) == "<Command>"

    def test_set_attribute(self):
        """set_attribute() creates/overwrites arbitrary attributes."""
        c = Command("ls -ls")
        assert c.testing is None

        c.set_attribute("testing", True)
        assert c.testing is True


class TestItemizedCommand(object):
    """Behaviour of a command templated over a list of items."""

    def test_getattr(self):
        """Unknown kwargs become readable attributes."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item", extra=True)
        assert c.extra is True

    def test_get_commands(self):
        """get_commands() expands to one Command per item."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
        commands = c.get_commands()
        for i in commands:
            assert isinstance(i, Command)

    def test_get_statement(self):
        """The combined statement mentions every item."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
        statement = c.get_statement()
        assert "Pillow" in statement
        assert "psycopg2-binary" in statement
        assert "django" in statement

    def test_has_attribute(self):
        """has_attribute() is False for attributes never set."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
        assert c.has_attribute("testing") is False

    def test_is_itemized(self):
        """An ItemizedCommand always reports itemized."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
        assert c.is_itemized is True

    def test_repr(self):
        """repr includes the callback name."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
        assert repr(c) == "<ItemizedCommand python_pip>"

    def test_set_attribute(self):
        """set_attribute() creates/overwrites arbitrary attributes."""
        c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
        assert c.testing is None

        c.set_attribute("testing", True)
        assert c.testing is True


class TestSudo(object):
    """Behaviour of the Sudo option object."""

    def test_bool(self):
        """Truthiness mirrors the enabled flag."""
        s = Sudo()
        assert bool(s) is False

        s = Sudo(True)
        assert bool(s) is True

    def test_str(self):
        """String form is empty when disabled, a sudo prefix when enabled."""
        s = Sudo()
        assert str(s) == ""

        s = Sudo(True)
        assert str(s) == "sudo -u root"
nilq/baby-python
python
# Regression tests for AtomicContributionsCalculator: frequencies,
# eigenvectors, irreducible representations and per-atom contributions
# computed from phonopy FORCE_SETS / FORCE_CONSTANTS fixtures next to this
# file.
from AtomicContributions.ContributionsOfAtomsToModes import AtomicContributionsCalculator
import unittest
import numpy as np
import os

# Fixture files (POSCAR, FORCE_SETS, ...) live alongside this test module.
path_here = os.path.dirname(__file__)


class AtomicContributionToModesTest(unittest.TestCase):

    def setUp(self):
        # Default calculator: built from FORCE_SETS (ForceConstants=False).
        self.Contributions = AtomicContributionsCalculator(
            PoscarName=os.path.join(path_here, 'POSCAR'),
            ForceConstants=False,
            ForceFileName=os.path.join(path_here, 'FORCE_SETS'),
            supercell=[[3, 0, 0], [0, 3, 0], [0, 0, 4]],
            primitive=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        # Same structure but with externally supplied masses: the last eight
        # atoms get mass 2 (i.e. hydrogen replaced by deuterium-like mass).
        self.Contributions_masses = AtomicContributionsCalculator(
            PoscarName=os.path.join(path_here, 'POSCAR'),
            ForceConstants=False,
            ForceFileName=os.path.join(path_here, 'FORCE_SETS'),
            supercell=[[3, 0, 0], [0, 3, 0], [0, 0, 4]],
            primitive=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
            masses=[12.010700, 12.010700, 15.999400, 15.999400, 14.006700,
                    14.006700, 14.006700, 14.006700, 2, 2, 2, 2, 2, 2, 2, 2])
        # NaCl with non-analytical correction (nac=True, BORN charges) and a
        # non-trivial FCC primitive cell.
        self.Contributions2 = AtomicContributionsCalculator(
            PoscarName=os.path.join(path_here, 'POSCAR.NaCl'),
            ForceConstants=False,
            ForceFileName=os.path.join(path_here, 'FORCE_SETS.NaCl'),
            supercell=[[2, 0, 0], [0, 2, 0], [0, 0, 2]],
            nac=True,
            BornFileName=os.path.join(path_here, 'BORN.NaCl'),
            primitive=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])
        # Methanol, started from a FORCE_CONSTANTS file instead of FORCE_SETS.
        self.ContributionsFC = AtomicContributionsCalculator(
            PoscarName=os.path.join(path_here, 'POSCAR_Methanol'),
            ForceConstants=True,
            ForceFileName=os.path.join(path_here, 'FORCE_CONSTANTS_Methanol'),
            supercell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
            nac=False)

    def test_attributes(self):
        # test calculation of frequencies
        self.assertAlmostEqual(self.Contributions._frequencies[47],
                               3490.6434922723, places=1)
        # test calculation of eigenvectors (absolute values only; the global
        # phase of an eigenvector is arbitrary)
        self.assertAlmostEqual(abs(self.Contributions._EigFormat[15, 47, 0]),
                               0.00084433323436)
        self.assertAlmostEqual(abs(self.Contributions._EigFormat[15, 47, 1]),
                               0.00084433323436)
        self.assertAlmostEqual(abs(self.Contributions._EigFormat[15, 47, 2]),
                               0.37170414232138)
        # check if sign of eigenvectors is consistent!!
        self.assertEqual(np.sign(self.Contributions._EigFormat[14, 47, 2]),
                         np.sign(self.Contributions._EigFormat[15, 47, 0]))
        self.assertEqual(np.sign(self.Contributions._EigFormat[14, 47, 2]),
                         np.sign(self.Contributions._EigFormat[15, 47, 1]))
        self.assertEqual(np.sign(self.Contributions._EigFormat[14, 47, 2]),
                         np.sign(self.Contributions._EigFormat[15, 47, 2]))
        # test irreps
        self.assertEqual(self.Contributions._IRLabels[-1], 'B2')
        # test contributions: per-atom percentages of one mode must sum to 1
        sum_contribution = 0.0
        for atom in range(0, 16):
            sum_contribution += self.Contributions._PercentageAtom[47, atom]
        self.assertAlmostEqual(sum_contribution, 1.0)
        # TODO: test NAC
        self.assertAlmostEqual(self.Contributions2._frequencies[-1],
                               153.7212069157, places=2)
        # TODO: set masses externally [e.g., use D mass]
        self.assertAlmostEqual(self.Contributions_masses._frequencies[47],
                               2598.2875793589, places=1)
        # test calculation of eigenvectors
        self.assertAlmostEqual(abs(self.Contributions_masses._EigFormat[15, 47, 0]),
                               0.00378948635566)
        self.assertAlmostEqual(abs(self.Contributions_masses._EigFormat[15, 47, 1]),
                               0.00378948635566)
        self.assertAlmostEqual(abs(self.Contributions_masses._EigFormat[15, 47, 2]),
                               0.33223420830758)
        # check if sign of eigenvectors is consistent
        self.assertEqual(np.sign(self.Contributions_masses._EigFormat[14, 47, 2]),
                         np.sign(self.Contributions_masses._EigFormat[15, 47, 0]))
        self.assertEqual(np.sign(self.Contributions_masses._EigFormat[14, 47, 2]),
                         np.sign(self.Contributions_masses._EigFormat[15, 47, 1]))
        self.assertEqual(np.sign(self.Contributions_masses._EigFormat[14, 47, 2]),
                         np.sign(self.Contributions_masses._EigFormat[15, 47, 2]))
        # test irreps
        # NOTE(review): this duplicates the earlier self.Contributions check;
        # it may have been intended to test self.Contributions_masses — confirm.
        self.assertEqual(self.Contributions._IRLabels[-1], 'B2')
        # start from FORCE constants instead
        self.assertAlmostEqual(self.ContributionsFC._frequencies[-1],
                               3741.4132865293, places=1)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
"""Resolver factories for SerFlag-based action fields in the GraphQL layer."""
from typing import Type

from serflag import SerFlag
from handlers.graphql.utils.query import resolve_from_root


def resolve_myactions(actions_type: Type[SerFlag]):
    """Build a resolver that returns the distinct actions stored on *root*.

    The resolver deserializes the raw stored value into a list of distinct
    ``actions_type`` members; an empty/missing value yields ``[]``.
    """
    def resolver(root, info, **args):
        raw = resolve_from_root(root, info)
        return actions_type.deserialize_distinct(raw) if raw else []

    return resolver


def resolve_owner(actions_type: Type[SerFlag]):
    """Build a resolver reporting whether *root* grants full (ALL) access.

    Reads the "my_actions" field from *root* and compares its deserialized
    value against ``actions_type.ALL``; missing data resolves to ``False``.
    """
    def resolver(root, info, **args):
        raw = resolve_from_root(root, "my_actions")
        if not raw:
            return False
        return actions_type.deserialize(raw) == actions_type.ALL

    return resolver
nilq/baby-python
python
#!/usr/bin/python3
"""A small Asteroids-style arcade game built on pygame."""
import math
import pygame
import random
import sys

from pygame import K_d, K_a, K_w, K_s, K_SPACE

SIZE = WIDTH, HEIGHT = 500, 500
BLACK = 0, 0, 0
WHITE = 255, 255, 255
SHIP_W = 12
SHIP_H = 25
MAX_SPEED = 3
ASTEROID_LIMIT = 2


class Game_Space:
    """Initiates and holds all variables needed for the game to run.

    Also includes all methods for changing the state of the game:
    move, shoot, etc.
    """

    def __init__(self):
        # Sets screen, font, and generates player's ship
        self.screen = pygame.display.set_mode(SIZE)
        self.font = pygame.font.SysFont('monospace', 25)
        self.ship = Ship([WIDTH // 2, HEIGHT // 2], SHIP_W, SHIP_H)
        self.asteroids = []
        self.explosions = []
        self.score = 0
        self.big_asteroids = 0
        self.satelite = None
        self.target_score = 1000

    def collision_check(self):
        # Collision check for all objects in the GameSpace.  Each branch
        # returns immediately after a deletion so stale indices are never
        # reused on the mutated lists.
        if self.satelite is not None:
            for i in range(len(self.ship.shots)):
                if self.satelite.collision(self.ship.shots[i]):
                    self.score += 850
                    del self.ship.shots[i]
                    self.satelite.explode()
                    self.satelite = None
                    return
        for i in range(len(self.asteroids)):
            for j in range(len(self.ship.shots)):
                if self.asteroids[i].collision(self.ship.shots[j]):
                    self.asteroids[i].break_apart()
                    if isinstance(self.asteroids[i], Big_Asteroid):
                        self.score += 100
                        self.big_asteroids -= 1
                    else:
                        self.score += 50
                    del self.asteroids[i]
                    del self.ship.shots[j]
                    return
        for asteroid in self.asteroids:
            if self.ship.collision(asteroid):
                self.ship.explode()
                self.game_over()

    def handle_explosions(self):
        # Cleans up explosion debris; removes at most one expired piece per
        # explosion per frame (incremental cleanup).
        for explosion in self.explosions:
            for i in range(len(explosion)):
                if explosion[i].timer <= 0:
                    del explosion[i]
                    return
                else:
                    explosion[i].timer -= 1

    def update_score(self):
        # Updates the score displayed in the bottom-right corner
        display_score = self.font.render(str(self.score), False, WHITE)
        width, height = self.font.size(str(self.score))
        self.screen.blit(display_score,
                         (WIDTH - width - 10, HEIGHT - height - 10))

    def game_over(self):
        # Game over operation
        # TODO: End game, display high scores
        self.ship.x = WIDTH // 2
        self.ship.y = HEIGHT // 2

    def draw_all(self):
        # Draw all objects in the GameSpace
        self.ship.draw()
        for asteroid in self.asteroids:
            asteroid.draw()
        for shot in self.ship.shots:
            shot.draw()
        for explosion in self.explosions:
            for debris in explosion:
                debris.draw()
        if self.satelite is not None:
            self.satelite.draw()

    def move_all(self):
        # Move all objects in the GameSpace
        self.ship.move()
        for asteroid in self.asteroids:
            asteroid.move()
        for shot in self.ship.shots:
            shot.move()
        for explosion in self.explosions:
            for debris in explosion:
                debris.move()
        if self.satelite is not None:
            self.satelite.move()

    def spawn_asteroids(self):
        # Spawns Big_Asteroids (with 50% probability per frame) while under
        # the limit
        if self.big_asteroids < ASTEROID_LIMIT:
            if random.choice([True, False]):
                self.asteroids.append(Big_Asteroid(None))
                self.big_asteroids += 1

    def spawn_satelite(self):
        # Spawns a Satelite once the target score is met; the target triples
        # after each spawn.  A satelite that left the screen is discarded.
        if self.score > self.target_score:
            if self.satelite is None:
                self.satelite = Satelite()
                self.target_score *= 3
            elif self.satelite.x < 0:
                self.satelite = None

    def run_game(self):
        # Main loop: ~40 FPS (25 ms per frame)
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
            self.screen.fill(BLACK)
            self.update_score()
            self.draw_all()
            self.ship.control(pygame.key.get_pressed())
            self.collision_check()
            self.move_all()
            self.spawn_asteroids()
            self.spawn_satelite()
            self.handle_explosions()
            self.ship.remove_shots()
            pygame.display.flip()
            pygame.time.wait(25)


class Menu:
    """Menu object to be displayed before and after every game.

    Work in progress, not yet implmented.
    """

    def __init__(self):
        # Set fonts (active item rendered larger) and grab the current surface
        self.font_inactive = pygame.font.SysFont('monospace', 45)
        self.font_active = pygame.font.SysFont('monospace', 60)
        self.screen = pygame.display.get_surface()
        # [label, is_active] pairs; exactly one item is active at a time
        self.options = [['New Game', True], ['Exit', False]]
        self.spacing = 10
        self.padding_top = 100
        self.padding_left = 80

    def make_menu(self):
        # Draw the menu on the screen
        x = self.padding_left
        y = self.padding_top
        for menu_item in self.options:
            option = menu_item[0]
            active = menu_item[1]
            if active:
                button = self.font_active.render(option, False, WHITE)
                width, height = self.font_active.size(option)
            else:
                button = self.font_inactive.render(option, False, WHITE)
                width, height = self.font_inactive.size(option)
            self.screen.blit(button, (x, y))
            y += height + self.spacing

    def action(self, keys):
        # Get user input and change active menu item or do menu action.
        for i in range(len(self.options)):
            if self.options[i][1]:
                pos = i
        if keys[K_w]:
            if pos > 0:
                self.options[pos][1] = False
                self.options[pos - 1][1] = True
        elif keys[K_s]:
            if pos < len(self.options) - 1:
                self.options[pos][1] = False
                self.options[pos + 1][1] = True
        elif keys[K_SPACE]:
            if self.options[pos][0] == 'New Game':
                game.run_game()
            elif self.options[pos][0] == 'Exit':
                sys.exit()


class Space_Object:
    """Base object for all other objects. Includes draw and move methods."""

    def __init__(self, position, width, height):
        # Requires position, width, and height as inputs.  Gets the current
        # pygame surface.
        self.position = position
        self.x = position[0]
        self.y = position[1]
        self.width = width
        self.height = height
        self.screen = pygame.display.get_surface()
        self.speed = [0, 0]
        self.direction = 0
        self.delta_speed = 0
        # Honor values a subclass set *before* calling this initializer
        # (Shot, Big_Asteroid, Small_Asteroid all tune speed_limit and/or
        # screen_wrap); previously these were unconditionally clobbered with
        # the defaults, defeating the subclass tuning.
        self.speed_limit = getattr(self, 'speed_limit', MAX_SPEED)
        self.rotation = 0
        self.color = WHITE
        self.screen_wrap = getattr(self, 'screen_wrap', True)

    def move(self):
        # Adjust the object's position from its speed and direction; the
        # per-axis speed is clamped to +/-speed_limit and the position wraps
        # around the screen (with a 10 px margin) when screen_wrap is set.
        rad = -math.radians(self.direction + self.rotation)
        sx = self.delta_speed * math.sin(rad)
        sy = self.delta_speed * math.cos(rad)
        self.delta_speed = 0
        self.speed[0] -= sx
        self.speed[1] += sy
        if self.speed[0] > self.speed_limit:
            self.speed[0] = self.speed_limit
        elif self.speed[0] < -self.speed_limit:
            self.speed[0] = -self.speed_limit
        if self.speed[1] > self.speed_limit:
            self.speed[1] = self.speed_limit
        elif self.speed[1] < -self.speed_limit:
            self.speed[1] = -self.speed_limit
        self.x += self.speed[0]
        self.y += self.speed[1]
        if self.screen_wrap:
            if self.x < 0 - 10:
                self.x += WIDTH
            elif self.x > WIDTH + 10:
                self.x -= WIDTH
            if self.y < 0 - 10:
                self.y += HEIGHT
            elif self.y > HEIGHT + 10:
                self.y -= HEIGHT
        self.position = [self.x, self.y]

    def points(self):
        # Returns the object's relative shape rotated by its direction and
        # translated to its position
        point_list = []
        rad = -math.radians(self.direction)
        for point in self.relative_coord:
            dx = self.x + point[0] * math.cos(rad) - point[1] * math.sin(rad)
            dy = self.y + point[1] * math.cos(rad) + point[0] * math.sin(rad)
            point_list.append([dx, dy])
        return point_list

    def draw(self):
        # Draws object on the screen as a 2 px polygon outline
        pygame.draw.polygon(self.screen, self.color, self.points(), 2)

    def collision(self, item):
        # Approximate circle-vs-circle test built from both objects'
        # bounding-box dimensions
        min_safe_x = self.width / 2 + item.width / 4
        min_safe_y = self.height / 2 + item.height / 4
        min_safe_dist = math.sqrt(min_safe_x ** 2 + min_safe_y ** 2)
        abs_x = abs(self.x - item.x)
        abs_y = abs(self.y - item.y)
        abs_dist = math.sqrt(abs_x ** 2 + abs_y ** 2)
        if abs_dist < min_safe_dist:
            return True

    def explode(self):
        # Create an explosion effect by generating debris fanned out around
        # a random initial direction
        explosion = []
        direction = random.randint(0, 365)
        debris_amount = 5
        for i in range(debris_amount):
            explosion.append(Debris(self.position, direction))
            direction += 73
        game.explosions.append(explosion)


class Ship(Space_Object):
    """The user controlled space ship. Has special methods shoot, control,
    and remove_shots. Stores the number of ship shots currently active and
    applies a shot limit. Holds the ship's limiting factors: acceleration,
    turn speed.
    """

    def __init__(self, position, width, height):
        # Initialize Space_Object and set the ship's shape
        Space_Object.__init__(self, position, width, height)
        self.relative_coord = [[-self.width // 2, self.height * 2 // 5],
                               [0, self.height // 5],
                               [self.width // 2, self.height * 2 // 5],
                               [0, -self.height * 3 // 5]]
        self.shots = []
        self.shot_limit = 10
        self.shot_delay = 0
        self.acceleration = 2
        self.turn_speed = 5

    def shoot(self):
        # Generate a shot from the front of the ship, rate-limited by
        # shot_delay and capped at shot_limit live shots
        origin = self.points()[3]
        if self.shot_delay == 0:
            # Fixed: compare against self.shot_limit rather than a
            # hard-coded 10 duplicating its value.
            if len(self.shots) < self.shot_limit:
                self.shots.append(Shot(origin, self.direction))
                self.shot_delay = 8
        else:
            self.shot_delay -= 1

    def remove_shots(self):
        # Cleans up shots that have moved off screen (one per frame)
        for i in range(len(self.shots)):
            if self.shots[i].x < 0 or self.shots[i].y < 0:
                del self.shots[i]
                break
            elif self.shots[i].x > WIDTH or self.shots[i].y > HEIGHT:
                del self.shots[i]
                break

    def control(self, keys):
        # Defines the result from user input and applies it
        if keys[K_w]:
            self.delta_speed -= self.acceleration
        elif keys[K_s]:
            self.delta_speed += self.acceleration
        if keys[K_a]:
            self.direction += self.turn_speed
        elif keys[K_d]:
            self.direction -= self.turn_speed
        if keys[K_SPACE]:
            self.shoot()


class Shot(Space_Object):
    """Shot object, fired from ship and can collide with other space objects.
    """

    def __init__(self, position, direction):
        # Defaults are only applied when a subclass (Debris) has not already
        # chosen its own geometry/speed; previously these assignments
        # unconditionally clobbered the subclass values.
        self.width = getattr(self, 'width', 2)
        self.height = getattr(self, 'height', 6)
        self.speed_limit = getattr(self, 'speed_limit', MAX_SPEED + 4)
        self.screen_wrap = False
        Space_Object.__init__(self, position, self.width, self.height)
        self.direction = direction
        # Shots travel at full speed in their firing direction from birth
        rad = -math.radians(self.direction)
        self.speed = [self.speed_limit * math.sin(rad),
                      -self.speed_limit * math.cos(rad)]
        self.relative_coord = [[0, 0], [0, self.height]]

    def draw(self):
        # Drawn as a line instead of the default polygon
        points = self.points()
        pygame.draw.line(self.screen, self.color, points[0], points[1],
                         self.width)


class Asteroid(Space_Object):
    """Base object for asteroids. Includes different shapes and break apart
    methods for asteroid destruction.
    """

    def __init__(self, position):
        # Randomly chooses asteroid from collection of shapes.
        ASTEROID_SHAPES = [
            [[-self.width / 2, -self.height / 3],
             [-self.width / 3, -self.height / 2],
             [self.width / 6, -self.height / 2],
             [self.width / 2, -self.height / 6],
             [self.width / 2, self.height / 3],
             [self.width / 3, self.height / 2],
             [self.width / 6, self.height / 2],
             [-self.width / 6, self.height / 6],
             [-self.width / 3, self.height / 6],
             [-self.width / 2, 0]],
            [[0, self.height / 2],
             [self.width / 6, self.height / 2],
             [self.width / 3, self.height / 3],
             [self.width / 3, self.height / 6],
             [self.width / 2, 0],
             [self.width / 2, -self.height / 6],
             [self.width / 3, -self.height / 3],
             [self.width / 6, -self.height / 3],
             [0, -self.height / 2],
             [-self.width / 6, -self.height / 2],
             [-self.width / 6, -self.height / 3],
             [-self.width / 2, 0],
             [-self.width / 2, self.height / 6],
             [-self.width / 3, self.height / 3],
             [-self.width / 6, self.height / 3]]
        ]
        # Randomly choose a start position on a screen edge if no position
        # is inherited from a destroyed parent asteroid
        if position is None:
            start = random.choice([1, 2, 3, 4])
            if start == 1:
                position = [0, random.randint(0, HEIGHT)]
            elif start == 2:
                position = [WIDTH, random.randint(0, HEIGHT)]
            elif start == 3:
                position = [random.randint(0, WIDTH), 0]
            else:
                position = [random.randint(0, WIDTH), HEIGHT]
        Space_Object.__init__(self, position, self.width, self.height)
        # Fixed: the random speed magnitude was previously drawn and then
        # immediately overwritten with speed_limit, so every asteroid flew
        # at its maximum speed; now the drawn magnitude is actually used.
        speed = random.randint(1, self.speed_limit)
        self.direction = random.randint(0, 365)
        self.relative_coord = ASTEROID_SHAPES[random.randint(0, len(ASTEROID_SHAPES) - 1)]  # noqa
        rad = -math.radians(self.direction)
        self.speed = [speed * math.sin(rad),
                      -speed * math.cos(rad)]
        self.rotation = random.randint(-20, 20)

    def break_apart(self):
        # Default break_apart calls base explode method
        self.explode()


class Big_Asteroid(Asteroid):
    """Big asteroids are slow and break apart into small asteroids."""

    def __init__(self, position):
        self.height = 75
        self.width = 75
        self.speed_limit = MAX_SPEED - 2
        Asteroid.__init__(self, position)

    def break_apart(self):
        # Spawn 1-4 small asteroids at this asteroid's position
        for i in range(random.randint(1, 4)):
            game.asteroids.append(Small_Asteroid(self.position))
        self.explode()


class Small_Asteroid(Asteroid):
    """Small asteroids are fast and are destroyed on collision with a shot."""
    height = 20
    width = 20
    speed_limit = MAX_SPEED - 1

    def __init__(self, position):
        Asteroid.__init__(self, position)


class Debris(Shot):
    """Debris uses the shot class to show destruction. Incldues a timer
    variable to be deleted when timer hits zero.
    """

    def __init__(self, position, direction):
        # Set geometry before Shot.__init__ so the defaults do not apply
        self.width = 1
        self.height = random.randint(1, 20)
        Shot.__init__(self, position, direction)
        self.timer = random.randint(5, 15)


class Satelite(Space_Object):
    """Special, high value target. Moves from right to left across the middle
    of the screen. A more complex shape than the other sopace objects.
    """

    def __init__(self):
        Space_Object.__init__(self, [WIDTH, HEIGHT // 2], 12, 10)
        self.speed = [-MAX_SPEED, 0]
        self.screen_wrap = False

    def draw(self):
        # Draw method includes a circle and three lines.
        line_1 = [[self.x, self.y - self.height // 4],
                  [self.x + self.width * 3 // 4, self.y - self.height // 2]]
        line_2 = [[self.x + self.width // 4, self.y],
                  [self.x + self.width * 3 // 4, self.y]]
        line_3 = [[self.x, self.y + self.height // 4],
                  [self.x + self.width * 3 // 4, self.y + self.height // 2]]
        pygame.draw.circle(self.screen, self.color,
                           (int(self.x), int(self.y)), self.width // 4)
        pygame.draw.line(self.screen, self.color, line_1[0], line_1[1], 1)
        pygame.draw.line(self.screen, self.color, line_2[0], line_2[1], 1)
        pygame.draw.line(self.screen, self.color, line_3[0], line_3[1], 1)


def main(game):
    # Menu loop; run_game() is entered from the menu's action handler
    menu = Menu()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        game.screen.fill(BLACK)
        menu.make_menu()
        menu.action(pygame.key.get_pressed())
        pygame.display.flip()
        pygame.time.wait(25)


if __name__ == '__main__':
    pygame.init()
    game = Game_Space()
    main(game)
nilq/baby-python
python
"""Entry point: configure logging, build the Telegram bot, register all
command handlers and start long polling."""
import logging
import os.path
from os import getenv

import telegram.ext
from dotenv import load_dotenv
from telegram.ext import Updater, CommandHandler, MessageHandler
from telegram.ext.filters import Filters

from bot.commands import hello_cmd, echo_cmd, pin_message_cmd, slap_cmd, me_cmd, \
    unknown_command_cmd, shrug_cmd, google_cmd, get_cmd, list_cmd, set_cmd, \
    del_cmd, credits_cmd, pidor_cmd, pidoreg_cmd, meme_cmd, pidorules_cmd

# Setup logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s \
- %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Load configs and create bot instance
load_dotenv()  # load telegram bot token from .env file
API_TOKEN = getenv("TELEGRAM_BOT_API_SECRET", "")
# Log only a prefix so the secret never lands in the logs in full
logger.debug("Beginning of token: %s", API_TOKEN[:5])

# Persistent bot state (chat data etc.) is pickled under storage/
if not os.path.exists('storage'):
    os.mkdir('storage')

updater = Updater(API_TOKEN, use_context=True,
                  persistence=telegram.ext.PicklePersistence(
                      filename='storage/data.bin'))
dispatch = updater.dispatcher

# Ignore message edits for every handler below
not_edited = ~Filters.update.edited_message

# Setup dispatcher with callbacks
dispatch.add_handler(CommandHandler('hello', hello_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('slap', slap_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('me', me_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('shrug', shrug_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('google', google_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('pin', pin_message_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('credits', credits_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('meme', meme_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('get', get_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('list', list_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('set', set_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('del', del_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('pidor', pidor_cmd, filters=not_edited))
dispatch.add_handler(
    CommandHandler('pidorules', pidorules_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('pidoreg', pidoreg_cmd, filters=not_edited))
# Catch-all handlers: unknown /commands first, then plain-text echo.
# (Consistency fix: use the `dispatch` alias and the shared `not_edited`
# filter instead of `updater.dispatcher` / an inline duplicate filter.)
dispatch.add_handler(
    MessageHandler(Filters.regex(r'^/\w+') & not_edited, unknown_command_cmd))
dispatch.add_handler(
    MessageHandler(Filters.text & not_edited, echo_cmd))

# Run the bot
updater.start_polling()
updater.idle()
nilq/baby-python
python
"""Print every TP-Link smart device found on the local network."""
from pyHS100 import Discover

devices = Discover.discover()
for device in devices.values():
    print(device)
    print(f"host:{device.host}")
nilq/baby-python
python
# Variable metadata table for RPG-FMCW-94 cloud-radar binary files: maps each
# field name from the instrument's file format to a (long_name, units,
# comment) record used when exporting the data.
from collections import namedtuple

# All three fields default to None so entries can supply any subset.
Meta = namedtuple('Meta', ('long_name', 'units', 'comment'))
Meta.__new__.__defaults__ = (None,) * len(Meta._fields)

METADATA = {
    # --- file header ---
    'FileCode': Meta(long_name='file_code'),
    'HeaderLen': Meta(long_name='header_length', units='bytes'),
    'StartTime': Meta(long_name='start_time', comment='time of first sample in file'),
    'StopTime': Meta(long_name='stop_time', comment='time of last sample in file'),
    'CGProg': Meta(long_name='program_number', comment='chirp generator program number'),
    'ModelNo': Meta(long_name='model_number', comment='0=94GHz single polarisation radar, 1=94GHz dual polarisation radar'),
    'ProgName': Meta(long_name='program_name'),
    'CustName': Meta(long_name='customer_name'),
    # --- instrument characteristics ---
    'Freq': Meta(long_name='radar_frequency', units='GHz'),
    'AntSep': Meta(long_name='antenna_separation', units='m', comment='separation of both antenna axis (bistatic configuration)'),
    'AntDia': Meta(long_name='antenna_diameter', units='m'),
    'AntG': Meta(long_name='antenna_gain', comment='linear antenna gain'),
    'HPBW': Meta(long_name='half_power_beam_width', units='degrees'),
    'Cr': Meta(long_name='radar_constant'),
    'DualPol': Meta(long_name='dual_polarisation', comment='0=single polarisation radar, 1=dual polarisation radar in LDR mode, 2=dual polarisation radar in STSR mode'),
    'CompEna': Meta(long_name='compression', comment='0=not compressed, 1=compressed, 2=compressed and polarimetric variables saved'),
    'AntiAlias': Meta(long_name='anti_alias', comment='0=spectra not anti-aliased, 1=spectra have been anti-aliased'),
    'SampDur': Meta(long_name='sample_duration', units='s'),
    'GPSLat': Meta(long_name='gps_latitude', units='degrees_north'),
    'GPSLong': Meta(long_name='gps_longitude', units='degrees_east'),
    'CalInt': Meta(long_name='calibration_interval', comment='period for automatic zero calibrations in number of samples'),
    # --- layer / sequence geometry ---
    'RAltN': Meta(long_name='n_range_layers', comment='number of radar ranging layers'),
    'TAltN': Meta(long_name='n_temperature_layers',),
    'HAltN': Meta(long_name='n_humidity_layers'),
    'SequN': Meta(long_name='n_chirp_sequences'),
    'RAlts': Meta(long_name='range_layers'),
    'TAlts': Meta(long_name='temperature_layers'),
    'HAlts': Meta(long_name='humidity_layers'),
    'Fr': Meta(long_name='range_factors'),
    # --- per-chirp-sequence settings ---
    'SpecN': Meta(long_name='n_samples_in_chirp'),
    'RngOffs': Meta(long_name='chirp_start_indices'),
    'ChirpReps': Meta(long_name='n_chirps_in_sequence'),
    'SeqIntTime': Meta(long_name='sequence_integration_time'),
    'dR': Meta(long_name='range_resolution', units='m', comment='chirp sequence range resolution'),
    'MaxVel': Meta(long_name='max_doppler_velocity', units='m/s', comment='max. Doppler velocity for each chirp sequence (unambiguous)'),
    'ChanBW': Meta(long_name='bandwidth', units='Hz', comment='bandwidth of individual radar channel in the sequence'),
    'ChirpLowIF': Meta(long_name='lowest_IF_frequency', units='Hz'),
    'ChirpHighIF': Meta(long_name='highest_IF_frequency', units='Hz'),
    'RangeMin': Meta(long_name='minimum_altitude', units='m', comment='minimum altitude (range) of the sequence'),
    'RangeMax': Meta(long_name='maximum_altitude', units='m', comment='maximum altitude (range) of the sequence)'),
    'ChirpFFTSize': Meta(long_name='fft_size', comment='Must be power of 2'),
    'ChirpInvSamples': Meta(long_name='n_invalid_samples', comment='number of invalid samples at beginning of chirp'),
    'ChirpCenterFr': Meta(long_name='chirp_center_frequency', units='MHz'),
    'ChirpBWFr': Meta(long_name='chirp_bandwidth', units='MHz'),
    'FFTStartInd': Meta(long_name='fft_start_index'),
    'FFTStopInd': Meta(long_name='fft_stop_index'),
    'ChirpFFTNo': Meta(long_name='n_chirp_fft', comment='number of FFT range layers in one chirp (usually = 1)'),
    'SampRate': Meta(long_name='adc_sampling_rate', units='Hz'),
    'MaxRange': Meta(long_name='maximum_range', units='m', comment='maximum unambiguous range'),
    # --- processing flags ---
    'SupPowLev': Meta(long_name='power_leveling_flag', comment='flag indicating the use of power levelling (0=yes, 1=no)'),
    'SpkFilEna': Meta(long_name='spike_filter_flag', comment='flag indicating the use of spike/plankton filter (1=yes, 0=no)'),
    'PhaseCorr': Meta(long_name='phase_correction_flag', comment='flag indicating the use of phase correction (1=yes, 0=no)'),
    'RelPowCorr': Meta(long_name='relative_power_correction_flag', comment='flag indicating the use of relative power correction (1=yes, 0=no)'),
    'FFTWindow': Meta(long_name='fft_window', comment='FFT window in use: 0=square, 1=parzen, 2=blackman, 3=welch, 4=slepian2, 5=slepian3'),
    'FFTInputRng': Meta(long_name='adc_voltage_range', comment='ADC input voltage range (+/-)', units='mV'),
    'NoiseFilt': Meta(long_name='noise_filter_threshold', comment='noise filter threshold factor (multiple of STD in Doppler spectra)'),
    # --- per-sample housekeeping ---
    'Time': Meta(long_name='time', units='s'),
    'MSec': Meta(long_name='time_ms', units='ms'),
    'QF': Meta(long_name='quality_flag', comment='Bit 1=ADC saturation, Bit 2=spectral width too high, Bit 3=no transm. power leveling'),
    'RR': Meta(long_name='rain_rate', units='mm/h'),
    'RelHum': Meta(long_name='relative_humidity', units='%'),
    'EnvTemp': Meta(long_name='temperature', units='K', comment='environment temperature'),
    'BaroP': Meta(long_name='pressure', units='hPa', comment='barometric pressure'),
    'WS': Meta(long_name='wind_speed', units='km/h',),
    'WD': Meta(long_name='wind_direction', units='degrees'),
    'DDVolt': Meta(long_name='voltage', units='V', comment='direct detection channel voltage'),
    'DDTb': Meta(long_name='brightness_temperature', units='K'),
    'TransPow': Meta(long_name='transmitter_power', units='W'),
    'TransT': Meta(long_name='transmitter_temperature', units='K'),
    'RecT': Meta(long_name='receiver_temperature', units='K'),
    'PCT': Meta(long_name='pc_temperature', units='K'),
    'LWP': Meta(long_name='liquid_water_path', units='g/m2'),
    'Elev': Meta(long_name='elevation', units='degrees'),
    'Azi': Meta(long_name='azimuth', units='degrees'),
    'Status': Meta(long_name='status_flag', comment='mitigation status flags: 0/1=heater switch (ON/OFF) 0/10=blower switch (ON/OFF)'),
    # --- spectra and derived polarimetric products ---
    'TotSpec': Meta(long_name='doppler_spectrum', comment='linear Ze'),
    'HSpec': Meta(long_name='doppler_spectrum_h', comment='horizontal polarisation, linear Ze'),
    'ReVHSpec': Meta(long_name='covariance_spectrum_re', comment='real part linear Ze'),
    'ImVHSpec': Meta(long_name='covariance_spectrum_im', comment='imaginary part linear Ze'),
    'RefRat': Meta(long_name='linear_depolarization_ratio', units='dB'),
    'DiffPh': Meta(long_name='differential_phase', units='rad'),
    'SLDR': Meta(long_name='ldr_slanted', units='dB'),
    'CorrCoeff': Meta(long_name='correlation_coefficient',),
    'SCorrCoeff': Meta(long_name='correlation_coefficient_slanted',),
    'KDP': Meta(long_name='differential_phase_shift', units='rad/km'),
    'DiffAtt': Meta(long_name='differential_attenuation', units='db/km'),
    'TotNoisePow': Meta(long_name='integrated_noise', comment='integrated Doppler spectrum noise power'),
    'HNoisePow': Meta(long_name='integrated_noise_h', comment='integrated Doppler spectrum noise power in h-pol'),
    'AliasMsk': Meta(long_name='anti_alias_correction', comment='mask indicating if anti-aliasing has been applied (=1) or not (=0)'),
    'MinVel': Meta(long_name='minimum_velocity', units='m/s'),
    'PowIF': Meta(long_name='IF_power', comment='IF power at ADC', units='uW'),
    # --- moments ---
    'Ze': Meta(long_name='reflectivity', comment='linear reflectivity in Ze units for vertical polarisation'),
    'MeanVel': Meta(long_name='velocity', units='m/s', comment='mean velocity for vertical polarisation'),
    'SpecWidth': Meta(long_name='width', units='m/s', comment='spectral width for vertical polarisation'),
    'Skewn': Meta(long_name='skewness', comment='spectral skewness for vertical polarisation'),
    'Kurt': Meta(long_name='kurtosis',),
}
nilq/baby-python
python
# Generated by Django 2.2.6 on 2019-10-12 18:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('fashion_catalogue', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='category', name='color', ), migrations.RemoveField( model_name='category', name='size', ), ]
nilq/baby-python
python
# eqcmd.py: Basic routines for interfacing with EQ (EverQuest) by driving
# its X11 window with xdotool and watching the game's chat log (eqlog) to
# decide when the client is ready for input.
import asyncio
# NOTE(review): create_subprocess_shell is imported but the code calls
# asyncio.create_subprocess_shell directly; only PIPE is used from here.
from asyncio.subprocess import create_subprocess_shell, PIPE
import eqlog
import os
import re
import shlex
import random


class CommandError(Exception):
    """A problem running a command"""
    pass


class NotReadyError(Exception):
    """EverQuest isn't ready to receive a command"""
    pass


async def _xdotool(display, text) -> [str]:
    """Interface with X via calls to xdotool.

    Runs ``/usr/bin/xdotool <text>`` against the given DISPLAY and returns
    its stdout split into lines; raises CommandError on a non-zero exit.
    """
    cmd = "/usr/bin/xdotool "+text
    # Copy the environment so the caller's DISPLAY is not mutated
    env = dict([(k, os.environ[k]) for k in os.environ])
    env["DISPLAY"] = display
    proc = await asyncio.create_subprocess_shell(cmd, stdout=PIPE, stderr=PIPE,
                                                 env=env)
    stdout, stderr = await proc.communicate()
    rc = await proc.wait()
    if rc != 0:
        raise CommandError("Command send failed")
    return str(stdout, "utf8").splitlines()


# Cached display name; only used by the commented-out probe below.
_EQDISPLAY = None


async def _eqdisplay() -> str:
    """Figure out which X display EQ is running on."""
    # Currently short-circuited to the caller's own DISPLAY; the original
    # probe (scan :0..:99 for an EverQuest window) is kept below for
    # reference.
    return os.environ["DISPLAY"]
    # global _EQDISPLAY
    # if _EQDISPLAY is None:
    #     for i in range(100):
    #         try:
    #             display = ":%d" % i
    #             await _xdotool(display, "search --name EverQuest")
    #             _EQDISPLAY = display
    #             break
    #         except CommandError:
    #             pass
    #     else:
    #         raise CommandError("Couldn't find EverQuest display")
    # return _EQDISPLAY


async def _eqxdo(text):
    """Run xdotool against the display holding EverQuest"""
    return await _xdotool(await _eqdisplay(), text)


# Parsers for xdotool getwindowgeometry output
_WINDLOC_RE = re.compile(r"\s*Position: ([0-9]+),([0-9]+).*")
_GEOMETRY_RE = re.compile(r"\s*Geometry: ([0-9]+)x([0-9]+).*")


async def _geometry():
    """Get the EQ window location and size as ((x, y), (w, h))."""
    lines = await _eqxdo("search --name EverQuest getwindowgeometry")
    loc = None
    size = None
    for line in lines:
        m = _WINDLOC_RE.match(line)
        if m:
            loc = int(m.group(1)), int(m.group(2))
        m = _GEOMETRY_RE.match(line)
        if m:
            size = int(m.group(1)), int(m.group(2))
    if loc is None or size is None:
        raise CommandError("Couldn't find EverQuest window")
    return loc, size


async def _prepare():
    """Prepare EQ window to receive input.

    Clicks a random point in the middle third of the window to focus it,
    then raises and focuses the window explicitly.
    """
    loc, size = await _geometry()
    x = loc[0] + (size[0]//3 + random.randint(0, size[0]//3))
    y = loc[1] + (size[1]//3 + random.randint(0, size[1]//3))
    await _eqxdo("mousemove %d %d" % (x, y))
    await _eqxdo("click 1")
    await asyncio.sleep(0.2)
    await _eqxdo("search --name EverQuest windowmap windowraise windowfocus")
    await _eqxdo("click 1")


async def _press_raw(key_name):
    """Press a key in EQ (window assumed already focused)."""
    await _eqxdo("key " + shlex.quote(key_name))
    await asyncio.sleep(0.2)


async def _press(key_name):
    """Press a key in EQ after preparing for input"""
    await _prepare()
    await _press_raw(key_name)


async def _type(text):
    """Type a line of text in EQ.

    Return opens the chat box, the text is typed, and a second Return
    submits it.
    """
    await _prepare()
    await _press_raw("Return")
    await _eqxdo("type --delay 20 "+shlex.quote(text))
    await _press_raw("Return")


async def _expect_io():
    """Wait until a line of text comes in from the EQ log."""
    # Cancellation is expected when the readiness timeout fires; it is
    # swallowed deliberately so _ping_watch's wait_for can cancel us.
    try:
        with eqlog.tap() as t:
            await t.next_line()
    except asyncio.CancelledError:
        pass


# Readiness flag maintained by _ping_watch
_EQ_READY = False


async def _ping_watch():
    """Keep the _EQ_READY variable up to date.

    Here, we decide EQ is up and running if we've seen at least 1 chat
    message (e.g. "You are out of food and drink.") sometime in the past
    minute."""
    global _EQ_READY
    while True:
        f = asyncio.ensure_future(_expect_io())
        try:
            await asyncio.wait_for(f, 60)
            _EQ_READY = True
        except asyncio.TimeoutError:
            _EQ_READY = False


def is_ready():
    """Determine if EQ is ready to receive commands"""
    return _EQ_READY


async def wait_for_ready():
    """Wait for EQ to be ready to receive commands (polls once a second)."""
    while True:
        if is_ready():
            return
        else:
            await asyncio.sleep(1)


# Guard so init() is idempotent
_is_init = False


async def init():
    """Prepare the EQ command subsytem"""
    global _is_init
    if _is_init:
        return
    _is_init = True
    await _eqdisplay()
    asyncio.ensure_future(_ping_watch())


class CommandTap(object):
    """A context object, extending the functionality of eqlog.LogTap, which
    also allows sending commands to EQ.

    The class-level _LOCK serializes all command traffic: it is acquired in
    tap() and released in __exit__.
    """

    # NOTE(review): the lock is taken in tap() but only released in
    # __exit__ — if a caller awaits tap() and never enters the ``with``
    # block, the lock is held forever.  Confirm all call sites use the
    # documented ``with await eqcmd.tap() as t:`` pattern.
    _LOCK = asyncio.Lock()

    def __init__(self):
        self._ltctx = None
        self._lt = None

    def __enter__(self):
        self._ltctx = eqlog.tap()
        self._lt = self._ltctx.__enter__()
        return self

    def __exit__(self, *args):
        # Release the class lock even if closing the log tap raises
        try:
            self._ltctx.__exit__(*args)
        finally:
            CommandTap._LOCK.release()

    async def next_line(self):
        """Retrieve the next line"""
        return await self._lt.next_line()

    async def skip_until(self, text):
        """Wait until a line matching the specified regexp comes up.

        *text* may be a plain string (exact match, returns the line) or a
        compiled regex (returns the match object).
        """
        while True:
            line = await self.next_line()
            if isinstance(text, str):
                if line == text:
                    return line
            else:
                m = text.match(line)
                if m:
                    return m

    async def send(self, text):
        """Send a command to EQ."""
        if not is_ready():
            raise NotReadyError("EQ is not currently ready to receive commands")
        await _type(text)

    async def press(self, key_name):
        """Press a key in the EQ window."""
        if not is_ready():
            raise NotReadyError("EQ is not currently ready to receive commands")
        await _press(key_name)


async def tap():
    """Call as 'with await eqcmd.tap() as t:' to get a CommandTap object to
    manipulate EQ with."""
    await CommandTap._LOCK.acquire()
    return CommandTap()
nilq/baby-python
python
# Generated by Django 2.0.5 on 2018-08-03 11:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('surveys', '0005_answer_training_set'), ] operations = [ migrations.CreateModel( name='Translate_Hired_Power', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hired_power_literal', models.CharField(max_length=200)), ('min_hired_power', models.IntegerField(default=0)), ('max_hired_power', models.IntegerField(default=800)), ('deleted', models.IntegerField(default=0)), ], ), ]
nilq/baby-python
python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixin for device_power capability."""
from unittest import mock


class DevicePowerTestMixin:
    """Mixin with shared unit tests for the device_power capability.

    Expects the host test case to provide ``self.uut``.
    """

    def test_power_cycle(self):
        """Verify device_power.cycle() calls both off() and on() once."""
        power = self.uut.device_power
        with mock.patch.object(power, "off"), mock.patch.object(power, "on"):
            power.cycle()
            power.off.assert_called_once()
            power.on.assert_called_once()
nilq/baby-python
python
import numpy as np
import tensorflow as tf
import time
import keras


def KL_generated_images(dec, cla, N, dimZ, task, sample_W = True):
    """Build TF ops measuring how confidently classifier `cla` assigns
    images sampled from decoder `dec` to class `task`.

    Args:
        dec: decoder/generator callable mapping latent z -> images.
        cla: classifier callable mapping images -> 10-way class probabilities.
        N: number of latent samples to draw.
        dimZ: latent dimensionality.
        task: target class index used as the one-hot ground truth.
        sample_W: forwarded to the decoder's `sampling` argument.

    Returns:
        (kl_mean, kl_var): scalar TF ops — the mean cross-entropy against
        the one-hot `task` label and its (biased) variance.
    """
    z = tf.random_normal(shape=(N, dimZ))
    x_gen = dec(z, sampling = sample_W)
    # Clip probabilities away from 0 so tf.log below cannot produce -inf.
    y_gen = tf.clip_by_value(cla(x_gen), 1e-9, 1.0)
    y_true = np.zeros([N, 10])
    y_true[:, task] = 1
    y_true = tf.constant(np.asarray(y_true, dtype='f'))
    kl = -tf.reduce_sum(y_true * tf.log(y_gen), 1)
    kl_mean = tf.reduce_mean(kl)
    kl_var = tf.reduce_mean((kl - kl_mean)**2)
    return kl_mean, kl_var


def construct_eval_func(dec, cla, batch_size_ph, dimZ, task, sample_W = True):
    """Return an evaluation closure that averages the KL ops over several
    sampled batches.

    The returned callable takes a tf.Session and returns
    (mean KL, standard error), printing a one-line summary as it runs.
    """
    N_gen = 100
    kl_mean, kl_var = KL_generated_images(dec, cla, N_gen, dimZ, task, sample_W)
    ops = [kl_mean, kl_var]

    # Fixed: the original body used Python 2 `xrange` and `print` statements,
    # which are errors under Python 3; the inner function also shadowed the
    # builtin `eval` (renamed — only the returned object is part of the API).
    def evaluate(sess):
        n_iter = 10
        N = n_iter * N_gen
        begin = time.time()
        kl_total = 0.0
        kl_var = 0.0
        for j in range(0, n_iter):
            a, b = sess.run(ops, feed_dict={batch_size_ph: N_gen,
                                            keras.backend.learning_phase(): 0})
            kl_total += a / n_iter
            kl_var += b / n_iter
        end = time.time()
        print("kl=%.2f, ste=%.2f, time=%.2f"
              % (kl_total, np.sqrt(kl_var / N), end - begin))
        return kl_total, np.sqrt(kl_var / N)

    return evaluate
nilq/baby-python
python
#!/usr/bin/python
# Written By: Sahar Hathiramani
# Date: 01/07/2021
# Simple interactive TCP port scanner for ports 1-999 of a user-given host.

import os
import socket
from termcolor import colored

os.system("clear")
print("🄱🄰🄳 🄱🄾🅈 🄱🅄🅃 🄰 🅂🄰🄳 🄱🄾🅈")

# Applies to every socket created below.
socket.setdefaulttimeout(2)

host = input("[*] Please Specify a Host to Scan: ")


def portscanner(port):
    """Probe one TCP port on `host` and print whether it is open.

    Fixed: the original reused a single module-level socket for every port —
    a socket cannot be connected again after one connect attempt, so every
    result after the first successful connection was wrong, and the socket
    was never closed.  A fresh socket per probe, closed by `with`, is used
    instead.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # connect_ex returns 0 on success, an error number otherwise.
        if sock.connect_ex((host, port)):
            print(colored("[-] Port %d is closed" % (port), 'red'))
        else:
            print(colored("[+] Port %d is open" % (port), 'green'))


for port in range(1, 1000):
    portscanner(port)

n = str(input("Enter y to return back:"))
if n == "y":
    os.system("clear")
nilq/baby-python
python
import gzip
import warnings
from pkg_resources import resource_stream, resource_filename
import numpy as np
import matplotlib.image as mpimg
from os.path import dirname, join


def load_hill_topography():
    """Load hillshading DEM data and return the elevation grid.

    Returns:
        np.ndarray: elevation array from jacksboro_fault_dem.npz.
    """
    stream = resource_stream(__name__, 'data/jacksboro_fault_dem.npz')
    with np.load(stream) as dem:
        elevation = dem["elevation"]
    return elevation


def load_scan_image():
    """Load the bundled medical scan image.

    Returns:
        np.ndarray: 256x256 uint16 image decoded from the gzipped raw scan.
    """
    stream = resource_stream(__name__, 'data/s1045.ima.gz')
    with gzip.open(stream) as dfile:
        scan_im = np.frombuffer(dfile.read(), np.uint16).reshape((256, 256))
    return scan_im


def load_pic(name="grmhd"):
    """Load one of the bundled demo pictures and return its first channel.

    Args:
        name (str): one of 'grmhd', 'vortex' or 'tng'; any other string
            falls back to the default image with a warning.

    Returns:
        np.ndarray: 2D array — the first channel of the loaded image.

    Raises:
        TypeError: if `name` is not a string.
    """
    if not isinstance(name, str):
        # Fixed: the original constructed this TypeError but never raised it,
        # so non-string input fell through to the string comparisons below.
        raise TypeError("name should be a string")
    if name == "grmhd":
        im_path = resource_filename(__name__, 'data/grmhd.png')
    elif name == "vortex":
        im_path = resource_filename(__name__, 'data/vortex.jpg')
    elif name == "tng":
        im_path = resource_filename(__name__, 'data/tng.jpg')
    else:
        warnings.warn("Using a default image, name should be in ['grmhd', 'vortex', 'tng']")
        im_path = resource_filename(__name__, 'data/grmhd.png')
    img = mpimg.imread(im_path)
    return img[:, :, 0]
nilq/baby-python
python
import textwrap
import uuid
from multiprocessing import Pool
from pprint import pprint

import oyaml as yaml
from cloudmesh.common.DateTime import DateTime
from cloudmesh.common.console import Console
from cloudmesh.configuration.Config import Config
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.storage.Provider import Provider


class StorageQueue:
    """
    This class specifies a storage object queue, that allows the queuing of
    files to be copied between services.

    The queue has a maximal parallelism that can be set to execute the copy
    in multiple threads.

    Please note that actions only add or modify the queue in the db; however,
    the run command executes them one by one.

    It will be up to these methods to guarantee order. For example, in case
    of a recursive copy it would make sense to create directories first.
    """
    """
    DB object

    cm:
       id: uuid
       collection: storage-queue-{source}-{destination}
       ...
    action: copy
    source: the/source/path
    destination: the/destination/path
    created: date
    status:

    Actions can be for example

        copy
        mkdir
        delete
        cancel

    cancel has a specific action allowing all jobs that have not yet been
    finished to be canceled.

    Each file can be in the state: completed, waiting, inprogress, canceled

    here is an example for the status of the queue.

    {
        "length": 100,
        "completed": 10,
        "waiting": 80,
        "inprogress": 10,
        "canceled": 0
    }
    """

    def __init__(self, source, destination, name="local", parallelism=4):
        """
        :param name: The name of the queue (used as a collection in mongodb)
        :param source: The name of the service in cloudmesh.data from which
                       to copy
        :param destination: The name of the service in cloudmesh.data from
                            which to copy
        :param parallelism: The number of parallel threads
        """
        self.source = source
        self.destination = destination
        self.parallelism = parallelism

        # Service definitions come from the cloudmesh configuration file.
        config = Config()
        self.source_spec = config[f"cloudmesh.storage.{source}"]
        self.destination_spec = config[f"cloudmesh.storage.{destination}"]

        self.provider_source = Provider(service=source)
        self.provider_destination = Provider(service=destination)

        self.name = name
        # One MongoDB collection per (queue name, source, destination) triple.
        self.collection = f"storage-queue-{name}-{source}-{destination}"
        # Monotonically increasing sequence number assigned to queued actions.
        self.number = 0
        #
        # TODO: create collection in mongodb
        #
        Console.ok(f"Collection: {self.name}")

    def _copy_file(self, sourcefile, destinationfile):
        """
        adds a copy action to the queue

        copies the file from the source service to the destination service
        using the file located in the path and storing it into the remote. If
        remote is not specified path is used for it. The copy will not be
        performed if the files are the same.

        :param sourcefile:
        :param destinationfile:
        :return: dict entry describing the queued action (not yet persisted)
        """
        date = DateTime.now()
        uuid_str = str(uuid.uuid1())
        # The specification is YAML text; it is parsed right back into a dict
        # below so it can be stored by the @DatabaseUpdate decorator callers.
        specification = textwrap.dedent(f"""
            cm:
              number: {self.number}
              name: "{self.source}:{sourcefile}"
              kind: storage
              id: {uuid_str}
              cloud: {self.collection}
              collection: {self.collection}
              created: {date}
            action: copy
            source:
              service: {self.source}
              path: {sourcefile}
            destination:
              service: {self.destination}
              path: {destinationfile}
            status: waiting
            """)
        entries = yaml.load(specification, Loader=yaml.SafeLoader)
        self.number = self.number + 1
        return entries

    @DatabaseUpdate()
    def copy_file(self, sourcefile, destinationfile):
        """
        adds a copy action to the queue

        copies the file from the source service to the destination service
        using the file located in the path and storing it into the remote. If
        remote is not specified path is used for it. The copy will not be
        performed if the files are the same.

        :param sourcefile:
        :param destinationfile:
        :return:
        """
        self._copy_file(sourcefile, destinationfile)

    @DatabaseUpdate()
    def copy_tree(self, sourcetree, destinationtree):
        """
        adds a tree to be copied to the queue
        it will recursively add all files within the tree

        :param sourcetree:
        :param destinationtree:
        :return: list of queued action entries (directories first)
        """
        # goes recursively through the tree and adds the files
        sources = self.provider_source.list(sourcetree, recursive=True)
        files = []
        dirs = []
        for source in sources:
            if bool(source['file']):
                files.append(source)
            else:
                dirs.append((source))
        # create dirs first
        actions = []
        for file in dirs:
            location = file["cm"]["location"]
            actions.append(self.mkdir(self.destination, location))
        # now copy files
        for file in files:
            location = file["cm"]["location"]
            actions.append(self._copy_file(location, location))
        return actions

    def sync(self, sourcetree, destinationtree):
        """
        just a more convenient name for copy_tree

        :param sourcetree:
        :param destinationtree:
        :return:
        """
        self.copy_tree(sourcetree, destinationtree)

    def mkdir(self, service, path):
        """
        adds a mkdir action to the queue

        create the directory in the storage service

        :param service: service must be either source or destination
        :param path:
        :return: dict entry describing the queued action
        """
        date = DateTime.now()
        uuid_str = str(uuid.uuid1())
        specification = textwrap.dedent(f"""
            cm:
              number: {self.number}
              name: "{service}:{path}"
              kind: storage
              id: {uuid_str}
              cloud: {self.collection}
              collection: {self.collection}
              created: {date}
            action: mkdir
            source:
              service: {service}
              path: {path}
            status: waiting
            """)
        entries = yaml.load(specification, Loader=yaml.SafeLoader)
        self.number = self.number + 1
        return entries

    def delete(self, service, path):
        """
        adds a delete action to the queue

        :param service: service must be either source or destination
        :param path:
        :return: dict entry describing the queued action
        """
        date = DateTime.now()
        uuid_str = str(uuid.uuid1())
        specification = textwrap.dedent(f"""
            cm:
              number: {self.number}
              name: "{service}:{path}"
              kind: storage
              id: {uuid_str}
              cloud: {self.collection}
              collection: {self.collection}
              created: {date}
            action: delete
            source:
              service: {service}
              path: {path}
            status: waiting
            """)
        entries = yaml.load(specification, Loader=yaml.SafeLoader)
        self.number = self.number + 1
        return entries

    def status(self):
        """
        provides that status of the queue

        {
            "length": 100,
            "completed": 10,
            "waiting": 80,
            "inprogress": 10
        }

        :return:
        """
        # find all the values from within the MongoDB
        raise NotImplementedError

    def cancel(self, id=None):
        """
        cancels a job with a specific id

        :param id: if None all jobs are canceled
        :return:
        """
        # if None all are canceled
        raise NotImplementedError

    def action(self, specification):
        """
        executes the action identified by the specification. This is used by
        the run command.

        :param specification:
        :return:
        """
        action = specification["action"]
        if action == "copy":
            print ("COPY", specification)
            # update status
        elif action == "delete":
            print ("DELETE", specification)
            # update status
        elif action == "mkdir":
            print ("MKDIR", specification)
            # update status

    def get_actions(self):
        # Pull all queued storage actions for this collection from MongoDB
        # and partition them by kind so mkdir can run before copy.
        cm = CmDatabase()
        entries = cm.find(cloud=self.collection, kind='storage')
        mkdir = []
        copy = []
        for entry in entries:
            pprint (entry)
            if entry['action'] == 'mkdir':
                mkdir.append(entry)
            elif entry['action'] == 'copy':
                copy.append(entry)
        return mkdir, copy

    def run(self):
        """
        runs the copy process for all jobs in the queue and completes when
        all actions are completed

        :return:
        """
        mkdir, copy = self.get_actions()

        # create directories
        # p = Pool(self.parallelism)
        # p.map(self.action, mkdir)

        # COPY FILES
        # p = Pool(self.parallelism)
        # p.map(self.action, copy)
nilq/baby-python
python
import collections.abc
import re

import numpy as np
import pandas as pd
import torch
import joblib
import os
from pathlib import Path
from loguru import logger
from sklearn.model_selection import train_test_split
# NOTE(review): torch._six was removed in recent PyTorch releases — this
# import pins the project to older torch versions; confirm before upgrading.
from torch._six import string_classes, int_classes
from ivadomed import utils as imed_utils
from ivadomed.keywords import SplitDatasetKW, LoaderParamsKW, ROIParamsKW, ContrastParamsKW
import nibabel as nib
import random

# Maps numpy dtype names to the matching torch tensor constructor; used by
# imed_collate for scalar batches.
__numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}

# Metadata keys copied between input/gt metadata by update_metadata.
TRANSFORM_PARAMS = ['elastic', 'rotation', 'scale', 'offset', 'crop_params', 'reverse',
                    'translation', 'gaussian_noise']

# Ordered list of supported file extensions
# TODO: Implement support of the following OMETIFF formats (#739):
# [".ome.tif", ".ome.tiff", ".ome.tf2", ".ome.tf8", ".ome.btf"]
# They are included in the list to avoid a ".ome.tif" or ".ome.tiff" following the ".tif" or ".tiff" pipeline
EXT_LST = [".nii", ".nii.gz", ".ome.tif", ".ome.tiff", ".ome.tf2", ".ome.tf8", ".ome.btf",
           ".tif", ".tiff", ".png", ".jpg", ".jpeg"]


def split_dataset(df, split_method, data_testing, random_seed, train_frac=0.8, test_frac=0.1):
    """Splits dataset into training, validation and testing sets by applying train, test and validation fractions
    according to the split_method.
    The "data_testing" parameter can be used to specify the data_type and data_value to include in the testing set,
    the dataset is then split as not to mix the data_testing between the training/validation set and the testing set.

    Args:
        df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata.
        split_method (str): Used to specify on which metadata to split the dataset (eg. "participant_id", "sample_id", etc.)
        data_testing (dict): Used to specify data_type and data_value to include in the testing set.
        random_seed (int): Random seed to ensure reproducible splits.
        train_frac (float): Between 0 and 1. Represents the train set proportion.
        test_frac (float): Between 0 and 1. Represents the test set proportion.

    Returns:
        list, list, list: Train, validation and test filenames lists.
    """
    # Get data_type and data_value from split parameters
    # If no data_type is provided, data_type is the same as split_method
    data_type = data_testing['data_type'] if data_testing['data_type'] else split_method
    data_value = data_testing['data_value']

    if not split_method in df:
        raise KeyError("No split_method '{}' was not found in metadata".format(split_method))
    if not data_type in df:
        logger.warning("No data_type named '{}' was found in metadata. Not taken into account "
                       "to split the dataset.".format(data_type))
        data_type = split_method

    # Filter dataframe with rows where split_method is not NAN
    df = df[df[split_method].notna()]

    # If no data_value list is provided, create a random data_value according to data_type and test_fraction
    # Split the TEST and remainder set using sklearn function
    if len(data_value) == 0 and test_frac != 0:
        data_value = sorted(df[data_type].unique().tolist())
        # Ensure the test split receives at least one unique data_value.
        test_frac = test_frac if test_frac >= 1 / len(data_value) else 1 / len(data_value)
        data_value, _ = train_test_split(data_value, train_size=test_frac, random_state=random_seed)
    if len(data_value) != 0:
        for value in data_value:
            if value not in df[data_type].values:
                logger.warning("No data_value '{}' was found in '{}'. Not taken into account "
                               "to split the dataset.".format(value, data_type))
    X_test = df[df[data_type].isin(data_value)]['filename'].unique().tolist()
    X_remain = df[~df[data_type].isin(data_value)][split_method].unique().tolist()

    # List dataset unique values according to split_method
    # Update train fraction to apply to X_remain
    data = sorted(df[split_method].unique().tolist())
    train_frac_update = train_frac * len(data) / len(X_remain)
    if ((train_frac_update > (1 - 1 / len(X_remain)) and len(X_remain) < 2) or train_frac_update > 1):
        raise RuntimeError("{}/{} '{}' remaining for training and validation sets, train_fraction {} is too large, "
                           "validation set would be empty.".format(len(X_remain), len(data), split_method, train_frac))

    # Split remainder in TRAIN and VALID sets according to train_frac_update using sklearn function
    X_train, X_val = train_test_split(X_remain, train_size=train_frac_update, random_state=random_seed)

    # Print the real train, validation and test fractions after splitting
    real_train_frac = len(X_train)/len(data)
    real_valid_frac = len(X_val)/len(data)
    real_test_frac = 1 - real_train_frac - real_valid_frac
    logger.warning("After splitting: train, validation and test fractions are respectively {}, {} and {}"
                   " of {}.".format(round(real_train_frac, 3), round(real_valid_frac, 3),
                                    round(real_test_frac, 3), split_method))

    # Convert train and valid sets from list of "split_method" to list of "filename"
    X_train = df[df[split_method].isin(X_train)]['filename'].unique().tolist()
    X_val = df[df[split_method].isin(X_val)]['filename'].unique().tolist()

    # Make sure that test dataset is unseen during training
    # (in cases where there are multiple "data_type" for a same "split_method")
    X_train = list(set(X_train) - set(X_test))
    X_val = list(set(X_val) - set(X_test))

    return X_train, X_val, X_test


def get_new_subject_file_split(df, split_method, data_testing, random_seed,
                               train_frac, test_frac, path_output, balance,
                               subject_selection=None):
    """Randomly split dataset between training / validation / testing.

    Randomly split dataset between training / validation / testing\
        and save it in path_output + "/split_datasets.joblib".

    Args:
        df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata.
        split_method (str): Used to specify on which metadata to split the dataset (eg. "participant_id", "sample_id", etc.)
        data_testing (dict): Used to specify the data_type and data_value to include in the testing set.
        random_seed (int): Random seed.
        train_frac (float): Training dataset proportion, between 0 and 1.
        test_frac (float): Testing dataset proportionm between 0 and 1.
        path_output (str): Output folder.
        balance (str): Metadata contained in "participants.tsv" file with categorical values. Each category will be
        evenly distributed in the training, validation and testing datasets.
        subject_selection (dict): Used to specify a custom subject selection from a dataset.

    Returns:
        list, list list: Training, validation and testing filenames lists.
    """
    if subject_selection is not None:
        # Verify subject_selection format
        if not (len(subject_selection["metadata"]) == len(subject_selection["n"]) == len(subject_selection["value"])):
            raise ValueError("All lists in subject_selection parameter should have the same length.")

        sampled_dfs = []
        # Seeded so the same subjects are sampled on every run.
        random.seed(random_seed)
        for m, n, v in zip(subject_selection["metadata"], subject_selection["n"], subject_selection["value"]):
            participants = random.sample(df[df[m] == v]['participant_id'].unique().tolist(), n)
            for participant in participants:
                sampled_dfs.append(df[df['participant_id'] == participant])

        if len(sampled_dfs) != 0:
            df = pd.concat(sampled_dfs)

    # If balance, then split the dataframe for each categorical value of the "balance" column
    if balance:
        if balance in df.keys():
            df_list = [df[df[balance] == k] for k in df[balance][df[balance].notna()].unique().tolist()]
        else:
            logger.warning("No column named '{}' was found in 'participants.tsv' file. Not taken into account to split "
                           "the dataset.".format(balance))
            df_list = [df]
    else:
        df_list = [df]

    train_lst, valid_lst, test_lst = [], [], []
    for df_tmp in df_list:
        # Split dataset on each section of subjects
        train_tmp, valid_tmp, test_tmp = split_dataset(df=df_tmp,
                                                       split_method=split_method,
                                                       data_testing=data_testing,
                                                       random_seed=random_seed,
                                                       train_frac=train_frac,
                                                       test_frac=test_frac)

        # Update the dataset lists
        train_lst += train_tmp
        valid_lst += valid_tmp
        test_lst += test_tmp

    # save the subject distribution
    split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst}
    split_path = Path(path_output, "split_datasets.joblib")
    joblib.dump(split_dct, split_path)

    return train_lst, valid_lst, test_lst


def get_subdatasets_subject_files_list(split_params, df, path_output, subject_selection=None):
    """Get lists of subject filenames for each sub-dataset between training / validation / testing.

    Args:
        split_params (dict): Split parameters, see :doc:`configuration_file` for more details.
        df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata.
        path_output (str): Output folder.
        subject_selection (dict): Used to specify a custom subject selection from a dataset.

    Returns:
        list, list list: Training, validation and testing filenames lists.
    """
    if split_params[SplitDatasetKW.FNAME_SPLIT]:
        # Load subjects lists
        old_split = joblib.load(split_params[SplitDatasetKW.FNAME_SPLIT])
        train_lst, valid_lst, test_lst = old_split['train'], old_split['valid'], old_split['test']

        # Backward compatibility for subject_file_lst containing participant_ids instead of filenames
        df_subjects = df[df['filename'].isin(train_lst)]
        if df_subjects.empty:
            df_train = df[df['participant_id'].isin(train_lst)]
            train_lst = sorted(df_train['filename'].to_list())

        df_subjects = df[df['filename'].isin(valid_lst)]
        if df_subjects.empty:
            df_valid = df[df['participant_id'].isin(valid_lst)]
            valid_lst = sorted(df_valid['filename'].to_list())

        df_subjects = df[df['filename'].isin(test_lst)]
        if df_subjects.empty:
            df_test = df[df['participant_id'].isin(test_lst)]
            test_lst = sorted(df_test['filename'].to_list())
    else:
        train_lst, valid_lst, test_lst = get_new_subject_file_split(df=df,
                                                                    split_method=split_params[SplitDatasetKW.SPLIT_METHOD],
                                                                    data_testing=split_params[SplitDatasetKW.DATA_TESTING],
                                                                    random_seed=split_params[SplitDatasetKW.RANDOM_SEED],
                                                                    train_frac=split_params[SplitDatasetKW.TRAIN_FRACTION],
                                                                    test_frac=split_params[SplitDatasetKW.TEST_FRACTION],
                                                                    path_output=path_output,
                                                                    balance=split_params[SplitDatasetKW.BALANCE]
                                                                    if SplitDatasetKW.BALANCE in split_params else None,
                                                                    subject_selection=subject_selection)
    return train_lst, valid_lst, test_lst


def imed_collate(batch):
    """Collates data to create batches

    Args:
        batch (dict): Contains input and gt data with their corresponding metadata.

    Returns:
        list or dict or str or tensor: Collated data.
    """
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if torch.is_tensor(batch[0]):
        stacked = torch.stack(batch, 0)
        return stacked
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return __numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        # Recurse per key so nested metadata dicts collate element-wise.
        return {key: imed_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        return [imed_collate(samples) for samples in batch]

    return batch


def filter_roi(roi_data, nb_nonzero_thr):
    """Filter slices from dataset using ROI data.

    This function filters slices (roi_data) where the number of non-zero voxels within the
    ROI slice (e.g. centerline, SC segmentation) is inferior or equal to a given threshold
    (nb_nonzero_thr).

    Args:
        roi_data (nd.array): ROI slice.
        nb_nonzero_thr (int): Threshold.

    Returns:
        bool: True if the slice needs to be filtered, False otherwise.
    """
    # Discard slices with less nonzero voxels than nb_nonzero_thr
    return not np.any(roi_data) or np.count_nonzero(roi_data) <= nb_nonzero_thr


def orient_img_hwd(data, slice_axis):
    """Orient a given RAS image to height, width, depth according to slice axis.

    Args:
        data (ndarray): RAS oriented data.
        slice_axis (int): Indicates the axis used for the 2D slice extraction:
            Sagittal: 0, Coronal: 1, Axial: 2.

    Returns:
        ndarray: Array oriented with the following dimensions: (height, width, depth).
    """
    if slice_axis == 0:
        return data.transpose(2, 1, 0)
    elif slice_axis == 1:
        return data.transpose(2, 0, 1)
    elif slice_axis == 2:
        return data


def orient_img_ras(data, slice_axis):
    """Orient a given array with dimensions (height, width, depth) to RAS orientation.

    Args:
        data (ndarray): Data with following dimensions (Height, Width, Depth).
        slice_axis (int): Indicates the axis used for the 2D slice extraction:
            Sagittal: 0, Coronal: 1, Axial: 2.

    Returns:
        ndarray: Array oriented in RAS.
    """
    # 4D inputs (extra leading channel/time axis) get the channel axis left in place.
    if slice_axis == 0:
        return data.transpose(2, 1, 0) if len(data.shape) == 3 else data.transpose(0, 3, 2, 1)
    elif slice_axis == 1:
        return data.transpose(1, 2, 0) if len(data.shape) == 3 else data.transpose(0, 2, 3, 1)
    elif slice_axis == 2:
        return data


def orient_shapes_hwd(data, slice_axis):
    """Swap dimensions according to match the height, width, depth orientation.

    Args:
        data (list or tuple): Shape or numbers associated with each image dimension
            (e.g. image resolution).
        slice_axis (int): Indicates the axis used for the 2D slice extraction:
            Sagittal: 0, Coronal: 1, Axial: 2.

    Returns:
        ndarray: Reoriented vector.
    """
    if slice_axis == 0:
        return np.array(data)[[2, 1, 0]]
    elif slice_axis == 1:
        return np.array(data)[[2, 0, 1]]
    elif slice_axis == 2:
        return np.array(data)


def update_metadata(metadata_src_lst, metadata_dest_lst):
    """Update metadata keys with a reference metadata.

    A given list of metadata keys will be changed and given the values of the reference metadata.

    Args:
        metadata_src_lst (list): List of source metadata used as reference for the
            destination metadata.
        metadata_dest_lst (list): List of metadate that needs to be updated.

    Returns:
        list: updated metadata list.
    """
    if metadata_src_lst and metadata_dest_lst:
        if not isinstance(metadata_dest_lst[0], list):  # annotation from one rater only
            metadata_dest_lst[0]._update(metadata_src_lst[0], TRANSFORM_PARAMS)
        else:  # annotations from several raters
            for idx, _ in enumerate(metadata_dest_lst[0]):
                metadata_dest_lst[0][idx]._update(metadata_src_lst[0], TRANSFORM_PARAMS)
    return metadata_dest_lst


def reorient_image(arr, slice_axis, nib_ref, nib_ref_canonical):
    """Reorient an image to match a reference image orientation.

    It reorients a array to a given orientation and convert it to a nibabel object using the
    reference nibabel header.

    Args:
        arr (ndarray): Input array, array to re orient.
        slice_axis (int): Indicates the axis used for the 2D slice extraction:
            Sagittal: 0, Coronal: 1, Axial: 2.
        nib_ref (nibabel): Reference nibabel object, whose header is used.
        nib_ref_canonical (nibabel): `nib_ref` that has been reoriented to canonical
            orientation (RAS).
    """
    # Orient image in RAS according to slice axis
    arr_ras = orient_img_ras(arr, slice_axis)

    # https://gitship.com/neuroscience/nibabel/blob/master/nibabel/orientations.py
    ref_orientation = nib.orientations.io_orientation(nib_ref.affine)
    ras_orientation = nib.orientations.io_orientation(nib_ref_canonical.affine)
    # Return the orientation that transforms from ras to ref_orientation
    trans_orient = nib.orientations.ornt_transform(ras_orientation, ref_orientation)
    # apply transformation
    return nib.orientations.apply_orientation(arr_ras, trans_orient)


def get_file_extension(filename):
    """ Get file extension if it is supported
    Args:
        filename (str): Path of the file.

    Returns:
        str: File extension, or None if the extension is not in EXT_LST.
    """
    # Find the first match from the list of supported file extensions
    extension = next((ext for ext in EXT_LST if filename.lower().endswith(ext)), None)
    return extension


def update_filename_to_nifti(filename):
    """
    Update filename extension to 'nii.gz' if not a NifTI file.

    This function is used to help make non-NifTI files (e.g. PNG/TIF/JPG)
    compatible with NifTI-only pipelines. The expectation is that a NifTI
    version of the file has been created alongside the original file, which
    allows the extension to be cleanly swapped for a `.nii.gz` extension.

    Args:
        filename (str): Path of original file.

    Returns:
        str: Path of the corresponding NifTI file.
    """
    extension = get_file_extension(filename)
    # NOTE(review): get_file_extension can return None for unsupported files,
    # which would make `in extension` raise TypeError here — confirm callers
    # only pass supported paths.
    if not "nii" in extension:
        filename = filename.replace(extension, ".nii.gz")
    return filename


def dropout_input(seg_pair):
    """Applies input-level dropout: zero to all channels minus one will be randomly set to zeros.

    This function verifies if some channels are already empty. Always at least one input
    channel will be kept.

    Args:
        seg_pair (dict): Batch containing torch tensors (input and gt) and metadata.

    Return:
        seg_pair (dict): Batch containing torch tensors (input and gt) and metadata with channel(s)
            dropped.
    """
    n_channels = seg_pair['input'].size(0)
    # Verify if the input is multichannel
    if n_channels > 1:
        # Verify if some channels are already empty
        n_unique_values = [len(torch.unique(input_data)) > 1 for input_data in seg_pair['input']]
        idx_empty = np.where(np.invert(n_unique_values))[0]

        # Select how many channels will be dropped between 0 and n_channels - 1 (keep at least one input)
        n_dropped = random.randint(0, n_channels - 1)

        if n_dropped > len(idx_empty):
            # Remove empty channel to the number of channels to drop
            n_dropped = n_dropped - len(idx_empty)
            # Select which channels will be dropped
            idx_dropped = []
            while len(idx_dropped) != n_dropped:
                idx = random.randint(0, n_channels - 1)
                # Don't include the empty channel in the dropped channels
                if idx not in idx_empty:
                    idx_dropped.append(idx)
        else:
            idx_dropped = idx_empty

        seg_pair['input'][idx_dropped] = torch.zeros_like(seg_pair['input'][idx_dropped])

    else:
        logger.warning("\n Impossible to apply input-level dropout since input is not multi-channel.")

    return seg_pair
nilq/baby-python
python
""" tunning, featuralization, output formatting """ import numpy as np import time def functionongraph(graphs_, i, key='deg', edge_flag=False): # for graphs_[i], get the key-val distribution components = len(graphs_[i]); lis = [] for j in range(components): g = graphs_[i][j] try: assert (str(type(g)) == "<class 'networkx.classes.graphviews.SubGraph'>") or (str(type(g))) == "<class 'networkx.classes.graph.Graph'>" except AssertionError: if g is None: print('wired case: g is None') return [0] else: print('Unconsidered Cases in function on graph') if edge_flag==False: tmp = [g.nodes[k][key] for k in g.nodes] lis += tmp return lis def hisgram_single_feature(graphs_, n_bin, key='deg', his_norm_flag='yes', edge_flag=False, lowerbound=-1, upperbound=1, cdf_flag=False, uniform_flag = True): import numpy as np n = len(graphs_) feature_vec = np.zeros((n, n_bin)) for i in range(n): lis = functionongraph(graphs_, i, key, edge_flag=edge_flag) if lis == []: feature_vec[i] = 0 feature_vec[i] = hisgram(lis, n_bin, his_norm_flag=his_norm_flag, lowerbound=lowerbound, upperbound=upperbound, cdf_flag=cdf_flag, uniform_flag=uniform_flag) return feature_vec def hisgram(lis, n_bin=100, his_norm_flag='yes', lowerbound=-1, upperbound=1, cdf_flag=False, uniform_flag=True): if lis == []: print ('lis is empty') return [0]*n_bin # normalize lis # needs to be more rigirous # TODO: test if it helps to normalize lis if his_norm_flag == 'yes': try: assert max(lis) < 1.1 # * 100000 # delelte 100 later except AssertionError: print ('The max of list is %s' %max(lis)), assert min(lis) > -1.1 max_ = max(lis) # if max_ !=0: # lis = [i/float(max_) for i in lis] if not uniform_flag: assert lowerbound + 1e-3 > 0 n_bin_ = np.logspace(np.log(lowerbound + 1e-3), np.log(upperbound),n_bin+1, base = np.e) else: n_bin_ = n_bin if cdf_flag == True: from statsmodels.distributions.empirical_distribution import ECDF ecdf = ECDF(lis) if uniform_flag: return ecdf([i / np.float(n_bin) for i in range(0, n_bin)]) else: 
return ecdf([i / np.float(n_bin) for i in range(0, n_bin)]) result = np.histogram(lis, bins=n_bin_, range=(lowerbound,upperbound)) return result[0] def remove_zero_col(data, cor_flag=False): # data = np.zeros((2,10)) # data[1,3] = data[1,5] = data[1,7] = 1 n_col = np.shape(data)[1] del_col_idx = np.where(~data.any(axis=0))[0] remain_col_idx = set(range(n_col)) - set(del_col_idx) correspondence_dict = dict(zip(range(len(remain_col_idx)), remain_col_idx)) inverse_correspondence_dict = dict(zip(remain_col_idx, range(len(remain_col_idx)))) X = np.delete(data, np.where(~data.any(axis=0))[0], axis=1) print('the shape after removing zero columns is ', np.shape(X)) if cor_flag == True: return (X, correspondence_dict, inverse_correspondence_dict) else: return X def merge_features(graph, graphs_, allowed, n_bin=30, his_norm_flag='yes', edge_flag=False, cdf_flag=False, uniform_flag = True): print('Number of bins are %s'%n_bin) n = len(graphs_) X = np.zeros((n, 1)) for key in allowed: # print(key) if (key=='label') : if graph == 'dd_test': nbin = 90 else: nbin = 40 tmp = hisgram_single_feature(graphs_, nbin, 'label', his_norm_flag=his_norm_flag, edge_flag=edge_flag, lowerbound=0, upperbound=1, cdf_flag=cdf_flag, uniform_flag=uniform_flag) elif key == 'ricciCurvature': # use default bound for ricci curvature tmp = hisgram_single_feature(graphs_, n_bin, key, his_norm_flag=his_norm_flag, edge_flag=edge_flag, cdf_flag=cdf_flag, uniform_flag=uniform_flag) else: tmp = hisgram_single_feature(graphs_, n_bin, key, his_norm_flag=his_norm_flag, edge_flag=edge_flag, cdf_flag=cdf_flag, uniform_flag=uniform_flag, lowerbound=0) X = np.append(X, tmp, axis=1) return remove_zero_col(X[:,1:])
nilq/baby-python
python
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from .utils.video import run_command, video_size, has_audio
from .utils.message import get_nearest, get_msg_video, get_msg_image, get_msg_video_or_img
import tempfile
import os
import io
from PIL import Image
import random


class FFmpegError(Exception):
    """Raised when an ffmpeg subprocess exits with a non-zero return code."""

    def __init__(self, process):
        # ``process`` is the completed-command object returned by run_command.
        self.ret = process.ret
        self.error = process.err.decode('utf-8')


class Video(commands.Cog):
    """Cog whose commands run ffmpeg transformations on recently posted media."""

    __slots__ = 'bot',

    def __init__(self, bot):
        self.bot: commands.Bot = bot

    async def basic_ffmpeg_command(self, ctx: commands.Context, ffmpeg_func, *args, filename='video.mp4', lookup=get_msg_video):
        """Find the nearest video, run ``ffmpeg_func(video, *args)`` in an
        executor thread, and upload the rendered result.

        ``ffmpeg_func`` must accept the raw media bytes (plus *args) and
        return rendered bytes, raising FFmpegError on failure.
        """
        msg = await ctx.send('Looking for video...')
        video = await get_nearest(ctx, lookup=lookup)
        if video:
            try:
                await msg.edit(content='Rendering video...')
                # run_in_executor keeps the blocking ffmpeg call off the event loop.
                video = await self.bot.loop.run_in_executor(None, ffmpeg_func, video, *args)
                video = io.BytesIO(video)
                await msg.edit(content='Uploading video...')
                await ctx.send(file=discord.File(video, filename=filename))
                await msg.delete()
            except FFmpegError as error:
                await msg.edit(content=f'FFmpeg error:\n```\n{error.error[:1500]}```')
                self.bot.logger.error(error.error)
        else:
            await msg.edit(content='No video found')

    @staticmethod
    def _sound_ffmpeg(media, media_type: str, sound: str):
        """Mux an audio file onto an image (as a still video) or an existing video.

        Returns a ``(rendered_bytes, container_extension)`` pair.
        """
        ext = 'webm' if media_type == 'video' else 'mp4'
        with tempfile.TemporaryDirectory() as folder:
            outpath = os.path.join(folder, 'out.' + ext)
            # enums?? what are those
            if media_type == 'image':
                inpath = os.path.join(folder, 'input.png')
                Image.open(io.BytesIO(media)).convert('RGB').save(inpath)
                # Loop the still image, cut at the audio's length, and pad to
                # even dimensions for the encoder.
                cmd = [
                    'ffmpeg', '-i', sound, '-loop', '1', '-i', inpath,
                    '-shortest', '-pix_fmt', 'yuv420p',
                    '-filter_complex', 'pad=ceil(iw/2)*2:ceil(ih/2)*2',
                    '-c:v', 'mpeg4', '-f', 'mp4', outpath
                ]
            elif media_type == 'video':
                inpath = os.path.join(folder, 'input')
                with open(inpath, 'wb') as file:
                    file.write(media)
                # Replace the video's audio track with the given sound.
                cmd = [
                    'ffmpeg', '-v', 'error',
                    '-i', inpath, '-i', sound,
                    '-map', '0:v', '-map', '1:a',
                    '-shortest', '-f', 'webm', outpath
                ]
            else:
                # ???
                raise Exception(f'What {media_type!r}')
            process = run_command(cmd)
            if process.ret:
                raise FFmpegError(process)
            with open(outpath, 'rb') as file:
                data = file.read()
            return (data, ext)

    async def sound_ffmpeg_command(self, ctx: commands.Context, sound: str, filename: str='video'):
        """Find the nearest image or video and mux ``sound`` onto it via _sound_ffmpeg."""
        msg = await ctx.send('Looking for media...')
        media = await get_nearest(ctx, lookup=get_msg_video_or_img)
        if media:
            try:
                await msg.edit(content='Rendering video...')
                # ``media`` is a (bytes, media_type) pair from get_msg_video_or_img.
                video, ext = await self.bot.loop.run_in_executor(None, self._sound_ffmpeg, media[0], media[1], sound)
                video = io.BytesIO(video)
                await msg.edit(content='Uploading video...')
                await ctx.send(file=discord.File(video, filename=filename + f'.{ext}'))
                await msg.delete()
            except FFmpegError as error:
                # NOTE(review): truncates to 500 chars here vs 1500 in
                # basic_ffmpeg_command -- confirm which limit is intended.
                await msg.edit(content=f'FFmpeg error:\n```\n{error.error[:500]}```')
                self.bot.logger.error(error.error)
        else:
            await msg.edit(content='No media found')

    def how_ffmpeg(self, video) -> bytes:
        """Scale the video to 544x529 and composite it into the HOW meme frame."""
        with tempfile.TemporaryDirectory() as folder:
            inpath = os.path.join(folder, 'input')
            with open(inpath, 'wb') as file:
                file.write(video)
            outpath = os.path.join(folder, 'out.mp4')
            # ``-map 0:a?`` keeps the source audio only if it exists.
            cmd = [
                'ffmpeg', '-i', inpath, '-i', 'assets/how.jpg',
                '-c:v', 'h264', '-c:a', 'copy',
                '-filter_complex', '[0]scale=height=529:width=544[scaled];[1][scaled]overlay=88:0[out]',
                '-map', '0:a?', '-map', '[out]',
                '-f', 'mp4', outpath,
                '-hide_banner', '-v', 'error'
            ]
            process = run_command(cmd)
            if process.ret:
                raise FFmpegError(process)
            with open(outpath, 'rb') as file:
                data = file.read()
            return data

    @commands.command(aliases=['howvideo'])
    @commands.cooldown(2, 20, BucketType.default)
    async def howv(self, ctx):
        """
        HOW (video)
        looks for recent video and runs command on it
        """
        return await self.basic_ffmpeg_command(ctx, self.how_ffmpeg, filename='HOW.mp4')

    def keem_ffmpeg(self, video) -> bytes:
        """Overlay a scaled keemstar-scream clip in the top-right corner of the video."""
        # hard coded as to not do an unecessary ffprobe command everytime
        # NOTE(review): keem_length is currently unused -- the filter relies
        # on eof_action/-shortest instead; confirm before removing.
        keem_length = 9.985
        keem_size = (118, 94)
        with tempfile.TemporaryDirectory() as folder:
            inpath = os.path.join(folder, 'input')
            with open(inpath, 'wb') as file:
                file.write(video)
            outpath = os.path.join(folder, 'out.mp4')
            size = video_size(inpath)
            # Scale the overlay to 1/3 of the video's smaller dimension while
            # keeping the keem clip's aspect ratio.
            if size[0] < size[1]:
                w = size[0] // 3
                h = (keem_size[1] * w) // keem_size[0]
            else:
                h = size[1] // 3
                w = (keem_size[0] * h) // keem_size[1]
            cmd = [
                'ffmpeg', '-i', 'assets/keem.mp4', '-i', inpath,
                '-c:v', 'h264',
                '-filter_complex', f'[0]scale=width={w}:height={h}[scaled];[1][scaled]overlay=x=main_w-overlay_w:y=0:eval=init:eof_action=endall[final];[final]pad=ceil(iw/2)*2:ceil(ih/2)*2',
                '-shortest', '-f', 'mp4', outpath,
                '-hide_banner', '-v', 'error'
            ]
            # only add amix if video has audio
            # as it would error otherwise
            # (cmd[8] is the -filter_complex argument string above)
            if has_audio(inpath):
                cmd[8] = 'amix=duration=shortest;' + cmd[8]
            process = run_command(cmd)
            if process.ret:
                raise FFmpegError(process)
            with open(outpath, 'rb') as file:
                data = file.read()
            return data

    @commands.command(aliases=['keemstar', 'keemscream'])
    @commands.cooldown(2, 20, BucketType.default)
    async def keem(self, ctx):
        """
        keemstar scream
        looks for recent video and runs command on it
        """
        return await self.basic_ffmpeg_command(ctx, self.keem_ffmpeg, filename='keem.mp4')

    def vibrato_ffmpeg(self, video, f) -> bytes:
        """Apply an audio vibrato of frequency ``f`` Hz; the video stream is copied untouched."""
        with tempfile.TemporaryDirectory() as folder:
            inpath = os.path.join(folder, 'input')
            with open(inpath, 'wb') as file:
                file.write(video)
            if not has_audio(inpath):
                # NOTE(review): returning None makes basic_ffmpeg_command call
                # io.BytesIO(None) and raise TypeError -- confirm intended handling.
                return None
            outpath = os.path.join(folder, 'out.mp4')
            cmd = [
                'ffmpeg', '-i', inpath,
                '-af', f'vibrato={f:.2f}:1,aformat=s16p',
                '-c:v', 'copy',
                '-f', 'mp4', outpath,
                '-hide_banner', '-v', 'error'
            ]
            process = run_command(cmd)
            if process.ret:
                raise FFmpegError(process)
            with open(outpath, 'rb') as file:
                data = file.read()
            return data

    @commands.command()
    @commands.cooldown(2, 20, BucketType.default)
    async def vibrato(self, ctx, modulation: float=0.5):
        """
        vibrato audio ooOoOooOOOooooOoo
        looks for recent video and runs command on it
        """
        # ffmpeg's vibrato frequency must lie in (0, 20000) Hz.
        f = modulation * 16
        if f >= 20000 or f <= 0:
            # NOTE(review): this message is also sent for modulation <= 0,
            # where "too big" is misleading.
            return await ctx.send(f'Modulation is too big, has to be in range of [0.1 - 1250]')
        return await self.basic_ffmpeg_command(ctx, self.vibrato_ffmpeg, f, filename='vibrato.mp4')

    @commands.command(aliases=['cave'])
    @commands.cooldown(2, 20, BucketType.default)
    async def cavesounds(self, ctx):
        """
        minecraft cave sound to a picture
        looks for recent image/video and runs command on it
        """
        # Picks one of cave0.mp3 .. cave7.mp3 at random.
        return await self.sound_ffmpeg_command(ctx, f'assets/cave/cave{random.randint(0, 7)}.mp3', filename='cave')

    @commands.command(aliases=['fnaf'])
    @commands.cooldown(2, 20, BucketType.default)
    async def fnafsounds(self, ctx, fnaf=None):
        """
        fnaf sound
        looks for recent image/video and runs command on it
        `fnaf` can be either set to `1`, `2`, `3`, `4`, `6`, `sl` or `ucn`. defaults to random
        """
        options = ('1', '2', '3', '4', '6', 'sl', 'ucn')
        if fnaf is None or fnaf not in options:
            fnaf = random.choice(options)
        # Pick a random sound file from the chosen game's asset folder.
        folder = os.path.join('assets/fnaf', fnaf)
        sounds = os.listdir(folder)
        sound = os.path.join(folder, random.choice(sounds))
        return await self.sound_ffmpeg_command(ctx, sound, filename='fnaf')

    @commands.command(aliases=['amongus'])
    @commands.cooldown(2, 20, BucketType.default)
    async def amongussounds(self, ctx, sfx=None):
        """among us sound fx on video or img"""
        options = ('amongus', 'death', 'drip', 'report', 'vent')
        if sfx not in options:
            sfx = random.choice(options)
        return await self.sound_ffmpeg_command(ctx, f'assets/amongus/{sfx}.mp3', filename='amongus')


def setup(bot):
    """discord.py extension entry point."""
    bot.add_cog(Video(bot))
nilq/baby-python
python
# This file contains all routes of the secretary API.

####################################################################
# import
####################################################################
from flask_restx import Resource, reqparse  # Resource exposes the HTTP request methods
from application.api.secretary.database_functions import *
from application.api.secretary.models import *
from application.api.database_config import DatabaseConnector

####################################################################
# object
####################################################################
# instance of the database connection
# connection = DatabaseConnector('localhost', 'my_university_db', 'root', '')
# SECURITY NOTE(review): database credentials are hardcoded in source --
# they should be moved to environment variables or a config file.
connection = DatabaseConnector('bvrhcrukmknumkqtieuk-mysql.services.clever-cloud.com', 3306, 'bvrhcrukmknumkqtieuk', 'ud3untakpkengqz5', 'kHbxAB3JuoNygcXdXbH9')

####################################################################
# routing
####################################################################
# NOTE(review): most endpoints return status code 250, which is not a
# standard HTTP status (200/201 would be expected) -- confirm intent.


# ============================ sede ========================== #
@secretary.route('/sede')
class HeadOffice(Resource):
    """List and insert university head offices (sedi)."""

    @secretary.marshal_with(get_head_office_model)
    def get(self):
        # NOTE(review): the query runs twice (once for the print, once for
        # the return value).
        print(get_all_head_offices(connection.get_connection()))
        return get_all_head_offices(connection.get_connection()), 250

    @secretary.expect(insert_headoffice_model)
    @secretary.marshal_with(insert_headoffice_model)
    def post(self):
        """Insert a new head office from the request body."""
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
        parser.add_argument('orario_apertura', type=int, help='orario apertura della sede universitaria')
        parser.add_argument('orario_chiusura', type=int, help='orario chiusura della sede universitaria')
        parser.add_argument('numero_piani', type=int, help='numero piani della sede universitaria')
        parser.add_argument('cap', type=int, help='cap della sede universitaria')
        # NOTE(review): help text for via_piazza says 'cap' -- copy/paste slip.
        parser.add_argument('via_piazza', type=str, help='cap della sede universitaria')
        parser.add_argument('civico', type=str, help='civico della sede universitaria')
        args = parser.parse_args(strict=True)
        insertHeadOffice(args['nome_sede'], args['orario_apertura'], args['orario_chiusura'], args['numero_piani'], args['cap'], args['via_piazza'], args['civico'], connection.get_connection())
        return args, 250


# ============================ aggiungi contatto ========================== #
@secretary.route('/contatto_sede')
class Contact(Resource):
    """Insert a contact entry for a head office."""

    @secretary.expect(insert_contact_model)
    @secretary.marshal_with(insert_contact_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
        parser.add_argument('tipo_contatto', type=str, help='tipo contatto della sede universitaria')
        parser.add_argument('valore_contatto', type=str, help='valore contatto della sede universitaria')
        args = parser.parse_args(strict=True)
        insertHeadOfficeContact(args['nome_sede'], args['tipo_contatto'], args['valore_contatto'], connection.get_connection())
        return args, 250


# ============================ cancella sede ========================== #
@secretary.route('/cancella_sede')
class DelHeadOffice(Resource):
    """Delete a head office by name."""

    @secretary.expect(delete_head_office_model)
    @secretary.marshal_with(delete_head_office_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
        args = parser.parse_args(strict=True)
        deleteHeadOffice(args['nome_sede'], connection.get_connection())
        return args, 250


# ============================ aula ========================== #
@secretary.route('/aula')
class Room(Resource):
    """Insert a room (aula) inside a head office."""

    @secretary.expect(insert_room_model)
    @secretary.marshal_with(insert_room_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
        parser.add_argument('numero_piano', type=int, help='numero piano dell\' aula universitaria')
        parser.add_argument('numero_aula', type=int, help='numero aula universitaria')
        parser.add_argument('capienza', type=int, help='capienza dell\' aula universitaria')
        args = parser.parse_args(strict=True)
        insertRoom(args['nome_sede'], args['numero_piano'], args['numero_aula'], args['capienza'], connection.get_connection())
        return args, 250


# ============================ cancella aula ========================== #
@secretary.route('/cancella_aula')
class DelRoom(Resource):
    """Delete a room identified by office, floor and room number."""

    @secretary.expect(delete_room_model)
    @secretary.marshal_with(delete_room_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
        parser.add_argument('numero_piano', type=int, help='numero piano dell\' aula universitaria')
        parser.add_argument('numero_aula', type=int, help='numero aula universitaria')
        args = parser.parse_args(strict=True)
        deleteRoom(args['nome_sede'], args['numero_piano'], args['numero_aula'], connection.get_connection())
        return args, 250


# ============================ corso laurea ========================== #
@secretary.route('/corso_laurea')
class DegreeCourse(Resource):
    """List and insert degree courses."""

    @secretary.marshal_with(insert_degree_course_model)
    def get(self):
        return get_all_degree_courses(connection.get_connection()), 250

    @secretary.expect(insert_degree_course_model)
    @secretary.marshal_with(insert_degree_course_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('codice_corso', type=str, help='codice corso universitario')
        parser.add_argument('nome_corso', type=str, help='nome corso universitario')
        parser.add_argument('durata_corso_laurea', type=int, help='durata corso laurea universitario')
        args = parser.parse_args(strict=True)
        insertDegreeCourse(args['codice_corso'], args['nome_corso'], args['durata_corso_laurea'], connection.get_connection())
        return args, 250


# ============================ cancella corso laurea ========================== #
@secretary.route('/cancella_corso_laurea')
class DelDegreeCourse(Resource):
    """Delete a degree course by code."""

    @secretary.expect(delete_degree_course_model)
    @secretary.marshal_with(delete_degree_course_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('codice_corso', type=str, help='codice corso universitario')
        args = parser.parse_args(strict=True)
        deleteDegreeCourse(args['codice_corso'], connection.get_connection())
        return args, 250


# ============================ locazione ========================== #
@secretary.route('/locazione')
class Located(Resource):
    """List and insert office/course location links."""

    @secretary.marshal_with(get_all_location_model)
    def get(self):
        return get_all_locations(connection.get_connection()), 250

    @secretary.expect(insert_location_model)
    @secretary.marshal_with(insert_location_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome sede universitaria')
        parser.add_argument('codice_corso', type=str, help='codice corso universitario')
        args = parser.parse_args(strict=True)
        insertLocation(args['nome_sede'], args['codice_corso'], connection.get_connection())
        return args, 250


# ============================ cancella locazione ========================== #
@secretary.route('/cancella_locazione')
class DelLocated(Resource):
    """Delete an office/course location link.

    NOTE(review): reuses insert_location_model for a delete operation.
    """

    @secretary.expect(insert_location_model)
    @secretary.marshal_with(insert_location_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('nome_sede', type=str, help='nome sede universitaria')
        parser.add_argument('codice_corso', type=str, help='codice corso universitario')
        args = parser.parse_args(strict=True)
        deleteLocation(args['nome_sede'], args['codice_corso'], connection.get_connection())
        return args, 250


# ============================ disciplina ========================== #
@secretary.route('/disciplina')
class Discipline(Resource):
    """List and insert course disciplines."""

    @secretary.marshal_with(get_all_discipline_model)
    def get(self):
        # NOTE(review): no status code returned here, unlike sibling GETs.
        return get_all_discipline(connection.get_connection())

    @secretary.expect(insert_discipline_model)
    @secretary.marshal_with(insert_discipline_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('codice_corso', type=str, help='codice corso universitario')
        parser.add_argument('codice_disciplina', type=str, help='codice della disciplina universitaria')
        parser.add_argument('nome_disciplina', type=str, help='nome della disciplina universitaria')
        parser.add_argument('cfu', type=int, help='numero di cfu della disciplina universitaria')
        parser.add_argument('semestre', type=int, help='semestre della disciplina universitaria')
        parser.add_argument('anno', type=int, help='anna della disciplina universitaria')
        args = parser.parse_args(strict=True)
        insertDiscipline(args['codice_corso'], args['codice_disciplina'], args['nome_disciplina'], args['cfu'], args['semestre'], args['anno'], connection.get_connection())
        return args, 250


# ============================ cancella disciplina ========================== #
@secretary.route('/cancella_disciplina')
class DelDiscipline(Resource):
    """Delete a discipline identified by course code and discipline code."""

    @secretary.expect(delete_discipline_model)
    @secretary.marshal_with(delete_discipline_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('codice_corso', type=str, help='codice corso universitario')
        parser.add_argument('codice_disciplina', type=str, help='codice disciplina universitaria')
        args = parser.parse_args(strict=True)
        deleteDiscipline(args['codice_corso'], args['codice_disciplina'], connection.get_connection())
        return args, 250


# ============================ docente ========================== #
@secretary.route('/docente')
class Professor(Resource):
    """List and insert teachers."""

    @secretary.marshal_with(get_all_teacher_model)
    def get(self):
        # print(get_all_teachers(connection.get_connection()))
        return get_all_teachers(connection.get_connection()), 250

    @secretary.expect(insert_teacher_model)
    @secretary.marshal_with(insert_teacher_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('cf', type=str, help='cf del docente')
        parser.add_argument('nome', type=str, help='nome del docente')
        parser.add_argument('cognome', type=str, help='cognome del docente')
        parser.add_argument('data_di_nascita', type=str, help='data di nascita del docente')
        parser.add_argument('luogo_di_nascita', type=str, help='luogo di nascita del docente')
        parser.add_argument('cap', type=int, help='cap del docente')
        parser.add_argument('via_piazza', type=str, help='indirizzo del docente')
        parser.add_argument('civico', type=str, help='civico del docente')
        parser.add_argument('matricola_docente', type=str, help='matricola del docente')
        parser.add_argument('email_docente', type=str, help='email del docente')
        parser.add_argument('password_docente', type=str, help='password del docente')
        args = parser.parse_args(strict=True)
        insertTeacher(args['cf'], args['nome'], args['cognome'], args['data_di_nascita'], args['luogo_di_nascita'], args['cap'], args['via_piazza'], args['civico'], args['matricola_docente'], args['email_docente'], args['password_docente'], connection.get_connection())
        return args, 250


# ============================ cancella docente ========================== #
@secretary.route('/cancella_docente')
class DelProfessor(Resource):
    """Delete a teacher identified by fiscal code and staff number."""

    @secretary.expect(delete_teacher_model)
    @secretary.marshal_with(delete_teacher_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('cf', type=str, help='cf del docente')
        parser.add_argument('matricola_docente', type=str, help='matricola del docente')
        args = parser.parse_args(strict=True)
        deleteTeacher(args['cf'], args['matricola_docente'], connection.get_connection())
        return args, 250


# ============================ studente ========================== #
@secretary.route('/studente')
class Student(Resource):
    """List and insert students."""

    @secretary.marshal_with(get_all_student_model)
    def get(self):
        # NOTE(review): no status code returned here, unlike sibling GETs.
        return get_all_students(connection.get_connection())

    @secretary.expect(insert_student_model)
    @secretary.marshal_with(insert_student_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('cf', type=str, help='cf dello studente')
        parser.add_argument('nome', type=str, help='nome dello studente')
        parser.add_argument('cognome', type=str, help='cognome dello studente')
        parser.add_argument('data_di_nascita', type=str, help='data di nascita dello studente')
        parser.add_argument('luogo_di_nascita', type=str, help='luogo di nascita dello studente')
        # NOTE(review): cap is str here but int in the teacher endpoint.
        parser.add_argument('cap', type=str, help='cap dello studente')
        parser.add_argument('via_piazza', type=str, help='via piazza dello studente')
        parser.add_argument('civico', type=str, help='civico dello studente')
        parser.add_argument('matricola_studente', type=str, help='matricola studente dello studente')
        parser.add_argument('email_studente', type=str, help='email dello studente')
        parser.add_argument('data_immatricolazione', type=str, help='data immatricolazione dello studente')
        parser.add_argument('password_studente', type=str, help='password dello studente')
        parser.add_argument('codice_corso', type=str, help='codice del corso di laurea universitario')
        args = parser.parse_args(strict=True)
        insertStudent(args['cf'], args['nome'], args['cognome'], args['data_di_nascita'], args['luogo_di_nascita'], args['cap'], args['via_piazza'], args['civico'], args['matricola_studente'], args['email_studente'], args['data_immatricolazione'], args['password_studente'], args['codice_corso'], connection.get_connection())
        # NOTE(review): this endpoint returns 201 while siblings return 250.
        return args, 201


# ============================ cancella studente ========================== #
@secretary.route('/cancella_studente')
class DelStudent(Resource):
    """Delete a student identified by fiscal code and student number."""

    @secretary.expect(delete_student_model)
    @secretary.marshal_with(delete_student_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('cf', type=str, help='cf dello studente')
        parser.add_argument('matricola_studente', type=str, help='matricola studente universitario')
        args = parser.parse_args(strict=True)
        deleteStudent(args['cf'], args['matricola_studente'], connection.get_connection())
        return args, 250


# ============================ insegnamento ========================== #
@secretary.route('/insegnamento')
class Teaching(Resource):
    """List and insert teaching assignments.

    NOTE(review): the POST reuses delete_teach_model for an insert.
    """

    @secretary.marshal_with(get_all_teachings_model)
    def get(self):
        return get_all_teachings(connection.get_connection()), 250

    @secretary.expect(delete_teach_model)
    @secretary.marshal_with(delete_teach_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('matricola_docente', type=str, help='matricola del docente')
        parser.add_argument('codice_corso', type=str, help='codice del corso di laurea universitario')
        parser.add_argument('codice_disciplina', type=str, help='codice della disciplina universitaria')
        args = parser.parse_args(strict=True)
        insertTeach(args['matricola_docente'], args['codice_corso'], args['codice_disciplina'], connection.get_connection())
        return args, 250


# ============================ cancella insegnamento ========================== #
@secretary.route('/cancella_insegnamento')
class DelTeaching(Resource):
    """Delete a teaching assignment."""

    @secretary.expect(delete_teach_model)
    @secretary.marshal_with(delete_teach_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('matricola_docente', type=str, help='matricola del docente')
        parser.add_argument('codice_corso', type=str, help='codice del corso di laurea universitario')
        parser.add_argument('codice_disciplina', type=str, help='codice della disciplina universitaria')
        args = parser.parse_args(strict=True)
        deleteTeach(args['matricola_docente'], args['codice_corso'], args['codice_disciplina'], connection.get_connection())
        return args, 250


# =================
@secretary.route('/aggiorna_anno_in_corso')
class UpdateYearStudent(Resource):
    """Update a student's current year of enrollment."""

    @secretary.expect(update_anno_in_corso_studente_model)
    @secretary.marshal_with(update_anno_in_corso_studente_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('anno_in_corso', type=str, help='anno in corso dello studente')
        parser.add_argument('matricola_studente', type=str, help='matricola studente universitario')
        args = parser.parse_args(strict=True)
        updateAnnoInCorso(args['anno_in_corso'], args['matricola_studente'], connection.get_connection())
        return args, 250


# ============================ lavora ========================== #
@secretary.route('/lavora')
class Lavora(Resource):
    """Link a teacher to a degree course (works-at relation)."""

    @secretary.expect(insert_lavora_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('codice_corso', type=str, help='codice del corso di laurea')
        parser.add_argument('matricola_docente', type=int, help='matricola del docente')
        args = parser.parse_args(strict=True)
        insertLavora(args['codice_corso'], args['matricola_docente'], connection.get_connection())
        return args, 250


@secretary.route('/delete_lavora')
class DeleteLavora(Resource):
    """Remove a teacher/degree-course works-at link."""

    @secretary.expect(insert_lavora_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('codice_corso', type=str, help='codice del corso di laurea')
        parser.add_argument('matricola_docente', type=int, help='matricola del docente')
        args = parser.parse_args(strict=True)
        deleteLavora(args['codice_corso'], args['matricola_docente'], connection.get_connection())
        return args, 250


# ============================ insert_contatto_persona ========================== #
@secretary.route('/insert_contatto_persona')
class InsertCOntattoPersona(Resource):
    """Insert a contact entry for a person (by fiscal code)."""

    @secretary.expect(person_contact_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('cf', type=str, help='cf')
        parser.add_argument('tipo_contatto', type=str, help='tipo_contatto')
        parser.add_argument('valore_contatto', type=str, help='valore_contatto')
        args = parser.parse_args(strict=True)
        # NOTE(review): argument order here is (tipo, valore, cf) while the
        # delete endpoint passes (cf, tipo, valore) -- confirm both match
        # the database_functions signatures.
        insertContattoPersona(args['tipo_contatto'], args['valore_contatto'], args['cf'], connection.get_connection())
        return args, 250


# ============================ delete_contatto_persona ========================== #
# NOTE(review): route below has a double underscore ('/delete__contatto_persona')
# -- likely a typo, but changing it would break existing clients.
@secretary.route('/delete__contatto_persona')
class DeleteContattoPersona(Resource):
    """Delete a contact entry for a person (by fiscal code)."""

    @secretary.expect(person_contact_model)
    def post(self):
        # arguments
        parser = reqparse.RequestParser()
        parser.add_argument('cf', type=str, help='cf')
        parser.add_argument('tipo_contatto', type=str, help='tipo_contatto')
        parser.add_argument('valore_contatto', type=str, help='valore_contatto')
        args = parser.parse_args(strict=True)
        deleteContattoPersona(args['cf'], args['tipo_contatto'], args['valore_contatto'], connection.get_connection())
        return args, 250
nilq/baby-python
python
from view import View
from serialConnection import SerialConnection
from PyQt5.QtWidgets import QApplication
import time


class Controller:
    """Coordinates the GUI with the scanner firmware over a serial link.

    Sends instructions/parameters to the firmware via `serialConnection`
    and accumulates the returned sensor samples (and timestamps) into
    `self.samples` / `self.times`, pumping the Qt event loop so the UI
    stays responsive during acquisition.
    """

    # Sentinel sample value the firmware sends to signal end-of-stream.
    END_OF_STREAM = 0xFFFF

    def __init__(self, serialConnection, Instructions):
        self.serialConnection = serialConnection
        self.Instructions = Instructions
        self.samples = []
        self.times = []
        self.stop = False
        # Default steps-per-mm calibration; replaced by handleCalibrate().
        self.stepsPerMM = 0.018

    def _sendLittleEndian(self, value, numBytes):
        """Send `value` to the firmware as `numBytes` little-endian bytes.

        Replaces the hand-unrolled lower/middle/upper byte splitting that
        was previously duplicated for every multi-byte parameter.
        """
        for shift in range(0, 8 * numBytes, 8):
            self.serialConnection.sendValue((value >> shift) & 0xFF)

    def handleCalibrate(self):
        """Run the firmware CALIBRATE routine and update self.stepsPerMM.

        The firmware replies with a step count; dividing by 100 and taking
        the reciprocal yields steps-per-mm (presumably the rig travels
        100 mm during calibration — TODO confirm against firmware).
        """
        self.handleStop()
        self.serialConnection.sendInstruction(self.Instructions.CALIBRATE)
        self.stepsPerMM = 1 / (self.serialConnection.readSample() / 100)
        print("NUMSTEPS ", self.stepsPerMM)

    def handleScanBetween(self, P1, P2, sampleDuration, stepLength, stepSize):
        """
        Sends the TWOPOS_SCAN instruction along with its associated values,
        then reads samples until the stop flag is set or the firmware sends
        the end-of-stream sentinel.

        :param: P1 - the first position from the top slider widget
        :param: P2 - the second position from the bottom slider widget
        :param: sampleDuration - the value from sampleDuration spinbox indicating
                how long to sample for at each step (seconds; sent as ms).
        :param: stepLength - the step length (sent as milli-units).
        :param: stepSize - the value selected in the stepLength_combobox
                ("Full", "Half", or "Quarter").
        """
        self.handleStop()
        self.stop = False
        self.serialConnection.sendInstruction(self.Instructions.TWOPOS_SCAN)
        self.serialConnection.sendValue(P1)
        self.serialConnection.sendValue(P2)
        # Duration is a 24-bit value, step length a 16-bit value, both in
        # milli-units and little-endian byte order.
        self._sendLittleEndian(int(float(sampleDuration) * 1000), 3)
        self._sendLittleEndian(int(float(stepLength) * 1000), 2)
        # Map the UI's step-size label to the firmware code; unknown labels
        # fall back to a full step (2), matching the original else branch.
        self.serialConnection.sendValue(
            {"Full": 2, "Half": 1, "Quarter": 0}.get(stepSize, 2))
        while not self.stop:
            currentSample = self.serialConnection.readSample()
            if currentSample == self.END_OF_STREAM:
                break
            currentTime = self.serialConnection.readTime()
            print(currentTime)
            self.samples.append(currentSample)
            self.times.append(currentTime)
            # Keep the Qt UI responsive while acquiring.
            QApplication.processEvents()

    def handleStop(self):
        """
        Sets the stop boolean to true so that we cease reading samples,
        and tells the firmware to stop.
        """
        self.stop = True
        self.serialConnection.sendStopInstruction(self.Instructions.STOP)

    def handleGoToPoint(self, P1):
        """
        Sends the GOTO instruction to move the carriage to position P1.
        """
        self.handleStop()
        self.serialConnection.sendInstruction(self.Instructions.GOTO)
        self.serialConnection.sendValue(P1)

    def handleStartSample(self, averageInterval):
        """
        Sends the START_SAMPLE instruction to turn on the ADC clock and
        stream samples from the sensor until the stop button is pressed
        or the end-of-stream sentinel arrives. Each sample is recorded
        with a fixed 20-unit time delta.

        :param: averageInterval - currently unused by the firmware protocol
                (kept for interface compatibility; formerly sent alongside
                the deprecated sampleDuration).
        """
        self.handleStop()
        self.stop = False
        self.serialConnection.sendInstruction(self.Instructions.START_SAMPLE)
        while not self.stop:
            currentSample = self.serialConnection.readSample()
            if currentSample == self.END_OF_STREAM:
                break
            self.samples.append(currentSample)
            self.times.append(20)
            QApplication.processEvents()

    def handleClearSamples(self):
        """
        Clear the samples list for the controller.
        [ Need to relink the list on the view. ]
        """
        self.samples = []
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from configurable import Configurable

#***************************************************************
class BaseOptimizer(Configurable):
  """Base class for graph-building optimizers.

  Subclasses implement _apply_dense/_apply_sparse to populate each cache's
  's_t' update tensor; this class handles gradient computation, NaN/Inf
  scrubbing, optional global-norm clipping (self.clip), optional Polyak-style
  moving averages of the weights (decay self.chi), and the global step.
  Configuration properties (chi, clip, decay, decay_steps) come from the
  Configurable base.
  """

  #=============================================================
  def __init__(self, *args, **kwargs):
    """Pop the optional global_step kwarg and set up accumulator storage."""

    self._global_step = kwargs.pop('global_step', tf.Variable(0., trainable=False))
    super(BaseOptimizer, self).__init__(*args, **kwargs)
    self._accumulators = {}
    return

  #=============================================================
  def minimize(self, loss, name=None):
    """Build and return an op that applies one optimization step to `loss`."""

    # Error checking
    var_list = tf.trainable_variables()
    for x_tm1 in var_list:
      if not isinstance(x_tm1, tf.Variable):
        raise TypeError("Argument is not a tf.Variable: %s" % x_tm1)
    if not var_list:
      raise ValueError("No variables to optimize")
    if loss.dtype.base_dtype != tf.float32:
      raise ValueError('Loss is not float32')

    # Compute gradients
    var_refs = [x_tm1._ref() for x_tm1 in var_list]
    grads = tf.gradients(loss, var_refs,
                         colocate_gradients_with_ops=True,
                         gate_gradients=True,
                         aggregation_method=2)
    for x_tm1, g_t in zip(var_list, grads):
      if g_t is not None:
        if x_tm1.dtype.base_dtype != tf.float32:
          raise ValueError('%s is not float32' % x_tm1.name)

    # Apply gradients
    with tf.control_dependencies(None):
      self._init_acc(var_list, grads)
    with tf.name_scope(name, self._name, []) as name:
      # BUG FIX: filter() returns a one-shot iterator on Python 3; it was
      # previously exhausted by the loop below, leaving _finish() with an
      # empty sequence. Materialize the caches as a list instead.
      caches = [cache for cache in self._prepare(var_list, grads)
                if cache['g_t'] is not None]
      for cache in caches:
        x_tm1, g_t = cache['x_tm1'], cache['g_t']
        with tf.name_scope("update_" + x_tm1.op.name), tf.device(x_tm1.device):
          if isinstance(g_t, tf.Tensor):
            # Replace non-finite gradient entries with zeros.
            cache['g_t'] = tf.select(tf.is_finite(g_t), g_t, tf.zeros_like(g_t))
            self._apply_dense(cache)
          else:
            # IndexedSlices gradient: scrub values, keep indices for the
            # sparse update path.
            cache['g_t'] = tf.select(tf.is_finite(g_t.values), g_t.values, tf.zeros_like(g_t.values))
            cache['idxs'] = g_t.indices
            self._apply_sparse(cache)
      with tf.control_dependencies([self._finish(caches)]):
        with tf.device(self.global_step.device):
          return tf.assign_add(self.global_step, 1, name=name).op

  #=============================================================
  def _init_acc(self, var_list, grads):
    """Pre-create the weight-average accumulators when chi > 0."""

    for x_tm1, g_t in zip(var_list, grads):
      if self.chi > 0:
        # BUG FIX: tf.add_to_collection takes (name, value); the arguments
        # were previously swapped, registering the collection-name constant
        # under the accumulator tensor instead of the other way around.
        tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                             self.get_accumulator(x_tm1, 'x'))
        shape = self.get_variable_shape(x_tm1)
        if isinstance(g_t, tf.Tensor):
          # Dense gradient: a single scalar step counter suffices.
          self.get_accumulator(x_tm1, 'x/tm1', [])
        else:
          # Sparse gradient: per-row step counters (broadcastable shape).
          self.get_accumulator(x_tm1, 'x/tm1', [shape[0]]+[1]*(len(shape)-1))
    return

  #=============================================================
  def _prepare(self, var_list, grads):
    """Pair each variable with its gradient in a mutable cache dict."""

    caches = []
    for x_tm1, g_t in zip(var_list, grads):
      caches.append({'x_tm1': x_tm1, 'g_t': g_t, 'updates': []})
    return caches

  #=============================================================
  def _apply_dense(self, cache):
    """Subclass hook: compute the dense update 's_t' for one variable."""

    raise NotImplementedError()

  #=============================================================
  def _apply_sparse(self, cache):
    """Subclass hook: compute the sparse update 's_t' for one variable."""

    raise NotImplementedError()

  #=============================================================
  @staticmethod
  def get_variable_shape(x_tm1):
    """Return the static shape of a variable as a Python list."""
    return x_tm1.initialized_value().get_shape().as_list()

  #=============================================================
  def get_accumulator(self, x_tm1, acc_name, shape=None):
    """Return (creating on first use) the accumulator `acc_name` for x_tm1."""

    if shape is None:
      shape = self.get_variable_shape(x_tm1)

    if acc_name not in self._accumulators:
      self._accumulators[acc_name] = {}
    accumulator = self._accumulators[acc_name]

    if x_tm1 not in accumulator:
      new_name = '%s/%s' % (self.name, acc_name)
      zeros = tf.zeros(shape, dtype=x_tm1.dtype)
      with tf.name_scope('%s/%s' % (x_tm1.op.name, new_name)) as scope:
        with tf.device(x_tm1.device):
          accumulator[x_tm1] = b_tm1 = tf.Variable(zeros, name=scope, trainable=False)
      if isinstance(x_tm1, tf.Variable) and x_tm1._save_slice_info:
        real_acc_name = scope[len(x_tm1.op.name + '/'):-1]
        slice_info = x_tm1._save_slice_info
        # BUG FIX: previously referenced the undefined name `real_slot_name`,
        # raising NameError for any partitioned variable; the intended
        # variable is `real_acc_name` computed just above.
        b_tm1._set_save_slice_info(tf.Variable.SaveSliceInfo(
            '%s/%s' % (slice_info.full_name, real_acc_name),
            slice_info.full_shape[:],
            slice_info.var_offset[:],
            slice_info.var_shape[:]))
    return accumulator[x_tm1]

  #=============================================================
  def _dense_moving_average(self, x_tm1, a_t, name, beta=.9):
    """Debiased exponential (beta<1) or running (beta==1) average of a_t."""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[])
    t = tf.assign_add(tm1, 1)
    if beta < 1:
      # Bias-corrected EMA decay (Adam-style debiasing).
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t = beta_t * (1-beta**tm1) / (1-beta**t)
    else:
      # beta == 1: plain running mean.
      beta_t = tm1 / t
    b_t = tf.assign(b_tm1, beta_t*b_tm1)
    b_t = tf.assign_add(b_t, (1-beta_t)*a_t)
    return b_t, t

  #=============================================================
  def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=.9):
    """Row-wise moving average over only the rows selected by idxs."""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    b_tm1_ = tf.gather(b_tm1, idxs)
    shape = self.get_variable_shape(x_tm1)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[shape[0]]+[1]*(len(shape)-1))
    tm1_ = tf.gather(tm1, idxs)
    t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
    t_ = tf.gather(t, idxs)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t_ = beta_t * (1-beta_t**tm1_) / (1-beta_t**t_)
    else:
      beta_t_ = tm1_/t_
    b_t = tf.scatter_update(b_tm1, idxs, beta_t_*b_tm1_)
    b_t = tf.scatter_add(b_t, idxs, (1-beta_t_)*a_t_)
    return b_t, t

  #=============================================================
  def _finish(self, caches):
    """Clip the updates, apply them, and maintain weight averages."""

    if self.clip > 0:
      S_t = [cache['s_t'] for cache in caches]
      S_t, _ = tf.clip_by_global_norm(S_t, self.clip)
      for cache, s_t in zip(caches, S_t):
        cache['s_t'] = s_t
    for cache in caches:
      x_tm1 = cache['x_tm1']
      s_t = cache['s_t']
      updates = cache['updates']
      with tf.name_scope('update_' + x_tm1.op.name), tf.device(x_tm1.device):
        if 'idxs' in cache:
          idxs = cache['idxs']
          x_t = tf.scatter_sub(x_tm1, idxs, s_t)
          if self.chi > 0:
            x_t_ = tf.gather(x_t, idxs)
            x_bar_t, t_x_bar = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', beta=self.chi)
        else:
          x_t = tf.assign_sub(x_tm1, s_t)
          if self.chi > 0:
            x_bar_t, t_x_bar = self._dense_moving_average(x_tm1, x_t, 'x', beta=self.chi)
      updates.append(x_t)
      if self.chi > 0:
        updates.extend([x_bar_t, t_x_bar])
    update_ops = [tf.group(*cache['updates']) for cache in caches]
    return tf.group(*update_ops, name='update')

  #==============================================================
  def average(self, x_tm1):
    """Return the averaged weights for x_tm1.

    NOTE(review): the accumulator lookup is commented out, so this currently
    always returns the raw variable — presumably deliberate while averaging
    was being debugged; confirm before re-enabling.
    """
    if 'x' in self._accumulators:
      return x_tm1
      #return self._accumulators['x'].get(x_tm1, x_tm1)
    else:
      return x_tm1

  #==============================================================
  def average_name(self, x_tm1):
    """Return the canonical checkpoint name of x_tm1's weight average."""
    return x_tm1.op.name + '/' + self._name + '/' + 'x'

  #==============================================================
  def variables_to_restore(self, moving_avg_variables=None):
    """Map checkpoint names to variables, preferring moving averages."""

    name_map = {}
    if moving_avg_variables is None:
      moving_avg_variables = tf.trainable_variables()
      moving_avg_variables += tf.moving_average_variables()
    # Remove duplicates
    moving_avg_variables = set(moving_avg_variables)
    # Collect all the variables with moving average,
    for v in moving_avg_variables:
      name_map[self.average_name(v)] = v
    # Make sure we restore variables without moving average as well.
    for v in list(set(tf.all_variables()) - moving_avg_variables):
      if v.op.name not in name_map:
        name_map[v.op.name] = v
    return name_map

  #===============================================================
  @property
  def learning_rate(self):
    # Exponential decay schedule when decay_steps is configured.
    if self.decay_steps > 0:
      return super(BaseOptimizer, self).learning_rate * self.decay**(self.global_step / self.decay_steps)
    else:
      return super(BaseOptimizer, self).learning_rate
  @property
  def global_step(self):
    return self._global_step
  @property
  def accumulators(self):
    return self._accumulators
nilq/baby-python
python
# %%
# NOTE(review): `sre_constants` is a private stdlib module and `error` is
# unused here; import kept because only part of the file is visible.
from sre_constants import error
import pandas as pd
import openpyxl as pxl
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common import exceptions as e
import time
import datetime
import re
import traceback

#! EXTRACTION


def call_driver_(url):
    """
    This function instantiates a remotely operated browser.

    :param url: the page to open after the browser starts.
    Returns:
        [WebDriver]: A driver.
    """
    DRIVER_PATH = r'/Users/studocu/Downloads/chromedriver'
    driver = webdriver.Chrome(DRIVER_PATH)
    driver.get(url)
    driver.maximize_window()
    return driver


def load_button_(driver):
    """
    This function clicks the "Load More" button until the last load
    (i.e. until the button can no longer be found/clicked).
    """
    LOAD_MORE_XPATH = r'//span[text()="Load More"]'
    while True:
        try:
            load_more = driver.find_element_by_xpath(LOAD_MORE_XPATH)
            actions = ActionChains(driver)
            actions.move_to_element(load_more).perform()
            driver.execute_script(
                'arguments[0].scrollIntoView({behavior: "smooth", block: "center", inline: "center"});',
                load_more)
            WebDriverWait(driver, 4).until(
                EC.element_to_be_clickable((By.XPATH, LOAD_MORE_XPATH)))
            load_more.click()
        except Exception:
            # Button gone / not clickable: all content is loaded.
            break


def get_links_(driver):
    """Collect the href of every association listed on the current page."""
    LINKS_PATH = r'//ul[@class="MuiList-root MuiList-padding"]//a'
    WebDriverWait(driver, 5).until(
        EC.presence_of_element_located((By.XPATH, LINKS_PATH)))
    links_web_elem = driver.find_elements_by_xpath(LINKS_PATH)
    return [link.get_attribute('href') for link in links_web_elem]


def pull_association_info_(links, driver):
    """
    Visit each association page and scrape name/description/address/email/
    phone/link into one row per association.

    Returns:
        (all_rows, error_links): scraped rows, plus the links that failed.
    """
    all_rows = []
    error_links = []
    for i, link in enumerate(links):
        driver.get(link)
        try:
            NAME_XPATH = r'//h1'
            DESC_XPATH = r'//div[@class="bodyText-large userSupplied"]'
            ADDRESS_XPATH = r'//span[text()="Address"]/..'
            EMAIL_XPATH = r'//span[text()="Contact Email"]/..'
            PHONE_XPATH = r'//span[text()="Phone Number"]/..'
            XPATH_LINK = r''  # sentinel: emit the page URL itself
            INFO_XPATHS = [NAME_XPATH, DESC_XPATH, ADDRESS_XPATH,
                           EMAIL_XPATH, PHONE_XPATH, XPATH_LINK]
            INFO_NAMES = ['ASSOCIATION NAME', 'ASSOCIATION DESCRIPTION',
                          'ASSOCIATION ADDRESS', 'ASSOCIATION EMAIL',
                          'ASSOCIATION PHONE', 'ASSOCIATION LINK']
            WebDriverWait(driver, 5).until(
                EC.presence_of_element_located((By.XPATH, INFO_XPATHS[0])))
            all_info_row = []
            print('PULLING DATA FROM ASSOCIATION ' + str(i) + ' OUT OF '
                  + str(len(links)) + ' ASSOCIATIONS...')
            for info_name, info_xpath in zip(INFO_NAMES, INFO_XPATHS):
                try:
                    if info_xpath != '':
                        info_data_web_elem = driver.find_element_by_xpath(info_xpath)
                        info_data = info_data_web_elem.text
                        if info_name == 'ASSOCIATION NAME':
                            info_data = info_data_web_elem.text.title()
                        # treating if description is empty
                        if info_data == '':
                            all_info_row.append('Null')
                        # treating if address is empty
                        elif info_data == 'Address':
                            all_info_row.append('Null')
                        # treating if email is empty
                        elif info_data == 'Contact Email\nE: ':
                            all_info_row.append('Null')
                        # cleaning email data
                        elif info_data.startswith('Contact Email'):
                            info_data = re.sub('Contact Email\nE: ', '', info_data)
                            all_info_row.append(info_data.lower())
                        # cleaning phone data
                        elif info_data.startswith('Phone'):
                            info_data = re.sub('Phone Number\nP: ', '', info_data)
                            all_info_row.append(info_data)
                        else:
                            all_info_row.append(info_data)
                    else:
                        all_info_row.append(link)
                except Exception:
                    # Field missing on this page: record a placeholder.
                    all_info_row.append('Null')
        except Exception as exc:
            # BUG FIX: previously printed the `selenium.common.exceptions`
            # module object (aliased `e`) instead of the actual error.
            print(exc)
            traceback.print_exc()
            error_links.append(link)
        else:
            # BUG FIX: the append used to run unconditionally, so a failed
            # link appended a stale (or, on the first link, undefined) row.
            all_rows.append(all_info_row)
    return all_rows, error_links


def extract_(url):
    """Run the full extraction: open browser, expand list, scrape pages."""
    print('CALLING DRIVER...')
    driver = call_driver_(url)
    print('DRIVER CALLED.')
    print('LOADIND BUTTONS...')
    load_button_(driver)
    print('ALL BUTTONS LOADED.')
    print('PULLING LINKS...')
    links = get_links_(driver)
    print('LINKS PULLED.')
    print('PULLING ASSOCIATION DATA...')
    all_rows, error_links = pull_association_info_(links, driver)
    print('ASSOCIATION DATA PULLED')
    print('CLOSING DRIVER...')
    driver.close()
    print('DRIVER CLOSED.')
    if error_links:
        if len(error_links) > 1:
            print(str(len(error_links)) + ' association sites failed.\n')
            for link in error_links:
                print(link)
        else:
            # BUG FIX: previously concatenated a str with the list itself,
            # which raises TypeError; report the single failing link.
            print('One association link failed: ' + error_links[0])
        #! here we could call the function again on the error_links
    return all_rows


# ! WRANGLING


def transform_(all_rows):
    """Keep only associations that have both a name and an email."""
    try:
        df = pd.DataFrame(all_rows, columns=['Name', 'Descrip', 'Address',
                                             'Email', 'Phone', 'Link'])
        df = df[['Name', 'Email']]
        df = df.loc[(df['Name'] != 'Null') & (df['Email'] != 'Null')]
        print(df)
    except Exception as exc:
        print(exc)
        traceback.print_exc()
        # BUG FIX: falling through to `return df` here raised NameError
        # when the DataFrame was never built; re-raise the real error.
        raise
    return df


def load_(file_name, df):
    """
    This function gets a file name and a DataFrame and converts into a excel
    file, and save it at excel_files folder.

    Args:
        file_name (str): the excel file name that it will be created,
        WITHOUT the extension.
        df (pd.DataFrame): a DataFrame containing the code (if any) and
        courses name.
    """
    EXCEL_FILES_PATH = r'/Users/studocu/Downloads'
    EXTENSION = '.xlsx'
    PATH_FILE = EXCEL_FILES_PATH + '/' + file_name + EXTENSION
    df.to_excel(PATH_FILE, index=False, engine='xlsxwriter')


def pipeline(url, uniID, uni_name):
    """Extract, transform and load one university's association list."""
    file_name = uniID + ' ' + uni_name + ' ASSOCIATIONS'
    file_name = re.sub(' ', '_', file_name)
    all_rows = extract_(url)
    df_ = transform_(all_rows)
    load_(file_name, df_)


def scrape_single(url, uniID, uni_name):
    """Convenience wrapper: run the pipeline for a single university."""
    pipeline(url, uniID, uni_name)


def scrape_multiples():
    """Run the pipeline for every university listed in the input workbook."""
    start_time = time.time()
    EXCEL_PATH = r'/Users/studocu/Desktop/excel_input/input.xlsx'
    df_ = pd.read_excel(EXCEL_PATH)
    urls = df_.iloc[:, 0]
    uni_IDS = df_.iloc[:, 1]
    uni_names = df_.iloc[:, 2]
    for i, url in enumerate(urls):
        uni_ID = str(uni_IDS[i])
        uni_name = uni_names[i]
        print('PULLING DATA FROM: ' + uni_name)
        pipeline(url, uni_ID, uni_name)
    total_seconds = time.time() - start_time
    if total_seconds <= 60:
        print("EXECUTION TIME: {:.2f} SECONDS".format(total_seconds))
    else:
        print("EXECUTION TIME: {} ".format(datetime.timedelta(seconds=total_seconds)))

# %%

# %%
nilq/baby-python
python
import unittest
from libpysal.examples import load_example
import geopandas as gpd
import numpy as np
from segregation.aspatial import MultiRelativeDiversity


class Multi_Relative_Diversity_Tester(unittest.TestCase):
    """Regression test for the multi-group Relative Diversity index."""

    def test_Multi_Relative_Diversity(self):
        # Load the bundled Sacramento tract shapefile as the fixture.
        example = load_example("Sacramento1")
        tracts = gpd.read_file(example.get_path("sacramentot2.shp"))
        race_columns = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
        population_counts = tracts[race_columns]
        result = MultiRelativeDiversity(population_counts, race_columns)
        # Known-good statistic previously computed from this fixture.
        np.testing.assert_almost_equal(result.statistic, 0.15820019878220337)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
#!/usr/bin/python
# -*- coding: utf-8 -*-
### BEGIN LICENSE
#Copyright (c) 2009 Eugene Kaznacheev <qetzal@gmail.com>
#Copyright (c) 2013 Joshua Tasker <jtasker@gmail.com>
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
### END LICENSE

"""
Fetches weather reports from Yahoo! Weather, Weather.com and NOAA
"""

# NOTE(review): several endpoints below (Google iGoogle, the Yahoo RSS feed,
# wxdata.weather.com) appear to belong to long-retired services — the Google
# one is explicitly documented as discontinued below; verify the others
# before relying on this module.

__version__ = "0.3.8"

try:
    # Python 3 imports
    from urllib.request import urlopen
    from urllib.parse import quote
    from urllib.parse import urlencode
    from urllib.error import URLError
    # needed for code to work on Python3
    xrange = range
    unicode = str
except ImportError:
    # Python 2 imports
    from urllib2 import urlopen
    from urllib import quote
    from urllib import urlencode
    from urllib2 import URLError

import sys
import re
from math import pow
from xml.dom import minidom
import json
try:
    from unidecode import unidecode
except ImportError:
    pass

GOOGLE_COUNTRIES_URL = 'http://www.google.com/ig/countries?output=xml&hl=%s'
GOOGLE_CITIES_URL = 'http://www.google.com/ig/cities?output=xml&' + \
    'country=%s&hl=%s'
YAHOO_WEATHER_URL = 'http://xml.weather.yahoo.com/forecastrss/%s_%s.xml'
YAHOO_WEATHER_NS = 'http://xml.weather.yahoo.com/ns/rss/1.0'
NOAA_WEATHER_URL = 'http://www.weather.gov/xml/current_obs/%s.xml'
WEATHER_COM_URL = 'http://wxdata.weather.com/wxdata/weather/local/%s?' + \
    'unit=%s&dayf=5&cc=*'
LOCID_SEARCH_URL = 'http://wxdata.weather.com/wxdata/search/search?where=%s'
WOEID_SEARCH_URL = 'http://query.yahooapis.com/v1/public/yql'
WOEID_QUERY_STRING = 'select line1, line2, line3, line4, ' + \
    'woeid from geo.placefinder where text="%s"'

#WXUG_BASE_URL = 'http://api.wunderground.com/auto/wui/geo'
#WXUG_FORECAST_URL = WXUG_BASE_URL + '/ForecastXML/index.xml?query=%s'
#WXUG_CURRENT_URL = WXUG_BASE_URL + '/WXCurrentObXML/index.xml?query=%s'
#WXUG_GEOLOOKUP_URL = WXUG_BASE_URL + '/GeoLookupXML/index.xml?query=%s'
#WXUG_ALERTS_URL = WXUG_BASE_URL + '/AlertsXML/index.xml?query=%s'


class WindUnits:
    """Class for available wind unit systems"""
    MPS = 1
    MPH = 2
    BEAUFORT = 3
    KPH = 4
    KNOTS = 5


def get_weather_from_weather_com(location_id, units = 'metric'):
    """Fetches weather report from Weather.com

    Parameters:
      location_id: A five digit US zip code or location ID. To find your
      location ID, use function get_loc_id_from_weather_com().

      units: type of units. 'metric' for metric and 'imperial' for non-metric.
      Note that choosing metric units changes all the weather units to metric.
      For example, wind speed will be reported as kilometers per hour and
      barometric pressure as millibars.

    Returns:
      weather_data: a dictionary of weather data that exists in XML feed.
    """
    location_id = quote(location_id)
    if units == 'metric':
        unit = 'm'
    elif units == 'imperial' or units == '':   # for backwards compatibility
        unit = ''
    else:
        unit = 'm'   # fallback to metric
    url = WEATHER_COM_URL % (location_id, unit)
    try:
        handler = urlopen(url)
    except URLError:
        return {'error': 'Could not connect to Weather.com'}
    if sys.version > '3':
        # Python 3
        content_type = dict(handler.getheaders())['Content-Type']
    else:
        # Python 2
        content_type = handler.info().dict['content-type']
    try:
        charset = re.search('charset\=(.*)', content_type).group(1)
    except AttributeError:
        charset = 'utf-8'
    # Normalize any non-UTF-8 payload to UTF-8 bytes before parsing.
    if charset.lower() != 'utf-8':
        xml_response = handler.read().decode(charset).encode('utf-8')
    else:
        xml_response = handler.read()
    dom = minidom.parseString(xml_response)
    handler.close()
    try:
        weather_dom = dom.getElementsByTagName('weather')[0]
    except IndexError:
        # Feed returned an <error><err> element instead of <weather>.
        error_data = {'error': dom.getElementsByTagName('error')[
            0].getElementsByTagName('err')[0].firstChild.data}
        dom.unlink()
        return error_data

    # Maps the feed's terse tag names to readable dictionary keys.
    key_map = {'head':'units', 'ut':'temperature', 'ud':'distance',
               'us':'speed', 'up':'pressure', 'ur':'rainfall',
               'loc':'location', 'dnam':'name', 'lat':'lat', 'lon':'lon',
               'cc':'current_conditions', 'lsup':'last_updated',
               'obst':'station', 'tmp':'temperature', 'flik':'feels_like',
               't':'text', 'icon':'icon', 'bar':'barometer', 'r':'reading',
               'd':'direction', 'wind':'wind', 's':'speed', 'gust':'gust',
               'hmid':'humidity', 'vis':'visibility', 'uv':'uv', 'i':'index',
               'dewp':'dewpoint', 'moon':'moon_phase', 'hi':'high',
               'low':'low', 'sunr':'sunrise', 'suns':'sunset',
               'bt':'brief_text', 'ppcp':'chance_precip'}

    # Top-level sections and the sub-tags to pull out of each.
    data_structure = {'head': ('ut', 'ud', 'us', 'up', 'ur'),
                      'loc': ('dnam', 'lat', 'lon'),
                      'cc': ('lsup', 'obst', 'tmp', 'flik', 't', 'icon',
                             'hmid', 'vis', 'dewp')}
    # Nested structures inside the current-conditions (<cc>) element.
    cc_structure = {'bar': ('r','d'),
                    'wind': ('s','gust','d','t'),
                    'uv': ('i','t'),
                    'moon': ('icon','t')}

    # sanity check, skip missing items
    try:
        for (tag, list_of_tags2) in data_structure.items():
            for tag2 in list_of_tags2:
                if weather_dom.getElementsByTagName(tag)[0].childNodes.length == 0:
                    data_structure[tag] = []
    except IndexError:
        error_data = {'error': 'Error parsing Weather.com response. Full response: %s' % xml_response}
        return error_data

    try:
        weather_data = {}
        for (tag, list_of_tags2) in data_structure.items():
            key = key_map[tag]
            weather_data[key] = {}
            for tag2 in list_of_tags2:
                key2 = key_map[tag2]
                try:
                    weather_data[key][key2] = weather_dom.getElementsByTagName(
                        tag)[0].getElementsByTagName(tag2)[0].firstChild.data
                except AttributeError:
                    # current tag has empty value
                    weather_data[key][key2] = unicode('')
    except IndexError:
        error_data = {'error': 'Error parsing Weather.com response. Full response: %s' % xml_response}
        return error_data

    if weather_dom.getElementsByTagName('cc')[0].childNodes.length > 0:
        cc_dom = weather_dom.getElementsByTagName('cc')[0]
        for (tag, list_of_tags2) in cc_structure.items():
            key = key_map[tag]
            weather_data['current_conditions'][key] = {}
            for tag2 in list_of_tags2:
                key2 = key_map[tag2]
                try:
                    weather_data['current_conditions'][key][key2] = cc_dom.getElementsByTagName(
                        tag)[0].getElementsByTagName(tag2)[0].firstChild.data
                except AttributeError:
                    # current tag has empty value
                    weather_data['current_conditions'][key][key2] = unicode('')

    forecasts = []
    if len(weather_dom.getElementsByTagName('dayf')) > 0:
        time_of_day_map = {'d':'day', 'n':'night'}
        for forecast in weather_dom.getElementsByTagName('dayf')[0].getElementsByTagName('day'):
            tmp_forecast = {}
            tmp_forecast['day_of_week'] = forecast.getAttribute('t')
            tmp_forecast['date'] = forecast.getAttribute('dt')
            for tag in ('hi', 'low', 'sunr', 'suns'):
                key = key_map[tag]
                try:
                    tmp_forecast[key] = forecast.getElementsByTagName(
                        tag)[0].firstChild.data
                except AttributeError:
                    # if nighttime on current day, key 'hi' is empty
                    tmp_forecast[key] = unicode('')
            for part in forecast.getElementsByTagName('part'):
                time_of_day = time_of_day_map[part.getAttribute('p')]
                tmp_forecast[time_of_day] = {}
                for tag2 in ('icon', 't', 'bt', 'ppcp', 'hmid'):
                    key2 = key_map[tag2]
                    try:
                        tmp_forecast[time_of_day][
                            key2] = part.getElementsByTagName(tag2)[0].firstChild.data
                    except AttributeError:
                        # if nighttime on current day, keys 'icon' and 't' are empty
                        tmp_forecast[time_of_day][key2] = unicode('')
                tmp_forecast[time_of_day]['wind'] = {}
                for tag2 in ('s', 'gust', 'd', 't'):
                    key2 = key_map[tag2]
                    tmp_forecast[time_of_day]['wind'][key2] = part.getElementsByTagName(
                        'wind')[0].getElementsByTagName(tag2)[0].firstChild.data
            forecasts.append(tmp_forecast)

    weather_data['forecasts'] = forecasts
    dom.unlink()
    return weather_data


def get_weather_from_google(location_id, hl = ''):
    """Fetches weather report from Google. No longer functional,
    since Google discontinued their Weather API as of Sep 2012.
    Method retained for backwards compatibility.

    Returns:
      weather_data: a dictionary containing only the key 'error'
    """
    weather_data = {'error': 'The Google Weather API has been ' + \
                    'discontinued as of September 2012.'}
    return weather_data


def get_countries_from_google(hl = ''):
    """Get list of countries in specified language from Google

    Parameters:
      hl: the language parameter (language code). Default value is empty
      string, in this case Google will use English.

    Returns:
      countries: a list of elements(all countries that exists in XML feed).
      Each element is a dictionary with 'name' and 'iso_code' keys.
      For example: [{'iso_code': 'US', 'name': 'USA'},
      {'iso_code': 'FR', 'name': 'France'}]
    """
    url = GOOGLE_COUNTRIES_URL % hl
    try:
        handler = urlopen(url)
    except URLError:
        return [{'error':'Could not connect to Google'}]
    if sys.version > '3':
        # Python 3
        content_type = dict(handler.getheaders())['Content-Type']
    else:
        # Python 2
        content_type = handler.info().dict['content-type']
    try:
        charset = re.search('charset\=(.*)', content_type).group(1)
    except AttributeError:
        charset = 'utf-8'
    if charset.lower() != 'utf-8':
        xml_response = handler.read().decode(charset).encode('utf-8')
    else:
        xml_response = handler.read()
    dom = minidom.parseString(xml_response)
    handler.close()

    countries = []
    countries_dom = dom.getElementsByTagName('country')
    for country_dom in countries_dom:
        country = {}
        country['name'] = country_dom.getElementsByTagName(
            'name')[0].getAttribute('data')
        country['iso_code'] = country_dom.getElementsByTagName(
            'iso_code')[0].getAttribute('data')
        countries.append(country)
    dom.unlink()
    return countries


def get_cities_from_google(country_code, hl = ''):
    """Get list of cities of necessary country in specified language from Google

    Parameters:
      country_code: code of the necessary country. For example 'de' or 'fr'.

      hl: the language parameter (language code). Default value is empty
      string, in this case Google will use English.

    Returns:
      cities: a list of elements(all cities that exists in XML feed).
      Each element is a dictionary with 'name', 'latitude_e6' and
      'longitude_e6' keys. For example:
      [{'longitude_e6': '1750000', 'name': 'Bourges', 'latitude_e6': '47979999'}]
    """
    url = GOOGLE_CITIES_URL % (country_code.lower(), hl)
    try:
        handler = urlopen(url)
    except URLError:
        return [{'error':'Could not connect to Google'}]
    if sys.version > '3':
        # Python 3
        content_type = dict(handler.getheaders())['Content-Type']
    else:
        # Python 2
        content_type = handler.info().dict['content-type']
    try:
        charset = re.search('charset\=(.*)', content_type).group(1)
    except AttributeError:
        charset = 'utf-8'
    if charset.lower() != 'utf-8':
        xml_response = handler.read().decode(charset).encode('utf-8')
    else:
        xml_response = handler.read()
    dom = minidom.parseString(xml_response)
    handler.close()

    cities = []
    cities_dom = dom.getElementsByTagName('city')
    for city_dom in cities_dom:
        city = {}
        city['name'] = city_dom.getElementsByTagName(
            'name')[0].getAttribute('data')
        city['latitude_e6'] = city_dom.getElementsByTagName(
            'latitude_e6')[0].getAttribute('data')
        city['longitude_e6'] = city_dom.getElementsByTagName(
            'longitude_e6')[0].getAttribute('data')
        cities.append(city)
    dom.unlink()
    return cities


def get_weather_from_yahoo(location_id, units = 'metric'):
    """Fetches weather report from Yahoo! Weather

    Parameters:
      location_id: A five digit US zip code or location ID. To find your
      location ID, use function get_location_ids().

      units: type of units. 'metric' for metric and 'imperial' for non-metric.
      Note that choosing metric units changes all the weather units to metric.
      For example, wind speed will be reported as kilometers per hour and
      barometric pressure as millibars.

    Returns:
      weather_data: a dictionary of weather data that exists in XML feed.
      See http://developer.yahoo.com/weather/#channel
    """
    location_id = quote(location_id)
    if units == 'metric':
        unit = 'c'
    elif units == 'imperial' or units == '':   # for backwards compatibility
        unit = 'f'
    else:
        unit = 'c'   # fallback to metric
    url = YAHOO_WEATHER_URL % (location_id, unit)
    try:
        handler = urlopen(url)
    except URLError:
        return {'error': 'Could not connect to Yahoo! Weather'}
    if sys.version > '3':
        # Python 3
        content_type = dict(handler.getheaders())['Content-Type']
    else:
        # Python 2
        content_type = handler.info().dict['content-type']
    try:
        charset = re.search('charset\=(.*)', content_type).group(1)
    except AttributeError:
        charset = 'utf-8'
    if charset.lower() != 'utf-8':
        xml_response = handler.read().decode(charset).encode('utf-8')
    else:
        xml_response = handler.read()
    dom = minidom.parseString(xml_response)
    handler.close()

    weather_data = {}
    try:
        weather_data['title'] = dom.getElementsByTagName(
            'title')[0].firstChild.data
        weather_data['link'] = dom.getElementsByTagName(
            'link')[0].firstChild.data
    except IndexError:
        # Malformed/empty feed: the <item><title> carries the error text.
        error_data = {'error': dom.getElementsByTagName('item')[
            0].getElementsByTagName('title')[0].firstChild.data}
        dom.unlink()
        return error_data

    ns_data_structure = {
        'location': ('city', 'region', 'country'),
        'units': ('temperature', 'distance', 'pressure', 'speed'),
        'wind': ('chill', 'direction', 'speed'),
        'atmosphere': ('humidity', 'visibility', 'pressure', 'rising'),
        'astronomy': ('sunrise', 'sunset'),
        'condition': ('text', 'code', 'temp', 'date')
    }

    for (tag, attrs) in ns_data_structure.items():
        # xml_get_ns_yahoo_tag is defined later in this file (beyond this chunk).
        weather_data[tag] = xml_get_ns_yahoo_tag(
            dom, YAHOO_WEATHER_NS, tag, attrs
        )

    weather_data['geo'] = {}
    try:
        weather_data['geo']['lat'] = dom.getElementsByTagName(
            'geo:lat')[0].firstChild.data
        weather_data['geo']['long'] = dom.getElementsByTagName(
            'geo:long')[0].firstChild.data
    except AttributeError:
        weather_data['geo']['lat'] = unicode()
        weather_data['geo']['long'] = unicode()

    weather_data['condition']['title'] = dom.getElementsByTagName(
        'item')[0].getElementsByTagName('title')[0].firstChild.data
    weather_data['html_description'] = dom.getElementsByTagName(
        'item')[0].getElementsByTagName('description')[0].firstChild.data

    forecasts = []
    for forecast in dom.getElementsByTagNameNS(YAHOO_WEATHER_NS, 'forecast'):
        # xml_get_attrs is defined later in this file (beyond this chunk).
        forecasts.append(xml_get_attrs(forecast,('day', 'date', 'low', 'high', 'text', 'code')))
    weather_data['forecasts'] = forecasts
    dom.unlink()
    return weather_data


def get_everything_from_yahoo(country_code, cities):
    """Get all weather data from yahoo for a specific country.

    Parameters:
      country_code: A four letter code of the necessary country.
      For example 'GMXX' or 'FRXX'.

      cities: The maximum number of cities for which to get data.

    Returns:
      weather_reports: A dictionary containing weather data for each city.
    """
    city_codes = yield_all_country_city_codes_yahoo(country_code, cities)
    weather_reports = {}
    for city_c in city_codes:
        weather_data = get_weather_from_yahoo(city_c)
        if ('error' in weather_data):
            # Abort on the first failure and surface the error dict.
            return weather_data
        city = weather_data['location']['city']
        weather_reports[city] = weather_data
    return weather_reports


def yield_all_country_city_codes_yahoo(country_code, cities):
    """Yield all cities codes for a specific country.

    Parameters:
      country_code: A four letter code of the necessary country.
      For example 'GMXX' or 'FRXX'.

      cities: The maximum number of cities to yield.

    Returns:
      country_city_codes: A generator containing the city codes.
    """
    # cities stands for the number of available cities
    for i in range(1, cities + 1):
        # Zero-pad the running index to 4 digits and append to the code.
        yield ''.join([country_code, (4 - len(str(i))) * '0', str(i)])


def get_weather_from_noaa(station_id):
    """Fetches weather report from NOAA: National Oceanic and Atmospheric
    Administration (United States)

    Parameter:
      station_id: the ID of the weather station near the desired location
      To find your station ID, perform the following steps:
      1. Open this URL:
         http://www.weather.gov/xml/current_obs/seek.php?state=az&Find=Find
      2. Select the necessary state state. Click 'Find'.
      3. Find the necessary station in the 'Observation Location' column.
      4. The station ID is in the URL for the weather page for that station.
         For example if the weather page is
         http://weather.noaa.gov/weather/current/KPEO.html -- the station ID
         is KPEO.

      Another way to get the station ID: use the 'Weather.location2station'
      function of this library: http://code.google.com/p/python-weather/

    Returns:
      weather_data: a dictionary of weather data that exists in XML feed.

      ( useful icons: http://www.weather.gov/xml/current_obs/weather.php )
    """
    station_id = quote(station_id)
    url = NOAA_WEATHER_URL % (station_id)
    try:
        handler = urlopen(url)
    except URLError:
        return {'error': 'Could not connect to NOAA'}
    if sys.version > '3':
        # Python 3
        content_type = dict(handler.getheaders())['Content-Type']
    else:
        # Python 2
        content_type = handler.info().dict['content-type']
    try:
        charset = re.search('charset\=(.*)', content_type).group(1)
    except AttributeError:
        charset = 'utf-8'
    if charset.lower() != 'utf-8':
        xml_response = handler.read().decode(charset).encode('utf-8')
    else:
        xml_response = handler.read()
    dom = minidom.parseString(xml_response)
    handler.close()

    # Every tag NOAA may report; absent tags are simply skipped below.
    data_structure = ('suggested_pickup',
                      'suggested_pickup_period',
                      'location',
                      'station_id',
                      'latitude',
                      'longitude',
                      'observation_time',
                      'observation_time_rfc822',
                      'weather',
                      'temperature_string',
                      'temp_f',
                      'temp_c',
                      'relative_humidity',
                      'wind_string',
                      'wind_dir',
                      'wind_degrees',
                      'wind_mph',
                      'wind_gust_mph',
                      'pressure_string',
                      'pressure_mb',
                      'pressure_in',
                      'dewpoint_string',
                      'dewpoint_f',
                      'dewpoint_c',
                      'heat_index_string',
                      'heat_index_f',
                      'heat_index_c',
                      'windchill_string',
                      'windchill_f',
                      'windchill_c',
                      'icon_url_base',
                      'icon_url_name',
                      'two_day_history_url',
                      'ob_url'
                      )
    weather_data = {}
    current_observation = dom.getElementsByTagName('current_observation')[0]
    for tag in data_structure:
        try:
            weather_data[tag] = current_observation.getElementsByTagName(
                tag)[0].firstChild.data
        except IndexError:
            # Tag not present in this station's report; skip it.
            pass
    dom.unlink()
    return weather_data


# (truncated: the next definition continues beyond this chunk)
def 
xml_get_ns_yahoo_tag(dom, ns, tag, attrs): """Parses the necessary tag and returns the dictionary with values Parameters: dom: DOM ns: namespace tag: necessary tag attrs: tuple of attributes Returns: a dictionary of elements """ element = dom.getElementsByTagNameNS(ns, tag)[0] return xml_get_attrs(element,attrs) def xml_get_attrs(xml_element, attrs): """Returns the list of necessary attributes Parameters: element: xml element attrs: tuple of attributes Returns: a dictionary of elements """ result = {} for attr in attrs: result[attr] = xml_element.getAttribute(attr) return result def wind_direction(degrees): """ Convert wind degrees to direction """ try: degrees = int(degrees) except ValueError: return '' if degrees < 23 or degrees >= 338: return 'N' elif degrees < 68: return 'NE' elif degrees < 113: return 'E' elif degrees < 158: return 'SE' elif degrees < 203: return 'S' elif degrees < 248: return 'SW' elif degrees < 293: return 'W' elif degrees < 338: return 'NW' def wind_beaufort_scale(value, wind_units = WindUnits.KPH): """Convert wind speed value to Beaufort number (0-12) The Beaufort wind force scale is an empirical measure that relates wind speed to observed conditions at sea or on land. 
Parameters: value: wind speed value to convert wind_units: unit system of value, defaults to km/h Returns: a string containing the Beaufort number from 0 to 12 """ if wind_units == WindUnits.BEAUFORT: return str(value) try: value = float(value) except ValueError: return '' if value < 0.0: return '' if wind_units == WindUnits.KPH: if value < 1: # Calm return '0' elif value <= 5.5: # Light air return '1' elif value <= 11: # Light breeze return '2' elif value <= 19: # Gentle breeze return '3' elif value <= 28: # Moderate breeze return '4' elif value <= 38: # Fresh breeze return '5' elif value <= 49: # Strong breeze return '6' elif value <= 61: # High wind, moderate gale, near gale return '7' elif value <= 74: # Gale, fresh gale return '8' elif value <= 88: # Strong gale return '9' elif value <= 102: # Storm, whole gale return '10' elif value <= 117: # Violent storm return '11' else: # Hurricane return '12' if wind_units == WindUnits.MPH: if value < 1: return '0' elif value <= 3: return '1' elif value <= 7: return '2' elif value <= 12: return '3' elif value <= 17: return '4' elif value <= 24: return '5' elif value <= 30: return '6' elif value <= 38: return '7' elif value <= 46: return '8' elif value <= 54: return '9' elif value <= 63: return '10' elif value <= 73: return '11' else: return '12' if wind_units == WindUnits.MPS: if value < 0.3: return '0' elif value <= 1.5: return '1' elif value <= 3.4: return '2' elif value <= 5.4: return '3' elif value <= 7.9: return '4' elif value <= 10.7: return '5' elif value <= 13.8: return '6' elif value <= 17.1: return '7' elif value <= 20.7: return '8' elif value <= 24.4: return '9' elif value <= 28.4: return '10' elif value <= 32.6: return '11' else: return '12' if wind_units == WindUnits.KNOTS: if value < 1: return '0' if value <= 3: return '1' if value <= 6: return '2' if value <= 10: return '3' if value <= 16: return '4' if value <= 21: return '5' if value <= 27: return '6' if value <= 33: return '7' if value <= 40: return '8' 
if value <= 47: return '9' if value <= 55: return '10' if value <= 63: return '11' else: return '12' def get_wind_direction(degrees): """ Same as wind_direction """ return wind_direction(degrees) def getText(nodelist): rc = "" for node in nodelist: if node.nodeType == node.TEXT_NODE: rc = rc + node.data return rc def get_location_ids(search_string): """Get location IDs for place names matching a specified string. Same as get_loc_id_from_weather_com() but different return format. Parameters: search_string: Plaintext string to match to available place names. For example, a search for 'Los Angeles' will return matches for the city of that name in California, Chile, Cuba, Nicaragua, etc as well as 'East Los Angeles, CA', 'Lake Los Angeles, CA', etc. Returns: location_ids: A dictionary containing place names keyed to location ID """ loc_id_data = get_loc_id_from_weather_com(search_string) if 'error' in loc_id_data: return loc_id_data location_ids = {} for i in xrange(loc_id_data['count']): location_ids[loc_id_data[i][0]] = loc_id_data[i][1] return location_ids def get_loc_id_from_weather_com(search_string): """Get location IDs for place names matching a specified string. Same as get_location_ids() but different return format. Parameters: search_string: Plaintext string to match to available place names. For example, a search for 'Los Angeles' will return matches for the city of that name in California, Chile, Cuba, Nicaragua, etc as well as 'East Los Angeles, CA', 'Lake Los Angeles, CA', etc. 
Returns: loc_id_data: A dictionary of tuples in the following format: {'count': 2, 0: (LOCID1, Placename1), 1: (LOCID2, Placename2)} """ # Weather.com stores place names as ascii-only, so convert if possible try: # search_string = unidecode(search_string.encode('utf-8')) search_string = unidecode(search_string) except NameError: pass url = LOCID_SEARCH_URL % quote(search_string) try: handler = urlopen(url) except URLError: return {'error': 'Could not connect to server'} if sys.version > '3': # Python 3 content_type = dict(handler.getheaders())['Content-Type'] else: # Python 2 content_type = handler.info().dict['content-type'] try: charset = re.search('charset\=(.*)', content_type).group(1) except AttributeError: charset = 'utf-8' if charset.lower() != 'utf-8': xml_response = handler.read().decode(charset).encode('utf-8') else: xml_response = handler.read() dom = minidom.parseString(xml_response) handler.close() loc_id_data = {} try: num_locs = 0 for loc in dom.getElementsByTagName('search')[0].getElementsByTagName('loc'): loc_id = loc.getAttribute('id') # loc id place_name = loc.firstChild.data # place name loc_id_data[num_locs] = (loc_id, place_name) num_locs += 1 loc_id_data['count'] = num_locs except IndexError: error_data = {'error': 'No matching Location IDs found'} return error_data finally: dom.unlink() return loc_id_data def get_where_on_earth_ids(search_string): """Get Yahoo 'Where On Earth' ID for the place names that best match the specified string. Same as get_woeid_from_yahoo() but different return format. Parameters: search_string: Plaintext string to match to available place names. Place can be a city, country, province, airport code, etc. Yahoo returns the 'Where On Earth' ID (WOEID) for the place name(s) that is the best match to the full string. For example, 'Paris' will match 'Paris, France', 'Deutschland' will match 'Germany', 'Ontario' will match 'Ontario, Canada', 'SFO' will match 'San Francisco International Airport', etc. 
Returns: where_on_earth_ids: A dictionary containing place names keyed to WOEID. """ woeid_data = get_woeid_from_yahoo(search_string) if 'error' in woeid_data: return woeid_data where_on_earth_ids = {} for i in xrange(woeid_data['count']): where_on_earth_ids[woeid_data[i][0]] = woeid_data[i][1] return where_on_earth_ids def get_woeid_from_yahoo(search_string): """Get Yahoo WOEID for the place names that best match the specified string. Same as get_where_on_earth_ids() but different return format. Parameters: search_string: Plaintext string to match to available place names. Place can be a city, country, province, airport code, etc. Yahoo returns the WOEID for the place name(s) that is the best match to the full string. For example, 'Paris' will match 'Paris, France', 'Deutschland' will match 'Germany', 'Ontario' will match 'Ontario, Canada', 'SFO' will match 'San Francisco International Airport', etc. Returns: woeid_data: A dictionary of tuples in the following format: {'count': 2, 0: (WOEID1, Placename1), 1: (WOEID2, Placename2)} """ ## This uses Yahoo's YQL tables to directly query Yahoo's database, e.g. 
## http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20geo.placefinder%20where%20text%3D%22New%20York%22 if sys.version > '3': # Python 3 encoded_string = search_string else: # Python 2 encoded_string = search_string.encode('utf-8') params = {'q': WOEID_QUERY_STRING % encoded_string, 'format': 'json'} url = '?'.join((WOEID_SEARCH_URL, urlencode(params))) try: handler = urlopen(url) except URLError: return {'error': 'Could not connect to server'} if sys.version > '3': # Python 3 content_type = dict(handler.getheaders())['Content-Type'] else: # Python 2 content_type = handler.info().dict['content-type'] try: charset = re.search('charset\=(.*)', content_type).group(1) except AttributeError: charset = 'utf-8' if charset.lower() != 'utf-8': json_response = handler.read().decode(charset).encode('utf-8') else: json_response = handler.read() handler.close() yahoo_woeid_result = json.loads(json_response) try: result = yahoo_woeid_result['query']['results']['Result'] except KeyError: # On error, returned JSON evals to dictionary with one key, 'error' return yahoo_woeid_result except TypeError: return {'error': 'No matching place names found'} woeid_data = {} woeid_data['count'] = yahoo_woeid_result['query']['count'] for i in xrange(yahoo_woeid_result['query']['count']): try: place_data = result[i] except KeyError: place_data = result name_lines = [place_data[tag] for tag in ['line1','line2','line3','line4'] if place_data[tag] is not None] place_name = ', '.join(name_lines) woeid_data[i] = (place_data['woeid'], place_name) return woeid_data def heat_index(temperature, humidity, units = 'metric'): """Calculate Heat Index for the specified temperature and humidity The formula below approximates the heat index in degrees Fahrenheit, to within ±1.3 °F. It is the result of a multivariate fit (temperature equal to or greater than 80°F and relative humidity equal to or greater than 40%) to a model of the human body. 
Heat Index = c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * T^2) + (c_6 * R^2) + (c_7 * T^2 * R) + (c_8 * T * R^2) + (c_9 * T^2 * R^2) where: T = ambient dry-bulb temperature (in degrees Fahrenheit) R = relative humidity (percentage value between 0 and 100) Parameters: temperature: air temperature in specified units humidity: relative humidity (a percentage) at specified air temperature units: type of units. 'metric' for metric and 'imperial' for non-metric. Returns: heat_index: a numerical value representing the heat index in the temperature scale of the specified unit system. Returns None if the specified temperature is less than 80°F or the specified relative humidity is less than 40%. """ # fallback to metric if units != 'imperial' and units != '' and units != 'metric': units = 'metric' R = float(humidity) if units == 'imperial' or units == '': # for backwards compatibility T = float(temperature) elif units == 'metric': # Heat Index is calculated in F T = (float(temperature) * 9.0/5.0) + 32.0 # Heat Index is only valid for temp >= 80°F and humidity >= 40%) if (R < 40.0 or T < 80.0): return None Rsquared = pow(R, 2.0) Tsquared = pow(T, 2.0) # coefficients for calculation c = [None, -42.379, 2.04901523, 10.14333127, -0.22475541, -6.83783 * pow(10.0,-3.0), -5.481717 * pow(10.0,-2.0), 1.22874 * pow(10.0,-3.0), 8.5282 * pow(10.0,-4.0), -1.99 * pow(10.0,-6.0)] heat_index = ( c[1] + (c[2]* T) + (c[3]* R) + (c[4]* T * R) + (c[5]* Tsquared) + (c[6]* Rsquared) + (c[7]* Tsquared * R) + (c[8]* T * Rsquared) + (c[9]* Tsquared * Rsquared) ) # round to one decimal place if units == 'metric': return round(((heat_index - 32.0) * 5.0/9.0), 1) else: return round(heat_index, 1)
# --- corpus concatenation markers ("nilq/baby-python", "python") ---
# The code above is a pywapi-style weather-API module; the code below is an
# unrelated magnet-run cooling-loop analysis script.
#! /usr/bin/python3 from __future__ import unicode_literals # from _typeshed import NoneType import math import os import sys import datetime from typing import TextIO import python_magnetrun import numpy as np import matplotlib # print("matplotlib=", matplotlib.rcParams.keys()) matplotlib.rcParams['text.usetex'] = True # matplotlib.rcParams['text.latex.unicode'] = True key not available import matplotlib.pyplot as plt import pandas as pd import water as w import ht import tabulate import datatools tables = [] def mixingTemp(Flow1, P1, T1, Flow2, P2, T2): """ computes the mixing temperature """ Flow = Flow1 + Flow2 Tmix = w.getRho(P1, T1) * w.getCp(P1, T1) * T1 * Flow1 Tmix += w.getRho(P2, T2) * w.getCp(P2, T2) * T2 * Flow2 Tmix /= w.getRho((P1+P2)/2., T2) * w.getCp((P1+P2)/2., T2) * Flow return Tmix def display_Q(inputfile, f_extension, mrun, debit_alim, ohtc, dT, show=False, extension="-Q.png"): """ plot Heat profiles """ df = mrun.getData() # print("type(mrun.getData()):", type(mrun.getData())) # print("type(df):", type(df), type(df['Tin'])) df["FlowH"] = df.apply(lambda row: ((row.Flow)*1.e-3+(2*debit_alim)/3600.), axis=1) df['Thi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout+dT, 2*debit_alim/3600., row.BP, row.TAlimout), axis=1) if ohtc != "None": df['QNTU'] = df.apply(lambda row: heatexchange(ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[2]/1.e+6, axis=1) else: df['QNTU'] = df.apply(lambda row: heatexchange(row.Ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[2]/1.e+6, axis=1) df["Qhot"] = df.apply(lambda row: ((row.Flow)*1.e-3+0/3600.)*(w.getRho(row.BP, row.Tout)*w.getCp(row.BP, row.Tout)*(row.Tout)-w.getRho(row.HP, row.Tin)*w.getCp(row.HP, row.Tin)*row.Tin)/1.e+6, axis=1) df["Qhot1"] = df.apply(lambda row: (row.FlowH)*(w.getRho(row.BP, row.Thi)*w.getCp(row.BP, row.Thi)*(row.Thi)-w.getRho(row.HP, row.Tin)*w.getCp(row.HP, row.Tin)*row.Tin)/1.e+6, axis=1) df["Qcold"] = df.apply(lambda row: 
row.debitbrut/3600.*(w.getRho(10, row.tsb)*w.getCp(10, row.tsb)*row.tsb-w.getRho(10, row.teb)*w.getCp(10, row.teb)*row.teb)/1.e+6, axis=1) # print("df.keys:", df.columns.values.tolist(), "mrun.keys=", mrun.getKeys()) # heat Balance on Magnet side ax = plt.gca() df.plot(x='t', y='Qhot', ax=ax, color='red') df.plot(x='t', y='Pt', ax=ax, color='yellow', marker='o', alpha = .5, markevery=args.markevery) df.plot(x='t', y='Pmagnet', ax=ax, color='yellow') plt.ylabel(r'Q[MW]') plt.xlabel(r't [s]') plt.grid(b=True) if ohtc != "None": if isinstance(ohtc, (float, int, str)): plt.title("HeatBalance Magnet side:" + mrun.getInsert().replace(r"_",r"\_") + ": h=%g $W/m^2/K$, dT=%g" % (ohtc,dT)) else: #if isinstance(ohtc, type(df['Tin'])): plt.title("HeatBalance Magnet side:" + mrun.getInsert().replace(r"_",r"\_") + ": h=%s $W/m^2/K$, dT=%g" % ("formula",dT)) if show: plt.show() else: extension="-Q_magnetside.png" imagefile = inputfile.replace(f_extension, extension) print("save to %s" % imagefile) plt.savefig(imagefile, dpi=300) plt.close() # heat Balance on HX side ax = plt.gca() df.plot(x='t', y='Qhot1', ax=ax, color='red', marker='o', alpha = .5, markevery=args.markevery) df.plot(x='t', y='Qcold', ax=ax, color='blue') plt.ylabel(r'Q[MW]') plt.xlabel(r't [s]') plt.grid(b=True) if ohtc != "None": if isinstance(ohtc, (float, int, str)): plt.title("HeatBalance HX side:" + mrun.getInsert().replace(r"_",r"\_") + ": h=%g $W/m^2/K$, dT=%g" % (ohtc,dT)) else: #if isinstance(ohtc, type(df['Tin'])): plt.title("HeatBalance HX side:" + mrun.getInsert().replace(r"_",r"\_") + ": h=%s $W/m^2/K$, dT=%g" % ("formula",dT)) if show: plt.show() else: extension="-Q_hxside.png" imagefile = inputfile.replace(f_extension, extension) print("save to %s" % imagefile) plt.savefig(imagefile, dpi=300) plt.close() def display_T(inputfile, f_extension, mrun, tsb_key, tin_key, debit_alim, ohtc, dT, show=False, extension="-coolingloop.png", debug=False): """ plot Temperature profiles """ print("othc=", ohtc) df 
= mrun.getData() df["FlowH"] = df.apply(lambda row: ((row.Flow)*1.e-3+(2*debit_alim)/3600.), axis=1) df['Thi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout+dT, 2*debit_alim/3600., row.BP, row.TAlimout), axis=1) if ohtc != "None": df[tin_key] = df.apply(lambda row: heatexchange(ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[1], axis=1) df[tsb_key] = df.apply(lambda row: heatexchange(ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[0], axis=1) else: df[tin_key] = df.apply(lambda row: heatexchange(row.Ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[1], axis=1) df[tsb_key] = df.apply(lambda row: heatexchange(row.Ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[0], axis=1) ax = plt.gca() df.plot(x='t', y=tsb_key, ax=ax, color='blue', marker='o', alpha = .5, markevery=args.markevery) df.plot(x='t', y='tsb', ax=ax, color='blue') df.plot(x='t', y='teb', ax=ax, color='blue', linestyle='--') df.plot(x='t', y=tin_key, ax=ax, color='red', marker='o', alpha = .5, markevery=args.markevery) df.plot(x='t', y='Tin', ax=ax, color='red') df.plot(x='t', y='Tout', ax=ax, color='red', linestyle='--') df.plot(x='t', y='Thi', ax=ax, color='yellow', marker='o', alpha = .5, markevery=args.markevery) plt.xlabel(r't [s]') plt.grid(b=True) if ohtc != "None": if isinstance(ohtc, (float, int, str)): plt.title(mrun.getInsert().replace(r"_",r"\_") + ": h=%g $W/m^2/K$, dT=%g" % (ohtc,dT)) else: plt.title(mrun.getInsert().replace(r"_",r"\_") + ": h=%s $W/m^2/K$, dT=%g" % ("computed",dT)) if show: plt.show() else: imagefile = inputfile.replace(f_extension, extension) print("save to %s" % imagefile) plt.savefig(imagefile, dpi=300) plt.close() def heatBalance(Tin, Pin, Debit, Power, debug=False): """ Computes Tout from heatBalance inputs: Tin: input Temp in K Pin: input Pressure (Bar) Debit: Flow rate in kg/s """ dT = Power / ( w.getRho(Tin, Pin) * Debit * w.getCp(Tin, Pin) ) Tout = Tin + dT return Tout 
def heatexchange(h, Tci, Thi, Debitc, Debith, Pci, Phi, debug=False):
    """ NTU Model for heat Exchanger

    Computes the output temperatures of the heat exchanger as a function of
    the input temperatures and flow rates.

    Parameters:
      h: overall heat transfer coefficient [W/m2/K]
      Tci: input Temp on cold side
      Thi: input Temp on hot side
      Debitc: cold-side flow rate [m^3/h]
      Debith: hot-side flow rate [l/s]
      Pci, Phi: cold/hot side pressures used for the water properties

    Returns:
      (Tco, Tho, Q): cold outlet temp, hot outlet temp, exchanged heat [W]

    Raises:
      Exception: when the NTU model returns inf/NaN/None values
    """
    A = 1063.4  # m^2 -- total exchange area of the plate HX

    Cp_cold = w.getCp(Pci, Tci)  # J/kg/K
    Cp_hot = w.getCp(Phi, Thi)  # J/kg/K
    m_hot = w.getRho(Phi, Thi) * Debith  # kg/s
    m_cold = w.getRho(Pci, Tci) * Debitc  # kg/s

    # For plate exchanger
    result = ht.hx.P_NTU_method(m_hot, m_cold, Cp_hot, Cp_cold, UA=h*A, T1i=Thi, T2i=Tci, subtype='1/1')
    # returns a dictionnary:
    # Q : Heat exchanged in the heat exchanger, [W]
    # UA : Combined area-heat transfer coefficient term, [W/K]
    # T1i : Inlet temperature of stream 1, [K]
    # T1o : Outlet temperature of stream 1, [K]
    # T2i : Inlet temperature of stream 2, [K]
    # T2o : Outlet temperature of stream 2, [K]
    # P1 : Thermal effectiveness with respect to stream 1, [-]
    # P2 : Thermal effectiveness with respect to stream 2, [-]
    # R1 : Heat capacity ratio with respect to stream 1, [-]
    # R2 : Heat capacity ratio with respect to stream 2, [-]
    # C1 : The heat capacity rate of fluid 1, [W/K]
    # C2 : The heat capacity rate of fluid 2, [W/K]
    # NTU1 : Thermal Number of Transfer Units with respect to stream 1 [-]
    # NTU2 : Thermal Number of Transfer Units with respect to stream 2 [-]

    NTU = result["NTU1"]
    if NTU == float('inf') or math.isnan(NTU):
        print("Tci=", Tci, "Thi=", Thi)
        print("Pci=", Pci, "Phi=", Phi)
        print("Debitc=", Debitc, "Debith=", Debith)
        raise Exception("NTU not valid")
    Q = result["Q"]
    if Q == float('inf') or math.isnan(Q):
        print("Tci=", Tci, "Thi=", Thi)
        print("Pci=", Pci, "Phi=", Phi)
        print("Debitc=", Debitc, "Debith=", Debith)
        raise Exception("Q not valid")
    Tco = result["T2o"]
    # identity check ('is None') instead of '== None'
    if Tco is None:
        print("h=", h)
        print("Tci=", Tci, "Thi=", Thi)
        print("Pci=", Pci, "Phi=", Phi)
        print("Debitc=", Debitc, "Debith=", Debith)
        raise Exception("Tco not valid")
    Tho = result["T1o"]
    if Tho is None:
        print("h=", h)
        print("Tci=", Tci, "Thi=", Thi)
        print("Pci=", Pci, "Phi=", Phi)
        print("Debitc=", Debitc, "Debith=", Debith)
        raise Exception("Tho not valid")

    # dead code kept for reference (power-supply cooling split estimate)
    """
    if dT != 0 and m_alim_A1A2*m_alim_A3A4 != 0:
        dT -= Thi * ( m_hot/(m_hot + m_alim_A1A2 + m_alim_A3A4) -1)
        dT_alim = ( dT/(m_alim_A1A2/(m_hot + m_alim_A1A2 + m_alim_A3A4)) ) / 2. - Tho
        P_A1A2 = dT_alim*m_alim_A1A2*Cp_hot
        P_A3A4 = dT_alim*m_alim_A3A4*Cp_hot
        if debug:
            print("heatexchange: ", NTU, Tco, Tho, Q)
            print("m_alim: ", m_alim_A1A2 + m_alim_A1A2, "m_hot:", m_hot,
                  "%.2f" % ((m_alim_A1A2 + m_alim_A1A2)/m_hot*100), "%")
            # TODO check with site
            print("dT_alim:", dT_alim,
                  "P_A1A2[MW]:", P_A1A2/1.e+6, "%.2f" % (P_A1A2/abs(PowerH)*100), "%",
                  "P_A3A4[MW]:", P_A3A4/1.e+6, "%.2f" % (P_A3A4/abs(PowerB)*100), "%",
                  "PH[MW]", abs(PowerH/1.e+6), "PB[MW]", abs(PowerB/1.e+6))
    """
    return (Tco, Tho, Q)


def find(df, unknows: list, dTini: float, hini: float, hmin: float, hmax: float,
         algo: str, lalgo: str, maxeval: float, stopval: float,
         select=0, site="M9", debit_alim="30", debug=False):
    """
    Use nlopt to find h, dT that give the best approximation for Hx output temperature

    unknows = list of optim var (eg ["dT"] or ["h", "dT"])

    returns a dict
    """
    tables = []
    headers = ["dT[C]", "h[W/m\u00b2/K]", "e_Tin[]", "e_tsb[]", "e_T[]", "Heat Balance[MW]"]

    import nlopt

    print("find %d params:" % len(unknows), unknows)
    opt = None
    if algo == "Direct":
        opt = nlopt.opt(nlopt.GN_DIRECT, len(unknows))
    elif algo == "Direct_L":
        opt = nlopt.opt(nlopt.GN_DIRECT_L, len(unknows))
    elif algo == "CRS2":
        opt = nlopt.opt(nlopt.GN_CRS2_LM, len(unknows))
    elif algo == "MLSL":
        opt = nlopt.opt(nlopt.G_MLSL, len(unknows))

    # local optimizer wiring kept for reference (disabled)
    # if lalgo == "Nelder-Mead":
    #     local_opt = nlopt.opt(nlopt.LN_NELDER_MEAD, len(unknows))
    # elif lalgo == "Cobyla":
    #     local_opt = nlopt.opt(nlopt.LN_LN_COBYLA, len(unknows))
    # local_opt.set_maxeval(maxeval)
    # local_opt.set_ftol_rel(stopval)
    # if lalgo != "None":
    #     opt.set_local_optimizer(local_opt)

    opt.set_maxeval(maxeval)
    opt.set_ftol_rel(stopval)
    opt.set_ftol_abs(1.e-5)
    # opt.set_xtol_rel([tol, tol]) if 2 params? or float?
    # opt.set_xtol_abs([1.e-5, 1.e-5]) if 2 opt params
    if args.debug:
        print("nlopt [ftol fabs xtol xabs]: ",
              opt.get_ftol_rel(), opt.get_ftol_abs(),
              opt.get_xtol_rel(), opt.get_xtol_abs())
    print("nlopt [ftol fabs xtol xabs]: ",
          opt.get_ftol_rel(), opt.get_ftol_abs(),
          opt.get_xtol_rel(), opt.get_xtol_abs())

    # bounds
    lbounds = []
    ubounds = []
    for unknow in unknows:
        if unknow == "dT":
            lbounds.append(-10)
            ubounds.append(10)
        elif unknow == "h":
            lbounds.append(hmin)
            ubounds.append(hmax)
    opt.set_lower_bounds(lbounds)
    opt.set_upper_bounds(ubounds)
    print("bound:", lbounds, ubounds)

    # init_vals
    init_vals = []
    for unknow in unknows:
        if unknow == "dT":
            init_vals.append(dTini)
        elif unknow == "h":
            init_vals.append(hini)
    print("init_vals:", init_vals)

    # use *f_data to pass extra args: df_, subtype
    # ex:
    # fdata = (df_, sbutype)
    # error_Tin(x, **fdata)
    # df_ = fdata[0], subtype = fdata[1], debug = fdata[2]
    # eventually check type with isinstanceof() / type()
    # question: how to take into account error_tsb also??
if len(unknows) == 2: select = 2 print("select: ", select) def error_Tin(x, df_=df, unknows: list=unknows, hini: float=hini, dTini: float=dTini, select: int=select, debug: bool=debug): """compute error between measures and computed data""" ohtc = hini dT = dTini if len(unknows) == 1: if unknows[0] == "dT": dT = x[0] elif unknows[0] == "h": ohtc = x[0] else: ohtc = x[1] dT = x[0] df['cThi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout+dT, 2*debit_alim/3600., row.BP, row.TAlimout), axis=1) df_['cTin'] = df_.apply(lambda row: heatexchange(ohtc, row.teb, row.cThi, row.debitbrut/3600., row.FlowH, 10, row.BP)[1], axis=1) diff = np.abs(df_["Tin"] - df_['cTin']) L2_Tin = math.sqrt(np.dot( df_['Tin'], df_['Tin'] )) error_Tin = math.sqrt(np.dot( diff, diff )) /L2_Tin # diff.size df_['ctsb'] = df_.apply(lambda row: heatexchange(ohtc, row.teb, row.cThi, row.debitbrut/3600., row.FlowH, 10, row.BP)[0], axis=1) diff = np.abs(df_["tsb"] - df_['ctsb']) L2_tsb = math.sqrt(np.dot( df_['tsb'], df_['tsb'] )) error_tsb = math.sqrt(np.dot( diff, diff )) / L2_tsb #diff.size df["cQhot"] = df.apply(lambda row: (row.FlowH)*(w.getRho(row.BP, row.cThi)*w.getCp(row.BP, row.cThi)*(row.cThi)-w.getRho(row.HP, row.cTin)*w.getCp(row.HP, row.cTin)*row.cTin)/1.e+6, axis=1) df["cQcold"] = df.apply(lambda row: row.debitbrut/3600.*(w.getRho(10, row.ctsb)*w.getCp(10, row.ctsb)*row.ctsb-w.getRho(10, row.teb)*w.getCp(10, row.teb)*row.teb)/1.e+6, axis=1) df["cdQ"] = df.apply(lambda row: row.cQhot - row.cQcold, axis=1) df["Qhot"] = df.apply(lambda row: (row.FlowH)*(w.getRho(row.BP, row.Thi)*w.getCp(row.BP, row.Thi)*(row.Thi)-w.getRho(row.HP, row.Tin)*w.getCp(row.HP, row.Tin)*row.cTin)/1.e+6, axis=1) df["Qcold"] = df.apply(lambda row: row.debitbrut/3600.*(w.getRho(10, row.tsb)*w.getCp(10, row.tsb)*row.tsb-w.getRho(10, row.teb)*w.getCp(10, row.teb)*row.teb)/1.e+6, axis=1) df["dQ"] = df.apply(lambda row: row.Qhot - row.Qcold, axis=1) diff = np.abs(df_["Qhot"] - df_['cQhot']) L2_Qhot = 
math.sqrt(np.dot( df_['Qhot'], df_['Qhot'] )) error_qhot = math.sqrt(np.dot( diff, diff )) / L2_Qhot diff = np.abs(df_["Qcold"] - df_['cQcold']) L2_Qcold = math.sqrt(np.dot( df_['Qcold'], df_['Qcold'] )) error_qcold = math.sqrt(np.dot( diff, diff )) / L2_Qcold error_T = 0 if select == 0: error_T = math.sqrt(error_Tin*error_Tin) if select == 1: error_T = math.sqrt(error_tsb*error_tsb) if select == 2: error_T = math.sqrt(error_Tin*error_Tin + error_tsb*error_tsb) if select == 3: error_T = df["cdQ"].mean() if debug: print("error_Tin(%s)" % x, error_Tin, error_tsb, error_T, df["cdQ"].mean(), select, ohtc, dT) tables.append([dT, ohtc, error_Tin, error_tsb, error_T, df["cdQ"].mean()]) del df_['ctsb'] del df_['cTin'] return error_T def myfunc(x, grad): if grad.size > 0: grad[0] = 0.0 grad[1] = 0.0 return error_Tin(x) opt.set_min_objective(myfunc) x = opt.optimize(init_vals) minf = opt.last_optimum_value() status = opt.last_optimize_result() print("optimum: x=", x, "obj=", minf, "(code = ", status, ")") # how to mark line with optimum value in red?? # loop over tables, if line correspond to x[0] then change line to red: a = "\033[1;32m%s\033[0m" %a # #Color # R = "\033[0;31;40m" #RED # G = "\033[0;32;40m" # GREEN # Y = "\033[0;33;40m" # Yellow # B = "\033[0;34;40m" # Blue # N = "\033[0m" # Reset if status >= 0: for line in tables: tmp = 0 for i, unknow in enumerate(unknows): tmp += int(line[i] == x[i]) if tmp == len(unknows): for i,item in enumerate(line): line[i] = "\033[1;32m%s\033[0m" % item print( "\n", tabulate.tabulate(tables, headers, tablefmt="simple"), "\n") optval = {} for i,unknow in enumerate(unknows): optval[unknow] = x[i] return (optval, status) if __name__ == "__main__": command_line = None import argparse parser = argparse.ArgumentParser("Cooling loop Heat Exchanger") parser.add_argument("input_file", help="input txt file (ex. 
M10_2020.10.04_20-2009_43_31.txt)") parser.add_argument("--nhelices", help="specify number of helices", type=int, default=14) parser.add_argument("--ohtc", help="specify heat exchange coefficient (ex. 4000 W/K/m^2 or None)", type=str, default="None") parser.add_argument("--dT", help="specify dT for Tout (aka accounting for alim cooling, ex. 0)", type=float, default=0) parser.add_argument("--site", help="specify a site (ex. M8, M9,...)", type=str) parser.add_argument("--debit_alim", help="specify flowrate for power cooling - one half only (default: 30 m3/h)", type=float, default=30) parser.add_argument("--show", help="display graphs (requires X11 server active)", action='store_true') parser.add_argument("--debug", help="activate debug mode", action='store_true') # parser.add_argument("--save", help="save graphs to png", action='store_true') # raw|filter|smooth post-traitement of data parser.add_argument("--pre", help="select a pre-traitment for data", type=str, choices=['raw','filtered','smoothed'], default='smoothed') # define params for post traitment of data parser.add_argument("--pre_params", help="pass param for pre-traitment method", type=str, default='400') parser.add_argument("--markevery", help="set marker every ... 
display method", type=int, default='800') # define subparser: find subparsers = parser.add_subparsers(title="commands", dest="command", help='sub-command help') # make the following options dependent to find + nlopt parser_nlopt = subparsers.add_parser('find', help='findh help') #, parents=[parser]) parser_nlopt.add_argument("--error", help="specify error (0 for hot, 1 for cold, 2 for a mix)", type=int, choices=range(0, 2), default=0) parser_nlopt.add_argument("--unknows", help="specifiy optim keys (eg h or dTh or dT;h", type=str, default="dT;h") parser_nlopt.add_argument("--tol", help="specifiy relative tolerances (eg h or dTh or dT;h", type=str, default="1.e-5;1.e-5") parser_nlopt.add_argument("--abstol", help="specifiy absolute tolerances (eg h or dTh or dT;h", type=str, default="1.e-5;1.e-5") parser_nlopt.add_argument("--algo", help="specifiy optim algo", type=str, choices=["Direct_L", "Direct", "CRS2", "MLSL"], default="Direct_L") parser_nlopt.add_argument("--local", help="specifiy optim algo", type=str, choices=["None", "Nelder-Mead", "Cobyla"], default="None") parser_nlopt.add_argument("--stopval", help="stopping criteria for nlopt", type=float, default=1.e-2) parser_nlopt.add_argument("--maxeval", help="stopping max eval for nlopt", type=int, default=1000) #parser_nlopt.set_defaults(func=optim) args = parser.parse_args(command_line) tau = 400 if args.pre == 'smoothed': print("smoothed options") tau = float(args.pre_params) threshold = 0.5 twindows = 10 if args.pre == 'filtered': print("filtered options") params = args.pre_params.split(';') threshold = float(params[0]) twindows = int(params[1]) optkeys = [] if args.command == 'find': print("find options") optkeys = args.unknows.split(";") # returns a list # check valid keys # nlopt_args = parser_nlopt.parse_args() # smoothed_args = parser_smoothed.parse_args() print("args: ", args) # check extension f_extension = os.path.splitext(args.input_file)[-1] if f_extension != ".txt": print("so far only txt file 
support is implemented") sys.exit(0) filename = os.path.basename(args.input_file) result = filename.startswith("M") if result: try: index = filename.index("_") args.site = filename[0:index] print("site detected: %s" % args.site) except: print("no site detected - use args.site argument instead") pass mrun = python_magnetrun.MagnetRun.fromtxt(args.site, args.input_file) if not args.site: args.site = mrun.getSite() # Adapt filtering and smoothing params to run duration duration = mrun.getDuration() if duration <= 10*tau: tau = min(duration // 10, 10) print("Modified smoothing param: %g over %g s run" % (tau, duration) ) args.markevery = 8 * tau # print("type(mrun):", type(mrun)) mrun.getMData().addTime() start_timestamp = mrun.getMData().getStartDate() if not "Flow" in mrun.getKeys(): mrun.getMData().addData("Flow", "Flow = Flow1 + Flow2") if not "Tin" in mrun.getKeys(): mrun.getMData().addData("Tin", "Tin = (Tin1 + Tin2)/2.") if not "HP" in mrun.getKeys(): mrun.getMData().addData("HP", "HP = (HP1 + HP2)/2.") if not "Talim" in mrun.getKeys(): # Talim not defined, try to estimate it print("Talim key not present - set Talim=0") mrun.getMData().addData("Talim", "Talim = 0") # extract data keys = ["t", "teb", "tsb", "debitbrut", "Tout", "Tin", "Flow", "BP", "HP", "Pmagnet"] units = ["s","C","C","m\u00B3/h","C","C","l/s","bar", "MW"] # df = mrun.getMData().extractData(keys) if args.debug: pd.set_option("display.max_rows", None, "display.max_columns", None) # TODO: move to magnetdata max_tap = 0 for i in range(1,args.nhelices+1): ukey = "Ucoil%d" % i # print ("Ukey=%s" % ukey, (ukey in keys) ) if ukey in mrun.getKeys(): max_tap=i if max_tap != args.nhelices and max_tap != args.nhelices//2: print("Check data: inconsistant U probes and helices") sys.exit(1) missing_probes=[] for i in range(1,max_tap+1): ukey = "Ucoil%d" % i if not ukey in mrun.getKeys(): # Add an empty column # print ("Ukey=%s" % ukey, (ukey in keys) ) mrun.getMData().addData(ukey, "%s = 0" % ukey) 
missing_probes.append(i) if missing_probes: print("Missing U probes:", missing_probes) # TODO verify if Ucoil starts at 1 if nhelices < 14 formula = "UH = " for i in range(args.nhelices+1): ukey = "Ucoil%d" % i if ukey in mrun.getKeys(): if i != 1: formula += " + " formula += ukey # print("UH", formula) if not "UH" in mrun.getKeys(): mrun.getMData().addData("UH", formula) formula = "UB = Ucoil15 + Ucoil16" # print("UB", formula) if not "UB" in mrun.getKeys(): mrun.getMData().addData("UB", formula) if not "PH" in mrun.getKeys(): mrun.getMData().addData("PH", "PH = UH * IH") if not "PB" in mrun.getKeys(): mrun.getMData().addData("PB", "PB = UB * IB") if not "Pt" in mrun.getKeys(): mrun.getMData().addData("Pt", "Pt = (PH + PB)/1.e+6") # estimate dTH: PH / (rho * Cp * Flow1) mrun.getMData().addData("dTh", "dTh = PH / (1000 * 4180 * Flow1*1.e-3)") # estimate dTB: PB / (rho * Cp * Flow2) mrun.getMData().addData("dTb", "dTb = PB / (1000 * 4180 * Flow2*1.e-3)") # estimate Tout: ( (Tin1+dTh)*Flow1 + (Tin2+dTb)*Flow2 ) / (Flow1+Flow2) mrun.getMData().addData( "cTout", "( (Tin1+dTh)*Flow1 + (Tin2+dTb)*Flow2 ) / (Flow1+Flow2)") # Geom specs from HX Datasheet Nc = int((553 - 1)/2.) # (Number of plates -1)/2 Ac = 3.e-3 * 1.174 # Plate spacing * Plate width [m^2] de = 2 * 3.e-3 # 2*Plate spacing [m] # coolingparams = [0.207979, 0.640259, 0.397994] coolingparams = [0.07, 0.8, 0.4] # Compute OHTC df = mrun.getData() df['MeanU_h'] = df.apply(lambda row: ((row.Flow)*1.e-3+args.debit_alim/3600.) / (Ac * Nc), axis=1) df['MeanU_c'] = df.apply(lambda row: (row.debitbrut/3600.) 
/ ( Ac * Nc), axis=1) df['Ohtc'] = df.apply(lambda row: w.getOHTC(row.MeanU_h, row.MeanU_c, de, row.BP, row.Tout, row.BP, row.teb, coolingparams), axis=1) ax = plt.gca() df.plot(x='t', y='Ohtc', ax=ax, color='red', marker='o', alpha = .5, markevery=args.markevery) plt.xlabel(r't [s]') plt.ylabel(r'$W/m^2/K$') plt.grid(b=True) plt.title(mrun.getInsert().replace(r"_",r"\_") + ": Heat Exchange Coefficient") if args.show: plt.show() else: imagefile = args.input_file.replace(".txt", "-ohtc.png") plt.savefig(imagefile, dpi=300 ) print("save to %s" % imagefile) plt.close() pretreatment_keys = ["debitbrut", "Flow", "teb", "Tout", "PH", "PB", "Pt"] if "TAlimout" in mrun.getKeys(): pretreatment_keys.append("TAlimout") else: mrun.getMData().addData("TAlimout", "TAlimout = 0") # filter spikes # see: https://ocefpaf.github.io/python4oceanographers/blog/2015/03/16/outlier_detection/ if args.pre == 'filtered': for key in pretreatment_keys: mrun = datatools.filterpikes(mrun, key, inplace=True, threshold=threshold, twindows=twindows, debug=args.debug, show=args.show, input_file=args.input_file) print("Filtered pikes done") # smooth data Locally Weighted Linear Regression (Loess) # see: https://xavierbourretsicotte.github.io/loess.html( if args.pre == 'smoothed': for key in pretreatment_keys: mrun = datatools.smooth(mrun, key, inplace=True, tau=tau, debug=args.debug, show=args.show, input_file=args.input_file) print("smooth data done") display_T(args.input_file, f_extension, mrun, 'itsb', 'iTin', args.debit_alim, args.ohtc, args.dT, args.show, "-coolingloop.png", args.debug) display_Q(args.input_file, f_extension, mrun, args.debit_alim, args.ohtc, args.dT, args.show, "-Q.png") if args.command == 'find': # Compute Tin, Tsb df = mrun.getData() if not "FlowH" in df: df["FlowH"] = df.apply(lambda row: ((row.Flow)*1.e-3+(2*args.debit_alim)/3600.), axis=1) if not "Thi" in df: df['Thi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout, 2*args.debit_alim/3600., row.BP, 
row.TAlimout), axis=1) (opt, status) = find(df, optkeys, args.dT, args.ohtc, 100, 6000, args.algo, args.local, args.maxeval, args.stopval, select=args.error, site=args.site, debit_alim=args.debit_alim, debug=args.debug) if status < 0: print("Optimization %s failed with %d error: ", (args.algo, status) ) sys.exit(1) dT = args.dT h = args.ohtc for key in optkeys: if key == "dT": dT = opt["dT"] elif key == "h": h = opt["h"] # Get solution for optimum display_T(args.input_file, f_extension, mrun, 'ctsb', 'cTin', args.debit_alim, h, dT, args.show, "-T-find.png") display_Q(args.input_file, f_extension, mrun, args.debit_alim, h, dT, args.show, "-Q-find.png")
nilq/baby-python
python
"""Tests for pyquil's NumPy wavefunction simulator.

Covers targeted_einsum / targeted_tensordot gate application, the PyQVM
driver, bitstring sampling, Pauli expectation values, and cross-checks
against the reference simulator.  The tail of the file is adapted from
the Cirq project (see the in-line copyright notice).
"""
import itertools

import numpy as np
import pytest

from pyquil import Program
from pyquil.gate_matrices import QUANTUM_GATES as GATES
from pyquil.gates import *
from pyquil.numpy_simulator import targeted_einsum, NumpyWavefunctionSimulator, \
    all_bitstrings, targeted_tensordot, _term_expectation
from pyquil.paulis import sZ, sX
from pyquil.pyqvm import PyQVM
from pyquil.reference_simulator import ReferenceWavefunctionSimulator
from pyquil.tests.test_reference_wavefunction_simulator import _generate_random_program, \
    _generate_random_pauli


def test_H_einsum():
    h_mat = GATES['H']
    one_q_wfn = np.zeros((2,), dtype=np.complex128)
    one_q_wfn[0] = 1 + 0.j
    one_q_wfn = targeted_einsum(gate=h_mat, wf=one_q_wfn, wf_target_inds=[0])
    np.testing.assert_allclose(one_q_wfn, 1 / np.sqrt(2) * np.ones(2))


def test_H_tensordot():
    h_mat = GATES['H']
    one_q_wfn = np.zeros((2,), dtype=np.complex128)
    one_q_wfn[0] = 1 + 0.j
    one_q_wfn = targeted_tensordot(gate=h_mat, wf=one_q_wfn, wf_target_inds=[0])
    np.testing.assert_allclose(one_q_wfn, 1 / np.sqrt(2) * np.ones(2))


def test_wfn_ordering_einsum():
    h_mat = GATES['H']
    two_q_wfn = np.zeros((2, 2), dtype=np.complex128)
    two_q_wfn[0, 0] = 1 + 0.j
    two_q_wfn = targeted_einsum(gate=h_mat, wf=two_q_wfn, wf_target_inds=[0])
    np.testing.assert_allclose(two_q_wfn[:, 0], 1 / np.sqrt(2) * np.ones(2))


def test_wfn_ordering_tensordot():
    h_mat = GATES['H']
    two_q_wfn = np.zeros((2, 2), dtype=np.complex128)
    two_q_wfn[0, 0] = 1 + 0.j
    two_q_wfn = targeted_tensordot(gate=h_mat, wf=two_q_wfn, wf_target_inds=[0])
    np.testing.assert_allclose(two_q_wfn[:, 0], 1 / np.sqrt(2) * np.ones(2))


def test_einsum_simulator_H():
    prog = Program(H(0))
    qam = PyQVM(n_qubits=1, quantum_simulator_type=NumpyWavefunctionSimulator)
    qam.execute(prog)
    wf = qam.wf_simulator.wf
    np.testing.assert_allclose(wf, 1 / np.sqrt(2) * np.ones(2))


def test_einsum_simulator_1():
    prog = Program(H(0), CNOT(0, 1))
    qam = PyQVM(n_qubits=2, quantum_simulator_type=NumpyWavefunctionSimulator)
    qam.execute(prog)
    wf = qam.wf_simulator.wf
    np.testing.assert_allclose(wf, 1 / np.sqrt(2) * np.reshape([1, 0, 0, 1], (2, 2)))


def test_einsum_simulator_CNOT():
    prog = Program(X(0), CNOT(0, 1))
    qam = PyQVM(n_qubits=2, quantum_simulator_type=NumpyWavefunctionSimulator)
    qam.execute(prog)
    wf = qam.wf_simulator.wf
    np.testing.assert_allclose(wf, np.reshape([0, 0, 0, 1], (2, 2)))


def test_einsum_simulator_CCNOT():
    prog = Program(X(2), X(0), CCNOT(2, 1, 0))
    qam = PyQVM(n_qubits=3, quantum_simulator_type=NumpyWavefunctionSimulator)
    qam.execute(prog)
    wf = qam.wf_simulator.wf
    should_be = np.zeros((2, 2, 2))
    should_be[1, 0, 1] = 1
    np.testing.assert_allclose(wf, should_be)


def test_einsum_simulator_10q():
    prog = Program(H(0))
    for i in range(10 - 1):
        prog += CNOT(i, i + 1)
    qam = PyQVM(n_qubits=10, quantum_simulator_type=NumpyWavefunctionSimulator)
    qam.execute(prog)
    wf = qam.wf_simulator.wf
    should_be = np.zeros((2,) * 10)
    should_be[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] = 1 / np.sqrt(2)
    should_be[1, 1, 1, 1, 1, 1, 1, 1, 1, 1] = 1 / np.sqrt(2)
    np.testing.assert_allclose(wf, should_be)


def test_measure():
    qam = PyQVM(n_qubits=3, quantum_simulator_type=NumpyWavefunctionSimulator)
    qam.execute(Program(
        H(0),
        CNOT(0, 1),
        MEASURE(0, 63)
    ))
    measured_bit = qam.ram['ro'][-1]
    should_be = np.zeros((2, 2, 2))
    if measured_bit == 1:
        should_be[1, 1, 0] = 1
    else:
        should_be[0, 0, 0] = 1
    np.testing.assert_allclose(qam.wf_simulator.wf, should_be)


@pytest.fixture(params=list(range(3, 5)))
def n_qubits(request):
    return request.param


@pytest.fixture(params=[2, 50, 100])
def prog_length(request):
    return request.param


@pytest.fixture(params=[True, False])
def include_measures(request):
    return request.param


def test_vs_ref_simulator(n_qubits, prog_length, include_measures):
    # Seeding is only needed when measurement introduces randomness.
    if include_measures:
        seed = 52
    else:
        seed = None

    for _ in range(10):
        prog = _generate_random_program(n_qubits=n_qubits, length=prog_length,
                                        include_measures=include_measures)
        ref_qam = PyQVM(n_qubits=n_qubits, seed=seed,
                        quantum_simulator_type=ReferenceWavefunctionSimulator)
        ref_qam.execute(prog)
        ref_wf = ref_qam.wf_simulator.wf
        es_qam = PyQVM(n_qubits=n_qubits, seed=seed,
                       quantum_simulator_type=NumpyWavefunctionSimulator)
        es_qam.execute(prog)
        es_wf = es_qam.wf_simulator.wf
        # einsum has its wavefunction as a vector of shape (2, 2, 2, ...) where qubits are indexed
        # from left to right. We transpose then flatten.
        es_wf = es_wf.transpose().reshape(-1)

        np.testing.assert_allclose(ref_wf, es_wf, atol=1e-15)


def test_all_bitstrings():
    for n_bits in range(2, 10):
        bitstrings_ref = np.array(list(itertools.product((0, 1), repeat=n_bits)))
        bitstrings_new = all_bitstrings(n_bits)
        np.testing.assert_array_equal(bitstrings_ref, bitstrings_new)


def test_sample_bitstrings():
    prog = Program(H(0), H(1))
    qam = PyQVM(n_qubits=3, quantum_simulator_type=NumpyWavefunctionSimulator, seed=52)
    qam.execute(prog)
    bitstrings = qam.wf_simulator.sample_bitstrings(10000)
    assert bitstrings.shape == (10000, 3)
    np.testing.assert_allclose([0.5, 0.5, 0], np.mean(bitstrings, axis=0), rtol=1e-2)


def test_expectation_helper():
    n_qubits = 3
    # np.complex128 replaces the deprecated builtin alias np.complex
    # (removed in NumPy 1.24).
    wf = np.zeros(shape=((2,) * n_qubits), dtype=np.complex128)
    wf[0, 0, 0] = 1
    z0 = _term_expectation(wf, 0.4 * sZ(0))
    assert z0 == 0.4

    x0 = _term_expectation(wf, sX(2))
    assert x0 == 0


def test_expectation():
    wfn = NumpyWavefunctionSimulator(n_qubits=3)
    val = wfn.expectation(0.4 * sZ(0) + sX(2))
    assert val == 0.4


def test_expectation_vs_ref_qvm(qvm, n_qubits):
    for repeat_i in range(20):
        prog = _generate_random_program(n_qubits=n_qubits, length=10)
        operator = _generate_random_pauli(n_qubits=n_qubits, n_terms=5)
        print(prog)
        print(operator)

        ref_wf = ReferenceWavefunctionSimulator(n_qubits=n_qubits).do_program(prog)
        ref_exp = ref_wf.expectation(operator=operator)

        np_wf = NumpyWavefunctionSimulator(n_qubits=n_qubits).do_program(prog)
        np_exp = np_wf.expectation(operator=operator)
        np.testing.assert_allclose(ref_exp, np_exp, atol=1e-15)


# The following tests are lovingly copied with light modification from the Cirq project
# https://github.com/quantumlib/Cirq
#
# With the original copyright disclaimer:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def kron(*matrices: np.ndarray) -> np.ndarray:
    """Computes the kronecker product of a sequence of matrices.

    A *args version of lambda args: functools.reduce(np.kron, args).

    Args:
        *matrices: The matrices and controls to combine with the kronecker product.

    Returns:
        The resulting matrix.
    """
    product = np.eye(1)
    for m in matrices:
        product = np.kron(product, m)
    return np.array(product)


def test_einsum_matches_kron_then_dot():
    t = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    m = np.array([[2, 3], [5, 7]])
    i = np.eye(2)

    np.testing.assert_allclose(
        targeted_einsum(gate=m, wf=t.reshape((2, 2, 2)), wf_target_inds=[0]),
        np.dot(kron(m, i, i), t).reshape((2, 2, 2)),
        atol=1e-8)

    np.testing.assert_allclose(
        targeted_einsum(gate=m, wf=t.reshape((2, 2, 2)), wf_target_inds=[1]),
        np.dot(kron(i, m, i), t).reshape((2, 2, 2)),
        atol=1e-8)

    np.testing.assert_allclose(
        targeted_einsum(gate=m, wf=t.reshape((2, 2, 2)), wf_target_inds=[2]),
        np.dot(kron(i, i, m), t).reshape((2, 2, 2)),
        atol=1e-8)


def test_tensordot_matches_kron_then_dot():
    t = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    m = np.array([[2, 3], [5, 7]])
    i = np.eye(2)

    np.testing.assert_allclose(
        targeted_tensordot(m, t.reshape((2, 2, 2)), [0]),
        np.dot(kron(m, i, i), t).reshape((2, 2, 2)),
        atol=1e-8)

    np.testing.assert_allclose(
        targeted_tensordot(m, t.reshape((2, 2, 2)), [1]),
        np.dot(kron(i, m, i), t).reshape((2, 2, 2)),
        atol=1e-8)

    np.testing.assert_allclose(
        targeted_tensordot(m, t.reshape((2, 2, 2)), [2]),
        np.dot(kron(i, i, m), t).reshape((2, 2, 2)),
        atol=1e-8)


def test_einsum_reorders_matrices():
    t = np.eye(4).reshape((2, 2, 2, 2))
    m = np.array([
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 0, 1,
        0, 0, 1, 0,
    ]).reshape((2, 2, 2, 2))

    np.testing.assert_allclose(
        targeted_einsum(gate=m, wf=t, wf_target_inds=[0, 1]),
        m,
        atol=1e-8)

    np.testing.assert_allclose(
        targeted_einsum(gate=m, wf=t, wf_target_inds=[1, 0]),
        np.array([
            1, 0, 0, 0,
            0, 0, 0, 1,
            0, 0, 1, 0,
            0, 1, 0, 0,
        ]).reshape((2, 2, 2, 2)),
        atol=1e-8)


def test_tensordot_reorders_matrices():
    t = np.eye(4).reshape((2, 2, 2, 2))
    m = np.array([
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 0, 1,
        0, 0, 1, 0,
    ]).reshape((2, 2, 2, 2))

    np.testing.assert_allclose(
        targeted_tensordot(gate=m, wf=t, wf_target_inds=[0, 1]),
        m,
        atol=1e-8)

    np.testing.assert_allclose(
        targeted_tensordot(gate=m, wf=t, wf_target_inds=[1, 0]),
        np.array([
            1, 0, 0, 0,
            0, 0, 0, 1,
            0, 0, 1, 0,
            0, 1, 0, 0,
        ]).reshape((2, 2, 2, 2)),
        atol=1e-8)
nilq/baby-python
python
from future.utils import iteritems

from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestermover import mover_utils


# preparator plugin with RSE + no data motion
class RseDirectPreparator(PluginBase):
    """Preparator that relies on rule-based transfers to a resource-local RSE.

    Panda first makes a rule to transfer files to the RSE associated with
    the resource.  Once the files land there, the job status changes from
    assigned to activated.  Harvester then fetches the job and builds input
    file paths pointing straight at the PFNs in the storage, so the job
    reads its input directly from the RSE -- no extra data motion happens
    on the Harvester side.
    """

    # constructor
    def __init__(self, **kwarg):
        PluginBase.__init__(self, **kwarg)

    # check status: transfers are handled upstream, so staging is always complete
    def check_stage_in_status(self, jobspec):
        return True, ''

    # trigger preparation: nothing to do, the transfer rule already exists
    def trigger_preparation(self, jobspec):
        return True, ''

    # resolve input file paths
    def resolve_input_paths(self, jobspec):
        """Point every input file of the job at its PFN under basePath."""
        file_attrs = jobspec.get_input_file_attributes()
        # fill in the storage path for each LFN
        for lfn, attrs in iteritems(file_attrs):
            attrs['path'] = mover_utils.construct_file_path(self.basePath,
                                                            attrs['scope'],
                                                            lfn)
        jobspec.set_input_file_paths(file_attrs)
        return True, ''
nilq/baby-python
python
''' Created on Jun 6, 2012 @author: kristof ''' import time import datetime import general_settings from twython import Twython from klout import KloutInfluence import tweeql.extras.sentiment import tweeql.extras.sentiment.analysis from pkg_resources import resource_filename from dateutil import parser import itertools from pygeocoder import Geocoder import language import urllib from collections import defaultdict import ordereddict import gzip import math import re import os import pickle import sys import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as mdates import matplotlib.mlab as mlab import matplotlib.cbook as cbook import matplotlib.ticker as ticker from mpl_toolkits.axes_grid.anchored_artists import AnchoredText from nltk import word_tokenize, sent_tokenize, corpus DEBUG = True MAIN_KEYWORD = 'koffie' COMPETITOR1_KEYWORD = 'koffieapparaat' COMPETITOR2_KEYWORD = '' MAIN_ENTERPRISE = 'PhilipsNL' MAIN_LOCATION = 'Amsterdam' MAIN_LANGUAGE = 'nl' MAIN_COUNTRY = 'The Netherlands' MAIN_SCREEN_NAME_LIST = ['PhilipsNL', 'PhilipsCare_NL'] MAIL_TO_LIST = ['kristof.leroux@gmail.com'] SEARCH_PAGES = 10 SEARCH_RPP = 5000 #REPORT1 import report report = report.Report() main_data = [] competitor1_data = [] competitor2_data = [] tweet_list = [] tweet_list2 = [] tweet_list3 = [] #init sentiment analysis classifier = None classinfo = None analysis = tweeql.extras.sentiment.analysis fname = resource_filename(tweeql.extras.sentiment.__name__, 'sentiment.pkl.gz') fp = gzip.open(fname) classifier_dict = pickle.load(fp) fp.close() classifier = classifier_dict['classifier'] classinfo = { classifier_dict['pos_label'] : { 'cutoff': classifier_dict['pos_cutoff'], 'value' : 1.0/classifier_dict['pos_recall'] }, classifier_dict['neg_label'] : { 'cutoff': classifier_dict['neg_cutoff'], 'value': -1.0/classifier_dict['neg_recall'] } } def sentiment(text): global classinfo words = analysis.words_in_tweet(text) features = analysis.word_feats(words) 
dist = classifier.prob_classify(features) retval = 0 maxlabel = dist.max() classinf = classinfo[maxlabel] if dist.prob(maxlabel) > classinf['cutoff']: retval = classinf['value'] return retval # search keywords twitter = Twython(app_key=general_settings.CONSUMER_KEY, app_secret=general_settings.CONSUMER_SECRET, oauth_token=general_settings.ACCESS_TOKEN, oauth_token_secret=general_settings.ACCESS_SECRET) for i in (map(lambda x : x+1, range(SEARCH_PAGES))): try: print "Searching tweets page %i" % i # TODO: country language search_results = twitter.search(q=MAIN_KEYWORD, page=i, rpp=SEARCH_RPP) except: pass print "Indexing tweets page %i" % i for tweet in search_results["results"]: print tweet tweet_data = {} print "Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at']) #print tweet['text'].encode('utf-8'),"\n" tweet_data['text'] = tweet['text']#.encode('utf-8') tweet_data['username'] = tweet['from_user'] tweet_data['created_at'] = tweet['created_at'] #=================================================================== # klout = KloutInfluence(tweet['from_user'].encode('utf-8')) # try: # tweet_data['influence'] = klout.score() # tweet_data['influences'] = klout.influences() # tweet_data['influence_topics'] = klout.topics() # except: # tweet_data['influence'] = 0 # tweet_data['influence_topics'] = {} #=================================================================== tweet_data['influence'] = 0 tweet_data['sentiment'] = sentiment(tweet['text']) tweet_data['ws'] = 0 tweet_data['hour_string'] = "00:00" #geo if tweet['geo']: print tweet['geo'] tweet_data['geo'] = tweet['geo'] results = Geocoder.reverse_geocode(tweet_data['geo']['coordinates'][0], tweet_data['geo']['coordinates'][1]) tweet_data['country'] = results[0].country tweet_data['city'] = results[0].locality tweet_data['postalcode'] = results[0].postal_code print results[0] else: tweet_data['geo'] = None tweet_data['country'] = None #gender #avatar tweet_data['avatar'] = 
urllib.urlretrieve(tweet['profile_image_url_https']) #number, save and use #language #ld = language.LangDetect() #tweet_data['lang'] = ld.detect(tweet_data['text']) tweet_data['lang'] = tweet['iso_language_code'] print tweet_data['lang'] #filter out retweets if (MAIN_COUNTRY == tweet_data['country']) or (tweet_data['lang'] == MAIN_LANGUAGE) and (tweet_data['username'] not in MAIN_SCREEN_NAME_LIST) and (tweet_data['text'] not in tweet_list): main_data.append(tweet_data) if tweet_data['text'] not in tweet_list: tweet_list.append(tweet_data['text']) main_data = sorted(main_data, key=lambda k: k['created_at']) report.spike_keyword = MAIN_KEYWORD report.spike_location = MAIN_LOCATION for i in (map(lambda x : x+1, range(SEARCH_PAGES))): try: print "Searching tweets page %i" % i # TODO: country language search_results = twitter.search(q=COMPETITOR1_KEYWORD, page=i, rpp=SEARCH_RPP) except: pass print "Indexing tweets page %i" % i for tweet in search_results["results"]: print tweet tweet_data = {} print "Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at']) #print tweet['text'].encode('utf-8'),"\n" tweet_data['text'] = tweet['text'].encode('utf-8') tweet_data['username'] = tweet['from_user'] tweet_data['created_at'] = tweet['created_at'] #=================================================================== # klout = KloutInfluence(tweet['from_user'].encode('utf-8')) # try: # tweet_data['influence'] = klout.score() # tweet_data['influences'] = klout.influences() # tweet_data['influence_topics'] = klout.topics() # except: # tweet_data['influence'] = 0 # tweet_data['influence_topics'] = {} #=================================================================== tweet_data['influence'] = 0 tweet_data['sentiment'] = sentiment(tweet['text']) tweet_data['ws'] = 0 tweet_data['hour_string'] = "00:00" #geo if tweet['geo']: print tweet['geo'] tweet_data['geo'] = tweet['geo'] results = Geocoder.reverse_geocode(tweet_data['geo']['coordinates'][0], 
tweet_data['geo']['coordinates'][1]) tweet_data['country'] = results[0].country tweet_data['city'] = results[0].locality tweet_data['postalcode'] = results[0].postal_code print results[0] else: tweet_data['geo'] = None tweet_data['country'] = None #gender #avatar tweet_data['avatar'] = urllib.urlretrieve(tweet['profile_image_url_https']) #language #ld = language.LangDetect() #tweet_data['lang'] = ld.detect(tweet_data['text']) tweet_data['lang'] = tweet['iso_language_code'] print tweet_data['lang'] #filter out retweets if (MAIN_COUNTRY == tweet_data['country']) or (tweet_data['lang'] == MAIN_LANGUAGE) and (tweet_data['username'] not in MAIN_SCREEN_NAME_LIST) and (tweet_data['text'] not in tweet_list2): competitor1_data.append(tweet_data) if tweet_data['text'] not in tweet_list2: tweet_list2.append(tweet_data['text']) competitor1_data = sorted(competitor1_data, key=lambda k: k['created_at']) for i in (map(lambda x : x+1, range(SEARCH_PAGES))): try: print "Searching tweets page %i" % i # TODO: country language search_results = twitter.search(q=COMPETITOR2_KEYWORD, page=i, rpp=SEARCH_RPP) except: pass print "Indexing tweets page %i" % i for tweet in search_results["results"]: print tweet tweet_data = {} print "Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at']) #print tweet['text'].encode('utf-8'),"\n" tweet_data['text'] = tweet['text'].encode('utf-8') tweet_data['username'] = tweet['from_user'] tweet_data['created_at'] = tweet['created_at'] #=================================================================== # klout = KloutInfluence(tweet['from_user'].encode('utf-8')) # try: # tweet_data['influence'] = klout.score() # tweet_data['influences'] = klout.influences() # tweet_data['influence_topics'] = klout.topics() # except: # tweet_data['influence'] = 0 # tweet_data['influence_topics'] = {} #=================================================================== tweet_data['influence'] = 0 tweet_data['sentiment'] = sentiment(tweet['text']) 
tweet_data['ws'] = 0 tweet_data['hour_string'] = "00:00" #geo if tweet['geo']: print tweet['geo'] tweet_data['geo'] = tweet['geo'] results = Geocoder.reverse_geocode(tweet_data['geo']['coordinates'][0], tweet_data['geo']['coordinates'][1]) tweet_data['country'] = results[0].country tweet_data['city'] = results[0].locality tweet_data['postalcode'] = results[0].postal_code #print results[0] else: tweet_data['geo'] = None tweet_data['country'] = None #gender #avatar tweet_data['avatar'] = urllib.urlretrieve(tweet['profile_image_url_https']) #language #ld = language.LangDetect() #tweet_data['lang'] = ld.detect(tweet_data['text']) tweet_data['lang'] = tweet['iso_language_code'] print tweet_data['lang'] #filter out retweets if (MAIN_COUNTRY == tweet_data['country']) or (tweet_data['lang'] == MAIN_LANGUAGE) and (tweet_data['username'] not in MAIN_SCREEN_NAME_LIST) and (tweet_data['text'] not in tweet_list3): competitor1_data.append(tweet_data) if tweet_data['text'] not in tweet_list3: tweet_list3.append(tweet_data['text']) competitor2_data = sorted(competitor2_data, key=lambda k: k['created_at']) print "Calculating cumulative volumes... comp2" x= [] y = [] volume = -1 for tweet_data in competitor2_data: d = parser.parse(tweet_data['created_at']).hour #daily or hourly tweet_data['hour_string'] = str(parser.parse(tweet_data['created_at']).hour) + ":" + str(parser.parse(tweet_data['created_at']).minute) if not d in x: if volume != -1: y.append(volume) volume = 0 x.append(d) volume += 1 y.append(volume) print x print y volumegraph3 = tuple(y) print "Calculating cumulative volumes... 
comp1" x= [] y = [] volume = -1 for tweet_data in competitor1_data: d = parser.parse(tweet_data['created_at']).hour #daily or hourly tweet_data['hour_string'] = str(parser.parse(tweet_data['created_at']).hour) + ":" + str(parser.parse(tweet_data['created_at']).minute) if not d in x: if volume != -1: y.append(volume) volume = 0 x.append(d) volume += 1 y.append(volume) print x print y volumegraph2 = tuple(y) print "Calculating cumulative volumes..." x= [] y = [] volume = -1 for tweet_data in main_data: d = parser.parse(tweet_data['created_at']).hour #daily or hourly tweet_data['hour_string'] = str(parser.parse(tweet_data['created_at']).hour) + ":" + str(parser.parse(tweet_data['created_at']).minute) if not d in x: if volume != -1: y.append(volume) volume = 0 x.append(d) volume += 1 y.append(volume) print x print y volumegraph1 = tuple(y) report.volumekeywords = [MAIN_KEYWORD, COMPETITOR1_KEYWORD, COMPETITOR2_KEYWORD] report.volumebegintime = str(parser.parse(main_data[0]['created_at']).hour) + ":" + str(parser.parse(main_data[0]['created_at']).minute) report.volumeendtime = str(parser.parse(main_data[-1]['created_at']).hour) + ":" + str(parser.parse(main_data[-1]['created_at']).minute) report.volumegraphs = [volumegraph1, volumegraph2, volumegraph3] print "Calculating the freq times..." def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) return itertools.izip(a, b) times = [item['created_at'] for item in main_data] sum_deltas = 0 count_deltas = 1 for (t0, t1) in pairwise(times): sum_deltas += (parser.parse(t1) - parser.parse(t0)).seconds #seconds, minutes, hours #print t0, t1, (sum_deltas) / count_deltas count_deltas += 1 delta_time = (sum_deltas) / count_deltas print(delta_time) #minutes or seconds ? report.freq_time = delta_time print "Calculating the delta's of Volume..." 
comb_list = itertools.combinations(y, 2) max_volume_delta = 0 max_volume_sign = 1 max_volume_s0 = 1 max_volume_s1 = 0 for comb in comb_list: delta = abs(comb[1] - comb[0]) if delta: sign = (comb[1] - comb[0]) / abs(comb[1] - comb[0]) else: sign = 1 if delta > max_volume_delta: max_volume_delta = delta max_volume_sign = sign if (comb[0] < comb[1]): max_volume_s0 = comb[0] max_volume_s1 = comb[1] else: max_volume_s0 = comb[1] max_volume_s1 = comb[0] max_volume_percentage = (max_volume_delta / max_volume_s0) * 100 print max_volume_s0, max_volume_s1 print "Creating sentiment plot..." x= [] y = [] sentiment = -100 counter = 0 for tweet_data in main_data: d = parser.parse(tweet_data['created_at']).hour if not d in x: if sentiment > -100: y.append((sentiment/counter)) sentiment = 0 counter = 0 x.append(d) sentiment += tweet_data['sentiment'] counter += 1 y.append(sentiment/counter) print x print y report.sentimentgraph = tuple(y) print "Calculating the delta's of sentiment..." comb_list = itertools.combinations(y, 2) max_sentiment_delta = 0 max_sentiment_sign = 1 max_sentiment_s0 = 1 max_sentiment_s1 = 0 for comb in comb_list: delta = abs(comb[1] - comb[0]) if delta: sign = (comb[1] - comb[0]) / abs(comb[1] - comb[0]) else: sign= 1 if delta > max_sentiment_delta: max_sentiment_delta = delta max_sentiment_sign = sign if comb[0] < comb[1]: max_sentiment_s0 = comb[0] max_sentiment_s1 = comb[1] else: max_sentiment_s0 = comb[1] max_sentiment_s1 = comb[0] max_sentiment_percentage = (max_sentiment_delta / max_sentiment_s0) * 100 print max_sentiment_s0, max_sentiment_s1 if max_volume_percentage > max_sentiment_percentage: report.spike_percentage = max_volume_sign * max_volume_percentage else: report.spike_percentage = max_sentiment_sign * max_sentiment_percentage report.mentions_percentage = max_volume_percentage report.sentiment_percentage = max_sentiment_percentage '''years = mdates.YearLocator() # every year months = mdates.MonthLocator() # every month days = 
mdates.DayLocator() hours = mdates.HourLocator(interval=2) fmt = mdates.DateFormatter('%d %b %Y') fig = plt.figure() ax = fig.add_subplot(111) # format the ticks ax.xaxis.set_major_locator(days) ax.xaxis.set_major_formatter(fmt) ax.xaxis.set_minor_locator(hours) datemin = min(x) datemax = max(x) ax.set_xlim(datemin, datemax) ax.set_ylim(0, max(y)) ax.format_xdata = mdates.DateFormatter('%a, %d %b %Y %H:%M:%S %z') ax.format_ydata = '$%1.2f' ax.grid(True) ax.plot(x, y) ''' a = np.diff(np.sign(np.diff(y))).nonzero()[0] + 1 # local min+max b = (np.diff(np.sign(np.diff(y))) > 0).nonzero()[0] + 1 # local min c = (np.diff(np.sign(np.diff(y))) < 0).nonzero()[0] + 1 # local max xmins = [x[i] for i in b] ymins = [y[i] for i in b] xmaxs = [x[i] for i in c] ymaxs = [y[i] for i in c] print xmins print ymins print xmaxs print ymaxs report.optima = zip(xmins, ymins) report.optima.extend(zip(xmaxs, xmins)) ''' if b.any(): ax.plot(xmins, ymins, "o", label="min") if c.any(): ax.plot(xmaxs, ymaxs, "o", label="max") plt.legend() ''' '''# rotates and right aligns the x labels, and moves the bottom of the # axes up to make room for them fig.autofmt_xdate()''' #plt.show() print "Calculating weighted scores..." for xmin, xmax in map(None, xmins, xmaxs): for tweet_data in main_data: if parser.parse(tweet_data['created_at']).hour == xmax: tweet_data['ws'] = 30 * tweet_data['sentiment'] + 1 * tweet_data['influence'] + 1000 * (xmaxs.index(xmax) + 1) if parser.parse(tweet_data['created_at']).hour == xmin: tweet_data['ws'] = -30 * tweet_data['sentiment'] - 1 * tweet_data['influence'] - 1000 * (xmins.index(xmin) + 1) conversationlist = [] #TODO: generalize for more clusters # calculate top 5 of ws in different maxima regions print "Creating clusters of local optima..." cluster1 = [] cluster2 = [] cluster3 = [] cluster4 = [] for tweet_data in main_data: ws = tweet_data['ws'] #todo: check for more clusters? 
if ws > 1999: cluster1.append(tweet_data) if ws > 999 and ws < 1190: cluster2.append(tweet_data) if ws < -1001 and ws > -1191: cluster3.append(tweet_data) if ws < -1189: cluster4.append(tweet_data) print "Sort clusters..." #todo: check is reverse or not sorted_cluster1 = sorted(cluster1, key=lambda k: k['ws'], reverse=True) sorted_cluster2 = sorted(cluster2, key=lambda k: k['ws'], reverse=True) sorted_cluster3 = sorted(cluster3, key=lambda k: k['ws']) sorted_cluster4 = sorted(cluster4, key=lambda k: k['ws']) print sorted_cluster1 print sorted_cluster2 print sorted_cluster3 print sorted_cluster4 #todo get conversation! get original tweet id print "Creating conversation list..." conversationlist.extend(sorted_cluster1[:3]) conversationlist.extend(sorted_cluster2[:3]) conversationlist.extend(sorted_cluster3[:3]) conversationlist.extend(sorted_cluster4[:3]) conversations = sorted(conversationlist, key=lambda k: k['created_at']) for conv in conversations: print "%s (%s): %s (sent: %f) (klout: %f)" % (conv['username'], conv['created_at'], conv['text'], conv['sentiment'], conv['influence']) report.conversationlist = conversations sorted_sentiment = sorted(main_data, key=lambda k: k['sentiment']) sorted_negative = sorted_sentiment[:5] sorted_positive = sorted_sentiment[-6:-1] report.top5positive = sorted_positive report.top5negative = sorted_negative print "Top 5 Positive:" for conv in sorted_positive: print "%s (%s): %s (sent: %f) (klout: %f)" % (conv['username'], conv['created_at'], conv['text'], conv['sentiment'], conv['influence']) print "Top 5 Negative:" for conv in sorted_negative: print "%s (%s): %s (sent: %f) (klout: %f)" % (conv['username'], conv['created_at'], conv['text'], conv['sentiment'], conv['influence']) word_cloud = {} key_infl = {} word_sent = {} word_klout = {} c = 0 #word cloud #TODO stop words and stem #TODO calculate KLOUT, partnership with KLOUT ??? 
for tweet in main_data: #for word in word_tokenize(tweet['text']): for word in tweet['text'].split(): word = word.lower() if len(word) > 5 and word not in corpus.stopwords.words('dutch') and word[0] != '@' and re.match("^[A-Za-z0-9_-]*(\#)*[A-Za-z0-9_-]*$", word): print word if word_cloud.has_key(word): word_cloud[word] += tweet['sentiment'] else: word_cloud[word] = tweet['sentiment'] key_infl[word] = tweet['username'] if word_sent.has_key(word): word_sent[word].append(tweet['sentiment']) else: word_sent[word] = list() word_sent[word].append(tweet['sentiment']) if not word_klout.has_key(word): try: klout = KloutInfluence(tweet['username'].encode('utf-8')) word_klout[word] = klout.score() except: word_klout[word] = -1 c += 1 if DEBUG: if c > 100: break report.word_cloud = sorted(word_cloud.items(), key=lambda k:k[1], reverse=True) report.key_infl = key_infl report.word_sent = word_sent report.word_klout = sorted(word_klout.items(), key=lambda k:k[1], reverse = True) report.create(MAIN_ENTERPRISE)
nilq/baby-python
python
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))

from src.Utils.Point import Point
from src.Utils.Vector import Vector
from src.Utils.LinearEquation import LinearEquation
import math

"""
This module regroups a lot of class definition that are basic encapsulations
of other concepts.
Found here so far :
- Player (Abstract definition of Defender and Opponent)
- Defender
- Opponent
- Shot
- Goal
"""


class Player:
    """
    This class represents any player on the field. It is abstract (eventhough
    it doesn't mean much in Python) and therefore has no reason to be
    instantiated.
    """

    def __init__(self, pos, radius):
        """
        Constructs a new 'Player' object.

        :param pos: The initial position of the player.
        :param radius: The radius of the player.
        :return: returns nothing.
        """
        self.pos = pos
        self.radius = radius

    def __str__(self):
        """
        Allows the use of print(p) where p is a 'Player' object.

        :return: The corresponding string.
        """
        return "Pos: " + str(self.pos) + " Radius: " + str(self.radius)

    def collision(self, player, distance=None):
        """
        Checks if this player and the given one collide. It can also be used
        to check if they are a certain distance apart with the optional
        parameter.

        :param player: The other player to check collisions with.
        :param distance (opt): The distance to have between the two robots to
                               not have a collision (defaults to twice this
                               player's radius).
        :return: True if there is a collision, False otherwise.
        """
        if distance is None:
            distance = 2 * self.radius
        return self.pos.distance(player.pos) < distance

    def in_zone(self, bottom_left, top_right):
        """
        Checks if this player is in a rectangular zone defined by its bottom
        left point and top right point.

        :param bottom_left: The bottom left point of the zone.
        :param top_right: The top right point of the zone.
        :return: True if the player is in the zone, False otherwise.
        """
        # BUGFIX: the previous version tested
        #   bottom_left.x <= x <= bottom_left.x and top_right.y <= y <= top_right.y
        # i.e. it compared x against bottom_left.x twice and y against
        # top_right.y twice, so almost no point ever qualified. A point is in
        # the rectangle iff each coordinate lies between the corresponding
        # bottom-left and top-right coordinates.
        return (bottom_left.x <= self.pos.x <= top_right.x
                and bottom_left.y <= self.pos.y <= top_right.y)


class Defender(Player):
    """
    This class represents a defender on the field. This is basically renaming
    what a player is which is about renaming what a point is. Although, in
    this case a radius needs to be specified.
    """

    def __init__(self, pos, radius):
        """
        Constructs a new 'Defender' object.

        :param pos: The initial position of the defender.
        :param radius: The radius of the defender.
        :return: returns nothing.
        """
        super().__init__(pos, radius)

    def is_valid_pos(self, pos_step):
        """
        Check if the position of this player is valid regarding the given
        step between two positions.

        :param pos_step: The distance between two positions next to each
                         other in all four cardinal directions.
        :return: True if the position is valid, False otherwise.
        """
        # Valid positions lie exactly on the grid defined by pos_step.
        return not (self.pos.x % pos_step or self.pos.y % pos_step)


class Opponent(Player):
    """
    This class represents an opponent on the field. This is basically renaming
    what a player is which is about renaming what a point is. Although, in
    this case a radius doesn't need to be specified.
    """

    def __init__(self, pos, radius=0):
        """
        Constructs a new 'Opponent' object.

        :param pos: The initial position of the opponent.
        :param radius (opt): The radius of the opponent.
        :return: returns nothing.
        """
        super().__init__(pos, radius)


class Shot:
    """
    This class represents what a shot is, which is an opponent and an angle.
    """

    def __init__(self, opponent, angle):
        """
        Constructs a new 'Shot' object.

        :param opponent: The opponent that is taking the shot.
        :param angle: The angle at which the opponent is shooting, with
                      regard to the origin of the field (in the center).
        :return: returns nothing.
        """
        self.opponent = opponent
        self.angle = angle

    def __str__(self):
        """
        Allows the use of print(s) where s is a 'Shot' object.

        :return: The corresponding string.
        """
        return "Opponent: " + str(self.opponent) + " Angle: " + str(self.angle)

    def is_valid_angle(self, theta_step):
        """
        Check if the angle of this shot is valid regarding the given step
        between two angles.

        :param theta_step: The angle between two consecutive angles.
        :return: True if the angle is valid, False otherwise.
        """
        return not (self.angle % theta_step)


class Goal:
    """
    This class represents a Goal. A goal is a defined by two points (to form
    a segment) and a vector that defines the orientation of the goal (where
    you can score from).
    """

    def __init__(self, start_pos, end_pos, direction):
        """
        Creates a new 'Goal' object.

        :param start_pos: The starting point of the segment.
        :param end_pos: The ending point of the segment.
        :param direction: The orientation of the goal.
        :return: returns nothing.
        """
        self.s_pos = start_pos
        self.e_pos = end_pos
        self.dir = direction

    def __str__(self):
        """
        Allows the use of print(g) where g is a 'Goal' object.

        :return: The corresponding string.
        """
        return ("Pos 1: " + str(self.s_pos) + " Pos 2: " + str(self.e_pos)
                + " Dir: " + str(self.dir))

    def is_in_interval(self, low, high, value):
        """
        Check if the given value in in the interval [low ; high].
        Useful method to make the code easier to read.

        :param low: Low bound of the interval.
        :param high: High bound of the interval.
        :param value: The value to check.
        :return: True if value is in the interval, false otherwise.
        """
        return low <= value and value <= high

    def check_position(self, player):
        """
        Checks if the given player is correctly placed with regard to the
        orientation of the goal. If the player is 'behind' the goal, then it
        is not correctly placed, if it is in front of the goal, then it is.

        This is done by checking the angle formed between the direction
        vector of the goal and the vector going from the center of the goal
        to the player. This angle must be in [-pi/2 ; pi/2] if the player is
        correctly placed.

        :param player: The player to consider.
        :return: True if the player is correctly placed, False otherwise.
        """
        # Mid point of the segment defining the goal
        mid = Point.mid_point(self.s_pos, self.e_pos)

        # Transposition of this point by the direction vector of the goal
        # to get the direction vector with its origin in the center of the goal
        mid_prime = self.dir + mid

        # Creating both needed vectors
        v1 = Vector.v_from_pp(mid, player.pos)
        v2 = Vector.v_from_pp(mid, mid_prime)

        # Getting the angle and checking if it is a valid one
        angle = v1.angle(v2)
        return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)

    def check_shot_direction(self, shot):
        """
        Checks if the given shot goes towards this goal, i.e. towards the
        half-plane defined by the goal's (infinite) line.

        The scalar product of the shot's direction vector and the goal's
        direction is checked: they are supposed to be going in opposite
        directions, therefore the scalar product must be negative.

        :param shot: The shot to consider.
        :return: True if the shot goes towards the goal (if it was infinite),
                 False otherwise.
        """
        return Vector.v_from_a(shot.angle) * self.dir < 0

    def check_shot_on_target(self, shot):
        """
        Checks if the shot (abstracted to an infinite line) intersects the
        goal's segment. Finds the intersection point between the shot's
        linear equation and the goal's linear equation, then checks if this
        point is within the goal's segment.

        :param shot: The shot to consider.
        :return: True if the shot intersects the goal's segment, False
                 otherwise.
        """
        # x and y intervals of the goal's segment
        x_min = min(self.s_pos.x, self.e_pos.x)
        x_max = max(self.s_pos.x, self.e_pos.x)
        y_min = min(self.s_pos.y, self.e_pos.y)
        y_max = max(self.s_pos.y, self.e_pos.y)

        # Shortening variables names
        o_x = shot.opponent.pos.x
        o_y = shot.opponent.pos.y

        # angle = +-pi/2 -> tan(angle) undefined; the shot is vertical, so it
        # is valid iff the opponent's x coordinate is in the goal's x interval
        if abs(shot.angle) == math.pi / 2:
            return self.is_in_interval(x_min, x_max, o_x)

        # angle = 0 or +-pi -> tan(angle) = 0, which can lead to undefined
        # intersection points (e.g. vertical goal); the shot is horizontal,
        # so it is valid iff the opponent's y coordinate is in the goal's
        # y interval
        if abs(shot.angle) == math.pi or shot.angle == 0:
            return self.is_in_interval(y_min, y_max, o_y)

        # Using tan the least amount of time possible (slow function)
        tan_theta = math.tan(shot.angle)

        # Linear equation of the shot
        le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)

        if self.e_pos.x - self.s_pos.x == 0:
            # Vertical goal: the (non-vertical) shot necessarily crosses the
            # goal's x coordinate; valid iff the resulting y is in the
            # segment's y interval
            y = le1.apply(self.e_pos.x)
            return self.is_in_interval(y_min, y_max, y)
        else:
            # Coefficient of the goal's linear equation
            ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)

            # Parallel lines (same coefficient) never intersect
            if tan_theta == ratio:
                return False

            le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)

            # There should always be an intersection given the checks above;
            # the None test is for completion sake
            p_intersect = le1.intersection(le2)
            if p_intersect is None:
                return False
            return self.is_in_interval(x_min, x_max, p_intersect.x)

    def is_shot_valid(self, shot):
        """
        Checks if a shot is valid (going in the goal) or not. Three things
        are checked:
        1 -> Is the player in front of the goal (not behind)?
        2 -> Is the shot going towards the half plane defined by the goal?
        3 -> Does the line defined by the shot intersect the goal's segment?

        (2) prevents a shot away from the goal being accepted because (3)
        only considers an infinite line, and (1) prevents shots from behind
        the goal.

        :param shot: The shot to check.
        :return: True if the shot is valid, False otherwise.
        """
        a = self.check_position(shot.opponent)
        b = self.check_shot_direction(shot)
        c = self.check_shot_on_target(shot)
        return a and b and c

    def shot_intercepted(self, defender, shot):
        """
        Checks if the given shot is intercepted by the given player with
        regard to this goal. Checks if the circle defined by the player and
        its radius intersects the shot, and that the intersection lies
        between the opponent and the goal.

        :param defender: The defender that should intercept the shot.
        :param shot: The shot to intercept.
        :return: True if the shot is intercepted, False otherwise.
        """
        o_x = shot.opponent.pos.x
        o_y = shot.opponent.pos.y

        # Intersection of the shot with the defender's circle
        p = LinearEquation.intersection_circle(shot.opponent, shot.angle,
                                               defender.pos, defender.radius)
        if p is None:
            return False

        # Vertical goal: solving the intersection won't work, done "by hand"
        if self.e_pos.x - self.s_pos.x == 0:
            # Vertical goal and vertical shot -> no interception
            if abs(shot.angle) == math.pi / 2:
                return False

            # Horizontal shot: valid iff the x coordinate of the
            # defender/shot intersection is between the goal and the opponent
            if abs(shot.angle) == math.pi or shot.angle == 0:
                q = Point(self.e_pos.x, o_y)
                return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)

            tan_theta = math.tan(shot.angle)
            le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)
            q = Point(self.e_pos.x, le2.apply(self.e_pos.x))
            return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)

        # Non-vertical goal: coefficient of the goal's linear equation
        ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)

        # Shot parallel to the goal: should not happen for valid shots,
        # kept for completion sake
        if math.tan(shot.angle) == ratio:
            return False

        # Linear equation of the goal
        le1 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)

        # Vertical shot: valid iff the y coordinate of the defender/shot
        # intersection is between the goal and the opponent
        if abs(shot.angle) == math.pi / 2:
            q = Point(o_x, le1.apply(o_x))
            return self.is_in_interval(min(q.y, o_y), max(q.y, o_y), p.y)

        # Horizontal shot: valid iff the x coordinate of the defender/shot
        # intersection is between the goal and the opponent
        if abs(shot.angle) == math.pi or shot.angle == 0:
            q = Point(le1.reverse(o_y), o_y)
            return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)

        tan_theta = math.tan(shot.angle)

        # Linear equation of the shot
        le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)

        # Intersection of the two lines; the defender must be between this
        # point and the opponent
        q = le1.intersection(le2)
        return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)

    def shot_intercepted_with_speed(self, defender, shot, ball_speed, player_speed):
        """
        Checks if the given defender intercepts the given shot wrt this goal.
        This method also takes into account that the defender can move.

        :param defender: The defender that should intercept the shot.
        :param shot: The shot to intercept.
        :param ball_speed: The speed of the ball along the shot.
        :param player_speed: The speed at which the defender can move.
        :return: True if the shot is intercepted, False otherwise.
        """
        o_x = shot.opponent.pos.x
        o_y = shot.opponent.pos.y
        le2 = None

        # Point of the shot's line closest to the defender
        if abs(shot.angle) == math.pi / 2:
            # Vertical shot: tan(angle) is undefined, the closest point is
            # directly above/below the defender on the shot's vertical line
            p_inter = Point(shot.opponent.pos.x, defender.pos.y)
        else:
            tan_theta = math.tan(shot.angle)

            # Linear equation of the shot
            le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)

            # Intersection of the shot and the shortest (perpendicular) line
            # from the defender to the shot
            p_inter = LinearEquation.perpendicular_intersection_point_line(le2, defender.pos)

        # Distances between p_inter and the defender/opponent: the defender
        # must reach this point before the ball does
        d_opponent = p_inter.distance(shot.opponent.pos)
        d_defender = p_inter.distance(defender.pos)
        if not (d_defender / player_speed <= d_opponent / ball_speed):
            return False

        # Vertical goal: solving the intersection won't work, done "by hand"
        if self.e_pos.x - self.s_pos.x == 0:
            return self.is_in_interval(min(self.e_pos.x, o_x),
                                       max(self.e_pos.x, o_x), p_inter.x)

        # Non-vertical goal: coefficient of the goal's linear equation
        ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)

        # Shot parallel to the goal: kept for completion sake
        if math.tan(shot.angle) == ratio:
            return False

        # Linear equation of the goal
        le1 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)

        # BUGFIX: for a vertical shot le2 is None, so the previous
        # unconditional le1.intersection(le2) could not work. Handle the
        # vertical case like shot_intercepted does, comparing y coordinates.
        if abs(shot.angle) == math.pi / 2:
            q = Point(o_x, le1.apply(o_x))
            return self.is_in_interval(min(q.y, o_y), max(q.y, o_y), p_inter.y)

        # Intersection of the two lines; the defender's closest point must be
        # between this point and the opponent
        q = le1.intersection(le2)
        return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p_inter.x)
nilq/baby-python
python
#!/usr/bin/env python
import glob
import yaml
import sys
import argparse
import cparser
import generator
import json
from clang.cindex import Index, CursorKind, Config
import codecs
import re
import os
from typing import List, Dict

# Cache of file path -> decoded file content, so each source file is read once.
file_cache = {}

# Doxygen-style markup rules: (regex, tag). parse_description() splits a
# description string on the first matching rule into
# [prefix, {tag: match}, postfix] and recurses.
rules = [
    [r'@c\s+(\w+)', 'inlinecode'],
    [r'\s*[@\\]code(.*?)\s+[@\\]endcode\s*', 'blockcode'],
    [r'\\f\\\((.*?)\\f\\\)', 'inlinemath'],
    [r'\\f\$(.*?)\\f\$', 'inlinemath'],
    [r'\s*\\f\[(.*?)\\f\]\s*', 'blockmath'],
    [r'@param\s+(\w+)', 'param'],
]


def parse_description(s):
    """Recursively split a raw comment string into a list of plain-text
    fragments and {tag: content} dicts according to `rules`."""
    if isinstance(s, str):
        for rule in rules:
            m = re.search(rule[0], s, re.MULTILINE | re.DOTALL)
            if m is not None:
                prefix = s[:m.start()]
                match = remove_padding(m.group(1)).strip()
                postfix = s[m.end():]
                return parse_description([prefix, {rule[1]: match}, postfix])
        return s
    elif isinstance(s, list):
        # Flatten: recurse into strings, drop empty fragments, keep dicts.
        r = []
        for ss in s:
            if isinstance(ss, str):
                rr = parse_description(ss)
                if isinstance(rr, str):
                    if len(rr) > 0:
                        r.append(rr)
                else:
                    r.extend(rr)
            else:
                r.append(ss)
        return r
    else:
        return s


def clean_text(str):
    """Normalize tabs and CR characters out of a string."""
    str = str.replace('\t', '    ')
    str = str.replace('\r', '')
    return str


def remove_padding(s):
    """Strip the common leading-space padding from all non-empty lines of s."""
    lines = s.splitlines()
    minpadding = 100
    for l in lines:
        if len(l) > 0:
            minpadding = min(minpadding, len(l) - len(l.lstrip(' ')))
    if minpadding == 100:
        return s
    lines = [l[minpadding:] for l in lines]
    return '\n'.join(lines)


def get_location(node):
    """Return the file name a cursor is located in, or '' when unknown."""
    if node is None:
        return ''
    if node.location is None:
        return ''
    if node.location.file is None:
        return ''
    return node.location.file.name


def get_location_line(node):
    """Return the 1-based line of a cursor's location, or -1 when unknown."""
    if node is None:
        return -1
    if node.location is None:
        return -1
    return node.location.line


def get_source(cursor):
    """Return the (de-padded) source text covered by a cursor's extent."""
    assert cursor.extent.start.file.name == cursor.extent.end.file.name
    filename = cursor.extent.start.file.name
    if filename not in file_cache:
        # Read each file once; close the handle instead of leaking it.
        with codecs.open(filename, 'r', encoding="utf-8") as f:
            file_cache[filename] = f.read()
    file_content = file_cache[filename].encode('utf-8')
    # Re-insert the start column as padding so remove_padding() can compute a
    # consistent common indent for the whole extent.
    text = ' ' * (cursor.extent.start.column - 1) + clean_text(
        file_content[cursor.extent.start.offset:cursor.extent.end.offset].decode('utf-8'))
    return remove_padding(text)


def clean_comment(s):
    """Strip the comment markers (///, ///<, /** ... */) from a raw comment."""
    s = s.strip()
    if s.startswith('///<'):
        return remove_padding(s[4:])
    elif s.startswith('///'):
        return remove_padding(re.sub(r'^\s*///', '', s, flags=re.MULTILINE))
    elif s.startswith('/**'):
        return remove_padding(re.sub(r'^\s*\*( |$)', '', s[3:-2], flags=re.MULTILINE))
    return s


def replace_macros(s: str, macros: Dict):
    """Replace every whole-word macro occurrence in s by its value."""
    for key, value in macros.items():
        s = re.sub(r'\b' + key + r'\b', value, s)
    return s


def same_location(x, y):
    """True when two location strings denote the same file."""
    return x == y


def class_name(node):
    """Return the class name of a cursor, prefixed by its template<...>
    parameter list when the class is a template."""
    template = []
    for c in node.get_children():
        if c.kind in [CursorKind.TEMPLATE_TYPE_PARAMETER,
                      CursorKind.TEMPLATE_NON_TYPE_PARAMETER]:
            template.append(get_source(c))
    if template:
        template = 'template <' + ', '.join(template) + '>'
    else:
        template = ''
    return template + node.spelling


def source_to_definition(source):
    """Truncate a source snippet at its first '{' to keep only the signature."""
    source = re.sub(r'^(.*?)\{.*', r'\1', source, flags=re.DOTALL).strip()
    return source


def parse_index(root_path, index: List[Dict], node, root_location, group: str,
                ns: str = '', macros=None):
    """Recursively walk the AST below `node`, appending one entity dict per
    documented declaration found in `root_location` to `index`.

    :param root_path: repository root used to relativize file paths.
    :param index: output list (or a class/enum 'content' sub-list).
    :param node: current libclang cursor.
    :param root_location: file name of the translation unit being indexed.
    :param group: documentation group name for the file.
    :param ns: fully qualified namespace accumulated so far.
    :param macros: {macro: replacement} applied to extracted signatures.
    """
    # Avoid a shared mutable default argument.
    macros = {} if macros is None else macros

    source = ''
    if node.brief_comment is not None:
        source = get_source(node)
    definition = source_to_definition(replace_macros(source, macros))
    entity: Dict = {}
    if node.kind in [CursorKind.FUNCTION_TEMPLATE, CursorKind.FUNCTION_DECL,
                     CursorKind.CXX_METHOD, CursorKind.CONSTRUCTOR,
                     CursorKind.DESTRUCTOR]:
        entity['type'] = 'function'
        entity['name'] = node.spelling
        entity['definition'] = definition
    elif node.kind in [CursorKind.CLASS_TEMPLATE, CursorKind.CLASS_DECL,
                       CursorKind.STRUCT_DECL,
                       CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION]:
        entity['type'] = 'class'
        entity['name'] = node.spelling
        entity['definition'] = class_name(node)
        entity['content'] = []
    elif node.kind in [CursorKind.ENUM_DECL]:
        entity['type'] = 'enum'
        entity['name'] = node.spelling
        entity['definition'] = definition
        entity['content'] = []
    elif node.kind in [CursorKind.ENUM_CONSTANT_DECL]:
        entity['type'] = 'enumerator'
        entity['name'] = node.spelling
        entity['definition'] = definition
    elif node.kind in [CursorKind.TYPEDEF_DECL, CursorKind.TYPE_ALIAS_DECL,
                       CursorKind.TYPE_ALIAS_TEMPLATE_DECL]:
        entity['type'] = 'typedef'
        entity['name'] = node.spelling
        entity['definition'] = re.sub(r'(^|\s+)using\s+', r'', definition)
    elif node.kind in [CursorKind.VAR_DECL, CursorKind.UNEXPOSED_DECL]:
        entity['type'] = 'variable'
        entity['name'] = node.spelling
        entity['definition'] = definition
    elif node.kind in [CursorKind.NAMESPACE]:
        entity['type'] = 'namespace'
        entity['name'] = node.displayname
        entity['definition'] = definition
        entity['source'] = definition + ' { ... }'
    elif node.kind in [CursorKind.USING_DECLARATION]:
        entity['type'] = 'function'
        entity['name'] = node.spelling
        entity['definition'] = definition
    else:
        print('warning: Unknown cursor kind: {} for {}'.format(
            node.kind, node.displayname))
        return
    entity['qualifiedname'] = re.sub('^::', '', ns + '::' + entity['name'])
    if 'source' not in entity:
        entity['source'] = source
    entity['file'] = os.path.relpath(
        get_location(node), root_path).replace('\\', '/')
    entity['line'] = get_location_line(node)

    # Guard against declarations with no attached comment at all:
    # raw_comment is None for them and clean_text(None) would raise.
    raw_comment = node.raw_comment if node.raw_comment is not None else ''
    description = clean_comment(clean_text(raw_comment))
    m = re.match(r'[@\\]copybrief\s+([a-zA-Z0-9:\._-]+)', description.strip())
    if m:
        copyFrom = m.group(1)
        print("Copying from {}".format(copyFrom))
        description = {"copy": copyFrom}
    else:
        description = re.sub(r'\s*@brief\s*', '', description)
        description = parse_description(description)
    entity['description'] = description
    index.append(entity)
    entity['group'] = group
    if 'content' in entity:
        index = entity['content']

    # BUGFIX: the original used `ns += ns + '::' + node.spelling`, which
    # doubles the accumulated namespace at every nesting level
    # (e.g. '::foo' became '::foo::foo::bar'). Append the spelling once.
    if node.kind == CursorKind.NAMESPACE:
        ns = ns + '::' + node.spelling
    if node.kind in [CursorKind.CLASS_TEMPLATE, CursorKind.CLASS_DECL,
                     CursorKind.STRUCT_DECL,
                     CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION]:
        ns = ns + '::' + node.spelling
    if node.kind in [CursorKind.ENUM_DECL]:
        ns = ns + '::' + node.spelling

    for c in node.get_children():
        if same_location(get_location(c), root_location):
            parse_index(root_path, index, c, root_location, group, ns, macros)


def parse(root_path, filenames: List[str], clang_args: List[str], macros=None):
    """Parse every file in `filenames` with libclang and return the list of
    extracted entity dicts."""
    macros = {} if macros is None else macros
    index = []
    for filename in filenames:
        print('Parsing ' + filename)
        group = ''
        with open(filename, 'r', encoding='utf-8') as strm:
            text = strm.read()
            m = re.search(r'@addtogroup\s+([a-zA-Z0-9_-]+)', text)
            if m:
                group = m.group(1)
        clangIndex = Index.create()
        tu = clangIndex.parse(None, [filename.replace('\\', '/')] + clang_args)
        if not tu:
            print('Unable to load input')
            exit(1)
        if len(tu.diagnostics):
            print('------------DIAGNOSTICS---------------')
            for diag in tu.diagnostics:
                print(diag)
            print('------------/DIAGNOSTICS---------------')
        count = len(index)
        parse_index(root_path, index, tu.cursor, tu.cursor.displayname,
                    group, '', macros)
        print('    Found {} entities'.format(len(index) - count))
    return index


if __name__ == '__main__':
    import subprocess
    parser = argparse.ArgumentParser(
        description='Parse C++ sources to generate index')
    parser.add_argument('config_path', help='path to configuration file (YML)')
    parser.add_argument('output_path',
                        help='path where generated index will be written (JSON)')
    parser.add_argument('--libclang', help='libclang path (.dll or .so)')
    parser.add_argument(
        '--git', help='Retrieve commit hash and branch', action='store_true')
    args = parser.parse_args()

    if args.libclang:
        Config.set_library_file(args.libclang)

    clang_args = []
    defaults = {'clang': {'arguments': []},
                'repository': '',
                'postprocessor': {'ignore': []},
                'masks': ['**/*.hpp', '**/*.cpp', '**/*.cxx', '**/*.hxx', '**/*.h'],
                'groups': {}}
    # SafeLoader: the config is data-only, and PyYAML >= 6 requires an
    # explicit Loader anyway. Close the file handle via `with`.
    with open(args.config_path, 'r', encoding='utf-8') as cfg_file:
        config = yaml.load(cfg_file, Loader=yaml.SafeLoader)
    print(config)
    config = {**defaults, **config}
    print('args.config_path: ', args.config_path)
    print('os.path.dirname(args.config_path): ', os.path.dirname(args.config_path))
    print('os.path.dirname(args.config_path): ',
          os.path.join(os.path.dirname(args.config_path), config['input_directory']))
    input_dir = os.path.normpath(os.path.join(os.path.dirname(
        args.config_path), config['input_directory'])) + os.path.sep
    print('Input directory:', input_dir)
    clang_args = config['clang']['arguments']
    print('Clang arguments:', clang_args)
    macros = config['postprocessor']['ignore']
    print('Ignore macros:', macros)

    git_tag = ''
    if args.git:
        git_tag = subprocess.check_output(
            ['git', 'describe', '--always', '--abbrev=0'], cwd=input_dir).strip()
        git_tag = codecs.decode(git_tag)
        print('GIT:')
        print(git_tag)

    file_masks = config['masks']
    filenames = []
    for mask in file_masks:
        filenames += glob.glob(input_dir + mask, recursive=True)
    print('Found', len(filenames), 'files')

    macros = {k: '' for k in macros}
    index = cparser.parse(input_dir, filenames, clang_args, macros)
    index = {'index': index,
             'git_tag': git_tag,
             'repository': config['repository'].replace('{TAG}', git_tag),
             'groups': config['groups']}
    with open(args.output_path, 'w', encoding='utf-8') as out_file:
        json.dump(index, out_file, indent=4)
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by yetongxue<me@xander-ye.com>
import socket

# Simple interactive TCP client: read a line from stdin, send it to the
# server, print the reply. Loops until the server closes the connection.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 8001))

try:
    while True:
        re_data = input()
        # sendall() guarantees the whole payload is transmitted; plain
        # send() may write only part of the buffer.
        client.sendall(re_data.encode('utf8'))
        data = client.recv(1024)
        if not data:
            # recv() returning b'' means the server closed the connection.
            break
        print(data.decode('utf8'))
finally:
    # Release the socket even when input() raises (EOF / Ctrl-C).
    client.close()
nilq/baby-python
python
from gym_nav.envs.nav_env import NavEnv from gym_nav.envs.multi_nav_env import MultiNavEnv
nilq/baby-python
python
from .get_data import get_all_data, get_data_from_api
from ..dict_as_attribute import DictAsObj as DictToObj


class Items:
    """Holds case statistics fetched once at class-definition (import) time.

    NOTE(review): the fetch and the merging below run as a side effect of
    importing this module; all instances share the resulting class-level
    dicts. Presumably COVID-style counts keyed by country/state name —
    confirm against get_data_from_api's payload.
    """

    # Per-category accumulators, filled from the API payload below.
    states = {}
    countries = {}
    total = {}

    # Raw payload; assumed to map section names ('Countries', 'States',
    # 'TOTAL') to dicts of entries — TODO confirm the API schema.
    data = get_data_from_api()
    for entity in data:
        if 'Countries' == entity:
            countries.update(data[entity])
        elif 'States' == entity:
            states.update(data[entity])
        elif 'TOTAL' == entity:
            total.update(data[entity])

    # Rebind `data` to a single flat lookup: countries and states merged,
    # plus the grand total under the key 'TOTAL'.
    data = {}
    data.update(countries)
    data.update(states)
    data.update({'Total'.upper(): total})

    def __init__(self, s):
        """Remember the lookup key (`s`) this instance answers for.

        :param s: key into the merged class-level `data` dict.
        """
        self.fullJSON = self.data
        self.caller = s


class Item(Items):
    """One entry of the merged data, exposing its counters as attributes."""

    def rtrn_item_json(self, name=None):
        # `name` is currently unused; lookup is driven by self.caller.
        # Raises KeyError when the key is not in the merged data.
        self.json = self.fullJSON[self.caller]

    def rtrn_data(self):
        # Populate confirmed/cases, deaths and recovered from the entry.
        self.rtrn_item_json()
        self._confirmed()
        self._deaths()
        self._recovered()

    def _confirmed(self):
        # 'cases' is kept as an alias of 'confirmed'.
        self.confirmed = self.json['Confirmed']
        self.cases = self.confirmed

    def _deaths(self):
        self.deaths = self.json['Deaths']

    def _recovered(self):
        self.recovered = self.json['Recovered']
nilq/baby-python
python
# Copyright 2017, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from scipy import stats
import math

# p-value below which two samples are considered statistically different.
_DEFAULT_THRESHOLD = 1e-10


def scale(a, mul):
    """Return a copy of sequence *a* with every element multiplied by *mul*."""
    return [x * mul for x in a]


def cmp(a, b):
    """Two-sample t-test between *a* and *b*.

    Returns the (statistic, pvalue) pair from scipy.stats.ttest_ind.
    NOTE: the name shadows the Python 2 builtin `cmp`; kept for
    backward compatibility with existing callers.
    """
    return stats.ttest_ind(a, b)


def speedup(new, old, threshold=_DEFAULT_THRESHOLD):
    """Estimate the percentage speedup of sample *new* over sample *old*.

    Searches for the largest scaling of *old* that is still statistically
    distinguishable from *new* (per the t-test and *threshold*), in whole
    percent steps. Returns a negative percentage when *new* is smaller
    (faster) than *old*, a positive one when it is larger, and 0 when the
    samples are statistically indistinguishable.
    """
    # Identical constant samples have no variance; the t-test would be
    # undefined, so report "no change" directly.
    if (len(set(new))) == 1 and new == old:
        return 0
    s0, p0 = cmp(new, old)
    if math.isnan(p0):
        return 0
    if s0 == 0:
        return 0
    if p0 > threshold:
        return 0
    if s0 < 0:
        # new < old: shrink old until it is no longer distinguishably larger.
        pct = 1
        while pct < 100:
            sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
            if sp > 0:
                break
            if pp > threshold:
                break
            pct += 1
        return -(pct - 1)
    else:
        # new > old: grow old until it is no longer distinguishably smaller.
        pct = 1
        while pct < 10000:
            sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
            if sp < 0:
                break
            if pp > threshold:
                break
            pct += 1
        return pct - 1


if __name__ == "__main__":
    new = [0.0, 0.0, 0.0, 0.0]
    old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
    # BUGFIX: these were Python 2 print statements, a SyntaxError under
    # Python 3; the parenthesized form works on both interpreters.
    print(speedup(new, old, 1e-5))
    print(speedup(old, new, 1e-5))
nilq/baby-python
python
#!/usr/bin/python3 """ Copyright 2018-2019 Firmin.Sun (fmsunyh@gmail.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from lxml.etree import Element, SubElement, tostring from xml.dom.minidom import parseString import os import cv2 class ImageInfo(object): def __init__(self,width,height,path,name,image_extension,channel=3): self.width = width self.height = height self.path = path self.name = name self.image_extension = image_extension self.channel = channel def save_image(self,out_path, image): # try: # image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR) # except Exception as ex: # print(out_path) # try: if out_path is not None: dir = os.path.dirname(out_path) if not os.path.exists(dir): os.makedirs(dir) cv2.imwrite(out_path, image) except Exception as ex: print(ex) def save_annotations(self,save_dir, boxes, labels): if not os.path.exists(save_dir): os.makedirs(save_dir) dom = self.make_xml( boxes, labels) xml_path = os.path.join(save_dir, self.name + '.xml') with open(xml_path, 'w+') as f: dom.writexml(f, addindent='', newl='', encoding='utf-8') def make_xml(self, boxes, labels): node_root = Element('annotation') node_folder = SubElement(node_root, 'folder') node_folder.text = 'JPEGImages' node_filename = SubElement(node_root, 'filename') node_filename.text = self.name + '.' 
+ self.image_extension node_path = SubElement(node_root, 'path') node_path.text = self.path node_size = SubElement(node_root, 'size') node_width = SubElement(node_size, 'width') node_width.text = str(self.width) node_height = SubElement(node_size, 'height') node_height.text = str(self.height) node_depth = SubElement(node_size, 'depth') node_depth.text = str(self.channel) node_segmented = SubElement(node_root, 'segmented') node_segmented.text = '0' for i in range(len(labels)): label = labels[i] b = boxes[i].split(',') node_object = SubElement(node_root, 'object') node_name = SubElement(node_object, 'name') caption = "{}".format(label) node_name.text = caption node_pose = SubElement(node_object, 'pose') node_pose.text = 'Unspecified' node_truncated = SubElement(node_object, 'truncated') node_truncated.text = '0' node_difficult = SubElement(node_object, 'difficult') node_difficult.text = '0' node_bndbox = SubElement(node_object, 'bndbox') node_xmin = SubElement(node_bndbox, 'xmin') node_xmin.text = str(int(b[0])) node_ymin = SubElement(node_bndbox, 'ymin') node_ymin.text = str(int(b[1])) node_xmax = SubElement(node_bndbox, 'xmax') node_xmax.text = str(int(b[2])) node_ymax = SubElement(node_bndbox, 'ymax') node_ymax.text = str(int(b[3])) xml = tostring(node_root, pretty_print=True) dom = parseString(xml) return dom
nilq/baby-python
python
# Unit tests for an early (0.x) pydantic release. They pin down model
# construction, validation-error formatting, Config handling, recursion,
# extra-field policy, type inference and field aliases.
# NOTE(review): the exact multi-line error strings below follow the early
# pydantic "field:\n  message (error_type=...)" layout -- confirm against
# the pinned pydantic version.
import json
from typing import Any

import pytest

from pydantic import BaseModel, ConfigError, NoneBytes, NoneStr, ValidationError, pretty_errors


class UltraSimpleModel(BaseModel):
    # `= ...` (Ellipsis) marks a required field in this pydantic version.
    a: float = ...
    b: int = 10


def test_ultra_simple_success():
    # Required field supplied; default used for `b`.
    m = UltraSimpleModel(a=10.2)
    assert m.a == 10.2
    assert m.b == 10


def test_ultra_simple_missing():
    # Omitting the required field raises with a "Missing" error entry.
    with pytest.raises(ValidationError) as exc_info:
        UltraSimpleModel()
    assert """\
1 error validating input
a:
  field required (error_type=Missing)""" == str(exc_info.value)


def test_ultra_simple_failed():
    # Both coercions fail; both errors are reported together.
    with pytest.raises(ValidationError) as exc_info:
        UltraSimpleModel(a='x', b='x')
    assert """\
2 errors validating input
a:
  could not convert string to float: 'x' (error_type=ValueError track=float)
b:
  invalid literal for int() with base 10: 'x' (error_type=ValueError track=int)\
""" == str(exc_info.value)


def test_ultra_simple_repr():
    # repr of the model and of a Field, plus dict() iteration.
    m = UltraSimpleModel(a=10.2)
    assert repr(m) == '<UltraSimpleModel a=10.2 b=10>'
    assert repr(m.fields['a']) == ("<Field a: type='float', required=True, "
                                   "validators=['float', 'number_size_validator']>")
    assert dict(m) == {'a': 10.2, 'b': 10}


def test_comparing():
    # Models compare equal to plain dicts and to equivalent models.
    m = UltraSimpleModel(a=10.2, b='100')
    assert m == {'a': 10.2, 'b': 100}
    assert m == UltraSimpleModel(a=10.2, b=100)


class ConfigModel(UltraSimpleModel):
    class Config:
        # Collect errors on the instance instead of raising.
        raise_exception = False


def test_config_doesnt_raise():
    m = ConfigModel()
    assert len(m.errors) == 1
    assert m.errors['a'].exc.args[0] == 'field required'
    assert m.config.raise_exception is False
    assert m.config.max_anystr_length == 65536


def test_nullable_strings_success():
    # NoneStr / NoneBytes accept None; plain str / bytes do not.
    class NoneCheckModel(BaseModel):
        existing_str_value = 'foo'
        required_str_value: str = ...
        required_str_none_value: NoneStr = ...
        existing_bytes_value = b'foo'
        required_bytes_value: bytes = ...
        required_bytes_none_value: NoneBytes = ...

    m = NoneCheckModel(
        required_str_value='v1',
        required_str_none_value=None,
        required_bytes_value='v2',
        required_bytes_none_value=None,
    )
    assert m.required_str_value == 'v1'
    assert m.required_str_none_value is None
    assert m.required_bytes_value == b'v2'
    assert m.required_bytes_none_value is None


def test_nullable_strings_fails():
    class NoneCheckModel(BaseModel):
        existing_str_value = 'foo'
        required_str_value: str = ...
        required_str_none_value: NoneStr = ...
        existing_bytes_value = b'foo'
        required_bytes_value: bytes = ...
        required_bytes_none_value: NoneBytes = ...

        class Config:
            raise_exception = False

    m = NoneCheckModel(
        required_str_value=None,
        required_str_none_value=None,
        required_bytes_value=None,
        required_bytes_none_value=None,
    )
    # pretty_errors output is compared against a deterministic json.dumps.
    assert """\
{
  "required_bytes_value": {
    "error_msg": "None is not an allow value",
    "error_type": "TypeError",
    "index": null,
    "track": "bytes"
  },
  "required_str_value": {
    "error_msg": "None is not an allow value",
    "error_type": "TypeError",
    "index": null,
    "track": "str"
  }
}""" == json.dumps(pretty_errors(m.errors), indent=2, sort_keys=True)


class RecursiveModel(BaseModel):
    # Nested model field: dict inputs are validated into UltraSimpleModel.
    grape: bool = ...
    banana: UltraSimpleModel = ...


def test_recursion():
    m = RecursiveModel(grape=1, banana={'a': 1})
    assert m.grape is True
    assert m.banana.a == 1.0
    assert m.banana.b == 10
    assert repr(m) == '<RecursiveModel grape=True banana=<UltraSimpleModel a=1.0 b=10>>'


def test_recursion_fails():
    # A non-mapping cannot populate a nested model.
    with pytest.raises(ValidationError):
        RecursiveModel(grape=1, banana=123)


class PreventExtraModel(BaseModel):
    foo = 'whatever'

    class Config:
        # Unknown keyword arguments become validation errors.
        ignore_extra = False


def test_prevent_extra_success():
    m = PreventExtraModel()
    assert m.foo == 'whatever'
    m = PreventExtraModel(foo=1)
    assert m.foo == '1'


def test_prevent_extra_fails():
    with pytest.raises(ValidationError) as exc_info:
        PreventExtraModel(foo='ok', bar='wrong', spam='xx')
    assert exc_info.value.message == '2 errors validating input'
    assert """\
bar:
  extra fields not permitted (error_type=Extra)
spam:
  extra fields not permitted (error_type=Extra)""" == exc_info.value.display_errors


class InvalidValidator:
    # A "type" whose validator has a signature pydantic cannot call.
    @classmethod
    def get_validators(cls):
        yield cls.has_wrong_arguments

    @classmethod
    def has_wrong_arguments(cls, value, bar):
        pass


def test_invalid_validator():
    # Bad validator signatures are rejected at class-definition time.
    with pytest.raises(ConfigError) as exc_info:
        class InvalidValidatorModel(BaseModel):
            x: InvalidValidator = ...
    assert exc_info.value.args[0].startswith('Invalid signature for validator')


def test_no_validator():
    # Plain `object` has no validators at all.
    with pytest.raises(ConfigError) as exc_info:
        class NoValidatorModel(BaseModel):
            x: object = ...
    assert exc_info.value.args[0] == "no validator found for <class 'object'>"


def test_unable_to_infer():
    # A bare `= None` default gives no type to infer from.
    with pytest.raises(ConfigError) as exc_info:
        class InvalidDefinitionModel(BaseModel):
            x = None
    assert exc_info.value.args[0] == 'unable to infer type for attribute "x"'


def test_not_required():
    # Annotated with a None default: optional, None allowed.
    class Model(BaseModel):
        a: float = None
    assert Model(a=12.2).a == 12.2
    assert Model().a is None
    assert Model(a=None).a is None


def test_infer_type():
    # Types inferred from default values.
    class Model(BaseModel):
        a = False
        b = ''
        c = 0
    assert Model().a is False
    assert Model().b == ''
    assert Model().c == 0


def test_allow_extra():
    # Extra kwargs are kept on the instance when allow_extra is set.
    class Model(BaseModel):
        a: float = ...

        class Config:
            allow_extra = True
    assert Model(a='10.2', b=12).values == {'a': 10.2, 'b': 12}


def test_set_attr():
    m = UltraSimpleModel(a=10.2)
    assert m.values == {'a': 10.2, 'b': 10}
    m.setattr('b', 20)
    assert m.values == {'a': 10.2, 'b': 20}


def test_set_attr_invalid():
    # setattr on an unknown field is rejected.
    m = UltraSimpleModel(a=10.2)
    assert m.values == {'a': 10.2, 'b': 10}
    with pytest.raises(ValueError) as exc_info:
        m.setattr('c', 20)
    assert '"UltraSimpleModel" object has no field "c"' in str(exc_info)


def test_any():
    # Any passes every value through unchanged.
    class AnyModel(BaseModel):
        a: Any = 10
    assert AnyModel().a == 10
    assert AnyModel(a='foobar').a == 'foobar'


def test_alias():
    # Field aliases remap the input keyword to the field name.
    class Model(BaseModel):
        a = 'foobar'

        class Config:
            fields = {
                'a': {'alias': '_a'}
            }
    assert Model().a == 'foobar'
    assert Model().values == {'a': 'foobar'}
    assert Model(_a='different').a == 'different'
    assert Model(_a='different').values == {'a': 'different'}
nilq/baby-python
python
from accountancy.helpers import sort_multiple
from nominals.models import NominalTransaction
from vat.models import VatTransaction
from cashbook.models import CashBookLine


def create_lines(line_cls, header, lines):
    """Bulk-create `line_cls` rows for `header`.

    `lines` is a list of field dicts; each dict is mutated in place to
    carry its 1-based line number and the owning header before the model
    instances are built. Returns the created instances.
    """
    tmp = []
    for i, line in enumerate(lines):
        line["line_no"] = i + 1
        line["header"] = header
        tmp.append(line_cls(**line))
    return line_cls.objects.bulk_create(tmp)


def create_nom_trans(nom_tran_cls, line_cls, header, lines, bank_nominal, vat_nominal):
    """Create the nominal (double-entry) transactions for a cash book header.

    Per line: a "g" (goods) entry against the line's nominal, a "v" (vat)
    entry against `vat_nominal`, and a balancing "t" (total) entry against
    `bank_nominal`. The created transaction ids are then written back onto
    the lines.

    NOTE(review): the `3 * i` indexing below assumes every line produced
    all three transactions (non-zero goods AND vat); a line with zero goods
    or zero vat would shift the indexes. TODO confirm callers guarantee
    this.
    """
    nom_trans = []
    for line in lines:
        if line.goods:
            nom_trans.append(
                nom_tran_cls(
                    module="CB",
                    header=header.pk,
                    line=line.pk,
                    nominal=line.nominal,
                    value=-1 * line.goods,  # credit side of the goods entry
                    ref=header.ref,
                    period=header.period,
                    date=header.date,
                    field="g",
                    type=header.type
                )
            )
        if line.vat:
            nom_trans.append(
                nom_tran_cls(
                    module="CB",
                    header=header.pk,
                    line=line.pk,
                    nominal=vat_nominal,
                    value=-1 * line.vat,
                    ref=header.ref,
                    period=header.period,
                    date=header.date,
                    field="v",
                    type=header.type
                )
            )
        if line.goods or line.vat:
            # Balancing entry against the bank account.
            nom_trans.append(
                nom_tran_cls(
                    module="CB",
                    header=header.pk,
                    line=line.pk,
                    nominal=bank_nominal,
                    value=line.goods + line.vat,
                    ref=header.ref,
                    period=header.period,
                    date=header.date,
                    field="t",
                    type=header.type
                )
            )
    nom_trans = NominalTransaction.objects.bulk_create(nom_trans)
    nom_trans = sort_multiple(nom_trans, *[(lambda n: n.line, False)])
    # (A dead local that sliced off the last transaction was removed here;
    # its value was never used.)
    for i, line in enumerate(lines):
        line.goods_nominal_transaction = nom_trans[3 * i]
        line.vat_nominal_transaction = nom_trans[(3 * i) + 1]
        line.total_nominal_transaction = nom_trans[(3 * i) + 2]
    line_cls.objects.bulk_update(
        lines,
        ["goods_nominal_transaction", "vat_nominal_transaction", "total_nominal_transaction"]
    )


def create_cash_book_trans(cash_book_tran_cls, header):
    """Create the single cash book transaction for `header`'s total."""
    cash_book_tran_cls.objects.create(
        module="CB",
        header=header.pk,
        line=1,
        value=header.total,
        ref=header.ref,
        period=header.period,
        date=header.date,
        field="t",
        cash_book=header.cash_book,
        type=header.type
    )


def create_vat_transactions(header, lines):
    """Create one VAT transaction per line and link it back to the line."""
    vat_trans = []
    for line in lines:
        vat_trans.append(
            VatTransaction(
                header=header.pk,
                line=line.pk,
                module="CB",
                ref=header.ref,
                period=header.period,
                date=header.date,
                field="v",
                tran_type=header.type,
                vat_type=header.vat_type,
                vat_code=line.vat_code,
                vat_rate=line.vat_code.rate,
                goods=line.goods,
                vat=line.vat
            )
        )
    vat_trans = VatTransaction.objects.bulk_create(vat_trans)
    # Align both sequences on line pk before pairing them up.
    vat_trans = sort_multiple(vat_trans, *[(lambda v: v.line, False)])
    lines = sort_multiple(lines, *[(lambda l: l.pk, False)])
    for i, line in enumerate(lines):
        line.vat_transaction = vat_trans[i]
    CashBookLine.objects.bulk_update(lines, ["vat_transaction"])
nilq/baby-python
python
import random from django.http import Http404, JsonResponse from django.shortcuts import render from .models import Tweet def home_view(request, *args, **kwargs): return render(request, "pages/home.html", context={}, status=200) def tweet_list_view(request, *args, **kwargs): qs = Tweet.objects.all() tweets_list = [{"id": x.id, "content": x.content, "likes": random.randint(0, 100)} for x in qs] data = { "is_user": False, "response": tweets_list } return JsonResponse(data) def tweet_detail_view(request, tweet_id, *args, **kwargs): data = { "id": tweet_id, } status = 200 try: obj = Tweet.objects.get(id=tweet_id) data['content'] = obj.content except: data['message'] = "Not found" status = 404 return JsonResponse(data, status= status)
nilq/baby-python
python
from models.db import db from models.post import Post from flask_restful import Resource from flask import request from sqlalchemy.orm import joinedload from resources.s3 import * class Posts(Resource): def get(self): posts = Post.find_all() return posts def post(self): data = request.get_json() params = {} for k in data.keys(): params[k] = data[k] post = Post(**params) post.create() return post.json(), 201 class PostDetail(Resource): def get(self, post_id): post = Post.query.options(joinedload( 'user')).filter_by(id=post_id).first() return {**post.json(), 'user': post.user.json()} def delete(self, post_id): post = Post.find_by_id(post_id) if not post: return {"msg": "Not found"} db.session.delete(post) db.session.commit() return {"msg": "Post Deleted", "payload": post_id} class PostActions(Resource): def put(self, post_id): post = Post.find_by_id(post_id) if not post: return {"msg": "Not found"} post.claps += 1 db.session.commit() return post.json() class PostImage(Resource): def post(self): file = request.files['file'] bucket.Object(file.filename).put(Body=file) return "uploaded"
nilq/baby-python
python
from .base import BaseAttack from .fgsm import FGSMAttack
nilq/baby-python
python
""" rg_utils load helpers methods from python """ import pandas as pd import re import robustnessgym as rg from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score def update_pred(dp, model, dp_only=False): """ Updating data panel with model prediction""" model.predict_batch(dp, ["sentence"]) dp = dp.update( lambda x: model.predict_batch(x, ["sentence"]), batch_size=4, is_batched_fn=True, pbar=True, ) if dp_only: return dp labels = pd.Series(["Negative Sentiment", "Positive Sentiment"]) probs = pd.Series(dp.__dict__["_data"]["probs"][0]) pred = pd.concat([labels, probs], axis=1) pred.columns = ["Label", "Probability"] return (dp, pred) def remove_slice(bench, slice_name="user_data"): """ Remove a slice from the rg dev bench""" # slices and identifiers are in the same order slice_list = [] slice_identifier = [] for i in bench.__dict__["_slices"]: # look-up the term name = str(i.__dict__["_identifier"]) if not re.search("new_words", name): slice_list = slice_list + [i] slice_identifier = slice_identifier + [name] # metrics put datain a different order metrics = {} for key in bench.metrics["model"].keys(): if not re.search("new_words", key): metrics[key] = bench.metrics["model"][key] # slice table, repeat for sanity check # slice_table = {} # for key in bench.__dict__["_slice_table"].keys(): # key = str(key) # if not re.search("new_words",key): # slice_table[key] = bench.__dict__["_slice_table"][key] bench.__dict__["_slices"] = set(slice_list) bench.__dict__["_slice_identifiers"] = set(slice_identifier) # bench.__dict__["_slice_table"] = set(slice_identifier) bench.metrics["model"] = metrics return bench def add_slice(bench, table, model, slice_name="user_data"): """ Adds a custom slice to RG """ # do it this way or it complains dp = rg.DataPanel( { "sentence": table["sentence"].tolist(), "label": table["label"].tolist(), "pred": table["pred"].tolist(), } ) # dp._identifier = slice_name # get prediction # add to bench # 
bench.add_slices([dp]) return dp def new_bench(): """ Create new rg dev bench""" bench = rg.DevBench() bench.add_aggregators( { # Every model can be associated with custom metric calculation functions #'distilbert-base-uncased-finetuned-sst-2-english': { "model": { # This function uses the predictions we stored earlier to calculate accuracy #'accuracy': lambda dp: (dp['label'].round() == dp['pred'].numpy()).mean() #'f1' : lambda dp: f1_score(dp['label'].round(),dp['pred'],average='macro',zero_division=1), "recall": lambda dp: recall_score( dp["label"].round(), dp["pred"], average="macro", zero_division=1 ), "precision": lambda dp: precision_score( dp["label"].round(), dp["pred"], average="macro", zero_division=1 ), "accuracy": lambda dp: accuracy_score(dp["label"].round(), dp["pred"]), } } ) return bench def get_sliceid(slices): """ Because RG stores data in a silly way""" ids = [] for slice in list(slices): ids = ids + [slice._identifier] return ids def get_sliceidx(slice_ids,name): """ get the index from an rg slice""" if name == "xyz_train": idx = [i for i, elem in enumerate(slice_ids) if ("split=train" in str(elem)) ] #and len(str(elem).split("->")) == 1)] elif name == "xyz_test": idx = [i for i, elem in enumerate(slice_ids) if ("split=test" in str(elem)) ] #and len(str(elem).split("->")) == 1)] else: idx = [i for i, elem in enumerate(slice_ids) if name in str(elem)] return idx[0] def get_prob(x,i): """ Helper to get probability""" return(float(x[i])) def slice_to_df(data): """ Convert slice to dataframe""" df = pd.DataFrame( { "sentence": list(data["sentence"]), "model label": ["Positive Sentiment" if int(round(x)) == 1 else "Negative Sentiment" for x in data["label"]], "model binary": [int(round(x)) for x in data["label"]], } ) prob = [] for i in range(0, len(data['probs'])): prob.append(get_prob(data['probs'][i],df["model binary"][i])) df["probability"] = prob return df def metrics_to_dict(metrics, slice_name): """ Convert metrics to dataframe""" all_metrics 
= {slice_name: {}} all_metrics[slice_name]["metrics"] = metrics[slice_name] all_metrics[slice_name]["source"] = "Custom Slice" return all_metrics
nilq/baby-python
python
# Generated by Django 3.0.7 on 2020-07-29 17:16 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('permafrost', '0012_auto_20200729_1710'), ('permafrost', '0015_auto_20200606_0042'), ] operations = [ ]
nilq/baby-python
python
from flask import Flask app = Flask(__name__) def wrap_html(message): html = """ <html> <body> <div style='font-size:80px;'> <center> <image height="600" width="531" src="https://secure.meetupstatic.com/photos/event/2/a/a/3/600_452110915.jpeg"> <br> {0}<br> </center> </div> </body> </html>""".format(message) return html @app.route('/') def hello_world(): message = 'Hello Python, The application is working --Rizwan!' html = wrap_html(message) return html if __name__ == '__main__': app.run(host='0.0.0.0', port=5000)
nilq/baby-python
python
""" Every issue is reported as ``robocop.rules.Message`` object. It can be later printed or used by post-run reports. Output message format --------------------- Output message of rules can be defined with ``-f`` / ``--format`` argument. Default value:: "{source}:{line}:{col} [{severity}] {rule_id} {desc} ({name})" Available formats: * ``source``: path to the file where the issue occurred * ``source_rel``: path to the file where the issue occurred, relative to execution directory * ``line``: line number where the issue starts * ``end_line``: line number where the issue ends * ``col``: column number where the issue starts * ``end_col``: column number where the issue ends * ``severity``: severity of the issue, value of ``robocop.rules.RuleSeverity`` enum * ``rule_id``: rule id (e.g. 0501) * ``name``: rule name (e.g. ``line-too-long`) * ``desc``: description of the rule """ from enum import Enum from functools import total_ordering import robocop.exceptions @total_ordering class RuleSeverity(Enum): """ Rule severity. It can be configured with ``--configure id_or_msg_name:severity:value`` where value can be first letter of severity value or whole name, case-insensitive. For example :: -c line-too-long:severity:e will change `line-too-long` rule severity to error. You can filter out all rules below given severity value by using following option:: -t/--threshold <severity value> Example:: --threshold E will only report rules with severity E and above. 
""" INFO = "I" WARNING = "W" ERROR = "E" def __lt__(self, other): look_up = [sev.value for sev in RuleSeverity] return look_up.index(self.value) < look_up.index(other.value) class Rule: def __init__(self, rule_id, body): self.rule_id = rule_id self.name = "" self.desc = "" self.source = None self.enabled = True self.severity = RuleSeverity.INFO self.configurable = [] self.parse_body(body) def __str__(self): return ( f"Rule - {self.rule_id} [{self.severity.value}]: {self.name}: {self.desc} " f'({"enabled" if self.enabled else "disabled"})' ) def change_severity(self, value): severity = { "error": "E", "e": "E", "warning": "W", "w": "W", "info": "I", "i": "I", }.get(str(value).lower(), None) if severity is None: raise robocop.exceptions.InvalidRuleSeverityError(self.name, value) self.severity = RuleSeverity(severity) def get_configurable(self, param): for configurable in self.configurable: if configurable[0] == param: return configurable return None @staticmethod def get_configurable_desc(conf, default=None): desc = f"{conf[0]} = {default}\n" f" type: {conf[2].__name__}" if len(conf) == 4: desc += "\n" f" info: {conf[3]}" return desc @staticmethod def get_default_value(param, checker): return None if checker is None else checker.__dict__.get(param, None) def available_configurables(self, include_severity=True, checker=None): configurables = ["severity"] if include_severity else [] for conf in self.configurable: default = self.get_default_value(conf[1], checker) configurables.append(self.get_configurable_desc(conf, default)) if not configurables: return "" return "\n ".join(configurables) def parse_body(self, body): if isinstance(body, tuple) and len(body) >= 3: self.name, self.desc, self.severity, *self.configurable = body else: raise robocop.exceptions.InvalidRuleBodyError(self.rule_id, body) for configurable in self.configurable: if not isinstance(configurable, tuple) or len(configurable) not in (3, 4): raise 
robocop.exceptions.InvalidRuleConfigurableError(self.rule_id, body) def prepare_message(self, *args, source, node, lineno, col, end_lineno, end_col, ext_disablers): return Message( *args, rule=self, source=source, node=node, lineno=lineno, col=col, end_col=end_col, end_lineno=end_lineno, ext_disablers=ext_disablers, ) def matches_pattern(self, pattern): """check if this rule matches given pattern""" if isinstance(pattern, str): return pattern in (self.name, self.rule_id) return pattern.match(self.name) or pattern.match(self.rule_id) class Message: def __init__( self, *args, rule, source, node, lineno, col, end_lineno, end_col, ext_disablers=None, ): self.enabled = rule.enabled self.rule_id = rule.rule_id self.name = rule.name self.severity = rule.severity self.desc = rule.desc try: self.desc %= args except TypeError as err: raise robocop.exceptions.InvalidRuleUsageError(rule.rule_id, err) self.source = source self.line = 1 if node is not None and node.lineno > -1: self.line = node.lineno if lineno is not None: self.line = lineno self.col = 1 if col is None else col self.end_line = self.line if end_lineno is None else end_lineno self.end_col = self.col if end_col is None else end_col self.ext_disablers = ext_disablers if ext_disablers else [] def __lt__(self, other): return (self.line, self.col, self.rule_id) < ( other.line, other.col, other.rule_id, ) def get_fullname(self): return f"{self.severity.value}{self.rule_id} ({self.name})" def to_json(self): return { "source": self.source, "line": self.line, "column": self.col, "severity": self.severity.value, "rule_id": self.rule_id, "description": self.desc, "rule_name": self.name, }
nilq/baby-python
python
#! /usr/bin/env python # -*- coding: utf-8 -*- # # vim: fenc=utf-8 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # """ File name: favorites.py Version: 0.1 Author: dhilipsiva <dhilipsiva@gmail.com> Date created: 2015-07-26 """ __author__ = "dhilipsiva" __status__ = "development" """ """ fmt = """ "%i" { "name" "%i" "gamedir" "cstrike" "Players" "3" "maxplayers" "32" "map" "de_dust2" "address" "%s" "lastplayed" "0" "secure" "1" "type" "4" }\n""" f = open("ips.txt", "r") w = open("w.txt", "w") i = 5 for line in f: w.write(fmt % (i, i, line.replace("\n", ""))) i += 1
nilq/baby-python
python
__author__ = 'Pauli Salmenrinne' from setuptools import setup requires = [ ] setup( name='sarch2', version="1.1.0", description='Simple archiving solution', scripts=['bin/sarch2'], packages=['sarch2'], long_description=open('README.rst').read(), url='https://github.com/susundberg/python-sarch2', author='Pauli Salmenrinne', author_email='susundberg@gmail.com', license='MIT', install_requires=requires, test_suite="test", classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX', 'Programming Language :: Python :: 3.6', 'Topic :: System :: Archiving', 'Topic :: System :: Filesystems' ], zip_safe=True )
nilq/baby-python
python
#!/usr/bin/python # # Copyright (c) 2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Recover duts. This module runs at system startup on Chromium OS test images. It runs through a set of hooks to keep a DUT from being bricked without manual intervention. Example hook: Check to see if ethernet is connected. If its not, unload and reload the ethernet driver. """ import logging import os import subprocess import time from logging import handlers LOGGING_SUBDIR = '/var/log/recover_duts' LOG_FILENAME = 'recover_duts.log' LOGGING_FORMAT = '%(asctime)s - %(levelname)s - %(message)s' LONG_REBOOT_DELAY = 300 SLEEP_DELAY = 600 LOG_FILE_BACKUP_COUNT = 10 LOG_FILE_SIZE = 1024 * 5000 # 5000 KB def _setup_logging(log_file): """Setup logging. Args: log_file: path to log file. """ log_formatter = logging.Formatter(LOGGING_FORMAT) handler = handlers.RotatingFileHandler( filename=log_file, maxBytes=LOG_FILE_SIZE, backupCount=LOG_FILE_BACKUP_COUNT) handler.setFormatter(log_formatter) logger = logging.getLogger() log_level = logging.DEBUG logger.setLevel(log_level) logger.addHandler(handler) def main(): if not os.path.isdir(LOGGING_SUBDIR): os.makedirs(LOGGING_SUBDIR) log_file = os.path.join(LOGGING_SUBDIR, LOG_FILENAME) _setup_logging(log_file) hooks_dir = os.path.join(os.path.dirname(__file__), 'hooks') # Additional sleep as networking not be up in the case of a long reboot. 
time.sleep(LONG_REBOOT_DELAY) try: while True: for script in os.listdir(hooks_dir): script = os.path.join(hooks_dir, script) if os.path.isfile(script) and script.endswith('.hook'): logging.debug('Running hook: %s', script) popen = subprocess.Popen([script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output = popen.communicate()[0] if popen.returncode == 0: logging.debug('Running of %s succeeded with output:\n%s', script, output) else: logging.warn('Running of %s failed with output:\n%s', script, output) time.sleep(SLEEP_DELAY) except Exception as e: # Since this is run from an upstart job we want to ensure we log this into # our log file before dying. logging.fatal(str(e)) raise if __name__ == '__main__': main()
nilq/baby-python
python
#-*-coding: utf8-*- import redis def connection(ip, port): r = redis.StrictRedis(host=ip, port=port, db=0) return r def add(r, query, suggestions): ''' :param query: string :param suggestions: {sug1:score1,sugg2:score2...} use SortedSet to store suggestions ''' r.zadd('suggestions', suggestions) def search(r, query): return r.zrange('suggestions', start=0, end=10) if __name__ == '__main__': pass
nilq/baby-python
python
# CLI subcommands (click) for listing and inspecting compute sessions via
# the manager's GraphQL admin API.
import sys

import click
from tabulate import tabulate

from . import admin
from ...session import Session
from ..pretty import print_error


@admin.command()
@click.option('--status', default='RUNNING',
              type=click.Choice(['PREPARING', 'BUILDING', 'RUNNING', 'RESTARTING',
                                 'RESIZING', 'SUSPENDED', 'TERMINATING',
                                 'TERMINATED', 'ERROR', 'ALL']),
              help='Filter by the given status')
@click.option('--access-key', type=str, default=None,
              help='Get sessions for a specific access key '
                   '(only works if you are a super-admin)')
@click.option('--id-only', is_flag=True, help='Display session ids only.')
def sessions(status, access_key, id_only):
    '''
    List and manage compute sessions.
    '''
    # (column title, GraphQL field name) pairs; used to build both the
    # query's field selection and the table header.
    fields = [
        ('Session ID', 'sess_id'),
    ]
    if not id_only:
        fields.extend([
            ('Lang/runtime', 'lang'),
            ('Tag', 'tag'),
            ('Created At', 'created_at',),
            ('Terminated At', 'terminated_at'),
            ('Status', 'status'),
            ('CPU Cores', 'cpu_slot'),
            ('CPU Used (ms)', 'cpu_used'),
            ('Total Memory (MiB)', 'mem_slot'),
            ('Used Memory (MiB)', 'mem_cur_bytes'),
            ('Max Used Memory (MiB)', 'mem_max_bytes'),
            ('GPU Cores', 'gpu_slot'),
        ])
    # Super-admins may scope the query to another user's access key.
    if access_key is None:
        q = 'query($status:String) {' \
            '  compute_sessions(status:$status) { $fields }' \
            '}'
    else:
        q = 'query($ak:String, $status:String) {' \
            '  compute_sessions(access_key:$ak, status:$status) { $fields }' \
            '}'
    # `$fields` is a client-side placeholder substituted here, not a real
    # GraphQL variable.
    q = q.replace('$fields', ' '.join(item[1] for item in fields))
    v = {
        'status': status if status != 'ALL' else None,
        'ak': access_key,
    }
    with Session() as session:
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if len(resp['compute_sessions']) == 0:
            print('There are no compute sessions currently running.')
            return
        for item in resp['compute_sessions']:
            # Convert raw byte counts to MiB for display.
            if 'mem_cur_bytes' in item:
                item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
            if 'mem_max_bytes' in item:
                item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1)

        if id_only:
            for item in resp['compute_sessions']:
                print(item['sess_id'])
        else:
            print(tabulate((item.values() for item in resp['compute_sessions']),
                           headers=(item[0] for item in fields)))


@admin.command()
@click.argument('sess_id_or_alias', metavar='SESSID')
def session(sess_id_or_alias):
    '''
    Show detailed information for a running compute session.

    SESSID: Session id or its alias.
    '''
    fields = [
        ('Session ID', 'sess_id'),
        ('Role', 'role'),
        ('Lang/runtime', 'lang'),
        ('Tag', 'tag'),
        ('Created At', 'created_at',),
        ('Terminated At', 'terminated_at'),
        ('Agent', 'agent'),
        ('Status', 'status',),
        ('Status Info', 'status_info',),
        ('CPU Cores', 'cpu_slot'),
        ('CPU Used (ms)', 'cpu_used'),
        ('Total Memory (MiB)', 'mem_slot'),
        ('Used Memory (MiB)', 'mem_cur_bytes'),
        ('Max Used Memory (MiB)', 'mem_max_bytes'),
        ('GPU Cores', 'gpu_slot'),
        ('Number of Queries', 'num_queries'),
        ('Network RX Bytes', 'net_rx_bytes'),
        ('Network TX Bytes', 'net_tx_bytes'),
        ('IO Read Bytes', 'io_read_bytes'),
        ('IO Write Bytes', 'io_write_bytes'),
        ('IO Max Scratch Size', 'io_max_scratch_size'),
        ('IO Current Scratch Size', 'io_cur_scratch_size'),
    ]
    q = 'query($sess_id:String) {' \
        '  compute_session(sess_id:$sess_id) { $fields }' \
        '}'
    q = q.replace('$fields', ' '.join(item[1] for item in fields))
    v = {'sess_id': sess_id_or_alias}
    with Session() as session:
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if resp['compute_session']['sess_id'] is None:
            print('There is no such running compute session.')
            return
        print('Session detail:\n---------------')
        # Response values come back in the same order as `fields`, so the
        # i-th value is labelled by fields[i][0].
        for i, value in enumerate(resp['compute_session'].values()):
            if fields[i][1] in ['mem_cur_bytes', 'mem_max_bytes']:
                value = round(value / 2 ** 20, 1)
            print(fields[i][0] + ': ' + str(value))
nilq/baby-python
python
# Helpers around `rich` progress displays: custom columns, a byte-counting
# reader wrapper, and factory functions for the various progress layouts.
import io
import time
from typing import Optional, Tuple

from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import (
    BarColumn,
    DownloadColumn,
    FileSizeColumn,
    MofNCompleteColumn,
    Progress,
    ProgressColumn,
    SpinnerColumn,
    Task,
    TaskID,
    TimeElapsedColumn,
    TimeRemainingColumn,
)
from rich.table import Table
from rich.text import Text


class ImageDownloadUploadColumn(DownloadColumn):
    # Like DownloadColumn, but renders nothing when the total is unknown
    # or trivially 1 (single-step image layers).
    def render(self, task: Task) -> Text:
        if task.total is None or int(task.total) == 1:
            return Text("")
        else:
            return super().render(task)


class TaskStatusColumn(ProgressColumn):
    # Animated "waiting..." indicator that becomes a check mark once the
    # task completes.
    # NOTE(review): the dots counter is shared by every task rendered with
    # this column instance, so concurrent tasks animate in lockstep --
    # presumably acceptable; confirm if per-task animation is wanted.
    def __init__(self):
        super().__init__()
        self.dots = 0
        self.max_dots = 4
        self.update_interval = 1.0  # seconds between animation steps
        self.last_updated = time.time()

    def render(self, task: Task) -> Text:
        total = max(0, task.total or 0)
        completed = max(0, task.completed)
        if completed < total:
            now = time.time()
            if now - self.last_updated > self.update_interval:
                self.last_updated = now
                self.dots += 1
                if self.dots > self.max_dots:
                    self.dots = 0
            # Pad with spaces so the column width stays constant.
            return Text("waiting" + ("." * self.dots) + (" " * (self.max_dots - self.dots)))
        else:
            return Text("\N{check mark} finalized")


class BufferedReaderWithProgress(io.BufferedReader):
    # Wraps a BufferedReader so every read advances a rich progress task;
    # also tracks the running byte total in `total_read`.
    def __init__(self, buffered_reader: io.BufferedReader, progress: Progress, task_id: TaskID):
        super().__init__(buffered_reader.raw)
        self.buffered_reader = buffered_reader
        self.progress = progress
        self.task_id = task_id
        self.total_read = 0

    def peek(self, size: int = 0) -> bytes:
        # peek does not consume, so the progress bar is not advanced.
        return self.buffered_reader.peek(size)

    def read(self, size: Optional[int] = None) -> bytes:
        out = self.buffered_reader.read(size)
        self.progress.advance(self.task_id, len(out))
        self.total_read += len(out)
        return out

    def read1(self, size: int = -1) -> bytes:
        out = self.buffered_reader.read1(size)
        self.progress.advance(self.task_id, len(out))
        self.total_read += len(out)
        return out


def get_experiments_progress(quiet: bool = False) -> Progress:
    """Progress layout for overall experiment counts (m of n + percent)."""
    return Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        MofNCompleteColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        disable=quiet,
    )


def get_jobs_progress(quiet: bool = False) -> Progress:
    """Progress layout for per-job status (animated waiting + elapsed)."""
    return Progress(
        "[progress.description]{task.description}",
        TaskStatusColumn(),
        TimeElapsedColumn(),
        disable=quiet,
    )


def get_logs_progress(quiet: bool = False) -> Progress:
    """Progress layout for streaming logs (spinner + bytes + elapsed)."""
    return Progress(
        "[progress.description]{task.description}",
        SpinnerColumn(),
        FileSizeColumn(),
        TimeElapsedColumn(),
        disable=quiet,
    )


def get_group_experiments_progress(quiet: bool = False) -> Progress:
    """Progress layout for fetching a group's experiments."""
    return Progress(
        "[progress.description]{task.description}",
        SpinnerColumn(),
        FileSizeColumn(),
        TimeElapsedColumn(),
        disable=quiet,
    )


def get_exps_and_jobs_progress(quiet: bool = False) -> Tuple[Live, Progress, Progress]:
    """Return a Live display combining experiment and job progress panels."""
    experiments_progress = get_experiments_progress(quiet)
    jobs_progress = get_jobs_progress(quiet)
    progress_table = Table.grid()
    progress_table.add_row(
        Panel.fit(experiments_progress, title="Overall progress", padding=(1, 2)),
        Panel.fit(jobs_progress, title="Task progress", padding=(1, 2)),
    )
    return (
        Live(progress_table, console=None if not quiet else Console(quiet=True)),
        experiments_progress,
        jobs_progress,
    )


def get_dataset_sync_progress(quiet: bool = False) -> Progress:
    """Progress layout for dataset uploads (bar + ETA + bytes)."""
    return Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        TimeElapsedColumn(),
        TimeRemainingColumn(),
        DownloadColumn(),
        disable=quiet,
    )


def get_sized_dataset_fetch_progress(quiet: bool = False) -> Progress:
    """Progress layout for downloads whose total size is known."""
    return Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        TimeElapsedColumn(),
        TimeRemainingColumn(),
        DownloadColumn(),
        disable=quiet,
    )


def get_unsized_dataset_fetch_progress(quiet: bool = False) -> Progress:
    """Progress layout for downloads of unknown size (spinner + bytes)."""
    return Progress(
        "[progress.description]{task.description}",
        SpinnerColumn(),
        TimeElapsedColumn(),
        FileSizeColumn(),
        disable=quiet,
    )


def get_image_upload_progress(quiet: bool = False) -> Progress:
    """Progress layout for image uploads; hides sizes for 1-step layers."""
    return Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        TimeRemainingColumn(),
        ImageDownloadUploadColumn(),
        disable=quiet,
    )


def get_image_download_progress(quiet: bool = False) -> Progress:
    # Downloads intentionally share the upload layout.
    return get_image_upload_progress(quiet)
nilq/baby-python
python
from django.apps import AppConfig


class CapstoneConfig(AppConfig):
    """Django AppConfig for the ``capstone`` application."""

    # Dotted app path used by Django's app registry (the INSTALLED_APPS entry).
    name = 'capstone'
nilq/baby-python
python
""" Iterative deepening Depth-first Search specialization of a generic search algorithm. """ from typing import Optional from search.algorithms.search import Node, SearchAlgorithm from search.space import Space from search.algorithms.dfs import DFS import time from math import sqrt, pi class IDDFS(DFS): """Iterative deepening Depth-first Search.""" def __init__(self, problem): super().__init__(problem) self.max_expansions = 2 ** 64 def __str__(self) -> str: """The string representation of this Node.""" return "{}[]".format( self.__class__.__name__, ) @classmethod def name(cls) -> str: """Returns the name of the Algorithm.""" return "Iterative deepening Depth-first Search" # pylint: no-self-argument def create_starting_node(self, state: Space.State) -> Node: """Create an Starting Node.""" self.nodes_created += 1 return Node(state, action=None, parent=None) def reach(self, state: Space.State, action: Space.Action, parent: Node): """Reaches a state and updates Open.""" if state in self.open: # If the state was already in Open, then we discard this new path # as we don't have a way of telling which one is better. return # depth = 0 # node = parent # while node != None: # depth += 1 # node = node.parent # if depth > self.max_depth: # print("ignored a reach") # return self.nodes_created += 1 self.open.insert(Node(state, action, parent)) def _actually_search(self, depth) -> Optional[Node]: """Finds a single goal Node.""" node = self.open.pop() cost = 0 parent = node while parent != None: cost += 1 parent = parent.parent if cost > depth: for i in self.closed: if i == node.state: self.closed.remove(i) return None if self.problem.is_goal(node.state): return node self.expansions += 1 if self.expansions >= self.expansion_limit: print(str(self), ": giving up...") return None # Expand the node and consider all its neighboring states. 
self.closed.add(node.state) for action, state in self.problem.space.neighbors(node.state): self.states_generated += 1 if state in self.closed: # Déjà vu, we reached an expanded state. continue # Not falling for this (again?). # print(self.states_reached, self.max_states) self.states_reached += 1 self.reach(state, action, parent=node) result = self._actually_search(depth) if result != None: return result return None def search(self) -> Optional[Node]: """Finds a single goal Node.""" self.time_ns = time.perf_counter_ns() solution = None depth = 0 while self.expansions < self.expansion_limit and solution == None: self.open = self.create_open() self.closed = set() for start in self.problem.starting_states: self.open.insert(self.create_starting_node(start)) solution = self._actually_search(depth) depth += 5 self.time_ns = time.perf_counter_ns() - self.time_ns return solution
nilq/baby-python
python
# NOTE(review): machine-generated CLR type stubs (System.Collections.Generic
# surfaced through `__clrclasses__`, e.g. for a .NET/IronPython bridge).
# The repeated `def` overloads, `__init__` return annotations, and inline
# docstring-only bodies are conventions of the stub generator, not bugs —
# do not "fix" this file by hand; regenerate it instead.
from __clrclasses__.System import Comparison as _n_0_t_0
from __clrclasses__.System import ValueType as _n_0_t_1
from __clrclasses__.System import Predicate as _n_0_t_2
from __clrclasses__.System import Array as _n_0_t_3
from __clrclasses__.System import IDisposable as _n_0_t_4
from __clrclasses__.System import SystemException as _n_0_t_5
from __clrclasses__.System import Exception as _n_0_t_6
from __clrclasses__.System import Converter as _n_0_t_7
from __clrclasses__.System import Action as _n_0_t_8
from __clrclasses__.System import Func as _n_0_t_9
from __clrclasses__.System.Collections import IComparer as _n_1_t_0
from __clrclasses__.System.Collections import IDictionary as _n_1_t_1
from __clrclasses__.System.Collections import IDictionaryEnumerator as _n_1_t_2
from __clrclasses__.System.Collections import ICollection as _n_1_t_3
from __clrclasses__.System.Collections import IEqualityComparer as _n_1_t_4
from __clrclasses__.System.Collections import IEnumerable as _n_1_t_5
from __clrclasses__.System.Collections import IEnumerator as _n_1_t_6
from __clrclasses__.System.Collections import IList as _n_1_t_7
from __clrclasses__.System.Collections.ObjectModel import ReadOnlyCollection as _n_2_t_0
from __clrclasses__.System.Linq import ParallelQuery as _n_3_t_0
from __clrclasses__.System.Linq import IQueryable as _n_3_t_1
from __clrclasses__.System.Linq import IGrouping as _n_3_t_2
from __clrclasses__.System.Linq import IOrderedEnumerable as _n_3_t_3
from __clrclasses__.System.Linq import ILookup as _n_3_t_4
from __clrclasses__.System.Runtime.InteropServices import _Exception as _n_4_t_0
from __clrclasses__.System.Runtime.Serialization import ISerializable as _n_5_t_0
from __clrclasses__.System.Runtime.Serialization import IDeserializationCallback as _n_5_t_1
import typing
# Generic type parameters shared by all stubs below.
T = typing.TypeVar('T')
TKey = typing.TypeVar('TKey')
TValue = typing.TypeVar('TValue')
class Comparer(_n_1_t_0, IComparer[T], typing.Generic[T]):
    @property
    def Default(self) -> Comparer[T]: """Default { get; } -> Comparer"""
    @staticmethod
    def Create(comparison: _n_0_t_0[T]) -> Comparer[T]:...
class Dictionary(IDictionary[TKey, TValue], _n_1_t_1, IReadOnlyDictionary[TKey, TValue], _n_5_t_0, _n_5_t_1, typing.Generic[TKey, TValue], typing.Iterable[TValue]):
    @property
    def Comparer(self) -> IEqualityComparer[TKey]: """Comparer { get; } -> IEqualityComparer"""
    def __init__(self, dictionary: IDictionary[TKey, TValue]) -> Dictionary:...
    def __init__(self, dictionary: IDictionary[TKey, TValue], comparer: IEqualityComparer[TKey]) -> Dictionary:...
    def __init__(self, capacity: int, comparer: IEqualityComparer[TKey]) -> Dictionary:...
    def __init__(self, comparer: IEqualityComparer[TKey]) -> Dictionary:...
    def __init__(self, capacity: int) -> Dictionary:...
    def __init__(self) -> Dictionary:...
    def ContainsValue(self, value: TValue) -> bool:...
    class Enumerator(_n_0_t_1, IEnumerator[KeyValuePair[TKey, TValue]], _n_1_t_2, typing.Generic[TKey, TValue]):
        pass
    class KeyCollection(ICollection[TKey], _n_1_t_3, IReadOnlyCollection[TKey], typing.Generic[TKey, TValue]):
        def __init__(self, dictionary: Dictionary[TKey, TValue]) -> Dictionary.KeyCollection:...
        class Enumerator(_n_0_t_1, IEnumerator[TKey], typing.Generic[TKey, TValue]):
            pass
    class ValueCollection(ICollection[TValue], _n_1_t_3, IReadOnlyCollection[TValue], typing.Generic[TKey, TValue]):
        def __init__(self, dictionary: Dictionary[TKey, TValue]) -> Dictionary.ValueCollection:...
        class Enumerator(_n_0_t_1, IEnumerator[TValue], typing.Generic[TKey, TValue]):
            pass
class EqualityComparer(_n_1_t_4, IEqualityComparer[T], typing.Generic[T]):
    @property
    def Default(self) -> EqualityComparer[T]: """Default { get; } -> EqualityComparer"""
class HashSet(ICollection[T], _n_5_t_0, _n_5_t_1, ISet[T], IReadOnlyCollection[T], typing.Generic[T]):
    @property
    def Comparer(self) -> IEqualityComparer[T]: """Comparer { get; } -> IEqualityComparer"""
    def __init__(self) -> HashSet:...
    def __init__(self, comparer: IEqualityComparer[T]) -> HashSet:...
    def __init__(self, collection: IEnumerable[T]) -> HashSet:...
    def __init__(self, collection: IEnumerable[T], comparer: IEqualityComparer[T]) -> HashSet:...
    def __init__(self, capacity: int) -> HashSet:...
    def __init__(self, capacity: int, comparer: IEqualityComparer[T]) -> HashSet:...
    @staticmethod
    def CreateSetComparer() -> IEqualityComparer[HashSet[T]]:...
    def RemoveWhere(self, match: _n_0_t_2[T]) -> int:...
    def TrimExcess(self):...
    def TryGetValue(self, equalValue: T, actualValue: object) -> bool:...
    class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
        pass
class ICollection(IEnumerable[T], typing.Generic[T]):
    @property
    def Count(self) -> int: """Count { get; } -> int"""
    @property
    def IsReadOnly(self) -> bool: """IsReadOnly { get; } -> bool"""
    def Add(self, item: T):...
    def Clear(self):...
    def Contains(self, item: T) -> bool:...
    def CopyTo(self, array: _n_0_t_3[T], arrayIndex: int):...
    def Remove(self, item: T) -> bool:...
class IComparer(typing.Generic[T]):
    def Compare(self, x: T, y: T) -> int:...
class IDictionary(ICollection[KeyValuePair[TKey, TValue]], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
    @property
    def Item(self) -> TValue: """Item { get; set; } -> TValue"""
    @property
    def Keys(self) -> ICollection[TKey]: """Keys { get; } -> ICollection"""
    @property
    def Values(self) -> ICollection[TValue]: """Values { get; } -> ICollection"""
    def ContainsKey(self, key: TKey) -> bool:...
    def TryGetValue(self, key: TKey, value: object) -> bool:...
# The LINQ extension-method surface is flattened onto IEnumerable by the
# stub generator; each overload mirrors a System.Linq.Enumerable signature.
class IEnumerable(_n_1_t_5, typing.Generic[T]):
    def Aggregate(self, func: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def Aggregate(self, seed: typing.Any, func: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def Aggregate(self, seed: typing.Any, func: _n_0_t_9[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def All(self, predicate: _n_0_t_9[typing.Any, bool]) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def Any(self) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def Any(self, predicate: _n_0_t_9[typing.Any, bool]) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def Append(self, element: typing.Any) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def AsEnumerable(self) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def AsParallel(self) -> _n_3_t_0[typing.Any]:
        """Extension from: System.Linq.ParallelEnumerable"""
    def AsQueryable(self) -> _n_3_t_1[typing.Any]:
        """Extension from: System.Linq.Queryable"""
    def Average(self) -> float:
        """Extension from: System.Linq.Enumerable"""
    def Average(self, selector: _n_0_t_9[typing.Any, int]) -> float:
        """Extension from: System.Linq.Enumerable"""
    def Concat(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Contains(self, value: typing.Any) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def Contains(self, value: typing.Any, comparer: IEqualityComparer[typing.Any]) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def Count(self) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Count(self, predicate: _n_0_t_9[typing.Any, bool]) -> int:
        """Extension from: System.Linq.Enumerable"""
    def DefaultIfEmpty(self) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def DefaultIfEmpty(self, defaultValue: typing.Any) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Distinct(self) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Distinct(self, comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ElementAt(self, index: int) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def ElementAtOrDefault(self, index: int) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def Except(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Except(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def First(self) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def First(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def FirstOrDefault(self) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def FirstOrDefault(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
        """Extension from: System.Linq.Enumerable"""
    def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
        """Extension from: System.Linq.Enumerable"""
    def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
        """Extension from: System.Linq.Enumerable"""
    def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
        """Extension from: System.Linq.Enumerable"""
    def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def GroupJoin(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def GroupJoin(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Intersect(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Intersect(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Join(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Join(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Last(self) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def Last(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def LastOrDefault(self) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def LastOrDefault(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def LongCount(self) -> int:
        """Extension from: System.Linq.Enumerable"""
    def LongCount(self, predicate: _n_0_t_9[typing.Any, bool]) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Max(self) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Max(self, selector: _n_0_t_9[typing.Any, int]) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Min(self) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Min(self, selector: _n_0_t_9[typing.Any, int]) -> int:
        """Extension from: System.Linq.Enumerable"""
    def OrderBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def OrderBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IComparer[typing.Any]) -> _n_3_t_3[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def OrderByDescending(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def OrderByDescending(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IComparer[typing.Any]) -> _n_3_t_3[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Prepend(self, element: typing.Any) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Reverse(self) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Select(self, selector: _n_0_t_9[typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def SelectMany(self, selector: _n_0_t_9[typing.Any, IEnumerable[typing.Any]]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def SelectMany(self, collectionSelector: _n_0_t_9[typing.Any, int, IEnumerable[typing.Any]], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def SequenceEqual(self, second: IEnumerable[typing.Any]) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def SequenceEqual(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> bool:
        """Extension from: System.Linq.Enumerable"""
    def Single(self) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def Single(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def SingleOrDefault(self) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def SingleOrDefault(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
        """Extension from: System.Linq.Enumerable"""
    def Skip(self, count: int) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def SkipWhile(self, predicate: _n_0_t_9[typing.Any, bool]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Sum(self) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Sum(self, selector: _n_0_t_9[typing.Any, int]) -> int:
        """Extension from: System.Linq.Enumerable"""
    def Take(self, count: int) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def TakeWhile(self, predicate: _n_0_t_9[typing.Any, bool]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToArray(self) -> _n_0_t_3[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> Dictionary[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> Dictionary[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any]) -> Dictionary[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> Dictionary[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToHashSet(self) -> HashSet[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToHashSet(self, comparer: IEqualityComparer[typing.Any]) -> HashSet[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToList(self) -> List[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Union(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Union(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Where(self, predicate: _n_0_t_9[typing.Any, bool]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
    def Zip(self, second: IEnumerable[typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
        """Extension from: System.Linq.Enumerable"""
class IEnumerator(_n_0_t_4, _n_1_t_6, typing.Generic[T]):
    pass
class IEqualityComparer(typing.Generic[T]):
    def Equals(self, x: T, y: T) -> bool:...
    def GetHashCode(self, obj: T) -> int:...
class IList(ICollection[T], typing.Generic[T], typing.Iterable[T]):
    @property
    def Item(self) -> T: """Item { get; set; } -> T"""
    def IndexOf(self, item: T) -> int:...
    def Insert(self, index: int, item: T):...
    def RemoveAt(self, index: int):...
class IReadOnlyCollection(IEnumerable[T], typing.Generic[T]):
    @property
    def Count(self) -> int: """Count { get; } -> int"""
class IReadOnlyDictionary(IReadOnlyCollection[KeyValuePair[TKey, TValue]], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
    @property
    def Item(self) -> TValue: """Item { get; } -> TValue"""
    @property
    def Keys(self) -> IEnumerable[TKey]: """Keys { get; } -> IEnumerable"""
    @property
    def Values(self) -> IEnumerable[TValue]: """Values { get; } -> IEnumerable"""
    def ContainsKey(self, key: TKey) -> bool:...
    def TryGetValue(self, key: TKey, value: object) -> bool:...
class IReadOnlyList(IReadOnlyCollection[T], typing.Generic[T], typing.Iterable[T]):
    @property
    def Item(self) -> T: """Item { get; } -> T"""
class ISet(ICollection[T], typing.Generic[T]):
    def ExceptWith(self, other: IEnumerable[T]):...
    def IntersectWith(self, other: IEnumerable[T]):...
    def IsProperSubsetOf(self, other: IEnumerable[T]) -> bool:...
    def IsProperSupersetOf(self, other: IEnumerable[T]) -> bool:...
    def IsSubsetOf(self, other: IEnumerable[T]) -> bool:...
    def IsSupersetOf(self, other: IEnumerable[T]) -> bool:...
    def Overlaps(self, other: IEnumerable[T]) -> bool:...
    def SetEquals(self, other: IEnumerable[T]) -> bool:...
    def SymmetricExceptWith(self, other: IEnumerable[T]):...
    def UnionWith(self, other: IEnumerable[T]):...
class KeyNotFoundException(_n_0_t_5, _n_5_t_0, _n_4_t_0):
    def __init__(self, message: str, innerException: _n_0_t_6) -> KeyNotFoundException:...
    def __init__(self, message: str) -> KeyNotFoundException:...
    def __init__(self) -> KeyNotFoundException:...
class KeyValuePair(_n_0_t_1, typing.Generic[TKey, TValue]):
    @property
    def Key(self) -> TKey: """Key { get; } -> TKey"""
    @property
    def Value(self) -> TValue: """Value { get; } -> TValue"""
    def __init__(self, key: TKey, value: TValue) -> KeyValuePair:...
class LinkedList(ICollection[T], _n_1_t_3, IReadOnlyCollection[T], _n_5_t_0, _n_5_t_1, typing.Generic[T]):
    @property
    def First(self) -> LinkedListNode[T]: """First { get; } -> LinkedListNode"""
    @property
    def Last(self) -> LinkedListNode[T]: """Last { get; } -> LinkedListNode"""
    def __init__(self, collection: IEnumerable[T]) -> LinkedList:...
    def __init__(self) -> LinkedList:...
    def AddAfter(self, node: LinkedListNode[T], value: T) -> LinkedListNode[T]:...
    def AddAfter(self, node: LinkedListNode[T], newNode: LinkedListNode[T]):...
    def AddBefore(self, node: LinkedListNode[T], newNode: LinkedListNode[T]):...
    def AddBefore(self, node: LinkedListNode[T], value: T) -> LinkedListNode[T]:...
    def AddFirst(self, node: LinkedListNode[T]):...
    def AddFirst(self, value: T) -> LinkedListNode[T]:...
    def AddLast(self, node: LinkedListNode[T]):...
    def AddLast(self, value: T) -> LinkedListNode[T]:...
    def Find(self, value: T) -> LinkedListNode[T]:...
    def FindLast(self, value: T) -> LinkedListNode[T]:...
    def RemoveFirst(self):...
    def RemoveLast(self):...
    class Enumerator(_n_0_t_1, IEnumerator[T], _n_5_t_0, _n_5_t_1, typing.Generic[T]):
        pass
class LinkedListNode(typing.Generic[T]):
    @property
    def List(self) -> LinkedList[T]: """List { get; } -> LinkedList"""
    @property
    def Next(self) -> LinkedListNode[T]: """Next { get; } -> LinkedListNode"""
    @property
    def Previous(self) -> LinkedListNode[T]: """Previous { get; } -> LinkedListNode"""
    @property
    def Value(self) -> T: """Value { get; set; } -> T"""
    def __init__(self, value: T) -> LinkedListNode:...
class List(IList[T], _n_1_t_7, IReadOnlyList[T], typing.Generic[T], typing.Iterable[T]):
    @property
    def Capacity(self) -> int: """Capacity { get; set; } -> int"""
    def __init__(self, collection: IEnumerable[T]) -> List:...
    def __init__(self, capacity: int) -> List:...
    def __init__(self) -> List:...
    def AddRange(self, collection: IEnumerable[T]):...
    def AsReadOnly(self) -> _n_2_t_0[T]:...
    def BinarySearch(self, item: T, comparer: IComparer[T]) -> int:...
    def BinarySearch(self, item: T) -> int:...
    def BinarySearch(self, index: int, count: int, item: T, comparer: IComparer[T]) -> int:...
    def ConvertAll(self, converter: _n_0_t_7[T, typing.Any]) -> List[typing.Any]:...
    def Exists(self, match: _n_0_t_2[T]) -> bool:...
    def Find(self, match: _n_0_t_2[T]) -> T:...
    def FindAll(self, match: _n_0_t_2[T]) -> List[T]:...
    def FindIndex(self, startIndex: int, count: int, match: _n_0_t_2[T]) -> int:...
    def FindIndex(self, startIndex: int, match: _n_0_t_2[T]) -> int:...
    def FindIndex(self, match: _n_0_t_2[T]) -> int:...
    def FindLast(self, match: _n_0_t_2[T]) -> T:...
    def FindLastIndex(self, startIndex: int, count: int, match: _n_0_t_2[T]) -> int:...
    def FindLastIndex(self, startIndex: int, match: _n_0_t_2[T]) -> int:...
    def FindLastIndex(self, match: _n_0_t_2[T]) -> int:...
    def ForEach(self, action: _n_0_t_8[T]):...
    def GetRange(self, index: int, count: int) -> List[T]:...
    def InsertRange(self, index: int, collection: IEnumerable[T]):...
    def LastIndexOf(self, item: T, index: int, count: int) -> int:...
    def LastIndexOf(self, item: T, index: int) -> int:...
    def LastIndexOf(self, item: T) -> int:...
    def RemoveAll(self, match: _n_0_t_2[T]) -> int:...
    def RemoveRange(self, index: int, count: int):...
    def Reverse(self):...
    def Reverse(self, index: int, count: int):...
    def Sort(self, comparison: _n_0_t_0[T]):...
    def Sort(self, index: int, count: int, comparer: IComparer[T]):...
    def Sort(self, comparer: IComparer[T]):...
    def Sort(self):...
    def ToArray(self) -> _n_0_t_3[T]:...
    def TrimExcess(self):...
    def TrueForAll(self, match: _n_0_t_2[T]) -> bool:...
    class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
        pass
class Queue(IEnumerable[T], _n_1_t_3, IReadOnlyCollection[T], typing.Generic[T]):
    def __init__(self, collection: IEnumerable[T]) -> Queue:...
    def __init__(self, capacity: int) -> Queue:...
    def __init__(self) -> Queue:...
    def Clear(self):...
    def Contains(self, item: T) -> bool:...
    def Dequeue(self) -> T:...
    def Enqueue(self, item: T):...
    def Peek(self) -> T:...
    def ToArray(self) -> _n_0_t_3[T]:...
    def TrimExcess(self):...
    class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
        pass
class SortedDictionary(IDictionary[TKey, TValue], _n_1_t_1, IReadOnlyDictionary[TKey, TValue], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
    @property
    def Comparer(self) -> IComparer[TKey]: """Comparer { get; } -> IComparer"""
    def __init__(self, dictionary: IDictionary[TKey, TValue], comparer: IComparer[TKey]) -> SortedDictionary:...
    def __init__(self, dictionary: IDictionary[TKey, TValue]) -> SortedDictionary:...
    def __init__(self, comparer: IComparer[TKey]) -> SortedDictionary:...
    def __init__(self) -> SortedDictionary:...
    def ContainsValue(self, value: TValue) -> bool:...
    class Enumerator(_n_0_t_1, IEnumerator[KeyValuePair[TKey, TValue]], _n_1_t_2, typing.Generic[TKey, TValue]):
        pass
    class KeyCollection(ICollection[TKey], _n_1_t_3, IReadOnlyCollection[TKey], typing.Generic[TKey, TValue]):
        def __init__(self, dictionary: SortedDictionary[TKey, TValue]) -> SortedDictionary.KeyCollection:...
        class Enumerator(_n_0_t_1, IEnumerator[TKey], typing.Generic[TKey, TValue]):
            pass
    class ValueCollection(ICollection[TValue], _n_1_t_3, IReadOnlyCollection[TValue], typing.Generic[TKey, TValue]):
        def __init__(self, dictionary: SortedDictionary[TKey, TValue]) -> SortedDictionary.ValueCollection:...
        class Enumerator(_n_0_t_1, IEnumerator[TValue], typing.Generic[TKey, TValue]):
            pass
class SortedList(IDictionary[TKey, TValue], _n_1_t_1, IReadOnlyDictionary[TKey, TValue], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
    @property
    def Capacity(self) -> int: """Capacity { get; set; } -> int"""
    @property
    def Comparer(self) -> IComparer[TKey]: """Comparer { get; } -> IComparer"""
    def __init__(self, dictionary: IDictionary[TKey, TValue], comparer: IComparer[TKey]) -> SortedList:...
    def __init__(self, dictionary: IDictionary[TKey, TValue]) -> SortedList:...
    def __init__(self, capacity: int, comparer: IComparer[TKey]) -> SortedList:...
    def __init__(self, comparer: IComparer[TKey]) -> SortedList:...
    def __init__(self, capacity: int) -> SortedList:...
    def __init__(self) -> SortedList:...
    def ContainsValue(self, value: TValue) -> bool:...
    def IndexOfKey(self, key: TKey) -> int:...
    def IndexOfValue(self, value: TValue) -> int:...
    def RemoveAt(self, index: int):...
    def TrimExcess(self):...
class SortedSet(ISet[T], _n_1_t_3, _n_5_t_0, _n_5_t_1, IReadOnlyCollection[T], typing.Generic[T]):
    @property
    def Comparer(self) -> IComparer[T]: """Comparer { get; } -> IComparer"""
    @property
    def Max(self) -> T: """Max { get; } -> T"""
    @property
    def Min(self) -> T: """Min { get; } -> T"""
    def __init__(self, comparer: IComparer[T]) -> SortedSet:...
    def __init__(self, collection: IEnumerable[T], comparer: IComparer[T]) -> SortedSet:...
    def __init__(self, collection: IEnumerable[T]) -> SortedSet:...
    def __init__(self) -> SortedSet:...
    @staticmethod
    def CreateSetComparer(memberEqualityComparer: IEqualityComparer[T]) -> IEqualityComparer[SortedSet[T]]:...
    @staticmethod
    def CreateSetComparer() -> IEqualityComparer[SortedSet[T]]:...
    def GetViewBetween(self, lowerValue: T, upperValue: T) -> SortedSet[T]:...
    def RemoveWhere(self, match: _n_0_t_2[T]) -> int:...
    def Reverse(self) -> IEnumerable[T]:...
    def TryGetValue(self, equalValue: T, actualValue: object) -> bool:...
    class Enumerator(_n_0_t_1, IEnumerator[T], _n_5_t_0, _n_5_t_1, typing.Generic[T]):
        pass
class Stack(IEnumerable[T], _n_1_t_3, IReadOnlyCollection[T], typing.Generic[T]):
    def __init__(self, collection: IEnumerable[T]) -> Stack:...
    def __init__(self, capacity: int) -> Stack:...
    def __init__(self) -> Stack:...
    def Clear(self):...
    def Contains(self, item: T) -> bool:...
    def Peek(self) -> T:...
    def Pop(self) -> T:...
    def Push(self, item: T):...
    def ToArray(self) -> _n_0_t_3[T]:...
    def TrimExcess(self):...
    class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
        pass
nilq/baby-python
python
from pytorch.schedulers.imports import *
from system.imports import *


@accepts(dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def load_scheduler(system_dict):
    """Instantiate the learning-rate scheduler selected in ``system_dict``.

    Reads the scheduler name from ``system_dict["local"]["learning_rate_scheduler"]``
    and its hyper-parameters from
    ``system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]``, then
    replaces the name string in ``system_dict["local"]`` with the constructed
    ``torch.optim.lr_scheduler`` object. An unrecognised name leaves the entry
    untouched (same fall-through behaviour as before).

    Args:
        system_dict (dict): Pipeline state dictionary; must already hold the
            optimizer under ``system_dict["local"]["optimizer"]``.

    Returns:
        dict: The same ``system_dict``, with the live scheduler installed.
    """
    scheduler_name = system_dict["local"]["learning_rate_scheduler"]
    optimizer = system_dict["local"]["optimizer"]
    # Hoist the deeply nested params lookup once instead of repeating the
    # triple subscript for every keyword argument.
    params = system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]

    scheduler = None
    if scheduler_name == "steplr":
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer,
            params["step_size"],
            gamma=params["gamma"],
            last_epoch=params["last_epoch"])
    elif scheduler_name == "multisteplr":
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            params["milestones"],
            gamma=params["gamma"],
            last_epoch=params["last_epoch"])
    elif scheduler_name == "exponentiallr":
        scheduler = torch.optim.lr_scheduler.ExponentialLR(
            optimizer,
            params["gamma"],
            last_epoch=params["last_epoch"])
    elif scheduler_name == "reduceonplateaulr":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode=params["mode"],
            factor=params["factor"],
            patience=params["patience"],
            verbose=params["verbose"],
            threshold=params["threshold"],
            threshold_mode=params["threshold_mode"],
            cooldown=params["cooldown"],
            min_lr=params["min_lr"],
            # NOTE: the stored hyper-parameter key is "epsilon" while torch's
            # keyword is "eps".
            eps=params["epsilon"])

    if scheduler is not None:
        system_dict["local"]["learning_rate_scheduler"] = scheduler
    return system_dict
nilq/baby-python
python
#!/usr/local/bin/python3
# A wrapper to test query v2 API

# Advantage: directly replace the `query` variable with any SQL string
# to run the test. In command line, the SQL has to be in one-line
# to ensure nothing wrong, which is cumbersome.

import subprocess
import sys
import os

MY_ENV = os.environ.copy()

# NOTE: this template is executed through the shell (shell=True below).
# `query` is a trusted, hard-coded constant; never substitute untrusted input.
COMMAND_TEMPLATE = """
aws timestream-query query-v2 --query-string "{}" --endpoint-url "https://gamma-query-cell2.timestream.us-west-2.amazonaws.com" --region us-west-2
"""

query = """
SELECT region FROM (VALUES
    ('abc')
) AS testtb(region)
"""


def main(argv=sys.argv[1:]):
    """main program

    Runs `query` through the AWS CLI and streams each stdout line as it
    arrives. ``argv`` is kept for interface compatibility but is unused.

    Returns:
        int: the subprocess exit code, or 130 if interrupted.
    """
    try:
        cmd = COMMAND_TEMPLATE.format(query)
        print(f'test query: {query}')
        # Popen as a context manager guarantees the stdout pipe is closed and
        # the child process is reaped even if streaming is interrupted — the
        # original leaked both on KeyboardInterrupt.
        with subprocess.Popen(cmd,
                              shell=True,
                              stdout=subprocess.PIPE,
                              universal_newlines=True,
                              env=MY_ENV) as popen:
            for stdout_line in iter(popen.stdout.readline, ""):
                print(f'got line from subprocess: {stdout_line}')
        return_code = popen.returncode
    except (KeyboardInterrupt, EOFError):
        print()
        print('[Interrupted.]')
        return_code = 130
    return return_code


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
python
from sys import stdin def main(): for s in sorted(list(map(int, stdin.readline().strip())), reverse=True): print("", end="".join(str(s))) if __name__ == "__main__": main()
nilq/baby-python
python
""" Description: Defines the QAOACustom and CircuitSamplerCustom classes that replace the qiskit QAOA and CircuitSampler classes respectively. It is more easily customised than qiskit's built in ones and includes a variety of helper methods. Author: Gary Mooney Adapted from Qiskit 0.26.2 documentation Example 1: Full usage example. from QAOAEx import (QAOACustom, convert_to_fourier_point, print_qaoa_solutions, get_quadratic_program_from_ising_hamiltonian_terms, output_ising_graph, get_ising_graph_from_ising_hamiltonian_terms, convert_from_fourier_point) backend = Aer.get_backend('aer_simulator_matrix_product_state') quantum_instance = QuantumInstance(backend, shots=8192) optimizer = NELDER_MEAD() couplings = [(0, 1, -1.0), (0, 2, 1.0), (1, 2, 1.0), (2, 3, -1.0), (0, 3, 0.5)] # formatted as List[Tuple[int, int, float]] local_fields = {0: 0.2, 1: -0.3, 2: 0.0, 3: 0.5} # formatted as Mapping[int, float] constant_term = 1.0 # formatted as float quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms(couplings = couplings, local_fields = local_fields, constant_term = constant_term, output_ising_graph_filename = "example-ising_graph") qaoa_instance = QAOACustom(quantum_instance = quantum_instance, reps = 2, force_shots = False, optimizer = optimizer, qaoa_name = "example_qaoa") operator, offset = quadratic_program.to_ising() initial_point = [0.40784, 0.73974, -0.53411, -0.28296] print() print("Solving QAOA...") qaoa_results = qaoa_instance.solve(operator, initial_point) qaoa_results_eigenstate = qaoa_results.eigenstate print("optimal_value:", qaoa_results.optimal_value) print("optimal_parameters:", qaoa_results.optimal_parameters) print("optimal_point:", qaoa_results.optimal_point) print("optimizer_evals:", qaoa_results.optimizer_evals) solutions = qaoa_instance.get_optimal_solutions_from_statevector(qaoa_results_eigenstate, quadratic_program) print_qaoa_QuantumCircuit # initial Fourier space point, will be converted to a typical point using # 
'convert_from_fourier_point' as per previous line initial_fourier_point = [0.5, 0.7] # bounds used for the optimiser bounds = [(-1, 1)] * len(initial_fourier_point) qaoa_results = qaoa_instance.solve(operator, initial_fourier_point, bounds) optimal_parameterised_point = qaoa_instance.latest_parameterised_point Example 3: Post process raw data. This is how QREM could be applied. Before the line 'qaoa_results = qaoa_instance.solve(operator, initial_point)' in Example 1, add the following. # Define a method to process the counts dict. In this case it simply calculates and prints the shot counts. def print_shot_count(raw_counts_data): shot_count = None if len(raw_counts_data) > 0: if isinstance(raw_counts_data[0], dict): shot_count = sum(raw_counts_data[0].values()) elif isinstance(raw_counts_data[0], list) and len(raw_counts_data[0]) > 0: shot_count = sum(raw_counts_data[0][0].values()) else: raise Exception("Error: Wrong format 'raw_counts_data', execting List[Dict] or List[List[Dict]]") print("Raw data shot count:", shot_count) return raw_counts_data # set the raw data processing method. If using a statevector simulator # with force_shot = False, (in qaoa_instance) then raw processing will not be used. 
qaoa_instance.set_post_process_raw_data(print_shot_count)
"""

import logging
import time as time
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union

import matplotlib.pyplot as plt
import networkx as nx  # tool to handle general Graphs
import numpy as np
import math as math
from qiskit import QiskitError
from qiskit.algorithms import QAOA
from qiskit.algorithms.exceptions import AlgorithmError
from qiskit.algorithms.minimum_eigen_solvers.minimum_eigen_solver import (
    MinimumEigensolverResult,
)
from qiskit.algorithms.minimum_eigen_solvers.vqe import VQE
from qiskit.algorithms.optimizers import Optimizer
from qiskit.algorithms.variational_algorithm import (
    VariationalAlgorithm,
    VariationalResult,
)
from qiskit.circuit import ClassicalRegister, Parameter, QuantumCircuit
from qiskit.circuit.library.n_local.qaoa_ansatz import QAOAAnsatz

# from qiskit.algorithms.minimum_eigen_solvers
from qiskit.opflow import (
    CircuitSampler,
    CircuitStateFn,
    DictStateFn,
    ExpectationBase,
    I,
    OperatorBase,
    StateFn,
)
from qiskit.opflow.exceptions import OpflowError
from qiskit.opflow.gradients import GradientBase
from qiskit.providers import Backend, BaseBackend
from qiskit.quantum_info import Statevector
from qiskit.tools.visualization import circuit_drawer
from qiskit.utils import algorithm_globals
from qiskit.utils.backend_utils import is_aer_provider
from qiskit.utils.quantum_instance import QuantumInstance
from qiskit.utils.validation import validate_min
from qiskit.visualization import plot_histogram
from qiskit_optimization import QuadraticProgram

logger = logging.getLogger(__name__)

###############
### Classes
###############


class CircuitSamplerCustom(CircuitSampler):
    # Drop-in replacement for qiskit's CircuitSampler that can (a) force
    # shot-sampling from statevector backends, (b) run a user-supplied hook over
    # raw counts before they become StateFns, and (c) optionally draw each
    # sampled circuit to a file.

    # a function pointer that processes returned results from execution when sample_circuits is called.
    # post_process_raw_data(Result) -> Result
    _post_process_raw_data: Optional[
        Callable[
            [Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
            Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
        ]
    ] = None
    _shots = None
    _log_text = print
    _force_shots = False
    _sampler_name = ""
    _output_circuit_when_sample = False

    def __init__(
        self,
        backend: Union[Backend, BaseBackend, QuantumInstance],
        statevector: Optional[bool] = None,
        param_qobj: bool = False,
        attach_results: bool = False,
        caching: str = 'last',
        sampler_name: str = "",
        force_shots: bool = False,
        output_circuit_when_sample: bool = False,
        log_text: Optional[Callable[..., Any]] = print,
    ) -> None:
        """
        Args:
            backend: The quantum backend or QuantumInstance to use to sample the circuits.
            statevector: If backend is a statevector backend, whether to replace the
                CircuitStateFns with DictStateFns (from the counts) or VectorStateFns (from the
                statevector). ``None`` will set this argument automatically based on the backend.
            param_qobj: Whether to use Aer's parameterized Qobj capability to avoid re-assembling
                the circuits.
            attach_results: Whether to attach the data from the backend ``Results`` object for
                a given ``CircuitStateFn`` to an ``execution_results`` field added to the
                converted ``DictStateFn`` or ``VectorStateFn``.
            caching: The caching strategy. Can be `'last'` (default) to store the last operator
                that was converted, set to `'all'` to cache all processed operators.
            sampler_name: Name used when outputting text or files to help identify this
                CircuitSamplerCustom instance.
            force_shots: If the quantum instance returns a statevector, then convert it into
                shots instead.
            output_circuit_when_sample: Whether to output the circuit using circuit_drawer
                whenever a circuit is sampled.
            log_text: Used for text output, replacement to the default print method to make
                logging easy. If None, no text output can occur.

        Raises:
            ValueError: Set statevector or param_qobj True when not supported by backend.
        """
        super().__init__(
            backend=backend,
            statevector=statevector,
            param_qobj=param_qobj,
            attach_results=attach_results,
            caching=caching,
        )
        self._sampler_name = sampler_name
        self._log_text = log_text
        # determines whether to use the statevectors directly from simulations is available
        # If true, counts are sampled from statevector (default 8192)
        self._force_shots = force_shots
        self._output_circuit_when_sample = output_circuit_when_sample

    def set_post_process_raw_data(
        self,
        post_process_raw_data_method: Optional[
            Callable[
                [Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
                Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
            ]
        ],
    ) -> None:
        """Uses the specified method to process the raw sampled data executed on the
        backend whenever circuits are sampled.

        Args:
            post_process_raw_data_method: The method to process the data. Inputs a list of
                counts dicts List[Dict[str, int]] and outputs the processed list of count
                dicts List[Dict[str, int]]. The data could potentially be formatted as a
                list of a list of dictionaries List[List[Dict[str, int]]]. However, this
                will likely not happen without modifying QAOA to do so. Each dictionary has
                the counts for each qubit with the keys containing a string in binary
                format and separated according to the registers in circuit
                (e.g. ``0100 1110``). The string is little-endian (cr[0] on the right hand
                side). However there will likely only be a single register without
                modifying QAOA, so the state bitstring should have no spaces.
        """
        self._post_process_raw_data = post_process_raw_data_method

    def sample_circuits(
        self,
        circuit_sfns: Optional[List[CircuitStateFn]] = None,
        param_bindings: Optional[List[Dict[Parameter, float]]] = None,
    ) -> Dict[int, List[StateFn]]:
        r"""
        Samples the CircuitStateFns and returns a dict associating their ``id()`` values to
        their replacement DictStateFn or VectorStateFn. If param_bindings is provided, the
        CircuitStateFns are broken into their parameterizations, and a list of StateFns is
        returned in the dict for each circuit ``id()``. Note that param_bindings is provided
        here in a different format than in ``convert``, and lists of parameters within the
        dict is not supported, and only binding dicts which are valid to be passed into
        Terra can be included in this list.

        (Overrides method)

        Args:
            circuit_sfns: The list of CircuitStateFns to sample.
            param_bindings: The parameterizations to bind to each CircuitStateFn.

        Returns:
            The dictionary mapping ids of the CircuitStateFns to their replacement StateFns.

        Raises:
            OpflowError: if extracted circuits are empty.
        """
        if not circuit_sfns and not self._transpiled_circ_cache:
            raise OpflowError('CircuitStateFn is empty and there is no cache.')

        #############
        # NOTE:
        # Can modify circuits before execution here.
        # can even manually transpile to specific qubit layout.
        #############

        if circuit_sfns:
            self._transpiled_circ_templates = None
            if self._statevector:
                circuits = [op_c.to_circuit(meas=False) for op_c in circuit_sfns]
            else:
                circuits = [op_c.to_circuit(meas=True) for op_c in circuit_sfns]

            ####### Saving circuit
            if self._output_circuit_when_sample == True:
                # Filename encodes the first binding's parameter values
                # (scaled by 1000) so each sampled parameter set is distinguishable.
                filename = "quantum-circuit-" + self._sampler_name + "-params"
                for _, value in param_bindings[0].items():
                    filename += "-" + str(int(1000 * value))
                if self._log_text != None:
                    self._log_text("Saving circuit '" + filename + "'...")
                fig = circuit_drawer(circuits[0], filename=filename, output='mpl')
                plt.close(fig)
            #######

            try:
                self._transpiled_circ_cache = self.quantum_instance.transpile(circuits)
            except QiskitError:
                logger.debug(
                    r'CircuitSampler failed to transpile circuits with unbound '
                    r'parameters. Attempting to transpile only when circuits are bound '
                    r'now, but this can hurt performance due to repeated transpilation.'
                )
                self._transpile_before_bind = False
                self._transpiled_circ_cache = circuits
        else:
            circuit_sfns = list(self._circuit_ops_cache.values())

        if param_bindings is not None:
            # if fourier method, then convert param_bindings to another param_bindings, usually larger.
            if self._param_qobj:
                start_time = time.time()
                ready_circs = self._prepare_parameterized_run_config(param_bindings)
                end_time = time.time()
                logger.debug(
                    'Parameter conversion %.5f (ms)', (end_time - start_time) * 1000
                )
            else:
                start_time = time.time()
                ready_circs = [
                    circ.assign_parameters(
                        CircuitSamplerCustom._filter_params(circ, binding)
                    )
                    for circ in self._transpiled_circ_cache
                    for binding in param_bindings
                ]
                end_time = time.time()
                logger.debug(
                    'Parameter binding %.5f (ms)', (end_time - start_time) * 1000
                )
        else:
            ready_circs = self._transpiled_circ_cache

        results = self.quantum_instance.execute(
            ready_circs, had_transpiled=self._transpile_before_bind
        )

        if param_bindings is not None and self._param_qobj:
            self._clean_parameterized_run_config()

        # Wipe parameterizations, if any
        # self.quantum_instance._run_config.parameterizations = None

        #############
        # NOTE:
        # Can apply QREM here. But we need to know which qubits were used in order to apply...
        # results.get_counts(circ_index)
        # will need to convert results in case it's a statevector.
        #############

        # First pass: extract a counts dict per executed circuit (sampling from
        # the statevector when one is returned) so the post-process hook sees
        # uniform count data regardless of backend type.
        counts_dicts = []
        for i, op_c in enumerate(circuit_sfns):
            # Taking square root because we're replacing a statevector
            # representation of probabilities.
            reps = len(param_bindings) if param_bindings is not None else 1
            c_statefns = []
            for j in range(reps):
                circ_index = (i * reps) + j
                # counts_dicts[circ_index] = results.get_counts(circ_index)
                circ_results = results.data(circ_index)
                # statevector = results.get_statevector(circ_index)

                if 'expval_measurement' in circ_results.get('snapshots', {}).get(
                    'expectation_value', {}
                ):
                    if self.quantum_instance.run_config.shots != None:
                        shots = self.quantum_instance.run_config.shots
                    else:
                        shots = 8192
                    counts_dicts.append(
                        Statevector(results.get_statevector(circ_index)).sample_counts(
                            shots
                        )
                    )
                    # print("DEBUG: From statevector (1): " + str(shots) + " shots")
                elif self._statevector:
                    if self.quantum_instance.run_config.shots != None:
                        shots = self.quantum_instance.run_config.shots
                    else:
                        shots = 8192
                    counts_dicts.append(
                        Statevector(results.get_statevector(circ_index)).sample_counts(
                            shots
                        )
                    )
                    # print("counts_dicts[circ_index]", counts_dicts[circ_index])
                    # if self._force_shots == True:
                    #     print("DEBUG: From statevector (2): " + str(shots) + " shots")
                    # else:
                    #     print("DEBUG: From statevector (2) - using statevector")
                else:
                    counts_dicts.append(results.get_counts(circ_index))
                    # print("counts_dicts[circ_index]", counts_dicts[circ_index])
                    shots = 0
                    for count in counts_dicts[circ_index].values():
                        shots += count
                    # print("DEBUG: From counts: " + str(shots) + " shots")

        # print("counts_dicts:", counts_dicts)

        #############
        ### Post process raw counts
        ### NOTE: counts_dicts could be formatted as
        ### List[Dict[str, int]] or List[List[Dict[str, int]]]: a list of dictionaries or a list of
        ### a list of dictionaries. A dictionary has the counts for each qubit with
        ### the keys containing a string in binary format and separated
        ### according to the registers in circuit (e.g. ``0100 1110``).
        ### The string is little-endian (cr[0] on the right hand side).
        ###
        ### However the format will most likely always be List[Dict[str, int]]
        ### with a single register, so the state bitstring will have no spaces.
        #############
        counts_dicts_new = None
        if self._post_process_raw_data != None:
            if (
                self._force_shots == False
                and self._statevector
                and self._log_text != None
            ):
                self._log_text(
                    "WARNING: post_process_raw_data method cannot execute on statevector, set force_shots to True or don't use the stavevector simulator."
                )
            counts_dicts_new = self._post_process_raw_data(counts_dicts)
        else:
            counts_dicts_new = counts_dicts
        #############

        # Second pass: convert each circuit's (possibly post-processed) result
        # into the replacement StateFn expected by the opflow converter.
        sampled_statefn_dicts = {}
        for i, op_c in enumerate(circuit_sfns):
            # Taking square root because we're replacing a statevector
            # representation of probabilities.
            reps = len(param_bindings) if param_bindings is not None else 1
            c_statefns = []
            for j in range(reps):
                circ_index = (i * reps) + j
                circ_results = results.data(circ_index)

                if self._force_shots == False:
                    if 'expval_measurement' in circ_results.get('snapshots', {}).get(
                        'expectation_value', {}
                    ):
                        snapshot_data = results.data(circ_index)['snapshots']
                        avg = snapshot_data['expectation_value']['expval_measurement'][
                            0
                        ]['value']
                        if isinstance(avg, (list, tuple)):
                            # Aer versions before 0.4 use a list snapshot format
                            # which must be converted to a complex value.
                            avg = avg[0] + 1j * avg[1]
                        # Will be replaced with just avg when eval is called later
                        num_qubits = circuit_sfns[0].num_qubits
                        result_sfn = (
                            DictStateFn(
                                '0' * num_qubits, is_measurement=op_c.is_measurement
                            )
                            * avg
                        )
                    elif self._statevector:
                        result_sfn = StateFn(
                            op_c.coeff * results.get_statevector(circ_index),
                            is_measurement=op_c.is_measurement,
                        )
                    else:
                        shots = self.quantum_instance._run_config.shots
                        result_sfn = StateFn(
                            {
                                b: (v / shots) ** 0.5 * op_c.coeff
                                for (b, v) in counts_dicts_new[circ_index].items()
                            },
                            is_measurement=op_c.is_measurement,
                        )
                else:
                    # result_sfn = ConvertCountsToStateFunction(counts_dicts_new[circ_index], shots=None, op_c=op_c)
                    # NOTE(review): shots is re-derived by summing the (possibly
                    # post-processed) counts, so the hook may change the total.
                    shots = 0
                    for _, count in counts_dicts_new[circ_index].items():
                        shots += count
                    result_sfn = StateFn(
                        {
                            b: (v / shots) ** 0.5 * op_c.coeff
                            for (b, v) in counts_dicts_new[circ_index].items()
                        },
                        is_measurement=op_c.is_measurement,
                    )
                    # use statefn instead of dictstatefn
                if self._statevector:
                    result_sfn = result_sfn.to_matrix_op(massive=True)
                if self._attach_results:
                    result_sfn.execution_results = circ_results
                c_statefns.append(result_sfn)
            sampled_statefn_dicts[id(op_c)] = c_statefns
        return sampled_statefn_dicts


class QAOACustom(QAOA):
    # a function pointer that processes returned results from execution when sample_circuits is called.
    # post_process_raw_data(Result) -> Result
    _post_process_raw_data: Optional[
        Callable[
            [Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
            Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
        ]
    ] = None
    _qaoa_name = ""
    _force_shots = False
    _log_text = print
    _output_circuit_when_sample = False
    _reps = 1
    _mixer = None
    _initial_state = None
    _optimiser_parameter_bounds = None
    _parameterise_point_for_energy_evaluation: Callable[
        [Union[List[float], np.ndarray], int], List[float]
    ] = None
    # After solving/optimising using a custom parameterisation, the member 'latest_parameterised_point' should
    # contain the solution parameterised point returned by the optimiser.
latest_parameterised_point = None def __init__( self, optimizer: Optimizer = None, reps: int = 1, initial_state: Optional[QuantumCircuit] = None, mixer: Union[QuantumCircuit, OperatorBase] = None, initial_point: Union[List[float], np.ndarray, None] = None, gradient: Optional[ Union[GradientBase, Callable[[Union[np.ndarray, List]], List]] ] = None, expectation: Optional[ExpectationBase] = None, include_custom: bool = False, max_evals_grouped: int = 1, callback: Optional[Callable[[int, np.ndarray, float, float], None]] = None, quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None, qaoa_name: str = "", force_shots: bool = False, output_circuit_when_sample: bool = False, log_text: Optional[Callable[..., Any]] = print, ) -> None: """ Args: optimizer: A classical optimizer. reps: the integer parameter :math:`p` as specified in https://arxiv.org/abs/1411.4028, Has a minimum valid value of 1. initial_state: An optional initial state to prepend the QAOA circuit with mixer: the mixer Hamiltonian to evolve with or a custom quantum circuit. Allows support of optimizations in constrained subspaces as per https://arxiv.org/abs/1709.03489 as well as warm-starting the optimization as introduced in http://arxiv.org/abs/2009.10095. initial_point: An optional initial point (i.e. initial parameter values) for the optimizer. If ``None`` then it will simply compute a random one. QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]). gradient: An optional gradient operator respectively a gradient function used for optimization. expectation: The Expectation converter for taking the average value of the Observable over the ansatz state function. When None (the default) an :class:`~qiskit.opflow.expectations.ExpectationFactory` is used to select an appropriate expectation based on the operator and backend. 
When using Aer qasm_simulator backend, with paulis, it is however much faster to leverage custom Aer function for the computation but, although VQE performs much faster with it, the outcome is ideal, with no shot noise, like using a state vector simulator. If you are just looking for the quickest performance when choosing Aer qasm_simulator and the lack of shot noise is not an issue then set `include_custom` parameter here to True (defaults to False). include_custom: When `expectation` parameter here is None setting this to True will allow the factory to include the custom Aer pauli expectation. max_evals_grouped: Max number of evaluations performed simultaneously. Signals the given optimizer that more than one set of parameters can be supplied so that potentially the expectation values can be computed in parallel. Typically this is possible when a finite difference gradient is used by the optimizer such that multiple points to compute the gradient can be passed and if computed in parallel improve overall execution time. Ignored if a gradient operator or function is given. callback: a callback that can access the intermediate data during the optimization. Four parameter values are passed to the callback as follows during each evaluation by the optimizer for its current set of parameters as it works towards the minimum. These are: the evaluation count, the optimizer parameters for the ansatz, the evaluated mean and the evaluated standard deviation. quantum_instance: Quantum Instance or Backend qaoa_name: Name to identify this QAOAEx instance when logging or outputting files. force_shots: If quantum instance returns a statevector, then convert into shots instead. output_circuit_when_sample: Whether to output circuit using circuit_drawer whenever circuit is sampled. log_text: Used for text output, replacement to the default print method to make logging easy. If None, no text output can occur. 
""" validate_min('reps', reps, 1) self._qaoa_name = qaoa_name self._reps = reps self._mixer = mixer self._initial_state = initial_state self._force_shots = force_shots self._log_text = log_text self._output_circuit_when_sample = output_circuit_when_sample # VQE will use the operator setter, during its constructor, which is overridden below and # will cause the var form to be built super(QAOA, self).__init__( ansatz=None, optimizer=optimizer, initial_point=initial_point, gradient=gradient, expectation=expectation, include_custom=include_custom, max_evals_grouped=max_evals_grouped, callback=callback, quantum_instance=quantum_instance, ) @VariationalAlgorithm.quantum_instance.setter def quantum_instance( self, quantum_instance: Union[QuantumInstance, BaseBackend, Backend] ) -> None: """set quantum_instance. (Overides method)""" super(VQE, self.__class__).quantum_instance.__set__(self, quantum_instance) self._circuit_sampler = CircuitSamplerCustom( self._quantum_instance, param_qobj=is_aer_provider(self._quantum_instance.backend), sampler_name=self._qaoa_name, force_shots=self._force_shots, output_circuit_when_sample=self._output_circuit_when_sample, log_text=self._log_text, ) self._circuit_sampler.set_post_process_raw_data(self._post_process_raw_data) def find_minimum( self, initial_point: Optional[np.ndarray] = None, ansatz: Optional[QuantumCircuit] = None, cost_fn: Optional[Callable] = None, optimizer: Optional[Optimizer] = None, gradient_fn: Optional[Callable] = None, ) -> 'VariationalResult': """Optimize to find the minimum cost value. Args: initial_point: If not `None` will be used instead of any initial point supplied via constructor. If `None` and `None` was supplied to constructor then a random point will be used if the optimizer requires an initial point. ansatz: If not `None` will be used instead of any ansatz supplied via constructor. cost_fn: If not `None` will be used instead of any cost_fn supplied via constructor. 
optimizer: If not `None` will be used instead of any optimizer supplied via constructor. gradient_fn: Optional gradient function for optimizer Returns: dict: Optimized variational parameters, and corresponding minimum cost value. Raises: ValueError: invalid input """ initial_point = ( initial_point if initial_point is not None else self.initial_point ) ansatz = ansatz if ansatz is not None else self.ansatz cost_fn = cost_fn if cost_fn is not None else self._cost_fn optimizer = optimizer if optimizer is not None else self.optimizer if ansatz is None: raise ValueError('Ansatz neither supplied to constructor nor find minimum.') if cost_fn is None: raise ValueError( 'Cost function neither supplied to constructor nor find minimum.' ) if optimizer is None: raise ValueError( 'Optimizer neither supplied to constructor nor find minimum.' ) nparms = ansatz.num_parameters if self._optimiser_parameter_bounds == None: if ( hasattr(ansatz, 'parameter_bounds') and ansatz.parameter_bounds is not None ): bounds = ansatz.parameter_bounds else: bounds = [(None, None)] * len(self.initial_point) else: bounds = self._optimiser_parameter_bounds # if initial_point is not None and len(initial_point) != nparms: # raise ValueError( # 'Initial point size {} and parameter size {} mismatch'.format( # len(initial_point), nparms)) if len(bounds) != len(self.initial_point): bounds = [(None, None)] * len(self.initial_point) print( "WARNING: Ansatz bounds size does not match parameter size (len(self.initial_point)), setting bounds to (None, None)" ) # raise ValueError('Ansatz bounds size does not match parameter size (len(self.initial_point))') # If *any* value is *equal* in bounds array to None then the problem does *not* have bounds problem_has_bounds = not np.any(np.equal(bounds, None)) # Check capabilities of the optimizer if problem_has_bounds: if not optimizer.is_bounds_supported: raise ValueError( 'Problem has bounds but optimizer does not support bounds' ) else: if 
optimizer.is_bounds_required: raise ValueError( 'Problem does not have bounds but optimizer requires bounds' ) if initial_point is not None: if not optimizer.is_initial_point_supported: raise ValueError('Optimizer does not support initial point') else: if optimizer.is_initial_point_required: if hasattr(ansatz, 'preferred_init_points'): # Note: default implementation returns None, hence check again after below initial_point = ansatz.preferred_init_points if initial_point is None: # If still None use a random generated point low = [(l if l is not None else -2 * np.pi) for (l, u) in bounds] high = [(u if u is not None else 2 * np.pi) for (l, u) in bounds] initial_point = algorithm_globals.random.uniform(low, high) start = time.time() if not optimizer.is_gradient_supported: # ignore the passed gradient function gradient_fn = None else: if not gradient_fn: gradient_fn = self._gradient logger.info( 'Starting optimizer.\nbounds=%s\ninitial point=%s', bounds, initial_point ) opt_params, opt_val, num_optimizer_evals = optimizer.optimize( len(self.initial_point), cost_fn, variable_bounds=bounds, initial_point=initial_point, gradient_function=gradient_fn, ) if self._parameterise_point_for_energy_evaluation != None: self.latest_parameterised_point = ( self._parameterise_point_for_energy_evaluation(opt_params, nparms) ) eval_time = time.time() - start result = VariationalResult() result.optimizer_evals = num_optimizer_evals result.optimizer_time = eval_time result.optimal_value = opt_val result.optimal_point = opt_params result.optimal_parameters = dict(zip(self._ansatz_params, opt_params)) return result def eigenvector_to_solutions( self, eigenvector: Union[dict, np.ndarray, StateFn], quadratic_program: QuadraticProgram, min_probability: float = 1e-6, ) -> List[Tuple[str, float, float]]: """Convert the eigenvector to a list of solution 3-tuples (bitstrings, quadratic_function_objective_value, probability). 
(Overides method) Args: eigenvector: The eigenvector from which the solution states are extracted. quadratic_program: The quadatic program to evaluate at the bitstring. min_probability: Only consider states where the amplitude exceeds this threshold. Returns: A list with elements for each computational basis state contained in the eigenvector. Each element is a 3-tuple: (state as bitstring (str), quadatic program evaluated at that bitstring (float), probability of sampling this bitstring from the eigenvector (float) ). Raises: TypeError: If the type of eigenvector is not supported. """ if isinstance(eigenvector, DictStateFn): eigenvector = { bitstr: val ** 2 for (bitstr, val) in eigenvector.primitive.items() } elif isinstance(eigenvector, StateFn): eigenvector = eigenvector.to_matrix() solutions = [] if isinstance(eigenvector, dict): # iterate over all samples for bitstr, amplitude in eigenvector.items(): sampling_probability = amplitude * amplitude # add the bitstring, if the sampling probability exceeds the threshold if sampling_probability > 0: if sampling_probability >= min_probability: # I've reversed the qubits here, I think they were the wrong order. value = quadratic_program.objective.evaluate( [int(bit) for bit in bitstr[::-1]] ) solutions += [(bitstr[::-1], value, sampling_probability)] elif isinstance(eigenvector, np.ndarray): num_qubits = int(np.log2(eigenvector.size)) probabilities = np.abs(eigenvector * eigenvector.conj()) # iterate over all states and their sampling probabilities for i, sampling_probability in enumerate(probabilities): # add the i-th state if the sampling probability exceeds the threshold if sampling_probability > 0: if sampling_probability >= min_probability: bitstr = '{:b}'.format(i).rjust(num_qubits, '0')[::-1] value = quadratic_program.objective.evaluate( [int(bit) for bit in bitstr] ) solutions += [(bitstr, value, sampling_probability)] else: raise TypeError( 'Unsupported format of eigenvector. Provide a dict or numpy.ndarray.' 
) return solutions def _energy_evaluation( self, parameters: Union[List[float], np.ndarray] ) -> Union[float, List[float]]: """Evaluate energy at given parameters for the ansatz. This is the objective function to be passed to the optimizer that is used for evaluation. (Overides method) Args: parameters: The parameters for the ansatz. Returns: Energy of the hamiltonian of each parameter. Raises: RuntimeError: If the ansatz has no parameters. """ num_parameters = self.ansatz.num_parameters if self._parameterise_point_for_energy_evaluation != None: self.latest_parameterised_point = parameters parameters = self._parameterise_point_for_energy_evaluation( parameters, num_parameters ) if self._ansatz.num_parameters == 0: raise RuntimeError('The ansatz cannot have 0 parameters.') parameter_sets = np.reshape(parameters, (-1, num_parameters)) # Create dict associating each parameter with the lists of parameterization values for it param_bindings = dict( zip(self._ansatz_params, parameter_sets.transpose().tolist()) ) # type: Dict start_time = time.time() # self._log_text("self._expect_op:", self._expect_op) sampled_expect_op = self._circuit_sampler.convert( self._expect_op, params=param_bindings ) means = np.real(sampled_expect_op.eval()) if self._callback is not None: variance = np.real(self._expectation.compute_variance(sampled_expect_op)) estimator_error = np.sqrt(variance / self.quantum_instance.run_config.shots) for i, param_set in enumerate(parameter_sets): self._eval_count += 1 self._callback( self._eval_count, param_set, means[i], estimator_error[i] ) else: self._eval_count += len(means) end_time = time.time() logger.info( 'Energy evaluation returned %s - %.5f (ms), eval count: %s', means, (end_time - start_time) * 1000, self._eval_count, ) return means if len(means) > 1 else means[0] def _prepare_for_optisation( self, operator: OperatorBase, aux_operators: Optional[List[Optional[OperatorBase]]] = None, ) -> None: """Prepares the QAOA instance to perform simulation 
without needing to run the optimisation loop. (New method) Args: operator: The operator (usually obtained from QuadraticProgram.to_ising()). """ # super(VQE, self).compute_minimum_eigenvalue(operator, aux_operators) if self.quantum_instance is None: raise AlgorithmError( "A QuantumInstance or Backend " "must be supplied to run the quantum algorithm." ) if operator is None: raise AlgorithmError("The operator was never provided.") # operator = self._check_operator(operator) # The following code "operator = self._check_operator(operator)" was not working correctly here since it is meant to replace the operator. # So instead, using below code to manually update the ansatz. self.ansatz = QAOAAnsatz( operator, self._reps, initial_state=self._initial_state, mixer_operator=self._mixer, ) # We need to handle the array entries being Optional i.e. having value None if aux_operators: zero_op = I.tensorpower(operator.num_qubits) * 0.0 converted = [] for op in aux_operators: if op is None: converted.append(zero_op) else: converted.append(op) # For some reason Chemistry passes aux_ops with 0 qubits and paulis sometimes. aux_operators = [zero_op if op == 0 else op for op in converted] else: aux_operators = None self._quantum_instance.circuit_summary = True self._eval_count = 0 # Convert the gradient operator into a callable function that is compatible with the # optimization routine. 
if self._gradient: if isinstance(self._gradient, GradientBase): self._gradient = self._gradient.gradient_wrapper( ~StateFn(operator) @ StateFn(self._ansatz), bind_params=self._ansatz_params, backend=self._quantum_instance, ) # if not self._expect_op: self._expect_op = self.construct_expectation(self._ansatz_params, operator) def calculate_statevector_at_point( self, operator: OperatorBase, point: Union[List[float], np.ndarray], force_shots: bool = False, sample_shots: int = 8192, ) -> Union[Dict[str, float], List[float], np.ndarray]: """Prepares for QAOA simulation and calculates the statevector for the given point. (New method) Args: operator: The operator (usually obtained from QuadraticProgram.to_ising()). point: The QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]). force_shots: If simulating using a statevector, should a new statevector be formed by sampling from it? sample_shots: If force_shots is True, how many shots to sample? Returns: The resulting statevector. Might be a dict or an ndarray, depending on which simulator is used and whether the statevector is being sampled or not. When statevector sim is used, returns an ndarray, otherwise returns a dict. 
""" from qiskit.utils.run_circuits import find_regs_by_name self._prepare_for_optisation(operator) qc = self.ansatz.assign_parameters(point) statevector = {} if self._quantum_instance.is_statevector: ret = self._quantum_instance.execute(qc) statevector = ret.get_statevector(qc) if force_shots == True: counts = Statevector(ret.get_statevector(qc)).sample_counts( sample_shots ) statevector = {} for state in counts.keys(): statevector[state] = (counts[state] / sample_shots) ** 0.5 else: c = ClassicalRegister(qc.width(), name='c') q = find_regs_by_name(qc, 'q') qc.add_register(c) qc.barrier(q) qc.measure(q, c) ret = self._quantum_instance.execute(qc) counts = ret.get_counts(qc) shots = self._quantum_instance._run_config.shots statevector = {b: (v / shots) ** 0.5 for (b, v) in counts.items()} return statevector def execute_at_point( self, point: Union[List[float], np.ndarray], quadratic_program: QuadraticProgram, optimal_function_value: float = None, log_text: Optional[Callable[..., Any]] = print, ) -> Dict[str, Any]: """Runs QAOA without the optimization loop. Evaluates a single set of qaoa parameters. (New method) Args: point: The QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]). quadratic_program: The quadratic program to obtain the operator from and to evaluate the solution state bitstrings with. optimal_function_value: The optimal value for which the solution states return in the quadratic_program. Useful in rare cases where the solutions have zero probability. If None, the best function_value among solutions will be used. log_text: Used for text output, replacement to the default print method to make logging easy. If None, no text output can occur. Returns: A dict containing the results. Keys are: 'energy', 'point', 'solutions', 'solution_probability', 'eigenstate', 'function_value'. 
""" op_custom, offset = quadratic_program.to_ising() results_dict = {} # no need to call "self.prepare_for_optisation(op_custom)" because the # methods "self.calculate_statevector_at_point(op_custom, point)" and # "self.evaluate_energy(op_custom, point)" already do. eigenstate = self.calculate_statevector_at_point(op_custom, point) energy = self.evaluate_energy_at_point(op_custom, point) solutions = self.get_optimal_solutions_from_statevector( eigenstate, quadratic_program, min_probability=10 ** -6, optimal_function_value=optimal_function_value, ) solution_probability = 0 for sol in solutions: solution_probability += sol["probability"] results_dict["energy"] = energy results_dict["point"] = point results_dict["solutions"] = solutions results_dict["solution_probability"] = solution_probability results_dict["eigenstate"] = eigenstate if len(solutions) > 0: results_dict["function_value"] = solutions[0]["function_value"] else: if log_text != None: log_text("WARNING: No solutions were found.") return results_dict def evaluate_energy_at_point( self, operator: OperatorBase, point: Union[List[float], np.ndarray] ) -> Union[float, List[float]]: """Evaluate energy at given parameters for the operator ansatz. (New method) Args: operator: The operator (usually obtained from QuadraticProgram.to_ising()). point: The QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]). Returns: Energy of the hamiltonian of each parameter. Raises: RuntimeError: If the ansatz has no parameters. """ self._prepare_for_optisation(operator) return self._energy_evaluation(point) def get_optimal_solutions_from_statevector( self, eigenvector: Union[dict, np.ndarray, StateFn], quadratic_program: QuadraticProgram, min_probability: float = 1e-6, optimal_function_value: float = None, ) -> List[Tuple[str, float, float]]: """Extract the solution state information from the eigenvector. (New method) Args: eigenvector: The eigenvector from which the solution states are extracted. 
quadratic_program: The QUBO to evaluate at the bitstring. min_probability: Only consider states where the amplitude exceeds this threshold. optimal_function_value: The optimal value for which the solution states return in the quadratic_program. Useful in rare cases where the solutions have zero probability. Returns: A list of all solutions. Each solution is a dict of length 3: "state": the state bitstring, "function_value": the function value, and "probability": the state probability. Raises: TypeError: If the type of eigenvector is not supported. """ samples = self.eigenvector_to_solutions( eigenvector, quadratic_program, min_probability ) samples.sort(key=lambda x: quadratic_program.objective.sense.value * x[1]) fval = samples[0][1] if optimal_function_value != None: fval = optimal_function_value solution_samples = [] for i in range(len(samples)): if samples[i][1] == fval: solution = {} solution["state"] = samples[i][0] solution["function_value"] = samples[i][1] solution["probability"] = samples[i][2] solution_samples.append(solution) return solution_samples def reset_reps(self, reps: int) -> None: """Reset the number of reps when performing QAOA. 
Args: reps: The number of layers in QAOA (the 'p' value) """ validate_min('reps', reps, 1) self._reps = reps def set_optimiser_parameter_bounds( self, optimiser_parameter_bounds: Optional[ List[Tuple[Optional[float], Optional[float]]] ], ) -> None: self._optimiser_parameter_bounds = optimiser_parameter_bounds def set_parameterise_point_for_energy_evaluation( self, parameterise_point_for_optimisation: Callable[ [Union[List[float], np.ndarray], int], List[float] ], ) -> None: self._parameterise_point_for_energy_evaluation = ( parameterise_point_for_optimisation ) def set_post_process_raw_data( self, post_process_raw_data_method: Optional[ Callable[ [Union[List[Dict[str, int]], List[List[Dict[str, int]]]]], Union[List[Dict[str, int]], List[List[Dict[str, int]]]], ] ], ) -> None: """Uses the specified method to process the raw sampled data executed on the backened whenever circuits are sampled. Args: post_process_raw_data_method: The method to process the data. Inputs a list f counts dicts List[Dict[str, int]] and outputs the processed list of count dicts List[Dict[str, int]]. The data could potentially be formatted as a list of a list of dictionaries List[List[Dict[str, int]]]. However, this will likely not happen withouth modifying QAOA to do so. Each dictionary has the counts for each qubit with the keys containing a string in binary format and separated according to the registers in circuit (e.g. ``0100 1110``). The string is little-endian (cr[0] on the right hand side). However there will likely only be a single register without modifying QAOA, so the state bitstring should have no spaces. 
""" self._post_process_raw_data = post_process_raw_data_method if self._circuit_sampler != None: self._circuit_sampler.set_post_process_raw_data(self._post_process_raw_data) def solve( self, ising_hamiltonian_operator: Union[OperatorBase, nx.Graph], initial_point: Union[List[float], np.ndarray], bounds: Optional[List[Tuple[Optional[float], Optional[float]]]] = None, ) -> MinimumEigensolverResult: if isinstance(ising_hamiltonian_operator, nx.Graph): couplings, local_fields = get_ising_hamiltonian_terms_from_ising_graph( ising_hamiltonian_operator ) quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms( couplings, local_fields, 0, None, None ) ising_hamiltonian_operator, _ = quadratic_program.to_ising() self.initial_point = initial_point self.set_optimiser_parameter_bounds(bounds) return self.compute_minimum_eigenvalue(ising_hamiltonian_operator) def solve_from_ising_hamiltonian_terms( self, couplings: List[Tuple[int, int, float]], local_fields: Mapping[int, float], constant_term: float, initial_point: Union[List[float], np.ndarray], bounds: Optional[List[Tuple[Optional[float], Optional[float]]]] = None, ) -> MinimumEigensolverResult: quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms( couplings, local_fields, constant_term, None, None ) ising_hamiltonian_operator, _ = quadratic_program.to_ising() self.initial_point = initial_point self.set_optimiser_parameter_bounds(bounds) return self.compute_minimum_eigenvalue(ising_hamiltonian_operator) ############### ### Helper Methods ############### def convert_from_fourier_point( fourier_point: List[float], num_params_in_point: int ) -> List[float]: """Converts a point in Fourier space back to QAOA angles. Args: fourier_point: The point in Fourier space to convert. num_params_in_point: The length of the resulting point. Must be even. Returns: The converted point in the form of QAOA rotation angles. 
""" new_point = [0] * num_params_in_point reps = int(num_params_in_point / 2) # num_params_in_result should always be even max_frequency = int(len(fourier_point) / 2) # fourier_point should always be even for i in range(reps): new_point[i] = 0 for k in range(max_frequency): new_point[i] += fourier_point[k] * math.sin( (k + 0.5) * (i + 0.5) * math.pi / reps ) new_point[i + reps] = 0 for k in range(max_frequency): new_point[i + reps] += fourier_point[k + max_frequency] * math.cos( (k + 0.5) * (i + 0.5) * math.pi / reps ) return new_point def convert_to_fourier_point( point: List[float], num_params_in_fourier_point: int ) -> List[float]: """Converts a point to fourier space. Args: point: The point to convert. num_params_in_fourier_point: The length of the resulting fourier point. Must be even. Returns: The converted point in fourier space. """ fourier_point = [0] * num_params_in_fourier_point reps = int(len(point) / 2) # point should always be even max_frequency = int( num_params_in_fourier_point / 2 ) # num_params_in_fourier_point should always be even for i in range(max_frequency): fourier_point[i] = 0 for k in range(reps): fourier_point[i] += point[k] * math.sin( (k + 0.5) * (i + 0.5) * math.pi / max_frequency ) fourier_point[i] = 2 * fourier_point[i] / reps fourier_point[i + max_frequency] = 0 for k in range(reps): fourier_point[i + max_frequency] += point[k + reps] * math.cos( (k + 0.5) * (i + 0.5) * math.pi / max_frequency ) fourier_point[i + max_frequency] = 2 * fourier_point[i + max_frequency] / reps return fourier_point def get_ising_graph_from_ising_hamiltonian_terms( couplings: List[Tuple[int, int, float]], local_fields: Mapping[int, float] ) -> nx.Graph: """Constructs a networkx graph with node and edge weights corresponding to the coefficients of the local field and coupling strengths of the Ising Hamiltonian respectively. Args: couplings: A list of couplings for the Ising graph (or Hamiltonian). Couplings are in the form of a 3-tuple e.g. 
(spin_1, spin_2, coupling_strength). local_fields: The local field strengths for the Ising graph (or Hamiltonian) A Dict with keys: spin numbers and values: field strengths. Returns: The Ising graph as an instance of a networkx Graph object with node and edge weights. """ G = nx.Graph() for local_field in local_fields.keys(): G.add_node(local_field, weight=local_fields[local_field]) G.add_weighted_edges_from(couplings) return G def get_ising_hamiltonian_terms_from_ising_graph( ising_graph: nx.Graph, ) -> Tuple[List[Tuple[int, int, float]], Dict[int, float]]: """Constructs a networkx graph with node and edge weights corresponding to the coefficients of the local field and coupling strengths of the Ising Hamiltonian respectively. Args: couplings: A list of couplings for the Ising graph (or Hamiltonian). Couplings are in the form of a 3-tuple e.g. (spin_1, spin_2, coupling_strength). local_fields: The local field strengths for the Ising graph (or Hamiltonian) A Dict with keys: spin numbers and values: field strengths. Returns: The Ising graph as an instance of a networkx Graph object with node and edge weights. """ local_fields = {} for i in range(len(ising_graph.nodes)): local_fields[i] = ising_graph.nodes[i]['weight'] couplings = [] edge_data = ising_graph.edges(data=True) for edge in edge_data: couplings.append((edge[0], edge[1], edge[3]['weight'])) return couplings, local_fields def get_quadratic_program_from_ising_hamiltonian_terms( couplings: List[Tuple[int, int, float]], local_fields: Mapping[int, float], constant_term: float, output_ising_graph_filename: Optional[str] = None, log_text: Optional[Callable[..., Any]] = print, ) -> QuadraticProgram: """Constructs and returns the quadratic program corresponding to the input Hamiltonian terms. Applies the transformation -> Z = 2b - 1, since Ising Hamiltonian spins have {+-1} values while the quadratic program is binary. Args: couplings: A list of couplings for the Ising graph (or Hamiltonian). 
Couplings are in the form of a 3-tuple e.g. (spin_1, spin_2, coupling_strength). Negative coupling strengths are Ferromagnetic (spin states want to be the same). local_fields: The local field strengths for the Ising graph (or Hamiltonian) A Dict with keys: spin numbers and values: field strengths. Using convention with negative sign on local fields. So a negative local field makes the spin want to be +1. constant_term: the constant for the Ising Hamiltonian. output_ising_graph_filename: Filename to save ising graph file with. If None, will not output ising graph to file. log_text: Used for text output, replacement to the default print method to make logging easy. If None, no text output will occur. Returns: The binary quadratic program corresponding to the Hamiltonian """ if output_ising_graph_filename != None: ising_graph = get_ising_graph_from_ising_hamiltonian_terms( couplings, local_fields ) output_ising_graph( ising_graph, custom_filename_no_ext=output_ising_graph_filename, log_text=log_text, ) quadratic_program = QuadraticProgram() for local_field in local_fields.keys(): quadratic_program.binary_var('c' + str(local_field)) new_constant_term = 0 new_linear_terms = {} for car_number in local_fields.keys(): new_linear_terms[car_number] = 0.0 new_quadratic_terms = {} # transform constant term new_constant_term = constant_term # transform local fields for car_number in local_fields.keys(): new_linear_terms[car_number] = 2 * local_fields[car_number] new_constant_term -= local_fields[car_number] # transform couplings for coupling in couplings: if ('c' + str(coupling[0]), 'c' + str(coupling[1])) in new_quadratic_terms: new_quadratic_terms[('c' + str(coupling[0]), 'c' + str(coupling[1]))] += ( 4 * coupling[2] ) else: new_quadratic_terms[('c' + str(coupling[0]), 'c' + str(coupling[1]))] = ( 4 * coupling[2] ) new_linear_terms[coupling[0]] -= 2 * coupling[2] new_linear_terms[coupling[1]] -= 2 * coupling[2] new_constant_term += coupling[2] quadratic_program.minimize( 
constant=new_constant_term, linear=[new_linear_terms[lf] for lf in new_linear_terms.keys()], quadratic=new_quadratic_terms, ) return quadratic_program def output_ising_graph( ising_graph: nx.Graph, custom_filename_no_ext: Optional[str] = None, log_text: Optional[Callable[..., Any]] = print, ) -> None: """Outputs the networkx graph to file in PNG format Args: ising_graph: A networkx graph with node and edge weights specified. Nodes have attribute 'weight' that corresponds to a local field strength. Edges have attribute 'weight' corresponding to the coupling strength. custom_filename_no_ext: The filename to save the figure to. Defaults to "Ising_graph" if None. log_text: Used for text output, replacement to the default print method to make logging easy. If None, no text output will occur. """ # Generate plot of the Graph colors = ['r' for node in ising_graph.nodes()] default_axes = plt.axes(frameon=False) default_axes.set_axis_off() default_axes.margins(0.1) pos = nx.circular_layout(ising_graph) labels = { n: str(n) + '; ' + str(ising_graph.nodes[n]['weight']) for n in ising_graph.nodes } nx.draw_networkx( ising_graph, node_color=colors, node_size=600, alpha=1, ax=default_axes, pos=pos, labels=labels, ) edge_labels = nx.get_edge_attributes(ising_graph, 'weight') nx.draw_networkx_edge_labels(ising_graph, pos=pos, edge_labels=edge_labels) if custom_filename_no_ext == None: filename = "Ising_graph.png" else: filename = custom_filename_no_ext + '.png' if log_text != None: log_text("Saving Ising graph '" + filename + "'...") plt.savefig(filename, format="PNG", bbox_inches=0) plt.close() def print_qaoa_solutions( solutions: List[Mapping[str, Any]], log_text: Callable[..., Any] = print ) -> None: """Pretty prints (pprint) a list of solutions followed by their summed probability. 
Args: solutions: List of solutions, they are each formatted as a dict with (key, value): 'state', state bitstring (str) 'function_value', binary quadratic program objective value (float) 'probability', probability (float) log_text: Used for text output, replacement to the default print method to make logging easy. """ import pprint if len(solutions) > 0: log_text( "function value (quadratic program):", str(solutions[0]["function_value"]) ) solutions_string = pprint.pformat( [ [solutions[x]["state"], solutions[x]["probability"]] for x in range(len(solutions)) ], indent=2, ) log_text(solutions_string) initial_solution_probability = 0 for x in range(len(solutions)): initial_solution_probability += solutions[x]["probability"] log_text("total probability:", initial_solution_probability) else: log_text("total probability: 0")
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------

import os
import importlib

from .common import TypeMatcher

# Auto-import every public ``*.py`` sibling module in this package so that
# whatever they register on import takes effect as soon as the package is
# loaded. Files whose names start with an underscore (``__init__.py``,
# private helpers) are left alone.
for _entry in os.listdir(os.path.dirname(__file__)):
    if _entry.endswith('.py') and not _entry.startswith('_'):
        importlib.import_module('.' + _entry[:-3], __name__)
nilq/baby-python
python
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Code related to the PyTorch model registry for easily creating models. """ import re from typing import Any, Callable, Dict, List, Optional, Union from sparseml.tensorflow_v1.models.estimator import EstimatorModelFn from sparseml.tensorflow_v1.utils import tf_compat from sparseml.utils import TENSORFLOW_V1_FRAMEWORK, parse_optimization_str from sparsezoo import Zoo from sparsezoo.objects import Model __all__ = ["ModelRegistry"] class _ModelAttributes(object): def __init__( self, input_shape: Any, domain: str, sub_domain: str, architecture: str, sub_architecture: str, default_dataset: str, default_desc: str, default_model_fn_creator: EstimatorModelFn, base_name_scope: str, tl_ignore_tens: List[str], repo_source: str, ): self.input_shape = input_shape self.domain = domain self.sub_domain = sub_domain self.architecture = architecture self.sub_architecture = sub_architecture self.default_dataset = default_dataset self.default_desc = default_desc self.default_model_fn_creator = default_model_fn_creator self.base_name_scope = base_name_scope self.tl_ignore_tens = tl_ignore_tens self.repo_source = repo_source class ModelRegistry(object): """ Registry class for creating models """ _CONSTRUCTORS = {} # type: Dict[str, Callable] _ATTRIBUTES = {} # type: Dict[str, _ModelAttributes] @staticmethod def available_keys() -> List[str]: """ :return: the keys (models) currently 
available in the registry """ return list(ModelRegistry._CONSTRUCTORS.keys()) @staticmethod def create(key: str, *args, **kwargs) -> Any: """ Create a new model for the given key :param key: the model key (name) to create :param args: any args to supply to the graph constructor :param kwargs: any keyword args to supply to the graph constructor :return: the outputs from the created graph """ if key not in ModelRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, ModelRegistry._CONSTRUCTORS ) ) return ModelRegistry._CONSTRUCTORS[key](*args, **kwargs) @staticmethod def create_estimator( key: str, model_dir: str, model_fn_params: Optional[Dict[str, Any]], run_config: tf_compat.estimator.RunConfig, *args, **kwargs, ) -> tf_compat.estimator.Estimator: """ Create Estimator for a model given the key and extra parameters :param key: the key that the model was registered with :param model_dir: directory to save results :param model_fn_params: parameters for model function :param run_config: RunConfig used by the estimator during training :param args: additional positional arguments to pass into model constructor :param kwargs: additional keyword arguments to pass into model constructor :return: an Estimator instance """ model_const = ModelRegistry._CONSTRUCTORS[key] attributes = ModelRegistry._ATTRIBUTES[key] model_fn_creator = attributes.default_model_fn_creator() model_fn = model_fn_creator.create(model_const, *args, **kwargs) model_fn_params = {} if model_fn_params is None else model_fn_params classifier = tf_compat.estimator.Estimator( config=run_config, model_dir=model_dir, model_fn=model_fn, params=model_fn_params, ) return classifier @staticmethod def create_zoo_model( key: str, pretrained: Union[bool, str] = True, pretrained_dataset: str = None, ) -> Model: """ Create a sparsezoo Model for the desired model in the zoo :param key: the model key (name) to retrieve :param pretrained: True to load pretrained weights; 
to load a specific version give a string with the name of the version (pruned-moderate, base), default True :param pretrained_dataset: The dataset to load for the model :return: the sparsezoo Model reference for the given model """ if key not in ModelRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, ModelRegistry._CONSTRUCTORS ) ) attributes = ModelRegistry._ATTRIBUTES[key] optim_name, optim_category, optim_target = parse_optimization_str( pretrained if isinstance(pretrained, str) else attributes.default_desc ) return Zoo.load_model( attributes.domain, attributes.sub_domain, attributes.architecture, attributes.sub_architecture, TENSORFLOW_V1_FRAMEWORK, attributes.repo_source, attributes.default_dataset if pretrained_dataset is None else pretrained_dataset, None, optim_name, optim_category, optim_target, ) @staticmethod def load_pretrained( key: str, pretrained: Union[bool, str] = True, pretrained_dataset: str = None, pretrained_path: str = None, remove_dynamic_tl_vars: bool = False, sess: tf_compat.Session = None, saver: tf_compat.train.Saver = None, ): """ Load pre-trained variables for a given model into a session. Uses a Saver object from TensorFlow to restore the variables from an index and data file. :param key: the model key (name) to create :param pretrained: True to load the default pretrained variables, a string to load a specific pretrained graph (ex: base, optim, optim-perf), or False to not load any pretrained weights :param pretrained_dataset: The dataset to load pretrained weights for (ex: imagenet, mnist, etc). If not supplied will default to the one preconfigured for the model. :param pretrained_path: A path to the pretrained variables to load, if provided will override the pretrained param :param remove_dynamic_tl_vars: True to remove the vars that are used for transfer learning (have a different shape and should not be restored), False to keep all vars in the Saver. 
Only used if saver is None :param sess: The session to load the model variables into if pretrained_path or pretrained is supplied. If not supplied and required, then will use the default session :param saver: The Saver instance to use to restore the variables for the graph if pretrained_path or pretrained is supplied. If not supplied and required, then will create one using the ModelRegistry.saver function """ if key not in ModelRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, ModelRegistry._CONSTRUCTORS ) ) if not sess and (pretrained_path or pretrained): sess = tf_compat.get_default_session() if not saver and (pretrained_path or pretrained): saver = ModelRegistry.saver(key, remove_dynamic_tl_vars) if isinstance(pretrained, str): if pretrained.lower() == "true": pretrained = True elif pretrained.lower() in ["false", "none"]: pretrained = False if pretrained_path: saver.restore(sess, pretrained_path) elif pretrained: zoo_model = ModelRegistry.create_zoo_model( key, pretrained, pretrained_dataset ) try: paths = zoo_model.download_framework_files() index_path = [path for path in paths if path.endswith(".index")] index_path = index_path[0] model_path = index_path[:-6] saver.restore(sess, model_path) except Exception: # try one more time with overwrite on in case files were corrupted paths = zoo_model.download_framework_files(overwrite=True) index_path = [path for path in paths if path.endswith(".index")] if len(index_path) != 1: raise FileNotFoundError( "could not find .index file for {}".format(zoo_model.root_path) ) index_path = index_path[0] model_path = index_path[:-6] saver.restore(sess, model_path) @staticmethod def input_shape(key: str): """ :param key: the model key (name) to create :return: the specified input shape for the model """ if key not in ModelRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, ModelRegistry._CONSTRUCTORS ) ) return 
ModelRegistry._ATTRIBUTES[key].input_shape @staticmethod def saver(key: str, remove_dynamic_tl_vars: bool = False) -> tf_compat.train.Saver: """ Get a tf compat saver that contains only the variables for the desired architecture specified by key. Note, the architecture must have been created in the current graph already to work. :param key: the model key (name) to get a saver instance for :param remove_dynamic_tl_vars: True to remove the vars that are used for transfer learning (have a different shape and should not be restored), False to keep all vars in the Saver :return: a Saver object with the appropriate vars for the model to restore """ if key not in ModelRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, ModelRegistry._CONSTRUCTORS ) ) base_name = ModelRegistry._ATTRIBUTES[key].base_name_scope saver_vars = [ var for var in tf_compat.get_collection(tf_compat.GraphKeys.TRAINABLE_VARIABLES) if base_name in var.name ] saver_vars.extend( [ var for var in tf_compat.global_variables() if ("moving_mean" in var.name or "moving_variance" in var.name) and base_name in var.name ] ) if remove_dynamic_tl_vars: tl_ignore_tens = ModelRegistry._ATTRIBUTES[key].tl_ignore_tens def _check_ignore(var: tf_compat.Variable) -> bool: for ignore in tl_ignore_tens: if re.match(ignore, var.name): return True return False saver_vars = [var for var in saver_vars if not _check_ignore(var)] saver = tf_compat.train.Saver(saver_vars) return saver @staticmethod def register( key: Union[str, List[str]], input_shape: Any, domain: str, sub_domain: str, architecture: str, sub_architecture: str, default_dataset: str, default_desc: str, default_model_fn_creator: EstimatorModelFn, base_name_scope: str, tl_ignore_tens: List[str], repo_source: str = "sparseml", ): """ Register a model with the registry. 
Should be used as a decorator :param key: the model key (name) to create :param input_shape: the specified input shape for the model :param domain: the domain the model belongs to; ex: cv, nlp, etc :param sub_domain: the sub domain the model belongs to; ex: classification, detection, etc :param architecture: the architecture the model belongs to; ex: resnet, mobilenet, etc :param sub_architecture: the sub architecture the model belongs to; ex: 50, 101, etc :param default_dataset: the dataset to use by default for loading pretrained if not supplied :param default_desc: the description to use by default for loading pretrained if not supplied :param default_model_fn_creator: default model creator to use when creating estimator instance :param base_name_scope: the base string used to create the graph under :param tl_ignore_tens: a list of tensors to ignore restoring for if transfer learning :param repo_source: the source repo for the model, default is sparseml :return: the decorator """ if not isinstance(key, List): key = [key] def decorator(const_func): for r_key in key: if r_key in ModelRegistry._CONSTRUCTORS: raise ValueError("key {} is already registered".format(key)) ModelRegistry._CONSTRUCTORS[r_key] = const_func ModelRegistry._ATTRIBUTES[r_key] = _ModelAttributes( input_shape, domain, sub_domain, architecture, sub_architecture, default_dataset, default_desc, default_model_fn_creator, base_name_scope, tl_ignore_tens, repo_source, ) return const_func return decorator
nilq/baby-python
python
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import collections import numpy as np import pytest import megengine from megengine.core._imperative_rt.core2 import apply from megengine.core._trace_option import use_symbolic_shape from megengine.core.ops import builtin from megengine.tensor import Tensor def cvt_to_shape_desc(val, inpvar, config=None): def as_tensor(val, device): assert device is not None, "can not infer device" # TODO: should copy to appropriate device val = Tensor(val, device=device) return val device = None if inpvar is not None: assert isinstance(inpvar, Tensor) device = device or inpvar.device if config is not None: device = device or config.device if isinstance(val, Tensor): return as_tensor(val, device) if not isinstance(val, collections.abc.Iterable): val = [val] components = [] on_host = True for i in val: if isinstance(i, Tensor): on_host = False device = device or i.device else: assert isinstance(i, int), ( "shape desc could contain either int or Tensor, got {}" " actually".format(repr(i)) ) components.append(i) assert components, "shape desc could not be empty" if on_host: shape = np.ascontiguousarray(components, dtype=np.int32) assert np.all(shape == components), "failed to convert to shape: {}".format( components ) return as_tensor(shape, device) for idx, v in enumerate(components): if not isinstance(v, Tensor): vi = int(v) assert vi == v, "could not convert {} to int".format(v) v = vi components[idx] = as_tensor(v, device) return invoke_op(all_oprs.Concat(axis=0), components) def canonize_reshape(inputs, *, config): src, tshape = inputs tshape = cvt_to_shape_desc(tshape, src, config) return src, tshape def 
canonize_inputs(inputs, *, config): """convert immediate numbers and SharedND to SymbolVar in inputs; at least one of the inputs must be SymbolVar, so comp node and comp graph can beinferred :return: list of converted vars """ if ( isinstance(inputs, (list, tuple)) and len(inputs) == 1 and isinstance(inputs[0], (list, tuple)) ): # handle the case when a list is passed to a function with # variable-length argument (e.g. concat has signature concat(*inputs) # and is called with concat([a, b])) inputs = inputs[0] if isinstance(inputs, Tensor): return [inputs] old_inputs = inputs inputs = [] get_comp_node = None need_cvt = False for i in old_inputs: if isinstance(i, Tensor): get_comp_node = lambda cn=i.device: cn else: need_cvt = True inputs.append(i) if not need_cvt: return inputs if get_comp_node is None: def get_comp_node(): return config.comp_node for idx, var in enumerate(inputs): if not isinstance(var, Tensor): var = Tensor(var) inputs[idx] = var return inputs def invoke_op(op, inputs_, cvt_inputs=canonize_inputs): inputs = cvt_inputs( inputs_, config=megengine.core._imperative_rt.OperatorNodeConfig() ) return apply(op, *inputs) def unpack_getitem(inp, tuple_val, *, allow_newaxis=True): assert isinstance(inp, Tensor) if not isinstance(tuple_val, tuple): tuple_val = (tuple_val,) def as_tensor(v): if not isinstance(v, Tensor): vi = np.ascontiguousarray(v, dtype=np.int32) assert np.abs(vi - v).max() == 0, "bad index: {!r}".format(v) v = Tensor(vi) return v new_axes = [] tensors = [] items = [] cur_axis = -1 for i_idx, i in enumerate(tuple_val): cur_axis += 1 if i is np.newaxis: if cur_axis >= 0: new_axes.append(cur_axis) continue if i is Ellipsis: cur_axis = -1 for j in tuple_val[:i_idx:-1]: if j is Ellipsis: raise IndexError("only one ellipsis is allowed") if j is np.newaxis: new_axes.append(cur_axis) cur_axis -= 1 continue item = [ cur_axis, ] def push(v, item, tensors): if v is None: item.append(False) else: item.append(True) tensors.append(as_tensor(v)) if 
isinstance(i, slice): if i.start is None and i.stop is None and i.step is None: continue push(i.start, item, tensors) push(i.stop, item, tensors) push(i.step, item, tensors) item.append(False) # idx else: item += [False,] * 3 # begin, end, stop push(i, item, tensors) assert len(item) == 5 items.append(item) if new_axes: raise IndexError("newaxis is not allowed here") return inp, tensors, items def transpose(*args, **kwargs): op = builtin.Dimshuffle(**kwargs) return invoke_op(op, args) def broadcast(input, tshape): op = builtin.Broadcast() return invoke_op(op, (input, tshape), canonize_reshape) def subtensor(input, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.Subtensor(items) return invoke_op(op, (input, *tensors)) def set_subtensor(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.SetSubtensor(items) return invoke_op(op, (input, value, *tensors)) def incr_subtensor(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.IncrSubtensor(items) return invoke_op(op, (input, value, *tensors)) def advance_indexing(input, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.IndexingMultiAxisVec(items) return invoke_op(op, (input, *tensors)) def set_advance_indexing(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.IndexingSetMultiAxisVec(items) return invoke_op(op, (input, value, *tensors)) def incr_advance_indexing(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.IndexingIncrMultiAxisVec(items) return invoke_op(op, (input, value, *tensors)) def mesh_indexing(input, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.MeshIndexing(items) return invoke_op(op, (input, *tensors)) def set_mesh_indexing(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = 
builtin.SetMeshIndexing(items) return invoke_op(op, (input, value, *tensors)) def incr_mesh_indexing(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.IncrMeshIndexing(items) return invoke_op(op, (input, value, *tensors)) def batched_mesh_indexing(input, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.BatchedMeshIndexing(items) return invoke_op(op, (input, *tensors)) def batched_set_mesh_indexing(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.BatchedSetMeshIndexing(items) return invoke_op(op, (input, value, *tensors)) def batched_incr_mesh_indexing(input, value, tuple_val): input, tensors, items = unpack_getitem(input, tuple_val) op = builtin.BatchedIncrMeshIndexing(items) return invoke_op(op, (input, value, *tensors)) def test_transpose(): x = np.arange(10).reshape(2, 5).astype("int32") xx = Tensor(x) (yy,) = transpose(xx, pattern=[1, -1, 0]) np.testing.assert_equal(np.expand_dims(x.transpose(), axis=1), yy.numpy()) def test_broadcast(): x = np.arange(10).reshape(1, 10).astype("int32") xx = Tensor(x) (yy,) = broadcast(xx, (10, 10)) np.testing.assert_equal(np.repeat(x, 10, 0), yy.numpy()) def test_subtensor(): x = np.arange(25).reshape(5, 5).astype("int32") d = np.arange(2).astype("int32") xx = Tensor(x) (yy0,) = subtensor(xx, (slice(0, 4, 2), 3)) (yy1,) = set_subtensor(xx, d, (slice(0, 4, 2), 3)) (yy2,) = incr_subtensor(xx, d, (slice(0, 4, 2), 3)) np.testing.assert_equal(x[0:4:2, 3], yy0.numpy()) x_ = x.copy() x_[0:4:2, 3] = d np.testing.assert_equal(x_, yy1.numpy()) x_ = x.copy() x_[0:4:2, 3] += d np.testing.assert_equal(x_, yy2.numpy()) def test_advance_indexing(): x = np.arange(25).reshape(5, 5).astype("int32") d = np.arange(15).reshape(3, 5).astype("int32") xx = Tensor(x) (yy0,) = advance_indexing(xx, ((0, 4, 2), slice(None, None, None))) (yy1,) = set_advance_indexing(xx, d, ((0, 4, 2), slice(None, None, None))) (yy2,) = 
incr_advance_indexing(xx, d, ((0, 4, 2), slice(None, None, None))) np.testing.assert_equal(x[(0, 4, 2), :], yy0.numpy()) x_ = x.copy() x_[(0, 4, 2), :] = d np.testing.assert_equal(x_, yy1.numpy()) x_ = x.copy() x_[(0, 4, 2), :] += d np.testing.assert_equal(x_, yy2.numpy()) def test_mesh_indexing(): x = np.arange(25).reshape(5, 5).astype("int32") d = np.arange(6).reshape(3, 2).astype("int32") xx = Tensor(x) (yy0,) = mesh_indexing(xx, (slice(0, 5, 2), (1, 3))) (yy1,) = set_mesh_indexing(xx, d, (slice(0, 5, 2), (1, 3))) (yy2,) = incr_mesh_indexing(xx, d, (slice(0, 5, 2), (1, 3))) r = np.ndarray(shape=(3, 2), dtype="int32") for i0, i1 in enumerate(range(0, 5, 2)): for j0, j1 in enumerate((1, 3)): r[i0, j0] = x[i1, j1] np.testing.assert_equal(r, yy0.numpy()) r = x.copy() for i0, i1 in enumerate(range(0, 5, 2)): for j0, j1 in enumerate((1, 3)): r[i1, j1] = d[i0, j0] np.testing.assert_equal(r, yy1.numpy()) r = x.copy() for i0, i1 in enumerate(range(0, 5, 2)): for j0, j1 in enumerate((1, 3)): r[i1, j1] += d[i0, j0] np.testing.assert_equal(r, yy2.numpy()) def test_batched_mesh_indexing(): x = np.arange(24).reshape(2, 3, 4).astype("int32") d = np.arange(12).reshape(2, 2, 3).astype("int32") xx = Tensor(x) s = [(0, 1, 2), (1, 2, 3)] (yy0,) = batched_mesh_indexing(xx, (slice(None, None, None), [(0, 2)] * 2, s)) (yy1,) = batched_set_mesh_indexing( xx, d, (slice(None, None, None), [(0, 2)] * 2, s) ) (yy2,) = batched_incr_mesh_indexing( xx, d, (slice(None, None, None), [(0, 2)] * 2, s) ) r = np.ndarray(shape=(2, 2, 3), dtype="int32") for i in range(2): for j0, j1 in enumerate((0, 2)): for k0, k1 in enumerate(s[i]): r[i, j0, k0] = x[i, j1, k1] np.testing.assert_equal(r, yy0.numpy()) r = x.copy() for i in range(2): for j0, j1 in enumerate((0, 2)): for k0, k1 in enumerate(s[i]): r[i, j1, k1] = d[i, j0, k0] np.testing.assert_equal(r, yy1.numpy()) r = x.copy() for i in range(2): for j0, j1 in enumerate((0, 2)): for k0, k1 in enumerate(s[i]): r[i, j1, k1] += d[i, j0, k0] 
np.testing.assert_equal(r, yy2.numpy()) # high level def test_advance_indexing_high_level(): x = np.arange(25).reshape(5, 5).astype("int32") d = np.arange(15).reshape(3, 5).astype("int32") xx = Tensor(x) np.testing.assert_equal(x[1, :], xx[1, :].numpy()) np.testing.assert_equal(x[:, 1], xx[:, 1].numpy()) np.testing.assert_equal(x[1:3, :], xx[1:3, :].numpy()) np.testing.assert_equal(x[:, :], xx[:, :].numpy()) np.testing.assert_equal(x[1, 1], xx[1, 1].numpy()) yy = xx[(0, 4, 2), :] np.testing.assert_equal(x[(0, 4, 2), :], yy.numpy()) x_ = x.copy() x_[(0, 4, 2), :] = d xx_ = Tensor(xx) xx_[(0, 4, 2), :] = d np.testing.assert_equal(x_, xx_.numpy()) x = np.arange(27).reshape(3, 3, 3).astype("int32") xx = Tensor(x) np.testing.assert_equal(x[1, :, :], xx[1, :, :].numpy()) np.testing.assert_equal(x[1, :, 1], xx[1, :, 1].numpy()) np.testing.assert_equal(x[1, 0:1, :], xx[1, 0:1, :].numpy()) np.testing.assert_equal(x[0:1, 1, 1], xx[0:1, 1, 1].numpy()) np.testing.assert_equal(x[:, 1, 1], xx[:, 1, 1].numpy()) np.testing.assert_equal(x[:, 1], xx[:, 1].numpy()) np.testing.assert_equal(x[1, 1:2], xx[1, 1:2].numpy()) x_ = x.copy() x_[1, 1, 1] = -1 xx[1, 1, 1] = -1 np.testing.assert_equal(x_, xx.numpy()) x_[:, 1, 1] = -2 xx[:, 1, 1] = x_[:, 1, 1] np.testing.assert_equal(x_, xx.numpy()) x_[0:1, :, 1] = -3 xx[0:1, :, 1] = x_[0:1, :, 1] np.testing.assert_equal(x_, xx.numpy()) x_[0:1, :, 1] = -4 y = Tensor(x_) xx[0:1, :, 1] = y[0:1, :, 1] np.testing.assert_equal(y.numpy(), xx.numpy()) x[:] = 1 xx[:] = 1 np.testing.assert_equal(x, xx.numpy()) x = np.arange(9).reshape(3, 3).astype("int32") xx = Tensor(x) y = np.array([1, 2]) yy = Tensor(y) np.testing.assert_equal(x[:, y[0]], xx[:, y[0]].numpy()) np.testing.assert_equal(x[:, y[0]], xx[:, yy[0]].numpy()) np.testing.assert_equal(x[:, y], xx[:, y].numpy()) np.testing.assert_equal(x[:, y], xx[:, yy].numpy()) x_ = x.copy() x_[:, y[0]] = -1 xx_ = Tensor(x_) xx[:, yy[0]] = xx_[:, yy[0]] np.testing.assert_equal(x_, xx.numpy()) x_[:, y] = -1 xx_ = 
Tensor(x_) xx[:, yy] = xx_[:, yy] np.testing.assert_equal(x_, xx.numpy()) x = np.arange(9).reshape(3, 3).astype("int32") xx = Tensor(x) y = np.array([1]) yy = Tensor(y) np.testing.assert_equal(x[:, y[0]], xx[:, y[0]].numpy()) np.testing.assert_equal(x[:, y[0]], xx[:, yy[0]].numpy()) np.testing.assert_equal(x[:, y], xx[:, y].numpy()) np.testing.assert_equal(x[:, y], xx[:, yy].numpy()) x = np.arange(9).reshape(3, 3).astype("int32") xx = Tensor(x) np.testing.assert_equal(x[[0, 1], 0], xx[[0, 1], 0].numpy()) np.testing.assert_equal(x[0:2, 0], xx[0:2, 0].numpy()) def test_advance_indexing_with_bool(): a = np.arange(9).reshape(3, 3).astype(np.float32) b = np.array([1, 2, 3]) c = np.array([1, 2, 3]) aa = Tensor(a) bb = Tensor(b) cc = Tensor(c) np.testing.assert_equal(a[b == 1, c == 2], aa[bb == 1, cc == 2].numpy()) a[b == 1, c == 2] = -1.0 aa[bb == 1, cc == 2] = -1.0 np.testing.assert_equal(a, aa.numpy()) a = np.arange(9).reshape(3, 3).astype(np.float32) b = np.array([False, True, True]) c = np.array([2, 0]).astype(np.int32) aa = Tensor(a) bb = Tensor(b) cc = Tensor(c) np.testing.assert_equal(a[b, c], aa[bb, cc].numpy()) a[b, c] = -1.0 aa[bb, cc] = -1.0 np.testing.assert_equal(a, aa.numpy()) d = np.array([-1, -2], dtype=np.float32) dd = Tensor(d) a[b, c] = d aa[bb, cc] = dd np.testing.assert_equal(a, aa.numpy()) a = np.ones((2, 2)) b = np.array([[True, False], [False, True]]) aa = Tensor(a) bb = Tensor(b) np.testing.assert_equal(a[b], aa[bb].numpy()) b[:] = True bb[:] = True np.testing.assert_equal(a[b], aa[bb].numpy()) np.testing.assert_equal(a[:, [True, False]], aa[:, [True, False]].numpy()) a = np.array([[True, False], [False, True]]) b = np.array([1]) aa = Tensor(a) bb = Tensor(b) np.testing.assert_equal(a[b], aa[bb].numpy()) b = np.array([[True, True], [False, True]]) bb = Tensor(b) np.testing.assert_equal(a[b], aa[bb].numpy()) a[b] = False aa[bb] = False np.testing.assert_equal(a, aa.numpy()) # XXX: trace does not expect empty condtake tensor if not 
use_symbolic_shape(): a = np.ones((2, 2), dtype=np.int32) b = np.array([[False, False], [False, False]]) aa = Tensor(a) bb = Tensor(b) np.testing.assert_equal(a[b], aa[b].numpy()) np.testing.assert_equal(a[b], aa[bb].numpy()) b = np.array([False, False]) bb = Tensor(b) np.testing.assert_equal(a[b], aa[bb].numpy().reshape(a[b].shape)) # FIXME a = np.arange(576).reshape(2, 3, 4, 3, 4, 2).astype("int32") aa = Tensor(a) b = (np.random.sample((2, 3, 4)) > 0.5).astype("bool") bb = Tensor(b) np.testing.assert_equal(a[b, :, 0:4:2], aa[bb, :, 0:4:2].numpy()) b = (np.random.sample((4, 3, 4)) > 0.5).astype("bool") bb = Tensor(b) np.testing.assert_equal(a[..., b, 0:2], aa[..., bb, 0:2].numpy()) b = (np.random.sample((3, 4, 3)) > 0.5).astype("bool") bb = Tensor(b) np.testing.assert_equal( a[:, b, 0:2, [True, False]], aa[:, bb, 0:2, [True, False]].numpy() )
nilq/baby-python
python
import json import pathlib import os print("Please enter the input path to the filepath you want to use for Mistos") print("We will create a folder called 'Mistos' there. It contains your input and output directory") path = input() is_dir = False while is_dir == False: path = pathlib.Path(path) if path.is_dir(): is_dir = True else: print("Path is not valid. Make sure to enter a correct filepath (e.g. 'C:/Users/tlux1/Desktop')") path = input() mistos_path = path.joinpath("Mistos") export_path = mistos_path.joinpath("export") fileserver_path = mistos_path.joinpath("fileserver") os.mkdir(mistos_path) os.mkdir(export_path) os.mkdir(fileserver_path) config = { "EXPORT_DIRECTORY": export_path.as_posix(), "WORKING_DIRECTORY": fileserver_path.as_posix() } with open("config.json", "w") as _file: json.dump(config, _file) print("Success! Start Mistos by running the 'mistos_start.bat' script.")
nilq/baby-python
python
"Tests for presto.map" import unittest as ut from presto.map import System, Constellation, Region class TestMap(ut.TestCase): def test_map(self): "Basic map data functionality test" stacmon = System.by_name("Stacmon") self.assertTrue(stacmon) self.assertEqual(len(list(stacmon.neighbors())), 5) self.assertTrue("Ostingele" in {n.name for n in stacmon.neighbors()}) self.assertEqual(stacmon.region.name, "Placid") self.assertEqual(stacmon.constellation.name, "Fislipesnes") fislipesnes = Constellation.by_name("Fislipesnes") placid = Region.by_name("Placid") self.assertEqual(fislipesnes, stacmon.constellation) self.assertEqual(placid, stacmon.region) self.assertEqual(len(stacmon.region.systems), 71) if __name__ == '__main__': unittest.main()
nilq/baby-python
python
import logging

import smores.medkit as medkit

# Accumulates "<rxcui>: <failure list>" strings across all checks in this run.
failures = []


def rxnorm_ingredient(rxcui, expect):
    """Validate the structure of ``medkit.get_ingredients`` for an RXNORM cui.

    Checks that the call returns a non-empty list of dicts, that the first
    medication dict carries the expected keys, and that its first ingredient
    dict does as well.  The overall pass/fail result is compared against
    *expect* and the outcome printed; failure details are appended to the
    module-level ``failures`` list.

    :param rxcui: RxNorm concept id (string) to look up
    :param expect: bool, the expected overall validation result
    """
    _failures = []
    _return_check = False
    _med_key_check = False
    _ing_key_check = False

    ingredients = medkit.get_ingredients(rxcui, 'RXNORM')
    if ingredients is not None and len(ingredients) > 0:
        if type(ingredients[0]) is dict:
            _return_check = True
        else:
            _failures.append('Return Check: Bad Return Type')

    if _return_check:
        # BUG FIX: the original while-loops broke one element early
        # (`if i == len(keys)-1: break`), so the last expected key was
        # never validated; every key is checked here.
        default_keys = ['rxcui', 'tty', 'name', 'ingredients']
        med_1_keys = ingredients[0].keys()
        missing_med = [k for k in default_keys if k not in med_1_keys]
        for k in missing_med:
            _failures.append('Med Check Failure: {0}'.format(k))
        _med_key_check = not missing_med

        ing_default_keys = ['rxcui', 'tty', 'name']
        ing_1_keys = ingredients[0]['ingredients'][0].keys()
        missing_ing = [k for k in ing_default_keys if k not in ing_1_keys]
        for k in missing_ing:
            _failures.append('Med ING Check Failure: {0}'.format(k))
        _ing_key_check = not missing_ing
    else:
        _failures.append('RxNav Check of {0} Failed All Checks'.format(rxcui))

    _overall = _return_check and _med_key_check and _ing_key_check
    if _failures:
        failures.append('{0}: {1} '.format(rxcui, _failures))

    if _overall == expect:
        print('RxNav Check of {0} Produced Expected Result of {1}'.format(rxcui, expect))
    else:
        print('RxNav Check of {0} Produced Unexpected Result of {1}'.format(rxcui, _overall))


if __name__ == "__main__":
    # Mirror smores' logging onto the console at DEBUG level for this run.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(funcName)-20s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    rxnormLog = logging.getLogger('rxnorm')
    smoresLog = logging.getLogger('smores')
    rxnormLog.addHandler(console)
    smoresLog.addHandler(console)

    rxnorm_ingredient('209387', True)
    rxnorm_ingredient('206410', False)
    rxnorm_ingredient('161', True)

    print(failures)
nilq/baby-python
python
# Generated by Django 2.1.3 on 2018-12-01 22:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Eprint_users', '0011_auto_20181130_0119'), ] operations = [ migrations.AlterField( model_name='profile', name='image', field=models.ImageField(default='default.png', upload_to='media/profilepics'), ), ]
nilq/baby-python
python
#!/usr/bin/env python """ Loop over a list of blog post src filenames and generate a blog index markdown file. """ import sys import os.path from datetime import datetime from utils import parse_metadata POST_TEMPLATE = """ --- ## [{title}]({htmlname}) ### {subtitle} {description} _{datestr}_ | [Read more...]({htmlname}) """ def post_index(filenames): for file in sorted(filenames,reverse=True): path,name = os.path.split(file) htmlname = file[4:-3] + '.html' with open(file,'r') as f: md = parse_metadata(f.read()) #DATESTR md['datestr'] = str(datetime.strptime(name[:10],'%Y-%m-%d').date()) if 'subtitle' not in md: md['subtitle'] = '' print(POST_TEMPLATE.format(htmlname=htmlname,**md)) if __name__=='__main__': post_index(sys.argv[1:])
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.http import HttpResponse from django.shortcuts import render def home(request): """return HttpResponse('<h1>Hello, Welcome to this test</h1>')""" """Le chemin des templates est renseigne dans "DIRS" de "TEMPLATES" dans settings.py DONC PAS BESOIN DE RENSEIGNER LE CHEMIN ABSOLU""" return render(request, "index.html") def us(request): return render(request, "us.html") def algos(request): return render(request, "algos_explanation.html") def breastCancer(request): return render(request, "breastCancer.html") def handler404(request, exception): return render(request, "errors/404.html") def handler500(request): return render(request, "errors/500.html")
nilq/baby-python
python
#!/usr/bin/env python # Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org> # # This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details. """ Description here """ import logging as log import networkx as nx import madsenlab.axelrod.utils.configuration import numpy as np import math as m import pprint as pp import matplotlib.pyplot as plt from numpy.random import RandomState ################################################################################### class BaseGraphPopulation(object): """ Base class for all Axelrod model populations that use a graph (NetworkX) representation to store the relations between agents. Methods here need to be independent of the trait representation, but can assume that the agents are nodes in a Graph. Thus, most of the "agent selection" and "neighbor" methods are concentrated here. """ def __init__(self,simconfig,graph_factory,trait_factory): self.simconfig = simconfig self.interactions = 0 self.innovations = 0 self.losses = 0 self.time_step_last_interaction = 0 self.prng = RandomState() # allow the library to choose a seed via OS specific mechanism self.graph_factory = graph_factory self.trait_factory = trait_factory # initialize the graph structure via the factory object self.agentgraph = self.graph_factory.get_graph() def get_agent_by_id(self, agent_id): return (agent_id, self.agentgraph.node[agent_id]['traits']) def get_random_agent(self): """ Returns a random agent chosen from the population, in the form of a tuple of two elements (node_id, array_of_traits). This allows operations on the agent and its traits without additional calls. 
To modify the traits, change one or more elements in the array, and then call set_agent_traits(agent_id, new_list) """ rand_agent_id = self.prng.randint(0, self.simconfig.popsize) return self.get_agent_by_id(rand_agent_id) def get_random_neighbor_for_agent(self, agent_id): """ Returns a random agent chosen from among the neighbors of agent_id. The format is the same as get_random_agent -- a two element tuple with the neighbor's ID and their trait list. """ neighbor_list = self.agentgraph.neighbors(agent_id) num_neighbors = len(neighbor_list) rand_neighbor_id = neighbor_list[self.prng.randint(0,num_neighbors)] return self.get_agent_by_id(rand_neighbor_id) def get_all_neighbors_for_agent(self, agent_id): return self.agentgraph.neighbors(agent_id) def get_coordination_number(self): return self.graph_factory.get_lattice_coordination_number() def update_interactions(self, timestep): self.interactions += 1 self.time_step_last_interaction = timestep def update_innovations(self): self.innovations += 1 def update_loss_events(self): self.losses += 1 def get_time_last_interaction(self): return self.time_step_last_interaction def get_interactions(self): return self.interactions def get_innovations(self): return self.innovations def get_losses(self): return self.losses def initialize_population(self): self.trait_factory.initialize_population(self.agentgraph) ### Abstract methods - derived classes need to override def draw_network_colored_by_culture(self): raise NotImplementedError def get_traits_packed(self,agent_traits): raise NotImplementedError def set_agent_traits(self, agent_id, trait_list): raise NotImplementedError ################################################################################### class TreeTraitStructurePopulation(BaseGraphPopulation): """ Base class for all Axelrod models which feature a non-fixed number of features/traits per individual where traits are encoded as paths in a tree. 
""" def __init__(self, simconfig,graph_factory,trait_factory): super(TreeTraitStructurePopulation, self).__init__(simconfig,graph_factory,trait_factory) def set_agent_traits(self, agent_id, trait_set): self.agentgraph.node[agent_id]['traits'] = trait_set def get_traits_packed(self,agent_traits): hashable_set = frozenset(agent_traits) return hash(hashable_set) def draw_network_colored_by_culture(self): nodes, traits = zip(*nx.get_node_attributes(self.agentgraph, 'traits').items()) nodes, pos = zip(*nx.get_node_attributes(self.agentgraph, 'pos').items()) color_tupled_compressed = [self.get_traits_packed(t) for t in traits] nx.draw(self.agentgraph, pos=pos, nodelist=nodes, node_color=color_tupled_compressed) plt.show() # EXPLICIT OVERRIDE OF BASE CLASS METHOD! def initialize_population(self): """ For semantically structured traits, since the traits are not just random integers, we need to have a copy of the trait "universe" -- i.e., all possible traits and their relations. So we initialize the trait universe first, and then allow the trait factory to initialize our starting population on the chosen population structure. """ self.trait_universe = self.trait_factory.initialize_traits() self.trait_factory.initialize_population(self.agentgraph) def __repr__(self): rep = 'TreeTraitStructurePopulation: [' for nodename in self.agentgraph.nodes(): rep += "node %s: " % nodename rep += pp.pformat(self.agentgraph.node[nodename]['traits']) rep += ",\n" rep += ' ]' return rep ################################################################################### class ExtensibleTraitStructurePopulation(BaseGraphPopulation): """ Base class for all Axelrod models which feature a non-fixed number of features/traits per individual. 
""" def __init__(self, simconfig,graph_factory,trait_factory): super(ExtensibleTraitStructurePopulation, self).__init__(simconfig,graph_factory, trait_factory) def set_agent_traits(self, agent_id, trait_set): self.agentgraph.node[agent_id]['traits'] = trait_set def get_traits_packed(self,agent_traits): hashable_set = frozenset(agent_traits) return hash(hashable_set) def draw_network_colored_by_culture(self): nodes, traits = zip(*nx.get_node_attributes(self.agentgraph, 'traits').items()) nodes, pos = zip(*nx.get_node_attributes(self.agentgraph, 'pos').items()) color_tupled_compressed = [self.get_traits_packed(t) for t in traits] nx.draw(self.agentgraph, pos=pos, nodelist=nodes, node_color=color_tupled_compressed) plt.show() ################################################################################### class FixedTraitStructurePopulation(BaseGraphPopulation): """ Base class for all Axelrod models with a fixed number of features and number of traits per feature. Specifies no specific graph, lattice, or network model, but defines operations usable on any specific model as long as the graph is represented by the NetworkX library and API. Agents are given by nodes, and edges define "neighbors". Important operations on a model include choosing a random agent, finding a random neighbor, updating an agent's traits, and updating statistics such as the time the last interaction occurred (which is used to know when (or if) we've reached a fully absorbing state and can stop. 
Subclasses should ONLY implement an __init__ method, in which self.model is assigned an instance of a """ def __init__(self, simconfig,graph_factory, trait_factory): super(FixedTraitStructurePopulation, self).__init__(simconfig, graph_factory, trait_factory) def draw_network_colored_by_culture(self): nodes, colors = zip(*nx.get_node_attributes(self.agentgraph, 'traits').items()) nodes, pos = zip(*nx.get_node_attributes(self.agentgraph, 'pos').items()) color_tupled_compressed = [int(''.join(str(i) for i in t)) for t in colors] nx.draw(self.agentgraph, pos=pos, nodelist=nodes, node_color=color_tupled_compressed) plt.show() def get_traits_packed(self,agent_traits): return ''.join(str(i) for i in agent_traits) def set_agent_traits(self, agent_id, trait_list): """ Stores a modified version of the trait list for an agent. """ #old_traits = self.model.node[agent_id]['traits'] self.agentgraph.node[agent_id]['traits'] = trait_list #new_traits = self.model.node[agent_id]['traits'] #log.debug("setting agent %s: target traits: %s old: %s new: %s", agent_id, trait_list, old_traits, new_traits)
nilq/baby-python
python
#!/usr/bin/env python3 # coding:utf-8 class Solution: def maxInWindows(self, num, size): if num == []: return [] if len(num) < size: return [max(num)] res = [] queue = num[:size] res.append(max(queue)) for i in range(size, len(num)): queue.pop(0) queue.append(num[i]) res.append(max(queue)) return res if __name__ == "__main__": nums = [2, 3, 4, 2, 6, 2, 5, 1] size = 3 s = Solution() ans = s.maxInWindows(nums, size) print(ans)
nilq/baby-python
python
__all__=["greeters"] # *** # *** Use __init__.py to expose different parts of the submodules in the desired namespace # *** # *** Define what can be seen in the main "skeleton." namespace (as this is skeleton/__init__.py) like this: # from .greeters.fancy import * # now you can do: from skeleton import FancyHelloWorld from valkka.skeleton.greeters.fancy import * # relative imports are evil, so use this instead # *** Be aware that that in "skeleton.greeters" a list __all__ has been defined. It declares what is exposed to the API user when calling "fro skeleton.greeters.fancy import *" # *** We could declare the API exposure here as well, by being more explicit: # from skeleton.greeters.fancy import FancyHelloWorld # *** If you want to keep FancyHelloWorld under the "greeters.fancy." namespace, don't add ".. import *" statements to this file # *** The idea is, that the submodules have "internal hierarchies" that the API user is not supposed to worry with # *** and he/she access them simply with "from skeleton import ClassName" from valkka.skeleton.greeters.cool.cool1 import * from valkka.skeleton.greeters.cool.cool2 import * from valkka.skeleton.version import * __version__=str(VERSION_MAJOR)+"."+str(VERSION_MINOR)+"."+str(VERSION_PATCH)
nilq/baby-python
python
import iota_client # client will connect to testnet by default client = iota_client.Client() print(client.get_info())
nilq/baby-python
python
from django.apps import AppConfig


class SiteAdocaoConfig(AppConfig):
    """Django application configuration for the `site_adocao` app."""

    # App label registered with Django's app registry; must match the package name.
    name = 'site_adocao'
nilq/baby-python
python
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""File-digest roster: tracks files under a base directory by BLAKE2b digest,
detecting duplicates, moved files and changes, and offering expel/absorb/
prune/rename maintenance operations plus a small CLI."""

import os
import shutil as sh
import filecmp as fc
import pathlib as pth
import collections as co

from hashlib import blake2b

from .log import Logger
from .base import config
from .counter import counters
from .resource import Resource, resource, Names

log = Logger(__name__)


def calc_digest(path, *, base=None, **_):
    """Return (hex_digest, size) for the file at `path` (relative to `base`
    when given), or (None, None) if the file does not exist.

    Reads in 64 KiB chunks; asserts the byte count matches stat() size.
    """
    p = base / path if base else pth.Path(path)
    if p.exists():
        d, s = blake2b(digest_size=20), 0
        with open(p, 'rb') as f:
            for b in iter(lambda: f.read(65536), b''):
                s += len(b)
                d.update(b)
        assert s == p.stat().st_size
        # NOTE(review): "Cant't" is a typo in the log message; left as-is
        # because changing runtime strings is out of scope here.
        return d.hexdigest(), s
    log.warning("Cant't digest nonexistent file {}", p)
    return None, None


class Entry(co.namedtuple('Entry', 'path digest size')):
    """Immutable (path, digest, size) record for one tracked file.

    Unhashable by design (__hash__ = None); equality compares digest+size
    only, so two entries with different paths but identical content are
    considered equal (that is how duplicates are detected).
    """

    __slots__ = ()

    def __new__(cls, path, digest=None, size=None, **kw):
        # If no digest is supplied, compute it now (kw may carry `base`).
        if not digest:
            digest, size = calc_digest(path, **kw)
        return super().__new__(cls, path, digest, size)

    def __bool__(self):
        # Truthy only when fully populated (path set and digest computed).
        return bool(self.path and self.digest is not None and self.size is not None)

    __hash__ = None

    def __eq__(self, other):
        if isinstance(other, type(self)):
            d = self.digest
            # Content equality: same digest and size; path is ignored.
            return (d and d == other.digest and self.size == other.size)
        return NotImplemented

    def __repr__(self):
        s = "{}({!r}".format(type(self).__name__, str(self.path))
        d = self.digest
        if d:
            s += ", {!r}, {}".format(d, self.size)
        s += ")"
        return s

    def relative_to(self, path, base, **_):
        """True if this entry's file lies under `base / path`."""
        try:
            (base / self.path).relative_to(base / path)
        except ValueError:
            return False
        return True

    def check(self, **kw):
        """Re-digest the file on disk and return True if it still matches."""
        d = self.digest
        if d:
            d2, s = calc_digest(self.path, **kw)
            if d2 == d and s == self.size:
                return True
            m = 'Mismatched digest for {}'
        else:
            m = 'No digest for {}'
        log.info(m, self.path)
        return False


def prune_dir(path, cntr=None, **_):
    """Recursively delete dot-prefixed entries under `path` (keeping `.qnr`
    resource files), then try to remove `path` itself if it ended up empty.

    `cntr`, when given, is incremented per visited entry ('-' on delete).
    """
    with os.scandir(path) as es:
        for e in es:
            p = pth.Path(e.path)
            j = None
            if p.name.startswith('.'):
                if e.is_dir(follow_symlinks=False):
                    sh.rmtree(str(p))
                elif p.suffix != '.qnr':
                    p.unlink()
                    log.info('Deleted {}', p)
                    j = '-'
            elif e.is_dir(follow_symlinks=False):
                prune_dir(p, cntr)
                continue
            if cntr:
                cntr.incr(j)
    try:
        # rmdir succeeds only when the directory is now empty.
        path.rmdir()
        log.info('Deleted {}', path)
        j = '-'
    except:
        # Intentionally swallowed: a non-empty dir simply stays in place.
        j = None
    if cntr:
        cntr.incr(j)


class Roster(Resource):
    """Dict-like Resource mapping both path -> (digest, size) and
    (digest, size) -> Entry, persisted as `.roster.qnr` under `base`.

    NOTE(review): `counters(args, kw)` appears to inject the live counter
    into kw as kw['cntr'] (every `**kw` callee takes a `cntr`) — confirm
    against the counter module.
    """

    _res_path = '.roster.qnr'

    @classmethod
    def globals(cls):
        # Used by Resource (de)serialization to resolve class names.
        return globals()

    def __init__(self, entries=None, **kw):
        super().__init__(None, **kw)
        self._expels = []    # pending (original, duplicate) Entry pairs
        self._symlinks = []  # symlinks seen while scanning (not tracked)
        if entries:
            self.add_entry(entries)

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, tuple(self.entries))

    def __str__(self):
        s = '{}:'.format(str(self.base))
        for e in self.entries:
            s += '\n{} {} {}'.format(str(e.path), str(e.digest), e.size)
        return s

    @property
    def entries(self):
        """All Entry values, sorted by path (filters out the path->key half)."""
        es = [e for e in self.values() if isinstance(e, Entry)]
        return sorted(es, key=lambda x: x.path)

    def adjust_kw(self, kw):
        """Normalize kw['base'] to a Path, defaulting to self.base."""
        def _adjust(key, default):
            v = kw.get(key)
            v = pth.Path(v) if v else default
            kw[key] = v
        _adjust('base', self.base)

    def entry_adder(self, entry, cntr, modify=False, expel=True, **kw):
        """Generator: add Entry (or iterable of entries), yielding the path of
        each newly added unique file.

        - known path, changed digest: update when `modify` is truthy, else warn
        - new path, known digest: duplicate; queue for expel when `expel`
        """
        if isinstance(entry, Entry):
            assert entry
            p, d, s = entry
            k = d, s
            if p in self:
                ok = self[p]
                if k != ok:
                    if modify:
                        log.info('Modifying digest for {}', p)
                        del self[ok]
                        self[p] = k
                        self[k] = entry
                        cntr.incr(modify)
                        return
                    else:
                        log.warning('Digest mismatch for {}', p)
                # NOTE(review): reconstructed indentation — this incr counts
                # both unchanged entries and unmodified mismatches; verify.
                cntr.incr()
            else:
                try:
                    o = self[k]
                except KeyError:
                    # Unseen content: register both mappings and report it.
                    self[p] = k
                    self[k] = entry
                    yield p
                else:
                    log.info('Duplicates: {} and {}', o.path, p)
                    if expel:
                        self._expels.append((o, entry))
                    cntr.incr()
        else:
            # Iterable of entries: recurse per element.
            for e in entry:
                yield from self.entry_adder(e, cntr, modify, expel, **kw)

    add_args = ((('scanned', '.'), ('added', '+')), 'Adding:')

    def add_entry(self, entry, **kw):
        """Add entry/entries under an 'Adding:' counter; returns the counter."""
        with counters(self.add_args, kw) as cs:
            for _ in self.entry_adder(entry, **kw):
                cs.incr('+')
            return cs

    def path_adder(self, path, **kw):
        """Digest one absolute path and feed it to entry_adder (base-relative)."""
        self.adjust_kw(kw)
        p = str(pth.Path(path).relative_to(kw['base']))
        yield from self.entry_adder(Entry(p, **kw), **kw)

    def walker(self, paths=(), **kw):
        """Yield tracked entries, optionally restricted to the given subpaths."""
        for e in self.entries:
            if paths:
                for p in paths:
                    if e.relative_to(p, **kw):
                        break
                else:
                    continue  # matched no requested subpath
            yield e

    def scanner(self, root, cntr, **kw):
        """Walk `root` on disk (skipping dot-entries and symlinks) and add every
        regular file, yielding paths of newly added uniques."""
        def _paths(path):
            with os.scandir(path) as es:
                for e in es:
                    p = pth.Path(e.path)
                    if not p.name.startswith('.'):
                        if e.is_dir(follow_symlinks=False):
                            yield from _paths(p)
                            continue
                        elif e.is_file(follow_symlinks=False):
                            yield p
                            continue
                        elif e.is_symlink():
                            # Remembered but not digested.
                            log.info('Symlink {}', p)
                            self._symlinks.append(p)
                        else:
                            log.info('Ignoring dir entry {}', p)
                    # NOTE(review): reconstructed indentation — counts entries
                    # not yielded above (dot-entries, symlinks, oddities).
                    cntr.incr()
        if root.exists():
            for p in _paths(root):
                yield from self.path_adder(p, **kw, cntr=cntr)

    scan_args = ((('scanned', '.'), ('added', '+')), 'Scanning:')

    def scan(self, paths=(), **kw):
        """Scan base (or the given subpaths) and add all files found."""
        self.adjust_kw(kw)
        b = kw['base']
        with counters(self.scan_args, kw) as cs:
            for p in paths or ('', ):
                for _ in self.scanner(b / p, **kw):
                    cs.incr('+')
            return cs

    rescan_args = ((('scanned', '.'), ('added', '+'), ('removed', '-'), ('modified', 'm')), 'Rescanning:')

    def rescanner(self, paths, cntr, **kw):
        """Drop entries whose files vanished, then rescan with modify enabled,
        yielding newly added paths."""
        self.adjust_kw(kw)
        b = kw['base']
        es = [e for e in self.walker(paths, **kw) if not (b / e.path).exists()]
        for p, d, s in es:
            del self[p]
            del self[(d, s)]
            cntr.incr('-')
        self._expels = []
        for p in paths or ('', ):
            # NOTE(review): inner loop reuses/shadows `p` — present in original.
            for p in self.scanner(b / p, **kw, cntr=cntr, modify='m'):
                yield p

    def rescan(self, paths=(), **kw):
        """Full refresh: remove stale entries, re-add/modify the rest."""
        with counters(self.rescan_args, kw) as cs:
            for _ in self.rescanner(paths, **kw):
                cs.incr('+')
            return cs

    check_args = ((('passed', '.'), ('failed', 'F')), 'Checking:')

    def check(self, paths=(), **kw):
        """Re-digest every tracked file; returns the pass/fail counter."""
        self.adjust_kw(kw)
        with counters(self.check_args, kw) as cs:
            for e in self.walker(paths, **kw):
                cs.incr('.' if e.check(**kw) else 'F')
            return cs

    def check_ok(self, paths=(), **kw):
        """True when no file failed its digest check."""
        return not self.check(paths, **kw)['F']

    def rename_path(self, src, dst, cntr, cntr_key=None, **_):
        """Move `src` to `dst` (creating parents), refusing to overwrite."""
        if dst.exists():
            log.warning("Can't move/rename, destination exists {}", dst)
            cntr.incr('F')
        else:
            dst.parent.mkdir(parents=True, exist_ok=True)
            src.rename(dst)
            log.info('Moved/renamed {} to/as {}', src, dst)
            cntr.incr(cntr_key)

    expel_args = ((('scanned', '.'), ('expelled', 'e'), ('failed', 'F')), 'Expelling:')

    def expel(self, ebase=None, **kw):
        """Move queued duplicates out of base into `ebase` (default
        sibling 'expel' dir), after a byte-for-byte comparison."""
        with counters(self.expel_args, kw) as cs:
            self.adjust_kw(kw)
            b = kw['base']
            for o, d in self._expels:
                op = b / o.path
                dp = b / d.path
                if fc.cmp(op, dp, shallow=False):
                    e = (ebase or (b.parent / 'expel')) / d.path
                    self.rename_path(dp, e, **kw, cntr_key='e')
                else:
                    # Same digest but different bytes: do not touch the files.
                    log.error('Duplicates compare failed {}, {}', op, dp)
                    cs.incr('F')
            self._expels = []
            return cs

    def absorb_paths(self, paths=(), abase=None, **kw):
        """Yield (base, absorb_base, existing_subpath) triples for absorption."""
        self.adjust_kw(kw)
        b = kw['base']
        ab = abase or (b.parent / 'absorb')
        for p in paths or ('', ):
            p = ab / p
            if p.exists():
                yield b, ab, p

    absorb_args = ((('scanned', '.'), ('absorbed', 'a'), ('failed', 'F')), 'Absorbing:')

    def absorb(self, paths=(), abase=None, **kw):
        """Pull unique files from the absorb dir into base, then prune it."""
        with counters(self.absorb_args, kw) as cs:
            kw['expel'] = False  # duplicates in absorb dir are simply left behind
            for b, ab, path in self.absorb_paths(paths, abase, **kw):
                # Materialize first: renames below would confuse a live scan.
                for p in [p for p in self.scanner(path, **kw, base=ab)]:
                    self.rename_path(ab / p, b / p, **kw, cntr_key='a')
                prune_dir(path)
            return cs

    prune_args = ((('scanned', '.'), ('deleted', '-')), 'Pruning:')

    def prune(self, paths=(), abase=None, **kw):
        """Prune the absorb dir without moving anything into base."""
        with counters(self.prune_args, kw) as cs:
            for _, ab, p in self.absorb_paths(paths, abase, **kw):
                prune_dir(p, **kw)
            return cs

    def namer(self, path, names, base, cntr, **_):
        """Record a normalized (lowercase, dash-for-space) name for `path`
        and, recursively, for each existing ancestor directory."""
        p = str(path)
        if p not in names:
            if (base / path).exists():
                names[p] = np = p.lower().replace(' ', '-')
                cntr.incr('.' if p == np else 'n')
                path = path.parent
                if path.name:
                    self.namer(path, names, base, cntr)
            else:
                cntr.incr('F')

    names_args = ((('scanned', '.'), ('renamed', 'r'), ('normalized', 'n'), ('failed', 'F')), 'Naming:')

    def names(self, paths=(), **kw):
        """Rebuild the Names resource with normalized names for all entries."""
        with counters(self.names_args, kw) as cs:
            self.adjust_kw(kw)
            with resource(Names.create(kw['base'])) as ns:
                ns.clear()
                for e in self.walker(paths, **kw):
                    self.namer(pth.Path(e.path), ns, **kw)
            return cs

    rename_args = ((('scanned', '.'), ('added', '+'), ('removed', '-'), ('modified', 'm'), ('normalized', 'n'), ('renamed', 'r'), ('failed', 'F')), 'Renaming:')

    def rename(self, paths=(), **kw):
        """Apply the Names mapping on disk (files first, then leftover dirs,
        deepest first), then rescan and refresh the mapping."""
        with counters(self.rename_args, kw) as cs:
            self.adjust_kw(kw)
            b = kw['base']
            with resource(Names.create(b)) as ns:
                if ns:
                    for e in self.walker(paths, **kw):
                        p = e.path
                        try:
                            d = b / ns.pop(p)
                        except KeyError:
                            cs.incr()
                            continue
                        self.rename_path(b / p, d, **kw, cntr_key='r')
                    ps = paths or ('', )
                    # Leftover names are directories; deepest-first so children
                    # move before their parents.
                    for o in sorted(ns.keys(), reverse=True):
                        d = b / ns.pop(o)
                        o = b / o
                        if o.exists() and o.is_dir():
                            for p in ps:
                                try:
                                    o.relative_to(b / p)
                                    break
                                except ValueError:
                                    continue
                            else:
                                cs.incr()
                                continue
                            self.rename_path(o, d, **kw, cntr_key='r')
                        else:
                            # NOTE(review): reconstructed indentation — this
                            # else is paired with the exists/is_dir check.
                            cs.incr()
                for p in self.rescanner(paths, **kw):
                    self.namer(pth.Path(p), ns, **kw)
            return cs


if __name__ == '__main__':
    # CLI: mutually exclusive maintenance modes over a roster at args.base.
    from .args import BArgs
    a = BArgs()
    a.add_argument('paths', nargs='*', help='Paths to follow')
    a.add_argument('-u', '--prune', action=a.st, help='Prune absorb dir')
    a.add_argument('-a', '--absorb', help='Path to absorb uniques from')
    a.add_argument('-x', '--rename', action=a.st, help='Rename files')
    a.add_argument('-R', '--rescan', action=a.st, help='Rescan base')
    a.add_argument('-s', '--scan', action=a.st, help='Scan base')
    a.add_argument('-e', '--expel', help='Path to expel duplicates to')
    a.add_argument('-c', '--check', action=a.st, help='Check all digests')
    a.add_argument('-n', '--names', action=a.st, help='Names of files')
    a = a.parse_args()
    r = Roster.create(a.base)
    if a.prune:
        abase = None if a.absorb is None or a.absorb == config.DEFAULT else a.absorb
        r.prune(a.paths, abase=abase)
    elif a.absorb:
        abase = None if a.absorb == config.DEFAULT else a.absorb
        r.absorb(a.paths, abase=abase)
    elif a.rename:
        r.rename(a.paths)
    else:
        if a.rescan:
            r.rescan(a.paths)
        elif a.scan:
            r.scan(a.paths)
        if a.expel:
            ebase = None if a.expel == config.DEFAULT else a.expel
            r.expel(ebase=ebase)
        if a.check:
            r.check_ok(a.paths)
        if a.names:
            r.names(a.paths)
    r.save()
nilq/baby-python
python
# Entry point: build and run the aita application with a named configuration.
import aita

if __name__ == "__main__":
    # Available configurations: Development, Testing, Production
    app = aita.create_app('Development')
    app.run()
nilq/baby-python
python
import json from logging import root import os import warnings from skimage.color import rgb2lab, gray2rgb, rgba2rgb from skimage.util import img_as_float import numpy as np import numpy.typing as npt import torch from torch.utils.data import DataLoader import torch.optim as optim import torch.nn as nn from torchvision.models import vgg16_bn from torchvision.transforms import Resize from sklearn.metrics import f1_score, precision_recall_fscore_support, cohen_kappa_score, confusion_matrix from sklearn import svm from sklearn.cluster import MiniBatchKMeans from sklearn.model_selection import train_test_split from scipy.spatial.distance import cdist import joblib from termcolor import colored import math from math import floor from collections import OrderedDict from skimage.color import lab2rgb from ..models.lcn import LCNCreator, MarkerBasedNorm2d, MarkerBasedNorm3d, LIDSConvNet from ._dataset import LIDSDataset from PIL import Image import nibabel as nib import re ift = None try: import pyift.pyift as ift except: warnings.warn("PyIFT is not installed.", ImportWarning) def load_image(path: str, lab: bool=True) -> np.ndarray: if path.endswith('.mimg'): image = load_mimage(path) elif path.endswith('.nii.gz') or path.endswith('.nii.gz'): image = np.asanyarray(nib.load(path).dataobj) else: image = np.asarray(Image.open(path)) if lab: if image.ndim == 3 and image.shape[-1] == 4: image = rgba2rgb(image) elif image.ndim == 2 or image.shape[-1] == 1: image = gray2rgb(image) elif image.ndim == 3 and image.shape[-1] > 4: image = gray2rgb(image) elif image.ndim == 4 and image.shape[-1] == 4: image = rgba2rgb(image) image = rgb2lab(image) if image.dtype != float: image = img_as_float(image) return image def image_to_rgb(image): warnings.warn("'image_to_rgb' will be remove due to its misleading name. 
" + "Use 'from_lab_to_rgb' instead", DeprecationWarning, stacklevel=2 ) return from_lab_to_rgb(image) def from_lab_to_rgb(image): image = lab2rgb(image) return image def load_markers(markers_dir): markers = [] lines = None with open(markers_dir, 'r') as f: lines = f.readlines() label_infos = [int(info) for info in lines[0].split(" ")] is_2d = len(label_infos) == 3 if is_2d: image_shape = (label_infos[2], label_infos[1]) else: image_shape = (label_infos[2], label_infos[1], label_infos[3]) markers = np.zeros(image_shape, dtype=np.int) for line in lines[1:]: split_line = line.split(" ") if is_2d: y, x, label = int(split_line[0]), int(split_line[1]), int(split_line[3]) markers[x][y] = label else: x, y, z, label = int(split_line[0]), int(split_line[1]), int(split_line[3]), int(split_line[4]) markers[x][y][z] = label return markers def load_images_and_markers(path): dirs = os.listdir(path) images_names = [filename for filename in dirs if not filename.endswith('.txt')] makers_names = [filename for filename in dirs if filename.endswith('.txt')] images_names.sort() makers_names.sort() images = [] images_markers = [] for image_name, marker_name in zip(images_names, makers_names): if image_name.endswith('.npy'): image = np.load(os.path.join(path, image_name)) else: image = load_image(os.path.join(path, image_name)) markers = load_markers(os.path.join(path, marker_name)) images.append(image) images_markers.append(markers) return np.array(images), np.array(images_markers) def _convert_arch_from_lids_format(arch): stdev_factor = arch['stdev_factor'] n_layers = arch['nlayers'] n_arch = { "type": "sequential", "layers": {} } for i in range(1, n_layers + 1): layer_name = f"layer{i}" layer_params = arch[layer_name] kernel_size = layer_params['conv']['kernel_size'] is3d = kernel_size[2] > 0 end = 3 if is3d else 2 dilation_rate = layer_params['conv']['dilation_rate'][:end] kernel_size = kernel_size[:end] m_norm_layer = { "operation": "m_norm3d" if is3d else "m_norm2d", "params": { 
"kernel_size": kernel_size, "dilation": dilation_rate, "default_std": stdev_factor } } conv_layer = { "operation": "conv3d" if is3d else "conv2d", "params": { "kernel_size": kernel_size, "dilation": dilation_rate, "number_of_kernels_per_marker": layer_params['conv']['nkernels_per_image'], "padding": [k_size // 2 for k_size in kernel_size], "out_channels": layer_params['conv']['noutput_channels'], "stride": 1 } } relu_layer = None if layer_params['relu']: relu_layer = { "operation": "relu", "params": { "inplace": True } } pool_type_mapping = { "max_pool2d": "max_pool2d", "avg_pool2d": "avg_pool2d", "max_pool3d": "max_pool3d", "avg_pool3d": "avg_pool3d", "no_pool": None } pool_type = layer_params['pooling']['type'] if is3d and pool_type != "no_pool": pool_type += "3d" elif pool_type != "no_pool": pool_type += "2d" assert pool_type in pool_type_mapping, f"{pool_type} is not a supported pooling operation" if pool_type == "no_pool": pool_layer = None else: pool_kernel_size = layer_params['pooling']['size'][:end] pool_layer = { "operation": pool_type_mapping[pool_type], "params": { "kernel_size": pool_kernel_size, "stride": layer_params['pooling']['stride'], "padding": [k_size // 2 for k_size in pool_kernel_size] } } n_arch['layers'][f'm-norm{i}'] = m_norm_layer n_arch['layers'][f'conv{i}'] = conv_layer if relu_layer: n_arch['layers'][f'activation{i}'] = relu_layer if pool_layer: n_arch['layers'][f'pool{i}'] = pool_layer return { "features": n_arch } def load_architecture(architecture_dir): path = architecture_dir with open(path) as json_file: architecture = json.load(json_file) if 'nlayers' in architecture: architecture = _convert_arch_from_lids_format(architecture) return architecture def configure_dataset(dataset_dir, split_dir, transform=None): dataset = LIDSDataset(dataset_dir, split_dir, transform) return dataset def build_model(architecture, images=None, markers=None, input_shape=None, batch_size=32, train_set=None, remove_border=0, relabel_markers=True, 
default_std=1e-6, device='cpu', verbose=False): creator = LCNCreator(architecture, images=images, markers=markers, input_shape=input_shape, batch_size=batch_size, relabel_markers=relabel_markers, remove_border=remove_border, default_std=default_std, device=device) if verbose: print("Building model...") creator.build_model(verbose=verbose) model = creator.get_LIDSConvNet() if verbose: print("Model ready.") return model def get_torchvision_model(model_name, number_classes, pretrained=True, device='cpu'): model = None if model_name == "vgg16_bn": if pretrained: model = vgg16_bn(pretrained=pretrained) model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, number_classes), ) for m in model.classifier.modules(): if isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) else: model = vgg16_bn(num_classes=number_classes, init_weights=True) model.to(device) return model def train_mlp(model, train_set, epochs=30, batch_size=64, lr=1e-3, weight_decay=1e-3, criterion=nn.CrossEntropyLoss(), device='cpu'): dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=False) model.to(device) model.feature_extractor.eval() model.classifier.train() #optimizer optimizer = optim.Adam(model.classifier.parameters(), lr=lr, weight_decay=weight_decay) #training print(f"Training classifier for {epochs} epochs") for epoch in range(0, epochs): print('-' * 40) print('Epoch {}/{}'.format(epoch, epochs - 1)) running_loss = 0.0 running_corrects = 0.0 for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, labels) preds = torch.max(outputs, 1)[1] loss.backward() #clip_grad_norm_(self.mlp.parameters(), 1) 
optimizer.step() #print(outputs) running_loss += loss.item()*inputs.size(0)/len(train_set) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss epoch_acc = running_corrects.double()/len(train_set) print('Loss: {:.6f} Acc: {:.6f}'.format(epoch_loss, epoch_acc)) def train_model(model, train_set, epochs=30, batch_size=64, lr=1e-3, weight_decay=1e-3, step=0, loss_function=nn.CrossEntropyLoss, device='cpu', ignore_label=-100, only_classifier=False, wandb=None): #torch.manual_seed(42) #np.random.seed(42) #if device != 'cpu': # torch.backends.cudnn.deterministic = True dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=False) model.to(device) model.eval() criterion = loss_function(ignore_index=ignore_label) parameters = [] if not only_classifier: model.feature_extractor.train() parameters.append({ "params": model.feature_extractor.parameters(), "lr": lr, "weight_decay": weight_decay }) model.classifier.train() parameters.append({ "params": model.classifier.parameters(), "lr": lr, "weight_decay": weight_decay }) #optimizer optimizer = optim.Adam(parameters) if step > 0: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step, gamma=0.1) #training print(f"Training classifier for {epochs} epochs") for epoch in range(0, epochs): print('-' * 40) print('Epoch {}/{}'.format(epoch, epochs - 1)) running_loss = 0.0 running_corrects = 0.0 n = 0 for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, labels) preds = torch.max(outputs, 1)[1] loss.backward() if epoch < 3: nn.utils.clip_grad_norm_(model.parameters(), .1) else: nn.utils.clip_grad_norm_(model.parameters(), 1) optimizer.step() #print(outputs) mask = labels != ignore_label running_loss += loss.item()*(mask.sum()) running_corrects += torch.sum(preds[mask] == labels[mask].data) n += (mask).sum() if step > 0: scheduler.step() 
epoch_loss = running_loss/n epoch_acc = (running_corrects.double())/n if wandb: wandb.log({"loss": epoch_loss, "train-acc": epoch_acc}, step=epoch) print('Loss: {:.6f} Acc: {:.6f}'.format(epoch_loss, epoch_acc)) #if epoch_acc >= 0.9900000: # break def save_model(model, outputs_dir, model_filename): if not os.path.exists(outputs_dir): os.makedirs(outputs_dir) dir_to_save = os.path.join(outputs_dir, model_filename) print("Saving model...") torch.save(model.state_dict(), dir_to_save) def load_model(model_path, architecture, input_shape, remove_border=0, default_std=1e-6): state_dict = torch.load(model_path, map_location=torch.device('cpu')) creator = LCNCreator(architecture, input_shape=input_shape, remove_border=remove_border, default_std=default_std, relabel_markers=False) print("Loading model...") creator.load_model(state_dict) model = creator.get_LIDSConvNet() return model def load_torchvision_model_weights(model, weigths_path): state_dict = torch.load(weigths_path, map_location=torch.device('cpu')) model.load_state_dict(state_dict) return model def load_weights_from_lids_model(model, lids_model_dir): print("Loading LIDS model...") for name, layer in model.named_children(): print(name) if isinstance(layer, MarkerBasedNorm2d): conv_name = name.replace('m-norm', 'conv') with open(os.path.join(lids_model_dir, f"{conv_name}-mean.txt")) as f: lines = f.readlines()[0] mean = np.array([float(line) for line in lines.split(' ') if len(line) > 0]) with open(os.path.join(lids_model_dir, f"{conv_name}-stdev.txt")) as f: lines = f.readlines()[0] std = np.array([float(line) for line in lines.split(' ') if len(line) > 0]) layer.mean_by_channel = torch.from_numpy(mean).float() layer.std_by_channel = torch.from_numpy(std).float() if isinstance(layer, nn.Conv2d): if os.path.exists(os.path.join(lids_model_dir, f"{name}-kernels.npy")): weights = np.load(os.path.join(lids_model_dir, f"{name}-kernels.npy")) in_channels = layer.in_channels out_channels = layer.out_channels kernel_size = 
layer.kernel_size weights = weights.transpose() weights = weights.reshape(out_channels, kernel_size[1], kernel_size[0], in_channels) weights = weights.transpose(0, 3, 2, 1) layer.weight = nn.Parameter(torch.from_numpy(weights).float()) if isinstance(layer, nn.Conv3d): if os.path.exists(os.path.join(lids_model_dir, f"{name}-kernels.npy")): weights = np.load(os.path.join(lids_model_dir, f"{name}-kernels.npy")) in_channels = layer.in_channels out_channels = layer.out_channels kernel_size = layer.kernel_size weights = weights.transpose() weights = weights.reshape(out_channels, kernel_size[0], kernel_size[1], kernel_size[2], in_channels) weights = weights.transpose(0, 4, 1, 2, 3) layer.weight = nn.Parameter(torch.from_numpy(weights).float()) if isinstance(layer, MarkerBasedNorm3d): conv_name = name.replace('m-norm', 'conv') with open(os.path.join(lids_model_dir, f"{conv_name}-mean.txt")) as f: lines = f.readlines()[0] mean = np.array([float(line) for line in lines.split(' ') if len(line) > 0]) with open(os.path.join(lids_model_dir, f"{conv_name}-stdev.txt")) as f: lines = f.readlines()[0] std = np.array([float(line) for line in lines.split(' ') if len(line) > 0]) layer.mean_by_channel = nn.Parameter(torch.from_numpy(mean.reshape(1, -1, 1, 1, 1)).float()) layer.std_by_channel = nn.Parameter(torch.from_numpy(std.reshape(1, -1, 1, 1, 1)).float()) '''for name, layer in model.classifier.named_children(): print(name) if isinstance(layer, SpecialLinearLayer): if os.path.exists(os.path.join(lids_model_dir, f"{name}-weights.npy")): weights = np.load(os.path.join(lids_model_dir, f"split{split}-{name}-weights.npy")) weights = weights.transpose() with open(os.path.join(lids_model_dir, f"{name}-mean.txt")) as f: lines = f.readlines() mean = np.array([float(line) for line in lines]) with open(os.path.join(lids_model_dir, f"{name}-stdev.txt")) as f: lines = f.readlines() std = np.array([float(line) for line in lines]) layer.mean = torch.from_numpy(mean.reshape(1, -1)).float() 
layer.std = torch.from_numpy(std.reshape(1, -1)).float() layer._linear.weight = nn.Parameter(torch.from_numpy(weights).float())''' print("Finish loading...") return model def save_lids_model(model, architecture, split, outputs_dir, model_name): if not isinstance(model, LIDSConvNet): pass print("Saving model in LIDS format...") if model_name.endswith('.pt'): model_name = model_name.replace('.pt', '') if not os.path.exists(os.path.join(outputs_dir, model_name)): os.makedirs(os.path.join(outputs_dir, model_name)) if isinstance(split, str): split_basename = os.path.basename(split) split = re.findall(r'\d+', split_basename) if len(split) == 0: split = 1 else: split = int(split[0]) layer_specs = get_arch_in_lids_format(architecture, split) conv_count = 1 for _, layer in model.feature_extractor.named_children(): if isinstance(layer, SpecialConvLayer): weights = layer.conv.weight.detach().cpu() num_kernels = weights.size(0) weights = weights.reshape(num_kernels, -1) weights = weights.transpose(0, 1) mean = layer.mean_by_channel.detach().cpu() std = layer.std_by_channel.detach().cpu() mean = mean.reshape(1, -1) std = std.reshape(1, -1) np.save(os.path.join(outputs_dir, model_name, f"conv{conv_count}-kernels.npy"), weights.float()) np.savetxt(os.path.join(outputs_dir, model_name, f"conv{conv_count}-mean.txt"), mean.float()) np.savetxt(os.path.join(outputs_dir, model_name, f"conv{conv_count}-stdev.txt"), std.float()) conv_count += 1 for i, layer_spec in enumerate(layer_specs, 1): with open(os.path.join(outputs_dir, model_name, f"convlayerseeds-layer{i}.json"), 'w') as f: json.dump(layer_spec, f, indent=4) '''for name, layer in model.classifier.named_children(): if isinstance(layer, SpecialLinearLayer): weights = layer._linear.weight.detach().cpu() weights.transpose(0, 1) mean = layer.mean.detach().cpu() std = layer.std.detach().cpu() mean = mean.reshape(-1) std = std.reshape(-1) np.save(os.path.join(outputs_dir, model_name, f"{name}-weights.npy"), weights.float()) 
np.savetxt(os.path.join(outputs_dir, model_name, f"{name}-mean.txt"), mean.float()) np.savetxt(os.path.join(outputs_dir, model_name, f"{name}-std.txt"), std.float())''' def _calulate_metrics(true_labels, pred_labels): average = 'binary' if np.unique(true_labels).shape[0] == 2 else 'weighted' acc = 1.0*(true_labels == pred_labels).sum()/true_labels.shape[0] precision, recall, f_score, support = precision_recall_fscore_support(true_labels, pred_labels, zero_division=0) precision_w, recall_w, f_score_w, _ = precision_recall_fscore_support(true_labels, pred_labels, average=average, zero_division=0) cm = confusion_matrix(true_labels, pred_labels) cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("#" * 50) print(colored("Acc", "yellow"),f': {colored(f"{acc:.6f}", "blue", attrs=["bold"])}') print("-" * 50) print(colored("F1-score", "yellow"), f': {colored(f"{f1_score(true_labels, pred_labels, average=average):.6f}", "blue", attrs=["bold"])}') print("-" * 50) print("Accuracy", *cm.diagonal()) print("-" * 50) print("Precision:", *precision) print("Recall:", *recall) print("F-score:", *f_score) print("-" * 50) print("W-Precision:", precision_w) print("W-Recall:", recall_w) print("W-F-score:", f_score_w) print("-" * 50) print("Kappa {}".format(cohen_kappa_score(true_labels, pred_labels))) print("-" * 50) print("Suport", *support) print("#" * 50) def validate_model(model, val_set, criterion=nn.CrossEntropyLoss(), batch_size=32, device='cpu'): dataloader = DataLoader(val_set, batch_size=batch_size, shuffle=True, drop_last=False) model.eval() model.to(device) running_loss = 0.0 running_corrects = 0.0 true_labels = torch.Tensor([]).long() pred_labels = torch.Tensor([]).long() print("Validating...") for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) with torch.set_grad_enabled(False): outputs = model(inputs) loss = criterion(outputs, labels) preds = torch.max(outputs, 1)[1] running_loss += 
loss.item()*inputs.size(0)/len(val_set) running_corrects += torch.sum(preds == labels.data) true_labels = torch.cat((true_labels, labels.cpu())) pred_labels = torch.cat((pred_labels, preds.cpu())) print('Val - loss: {:.6f}'.format(running_loss)) print("Calculating metrics...") _calulate_metrics(true_labels, pred_labels) def train_svm(model, train_set, batch_size=32, max_iter=10000, device='cpu', C=100, degree=3): print("Preparing to train SVM") clf = svm.SVC(max_iter=max_iter, C=C, degree=degree, gamma='auto', coef0=0, decision_function_shape='ovo', kernel='linear') dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=False, drop_last=False) model.eval() model.to(device) features = torch.Tensor([]) y = torch.Tensor([]).long() for inputs, labels in dataloader: inputs, labels = inputs.to(device), labels.to(device) outputs = model(inputs).detach() features = torch.cat((features, outputs.cpu())) y = torch.cat((y, labels.cpu())) print("Fitting SVM...") clf.fit(features.flatten(start_dim=1), y) print("Done") return clf def save_svm(clf, outputs_dir, svm_filename): if not os.path.exists(outputs_dir): os.makedirs(outputs_dir) dir_to_save = os.path.join(outputs_dir, svm_filename) print("Saving SVM...") joblib.dump(clf, dir_to_save, compress=9) def load_svm(svm_path): print("Loading SVM...") clf = joblib.load(svm_path) return clf def validate_svm(model, clf, val_set, batch_size=32, device='cpu'): dataloader = DataLoader(val_set, batch_size=batch_size, shuffle=False, drop_last=False) model.eval() model.to(device) true_labels = torch.Tensor([]).long() pred_labels = torch.Tensor([]).long() for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) if hasattr(model, "features"): outputs = model.features(inputs).detach() else: outputs = model(inputs).detach() preds = clf.predict(outputs.cpu().flatten(start_dim=1)) true_labels = torch.cat((true_labels, labels.cpu())) pred_labels = torch.cat((pred_labels, 
torch.from_numpy(preds))) print("Calculating metrics...") _calulate_metrics(true_labels, pred_labels) def _images_close_to_center(images, centers): _images = [] for center in centers: _center = np.expand_dims(center, 0) dist = cdist(images, _center) _images.append(images[np.argmin(dist)]) return np.array(_images) def _find_elems_in_array(a, elems): indices = [] for elem in elems: _elem = np.expand_dims(elem, 0) mask = np.all(a == _elem, axis=1) indice = np.where(mask)[0][0:1].item() indices.append(indice) return indices def select_images_to_put_markers(dataset, class_proportion=0.05): dataloader = DataLoader(dataset, batch_size=64, shuffle=False, drop_last=False) all_images = None all_labels = None input_shape = dataset[0][0].shape for images, labels in dataloader: if all_images is None: all_images = images all_labels = labels else: all_images = torch.cat((all_images, images)) all_labels = torch.cat((all_labels, labels)) all_images = all_images.flatten(1).numpy() all_labels = all_labels.numpy() possible_labels = np.unique(all_labels) images_names = [] roots = None for label in possible_labels: images_of_label = all_images[all_labels == label] n_clusters = max(1, math.floor(images_of_label.shape[0]*class_proportion)) kmeans = MiniBatchKMeans(n_clusters=n_clusters, random_state=42) kmeans.fit(images_of_label) roots_of_label = _images_close_to_center(images_of_label, kmeans.cluster_centers_) if roots is None: roots = roots_of_label else: roots = np.concatenate((roots, roots_of_label)) indices = _find_elems_in_array(all_images, roots_of_label) for indice in indices: images_names.append(dataset.images_names[indice]) return roots.reshape(-1, *input_shape), images_names def _label_of_image(image_name): if not isinstance(image_name, str): raise TypeError("Parameter image_name must be a string.") i = image_name.index("_") label = int(image_name[0:i]) - 1 return label def split_dataset(dataset_dir, train_size, val_size=0, test_size=None, stratify=True): if 
os.path.exists(os.path.join(dataset_dir, 'files.txt')): with open(os.path.join(dataset_dir, 'files.txt'), 'r') as f: filenames = f.read().split('\n') filenames = [filename for filename in filenames if len(filename) > 0] else: filenames = os.listdir(dataset_dir) filenames.sort() labels = np.array([_label_of_image(filename) for filename in filenames]) if train_size > 1: train_size = int(train_size) train_split, test_split, _, test_labels = train_test_split(filenames, labels, train_size=train_size, test_size=test_size, stratify=labels) val_size = 0 if val_size is None else val_size val_split = [] if val_size > 0: test_size = len(test_split) - val_size test_size = int(test_size) if test_size > 0 else test_size val_split, test_split = train_test_split(test_split, test_size=test_size, stratify=test_labels) return train_split, val_split, test_split def compute_grad_cam(model, image, target_layers, class_label=0, device="cpu"): model = model.to(device) image = image.to(device) model.eval() gradients = [] features = [] if image.dim() == 3: x = image.unsqueeze(0) else: x = image for name, module in model._modules.items(): if name == "features" or name == "feature_extractor": for layer_name, layer in module.named_children(): x = layer(x) if layer_name in target_layers: x.register_hook(lambda grad : gradients.append(grad)) features.append(x) elif name == "classifier": x = x.flatten(1) x = module(x) else: x = module(x) y = x one_hot = torch.zeros_like(y, device=device) one_hot[0][class_label] = 1 one_hot = torch.sum(one_hot * y) model.zero_grad() one_hot.backward() weights = torch.mean(gradients[-1], axis=(2,3))[0, :] target = features[-1][0].detach() cam = torch.zeros_like(target[0]) for i, w in enumerate(weights): cam += w * target[i, :, ] cam[cam < 0] = 0.0 print(cam.shape) print(image.shape) resize = Resize(image.shape[1:]) cam = resize(cam.unsqueeze(0)) cam = cam - cam.min() cam = cam/cam.max() return cam.cpu().numpy() def load_mimage(path): assert ift is not None, "PyIFT 
is not available" mimge = ift.ReadMImage(path) return mimge.AsNumPy().squeeze() def save_mimage(path, image): assert ift is not None, "PyIFT is not available" mimage = ift.CreateMImageFromNumPy(np.ascontiguousarray(image)) ift.WriteMImage(mimage, path) def save_opf_dataset(path, opf_dataset): assert ift is not None, "PyIFT is not available" ift.WriteDataSet(opf_dataset, path) def load_opf_dataset(path): assert ift is not None, "PyIFT is not available" opf_dataset = ift.ReadDataSet(path) return opf_dataset def save_intermediate_outputs(model, dataset, outputs_dir, batch_size=16, layers=None, only_features=True, format="mimg", remove_border=0, device='cpu'): if only_features: if hasattr(model, "features"): _model = model.features else: _model = model.feature_extractor else: _model = model last_layer = None for layer_name in layers: layer_dir = os.path.join(outputs_dir, 'intermediate-outputs', layer_name) if not os.path.exists(layer_dir): os.makedirs(layer_dir) last_layer = layer_name _model.eval() _model.to(device) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False) outputs = {} outputs_count = {} outputs_names = dataset.images_names print("Saving intermediate outputs...") for inputs, _ in dataloader: inputs = inputs.to(device) for layer_name, layer in _model.named_children(): _outputs = layer(inputs) if layer_name == last_layer and remove_border > 0: b = remove_border _outputs = _outputs[:,:, b:-b, b:-b] inputs = _outputs if layer_name not in outputs_count: outputs_count[layer_name] = 0 if layers is None or len(layers) == 0 or layer_name in layers: if format == 'zip': if layer_name not in outputs: outputs[layer_name] = _outputs.detach().cpu() else: outputs[layer_name] = torch.cat((outputs[layer_name],_outputs.detach().cpu())) elif format in ["mimg", "npy"]: layer_dir = os.path.join(outputs_dir, 'intermediate-outputs', layer_name) _outputs = _outputs.detach().cpu() for _output in _outputs: _output_dir = os.path.join(layer_dir, 
f"{outputs_names[outputs_count[layer_name]].split('.')[0]}.{format}") if format == "npy": np.save(_output_dir, _output) else: save_mimgage(_output_dir, _output.permute(1, 2, 0).numpy()) outputs_count[layer_name] += 1 del _outputs torch.cuda.empty_cache() if format == 'zip': for layer_name in outputs: _outputs = outputs[layer_name] _outputs = _outputs.permute(0, 2, 3, 1).numpy().reshape(_outputs.shape[0], -1) labels = np.array([int(image_name[0:image_name.index("_")]) - 1 for image_name in outputs_names]).astype(np.int32) opf_dataset = ift.CreateDataSetFromNumPy(_outputs, labels + 1) opf_dataset.SetNClasses = labels.max() + 1 ift.SetStatus(opf_dataset, ift.IFT_TRAIN) ift.AddStatus(opf_dataset, ift.IFT_SUPERVISED) # opf_dataset.SetLabels(labels + 1) _output_dir = os.path.join(layer_dir, "dataset.zip") save_opf_dataset(_output_dir, opf_dataset) def get_arch_in_lids_format(architecture, split): layer_names = list(architecture['features']['layers'].keys()) layers = architecture['features']['layers'] operations = [layers[layer_name]['operation'] for layer_name in layer_names] conv_layers_count = 1 lids_layer_specs = [] for i in range(len(layer_names)): layer_spec = {} if operations[i] == 'conv2d': params = layers[layer_names[i]]['params'] kernel_size = params['kernel_size'] dilation = params['kernel_size'] number_of_kernels_per_markers = params['number_of_kernels_per_marker'] out_channels = params['out_channels'] layer_spec['layer'] = conv_layers_count layer_spec['split'] = split if isinstance(kernel_size, int): layer_spec['kernelsize'] = [kernel_size, kernel_size, 0] else: layer_spec['kernelsize'] = [*kernel_size, 0] if isinstance(dilation, int): layer_spec['dilationrate'] = [dilation, dilation, 0] else: layer_spec['dilationrate'] = [*dilation, 0] layer_spec['nkernelspermarker'] = number_of_kernels_per_markers layer_spec['finalnkernels'] = out_channels layer_spec['nkernelsperimage'] = 10000 if i + 1 < len(layer_names) and operations[i+1] == 'relu': layer_spec['relu'] = 
1 else: layer_spec['relu'] = 0 conv_layers_count += 1 j = i + 1 if layer_spec['relu'] == 0 else i + 2 pool_spec = {} if j < len(layer_names) and 'pool' in operations[j]: if operations[j] == 'max_pool2d': pool_spec['pool_type'] = 2 elif operations[j] == 'avg_pool2d': pool_spec['pool_type'] = 1 pool_params = layers[layer_names[j]]['params'] kernel_size = pool_params['kernel_size'] stride = pool_params['stride'] if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] pool_spec['poolxsize'] = kernel_size[0] pool_spec['poolysize'] = kernel_size[1] pool_spec['poolzsize'] = 0 pool_spec['stride'] = stride else: pool_spec['pool_type'] = 0 layer_spec['pooling'] = pool_spec lids_layer_specs.append(layer_spec) return lids_layer_specs def create_arch(layers_dir): layers_info_files = [f for f in os.listdir(layers_dir) if f.endswith('.json')] layers_info_files.sort() arch = OrderedDict([('features', {'type': 'sequential', 'layers': OrderedDict()})]) layers = arch['features']['layers'] for i, layer_info_file in enumerate(layers_info_files, 1): with open(os.path.join(layers_dir, layer_info_file), 'r') as f: layer_info = json.load(f) # print(layer_info) conv_spec = { 'operation': 'conv2d', 'params': { 'kernel_size': layer_info['kernelsize'][:-1], 'number_of_kernels_per_marker': layer_info['nkernelspermarker'], 'dilation': layer_info['dilationrate'][:-1], 'out_channels': layer_info['finalnkernels'], 'padding': [floor((layer_info['kernelsize'][0] + (layer_info['kernelsize'][0] - 1) * (layer_info['dilationrate'][0] -1))/2), floor((layer_info['kernelsize'][1] + (layer_info['kernelsize'][1] - 1) * (layer_info['dilationrate'][1] -1))/2)], 'stride': 1 } } if layer_info['relu'] == 1: relu_spec = { 'operation': 'relu', 'params': { 'inplace': True } } else: relu_spec = None if layer_info['pooling']['pooltype'] != 0: pool_spec = { 'params': { 'kernel_size': [layer_info['pooling']['poolxsize'], layer_info['pooling']['poolysize']], 'stride': layer_info['pooling']['poolstride'], 
'padding': [floor(layer_info['pooling']['poolxsize']/2), floor(layer_info['pooling']['poolysize']/2)] } } if layer_info['pooling']['pooltype'] == 2: pool_spec['operation'] = 'max_pool2d' elif layer_info['pooling']['pooltype'] == 1: pool_spec['operation'] = 'avg_pool2d' layers[f'conv{i}'] = conv_spec if relu_spec is not None: layers[f'relu{i}'] = relu_spec if pool_spec is not None: layers[f'pool{i}'] = pool_spec return arch def save_arch(arch, output_path): dirname = os.path.dirname(output_path) if not os.path.exists(dirname) and dirname != '': os.makedirs(os.path.dirname(output_path)) with open(output_path, 'w') as f: json.dump(arch, f, indent=4)
nilq/baby-python
python
from functools import partial
from PyQt5.QtCore import pyqtSignal, QTimer, Qt
from PyQt5.QtWidgets import QInputDialog, QLabel, QVBoxLayout, QLineEdit, QWidget, QPushButton
from electrum.i18n import _
from electrum.plugin import hook
from electrum.wallet import Standard_Wallet
from electrum.gui.qt.util import WindowModalDialog
from .ledger import LedgerPlugin, Ledger_Client, AtomicBoolean, AbstractTracker
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available


class Plugin(LedgerPlugin, QtPluginBase):
    """Qt front-end for the Ledger hardware-wallet plugin.

    Combines the device logic from LedgerPlugin with the Qt plumbing from
    QtPluginBase; the GUI-specific pieces below only add menu entries and
    create the per-window handler.
    """

    # Icons shown in the status bar depending on pairing state.
    icon_unpaired = "ledger_unpaired.png"
    icon_paired = "ledger.png"

    def create_handler(self, window):
        # One handler per wallet window; it mediates all dialogs for this device.
        return Ledger_Handler(window)

    @only_hook_if_libraries_available
    @hook
    def receive_menu(self, menu, addrs, wallet):
        """Add a "Show on Ledger" entry to the receive-address context menu.

        Only offered for a Standard_Wallet backed by this plugin's keystore,
        and only when exactly one address is selected.
        """
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                # Run on the keystore's worker thread so the GUI stays responsive
                # while the device displays the address.
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on Ledger"), show_address)


class Ledger_UI(WindowModalDialog):
    """Progress dialog shown while the device works on a long operation.

    Polls *parse_data* every 500 ms for a status string, and flips *atomic_b*
    to true when the user presses Cancel so the worker thread can stop.
    """

    def __init__(self, parse_data: AbstractTracker, atomic_b: AtomicBoolean, parent=None, title='Ledger UI'):
        super().__init__(parent, title)
        # self.setWindowModality(Qt.NonModal)
        # Thread interrupter. If we cancel, set true
        self.parse_data = parse_data
        self.atomic_b = atomic_b
        self.label = QLabel('')
        self.label.setText(_("Generating Information..."))
        layout = QVBoxLayout(self)
        layout.addWidget(self.label)
        self.cancel = QPushButton(_('Cancel'))

        def end():
            # Stop polling, dismiss the dialog, then signal the worker thread.
            self.finished()
            self.close()
            self.atomic_b.set_true()
        self.cancel.clicked.connect(end)
        layout.addWidget(self.cancel)
        self.setLayout(layout)
        # Remove the window close button so the only way out is Cancel,
        # which guarantees atomic_b gets set.
        self.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint)
        self.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint)
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_text)

    def begin(self):
        # Start polling the tracker twice per second.
        self.timer.start(500)

    def finished(self):
        # Stop the polling timer; safe to call more than once.
        self.timer.stop()

    def update_text(self):
        # Refresh the label from the tracker's latest status string.
        self.label.setText(self.parse_data.parsed_string())


class Ledger_Handler(QtHandlerBase):
    """Bridges the (non-GUI) Ledger worker thread to Qt dialogs.

    The worker thread calls get_auth/get_setup/get_ui/finished_ui; each emits
    a signal so the actual dialog work happens on the GUI thread, and the
    worker blocks on `self.done` (provided by QtHandlerBase) until the user
    responds.
    """

    setup_signal = pyqtSignal()
    auth_signal = pyqtSignal(object, object)
    ui_start_signal = pyqtSignal(object, object, object)
    ui_stop_signal = pyqtSignal()

    def __init__(self, win):
        super(Ledger_Handler, self).__init__(win, 'Ledger')
        # Route each worker-thread request to its GUI-thread slot.
        self.setup_signal.connect(self.setup_dialog)
        self.auth_signal.connect(self.auth_dialog)
        self.ui_start_signal.connect(self.ui_dialog)
        self.ui_stop_signal.connect(self.stop_ui_dialog)

    def word_dialog(self, msg):
        """Prompt for a PIN/passphrase; stores it in self.word (None if cancelled)."""
        response = QInputDialog.getText(self.top_level_window(), "Ledger Wallet Authentication", msg, QLineEdit.Password)
        if not response[1]:
            # User cancelled the dialog.
            self.word = None
        else:
            self.word = str(response[0])
        # Unblock the waiting worker thread.
        self.done.set()

    def message_dialog(self, msg):
        """Show a non-blocking status message dialog."""
        self.clear_dialog()
        self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Ledger Status"))
        l = QLabel(msg)
        vbox = QVBoxLayout(dialog)
        vbox.addWidget(l)
        dialog.show()

    def ui_dialog(self, title, stopped_boolean, parse_data):
        """GUI-thread slot: open the Ledger_UI progress dialog and start polling."""
        self.clear_dialog()
        self.dialog = Ledger_UI(parse_data, stopped_boolean, self.top_level_window(), title)
        self.dialog.show()
        self.dialog.begin()

    def stop_ui_dialog(self):
        # Only stop the timer if the current dialog really is the progress UI.
        if isinstance(self.dialog, Ledger_UI):
            self.dialog.finished()

    def auth_dialog(self, data, client: 'Ledger_Client'):
        """GUI-thread slot: run the 2FA auth dialog and capture the PIN."""
        try:
            from .auth2fa import LedgerAuthDialog
        except ImportError as e:
            # Optional dependency missing; surface the error instead of crashing.
            self.message_dialog(repr(e))
            return
        dialog = LedgerAuthDialog(self, data, client=client)
        dialog.exec_()
        self.word = dialog.pin
        self.done.set()

    def get_auth(self, data, *, client: 'Ledger_Client'):
        """Called from the worker thread: block until the user supplies a PIN."""
        self.done.clear()
        self.auth_signal.emit(data, client)
        self.done.wait()
        return self.word

    def get_setup(self):
        """Called from the worker thread: block while the setup dialog runs."""
        self.done.clear()
        self.setup_signal.emit()
        self.done.wait()
        return

    def get_ui(self, title, atomic_b, data):
        # Fire-and-forget: the progress dialog does not block the worker.
        self.ui_start_signal.emit(title, atomic_b, data)

    def finished_ui(self):
        # Ask the GUI thread to stop the progress dialog's polling.
        self.ui_stop_signal.emit()

    def setup_dialog(self):
        self.show_error(_('Initialization of Ledger HW devices is currently disabled.'))
nilq/baby-python
python
from data.scrapers import *
import pandas as pd
from wordcloud import WordCloud
import matplotlib.pyplot as plt


def model_run(model, freq='1111111', existing=None):
    """Instantiate one scraper class and collect its dataframes.

    Parameters
    ----------
    model : type
        A scraper class; ``model(freq)`` must expose a ``run()`` method
        returning an iterable of dataframes/rows.
    freq : str
        Schedule string forwarded to the scraper (one flag per weekday).
    existing : list or None
        Accumulator to append results to; a fresh list is created when None.

    Returns
    -------
    list
        The accumulator with the scraper's results appended.
    """
    # BUG FIX: the original crashed with AttributeError when called without
    # `existing` (None has no .append); fall back to a fresh list instead.
    if existing is None:
        existing = []
    scraper = model(freq)
    for df in scraper.run():
        existing.append(df)
    return existing


def generate_wordcloud(text, year=None):
    """Render a word cloud for *text*; optionally save it tagged by *year*."""
    wordcloud = WordCloud().generate(text)
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    if year:
        plt.savefig("../assets/img/jellyfish_{}.png".format(str(year)), format="png")
    plt.show()


def count_frequency(wordtxt):
    """Return a {word: count} dict ordered by ascending frequency."""
    freq = {}
    for word in wordtxt.split():
        freq[word] = freq.get(word, 0) + 1
    return {k: v for k, v in sorted(freq.items(), key=lambda item: item[1])}


if __name__ == "__main__":
    dfs = list()
    # Run every scraper on the full weekly schedule, pooling results.
    for scraper_cls in (SmithsonianScraper, FastCompanyScraper,
                        WorldEconomicForumScraper, NewScientistScraper,
                        TimeScraper, JStorScraper, QuartzScraper,
                        MarineScienceScraper, BBCEarthScraper, BBCNewsScraper,
                        TheGuardianScraper):
        model_run(scraper_cls, freq='1111111', existing=dfs)
    dfs = pd.DataFrame(dfs).sort_values(by="date")
    # Aggregate scraped words per publication year.
    grouped_df = dfs.groupby(dfs['date'].dt.year)['words'].agg(['sum', 'count']).reset_index()
    # BUG FIX: the original assigned into the row copies yielded by
    # iterrows(), so the 'freq' column never reached the dataframe.
    # Build the column on the frame itself instead.
    grouped_df['freq'] = grouped_df['sum'].apply(count_frequency)
    print(grouped_df)
    # for index, row in grouped_df.iterrows():
    #     generate_wordcloud(row['sum'], row['date'])
nilq/baby-python
python
import numpy as np
from ..pakbase import Package


class ModflowFlwob(Package):
    """
    Head-dependent flow boundary Observation package class. Minimal working
    example that will be refactored in a future version.

    Parameters
    ----------
    nqfb : int
        Number of cell groups for the head-dependent flow boundary
        observations
    nqcfb : int
        Greater than or equal to the total number of cells in all cell groups
    nqtfb : int
        Total number of head-dependent flow boundary observations for all
        cell groups
    iufbobsv : int
        unit number where output is saved
    tomultfb : float
        Time-offset multiplier for head-dependent flow boundary observations.
        The product of tomultfb and toffset must produce a time value in
        units consistent with other model input. tomultfb can be dimensionless
        or can be used to convert the units of toffset to the time unit used
        in the simulation.
    nqobfb : int list of length nqfb
        The number of times at which flows are observed for the group of
        cells
    nqclfb : int list of length nqfb
        Is a flag, and the absolute value of nqclfb is the number of cells in
        the group.  If nqclfb is less than zero, factor = 1.0 for all cells
        in the group.
    obsnam : string list of length nqtfb
        Observation name
    irefsp : int of length nqtfb
        Stress period to which the observation time is referenced.
        The reference point is the beginning of the specified stress period.
    toffset : float list of length nqtfb
        Is the time from the beginning of the stress period irefsp to the
        time of the observation. toffset must be in units such that the
        product of toffset and tomultfb are consistent with other model
        input. For steady state observations, specify irefsp as the steady
        state stress period and toffset less than or equal to perlen of the
        stress period. If perlen is zero, set toffset to zero. If the
        observation falls within a time step, linearly interpolation is used
        between values at the beginning and end of the time step.
    flwobs : float list of length nqtfb
        Observed flow value from the head-dependent flow boundary into the
        aquifer (+) or the flow from the aquifer into the boundary (-)
    layer : int list of length(nqfb, nqclfb)
        layer index for the cell included in the cell group
    row : int list of length(nqfb, nqclfb)
        row index for the cell included in the cell group
    column : int list of length(nqfb, nqclfb)
        column index of the cell included in the cell group
    factor : float list of length(nqfb, nqclfb)
        Is the portion of the simulated gain or loss in the cell that is
        included in the total gain or loss for this cell group (fn of eq. 5).
    flowtype : string
        String that corresponds to the head-dependent flow boundary condition
        type (CHD, GHB, DRN, RIV)
    extension : list of string
        Filename extension. If extension is None, extension is set to
        ['chob','obc','gbob','obg','drob','obd', 'rvob','obr']
        (default is None).
    no_print : boolean
        When True or 1, a list of flow observations will not be
        written to the Listing File (default is False)
    options : list of strings
        Package options (default is None).
    unitnumber : list of int
        File unit number. If unitnumber is None, unitnumber is set to
        [40, 140, 41, 141, 42, 142, 43, 143] (default is None).
    filenames : str or list of str
        Filenames to use for the package and the output files. If
        filenames=None the package name will be created using the model name
        and package extension and the flwob output name will be created using
        the model name and .out extension (for example, modflowtest.out), if
        iufbobsv is a number greater than zero. If a single string is passed
        the package will be set to the string and flwob output name will be
        created using the model name and .out extension, if iufbobsv is a
        number greater than zero. To define the names for all package files
        (input and output) the length of the list of strings should be 2.
        Default is None.

    Attributes
    ----------

    Methods
    -------

    See Also
    --------

    Notes
    -----
    This represents a minimal working example that will be refactored in a
    future version.

    """

    def __init__(self, model, nqfb=0, nqcfb=0, nqtfb=0, iufbobsv=0,
                 tomultfb=1.0, nqobfb=None, nqclfb=None, obsnam=None,
                 irefsp=None, toffset=None, flwobs=None, layer=None, row=None,
                 column=None, factor=None, flowtype=None, extension=None,
                 no_print=False, options=None, filenames=None,
                 unitnumber=None):
        """
        Package constructor
        """
        # Normalize all optional list arguments to empty lists.
        if nqobfb is None:
            nqobfb = []
        if nqclfb is None:
            nqclfb = []
        if obsnam is None:
            obsnam = []
        if irefsp is None:
            irefsp = []
        if toffset is None:
            toffset = []
        if flwobs is None:
            flwobs = []
        if layer is None:
            layer = []
        if row is None:
            row = []
        if column is None:
            column = []
        if factor is None:
            factor = []
        if extension is None:
            extension = ['chob', 'obc', 'gbob', 'obg', 'drob', 'obd',
                         'rvob', 'obr']
        if unitnumber is None:
            unitnumber = [40, 140, 41, 141, 42, 142, 43, 143]

        # Pick the package name, extension pair, unit numbers, and heading
        # for the requested boundary type. Each type uses a dedicated slice
        # of the extension/unitnumber lists: CHD->[0:2], GHB->[2:4],
        # DRN->[4:6], RIV->[6:8].
        if flowtype.upper().strip() == 'CHD':
            name = ['CHOB', 'DATA']
            extension = extension[0:2]
            unitnumber = unitnumber[0:2]
            iufbobsv = unitnumber[1]
            self.url = 'chob.htm'
            self.heading = '# CHOB for MODFLOW, generated by Flopy.'
        elif flowtype.upper().strip() == 'GHB':
            name = ['GBOB', 'DATA']
            extension = extension[2:4]
            unitnumber = unitnumber[2:4]
            iufbobsv = unitnumber[1]
            self.url = 'gbob.htm'
            self.heading = '# GBOB for MODFLOW, generated by Flopy.'
        elif flowtype.upper().strip() == 'DRN':
            name = ['DROB', 'DATA']
            extension = extension[4:6]
            unitnumber = unitnumber[4:6]
            iufbobsv = unitnumber[1]
            self.url = 'drob.htm'
            self.heading = '# DROB for MODFLOW, generated by Flopy.'
        elif flowtype.upper().strip() == 'RIV':
            name = ['RVOB', 'DATA']
            extension = extension[6:8]
            unitnumber = unitnumber[6:8]
            iufbobsv = unitnumber[1]
            self.url = 'rvob.htm'
            self.heading = '# RVOB for MODFLOW, generated by Flopy.'
        else:
            msg = 'ModflowFlwob: flowtype must be CHD, GHB, DRN, or RIV'
            raise KeyError(msg)

        # set filenames
        if filenames is None:
            filenames = [None, None]
        elif isinstance(filenames, str):
            filenames = [filenames, None]
        elif isinstance(filenames, list):
            if len(filenames) < 2:
                filenames.append(None)

        # call base package constructor
        Package.__init__(self, model, extension=extension, name=name,
                         unit_number=unitnumber, allowDuplicates=True,
                         filenames=filenames)

        self.nqfb = nqfb
        self.nqcfb = nqcfb
        self.nqtfb = nqtfb
        self.iufbobsv = iufbobsv
        self.tomultfb = tomultfb
        self.nqobfb = nqobfb
        self.nqclfb = nqclfb
        self.obsnam = obsnam
        self.irefsp = irefsp
        self.toffset = toffset
        self.flwobs = flwobs
        self.layer = layer
        self.row = row
        self.column = column
        self.factor = factor

        # -create empty arrays of the correct size
        # NOTE(review): the list attributes assigned just above (except
        # obsnam) are immediately replaced by numpy arrays here.
        self.layer = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
        self.row = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
        self.column = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
        self.factor = np.zeros((self.nqfb, max(self.nqclfb)), dtype='float32')
        self.nqobfb = np.zeros((self.nqfb), dtype='int32')
        self.nqclfb = np.zeros((self.nqfb), dtype='int32')
        self.irefsp = np.zeros((self.nqtfb), dtype='int32')
        self.toffset = np.zeros((self.nqtfb), dtype='float32')
        self.flwobs = np.zeros((self.nqtfb), dtype='float32')

        # -assign values to arrays
        # NOTE(review): each slice assignment requires the input list length
        # to match the declared nqfb/nqtfb sizes -- no validation is done.
        self.nqobfb[:] = nqobfb
        self.nqclfb[:] = nqclfb
        self.obsnam[:] = obsnam
        self.irefsp[:] = irefsp
        self.toffset[:] = toffset
        self.flwobs[:] = flwobs
        for i in range(self.nqfb):
            self.layer[i, :len(layer[i])] = layer[i]
            self.row[i, :len(row[i])] = row[i]
            self.column[i, :len(column[i])] = column[i]
            self.factor[i, :len(factor[i])] = factor[i]

        # add more checks here

        self.no_print = no_print
        self.np = 0
        if options is None:
            options = []
        if self.no_print:
            options.append('NOPRINT')
        self.options = options

        # add checks for input compliance (obsnam length, etc.)
        self.parent.add_package(self)

    def write_file(self):
        """
        Write the package file

        Returns
        -------
        None

        """
        # open file for writing
        f_fbob = open(self.fn_path, 'w')

        # write header
        f_fbob.write('{}\n'.format(self.heading))

        # write sections 1 and 2 : NOTE- what about NOPRINT?
        # Item 1: counts and output unit, fixed 10-character fields.
        line = '{:10d}'.format(self.nqfb)
        line += '{:10d}'.format(self.nqcfb)
        line += '{:10d}'.format(self.nqtfb)
        line += '{:10d}'.format(self.iufbobsv)
        if self.no_print or 'NOPRINT' in self.options:
            line += '{: >10}'.format('NOPRINT')
        line += '\n'
        f_fbob.write(line)
        f_fbob.write('{:10e}\n'.format(self.tomultfb))

        # write sections 3-5 looping through observations groups
        c = 0
        for i in range(self.nqfb):
            #        while (i < self.nqfb):
            # write section 3
            f_fbob.write('{:10d}{:10d}\n'.format(self.nqobfb[i],
                                                 self.nqclfb[i]))

            # Loop through observation times for the groups
            for j in range(self.nqobfb[i]):
                # write section 4
                line = '{}{:10d}{:10.4g} {:10.4g}\n'.format(self.obsnam[c],
                                                            self.irefsp[c],
                                                            self.toffset[c],
                                                            self.flwobs[c])
                f_fbob.write(line)
                c += 1  # index variable

            # write section 5 - NOTE- need to adjust factor for multiple
            # observations in the same cell
            for j in range(abs(self.nqclfb[i])):
                # set factor to 1.0 for all cells in group
                if self.nqclfb[i] < 0:
                    self.factor[i, :] = 1.0
                line = '{:10d}'.format(self.layer[i, j])
                line += '{:10d}'.format(self.row[i, j])
                line += '{:10d}'.format(self.column[i, j])
                # NOTE(review): ' '.format(x) ignores its argument and just
                # yields a single space -- presumably only a separator was
                # intended here; confirm against the RVOB/CHOB file spec.
                line += ' '.format(self.factor[i, j])
                # note is 10f good enough here?
                line += '{:10f}\n'.format(self.factor[i, j])
                f_fbob.write(line)

        f_fbob.close()

        #
        # swm: BEGIN hack for writing standard file
        sfname = self.fn_path
        sfname += '_ins'

        # write header
        f_ins = open(sfname, 'w')
        f_ins.write('jif @\n')
        f_ins.write('StandardFile 0 1 {}\n'.format(self.nqtfb))
        for i in range(0, self.nqtfb):
            f_ins.write('{}\n'.format(self.obsnam[i]))

        f_ins.close()
        # swm: END hack for writing standard file

        return
nilq/baby-python
python
# globals.py
#
# Hardcoded catalogue of the DBS database instances reachable through DAS.
# TODO: discover this list dynamically rather than hardcoding it.
instances = [
    'prod/global',
    'prod/phys01',
    'prod/phys02',
    'prod/phys03',
    'prod/caf',
]
nilq/baby-python
python
"""Demo: four ways of comparing two distinct Lock objects.

Shows `==`, `hash()`, SHA-1 of the repr, and `id()` (the Java analogue of
the last two is System.identityHashCode).
"""
from hashlib import sha1
from multiprocessing.dummy import Lock

m_lock = Lock()
z_lock = Lock()
# Two separate lock objects: equality falls back to identity, and the reprs
# embed different addresses.
print(f"是否相等:{m_lock==z_lock}\n{m_lock}\n{z_lock}")

m_code = hash(m_lock)
z_code = hash(z_lock)
# Default hash() is derived from the object's identity.
print(f"是否相等:{m_code==z_code}\n{m_code}\n{z_code}")

# Java equivalent: System.identityHashCode
m_code = sha1(str(m_lock).encode("utf-8")).hexdigest()
# BUG FIX: the original hashed str(z_code) -- the *previous hash value* --
# instead of the lock object itself, so the two digests were not comparable.
z_code = sha1(str(z_lock).encode("utf-8")).hexdigest()
# The reprs contain distinct addresses, so the digests differ.
print(f"是否相等:{m_code==z_code}\n{m_code}\n{z_code}")

m_code = id(m_lock)
z_code = id(z_lock)
# Two live objects always have distinct ids.
print(f"是否相等:{m_code==z_code}\n{m_code}\n{z_code}")
nilq/baby-python
python
import codecs
import csv
import json
import os
import random
import sys

# Base working directory; every config path below is resolved relative to it.
directory = str(os.getcwd())
# Target site of the admin bot (getUrl() below returns the https variant).
final_data = {"url": "http://10.10.0.112"}


def getNumberRecords():
    '''
    Counts the number of username-password records in the admin.csv file.

    Arguments:
        None

    Returns:
        Number of username-password records in admin.csv
    '''
    fileDirectory = directory + "/config/admin.csv"
    # Context manager closes the handle (the original leaked it).
    with codecs.open(fileDirectory, encoding='utf-8') as handle:
        readFile = csv.reader(handle, delimiter=",")
        number = sum(1 for _ in readFile)
    return number


def checkFilesExist(botNumber):
    '''
    Checks if the csv files to be generated already exist.

    Arguments:
        botNumber (int): Number of admin bots concurrently running

    Returns:
        True if adminLogin0.csv .. adminLogin<botNumber + 20>.csv all exist,
        else False
    '''
    fileNumber = botNumber + 20
    for number in range(fileNumber + 1):
        outputFileDirectory = directory + "/config/admin/adminLogin" + str(number) + ".csv"
        if not os.path.exists(outputFileDirectory):
            return False
    return True


def genAdminFiles(botNumbers):
    '''
    Distributes the credentials in admin.csv round-robin over
    adminLogin0.csv .. adminLogin<botNumbers + 20>.csv.

    Arguments:
        botNumbers (int): Number of admin bots concurrently running

    Returns:
        None
    '''
    fileNumber = botNumbers + 20
    recordsPerFile = int(getNumberRecords() / fileNumber)
    print(recordsPerFile)
    adminFileDirectory = directory + "/config/admin.csv"
    with codecs.open(adminFileDirectory, encoding='utf-8') as handle:
        readFile = csv.reader(handle, delimiter=",")
        number = 0
        for row in readFile:
            outputFileDirectory = directory + "/config/admin/adminLogin" + str(number) + ".csv"
            # Open per row with a context manager so every handle is closed
            # (the original left all of them open).
            with open(outputFileDirectory, mode='a', newline='') as writeFile:
                csv.writer(writeFile, delimiter=',').writerow(row)
            # BUG FIX: the original reset `number` to 0 *before* incrementing,
            # so adminLogin0.csv only ever received the very first record.
            # Cycle through all fileNumber + 1 files evenly instead.
            number = (number + 1) % (fileNumber + 1)


def getCredentials(botNumbers):
    '''
    Obtains credentials for the bot to login and removes them from the chosen
    file so concurrent bots do not reuse them.

    Arguments:
        botNumbers (int): Number of admin bots concurrently running

    Returns:
        [username, password, fileNumber] for the consumed credentials
    '''
    newRecords = []
    credentials = []
    # Pseudo-random file index in [0, botNumbers + 20).
    # NOTE(review): adminLogin<botNumbers + 20>.csv can never be selected here
    # even though genAdminFiles writes it -- confirm whether that is intended.
    number = ((random.randint(1, 2000) % 23) * (random.randint(1, 2000) % 17)
              * (random.randint(1000, 2000) % 13)) % (botNumbers + 20)
    fileDirectory = directory + "/config/admin/adminLogin" + str(number) + ".csv"
    print("Reading from " + str(fileDirectory))
    with codecs.open(fileDirectory, encoding='utf-8') as handle:
        readFile = csv.reader(handle, delimiter=",")
        trackNumber = 0
        for rows in readFile:
            if trackNumber == 0:
                # The first record is consumed as this bot's credentials.
                credentials.append(rows[0])
                credentials.append(rows[1])
                trackNumber += 1
            else:
                newRecords.append(rows)
    credentials.append(number)
    # Rewrite the file without the consumed record.
    # NOTE(review): only the first two fields of each record survive and no
    # CSV quoting is applied -- assumes plain user,password pairs.
    with open(fileDirectory, mode='w', newline='') as writeFile:
        for record in newRecords:
            writeFile.write(record[0] + ',' + record[1])
            writeFile.write('\n')
    return credentials


def writeBack(username, password, fileNumber):
    '''
    Writes the credentials back to the csv file after all admin actions have
    been completed.

    Arguments:
        username (str): Username that the bot logged in with
        password (str): Password that the bot logged in with
        fileNumber (int): Index of the adminLogin csv file to append to

    Returns:
        None
    '''
    fileDirectory = directory + "/config/admin/adminLogin" + str(fileNumber) + ".csv"
    with open(fileDirectory, mode='a', newline='') as writeFile:
        csv.writer(writeFile, delimiter=',').writerow([username, password])


def getUrl():
    '''
    Obtains the url that the bot is logging into.

    Arguments:
        None

    Returns:
        The target url as a string
    '''
    return "https://10.10.0.112"
nilq/baby-python
python
# Copyright 2020 Yuhao Zhang and Arun Kumar. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Shared configuration for the ImageNet model-selection experiments:
# global constants, dataset path catalogues, and hyperparameter grids.

import glob  # NOTE(review): unused in this file -- possibly kept for callers doing `from config import *`

# Global experiment constants.
SEED = 2018                   # random seed shared across runs
INPUT_SHAPE = (112, 112, 3)   # input image shape (height, width, channels)
NUM_CLASSES = 1000            # ImageNet-1k class count
# Keras metric identifiers used when compiling/evaluating models.
TOP_5 = 'top_k_categorical_accuracy'
TOP_1 = 'categorical_accuracy'
# Bookkeeping table names (presumably MADlib/DB tables -- confirm with callers).
MODEL_ARCH_TABLE = 'model_arch_library'
MODEL_SELECTION_TABLE = 'mst_table'
MODEL_SELECTION_SUMMARY_TABLE = 'mst_table_summary'


class spark_imagenet_cat:
    # ImageNet parquet shards served from HDFS, 8 partitions each.
    valid_list = [
        "hdfs://master:9000/imagenet_parquet/valid/valid_{}.parquet".format(i)
        for i in range(8)]
    train_list = [
        "hdfs://master:9000/imagenet_parquet/train/train_{}.parquet".format(i)
        for i in range(8)]


class spark_imagenet_cat_nfs:
    # Same shards as spark_imagenet_cat but read from an NFS mount.
    valid_list = [
        "/mnt/nfs/hdd/imagenet/valid/valid_{}.parquet".format(i)
        for i in range(8)
    ]
    train_list = [
        "/mnt/nfs/hdd/imagenet/train/train_{}.parquet".format(i)
        for i in range(8)
    ]


# Default hyperparameter grid (2 x 2 x 2 x 2 = 16 configurations).
param_grid = {
    "learning_rate": [1e-4, 1e-6],
    "lambda_value": [1e-4, 1e-6],
    "batch_size": [32, 256],
    "model": ["vgg16", "resnet50"]
}

# Heterogeneous-worker experiment: extra scheduling knobs
# (p / hetro / fast / slow / total -- meanings defined by the consumer).
param_grid_hetro = {
    "learning_rate": [1e-4, 1e-4],
    "lambda_value": [1e-4, 1e-4],
    "batch_size": [4, 128],
    "model": ["nasnetmobile", "mobilenetv2"],
    'p': 0.8,
    'hetro': True,
    'fast': 38,
    'slow': 10,
    'total': 48
}

# Scalability study: vary only learning rate and lambda on a fixed model.
param_grid_scalability = {
    "learning_rate": [1e-3, 1e-4, 1e-5, 1e-6],
    "lambda_value": [1e-4, 1e-6],
    "batch_size": [32],
    "model": ["resnet50"]
}

# Model-size study: one sub-grid per size bucket
# (s=small .. x=extra large by parameter count).
param_grid_model_size = {
    's': {
        "learning_rate": [1e-4, 1e-6],
        "lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
        "batch_size": [32],
        "model": ["mobilenetv2"]
    },
    'm': {
        "learning_rate": [1e-4, 1e-6],
        "lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
        "batch_size": [32],
        "model": ["resnet50"]
    },
    'l': {
        "learning_rate": [1e-4, 1e-6],
        "lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
        "batch_size": [32],
        "model": ["resnet152"]
    },
    'x': {
        "learning_rate": [1e-4, 1e-6],
        "lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
        "batch_size": [32],
        "model": ["vgg16"]
    },
}

# Single best configuration found by the selection process.
param_grid_best_model = {
    "learning_rate": [1e-4],
    "lambda_value": [1e-4],
    "batch_size": [32],
    "model": ["resnet50"]
}

# Hyperopt search space: learning_rate and batch_size are [low, high] ranges
# here rather than discrete choices.
param_grid_hyperopt = {
    "learning_rate": [0.00001, 0.1],
    "lambda_value": [1e-4, 1e-6],
    "batch_size": [16, 256],
    "model": ["resnet18", "resnet34"]
}
nilq/baby-python
python
def ejercicio01MCM():
    """Ejercicio 01: nota final del curso como promedio ponderado de
    cuatro notas (pesos 20 %, 15 %, 15 % y 50 %)."""
    print("--> EJERCICIO 01 <--")
    # Datos de entrada
    n1 = float(input("Ingrese la 1ra nota: "))
    n2 = float(input("Ingrese la 2da nota: "))
    # BUG FIX: the original prompt asked for the "2da nota" twice.
    n3 = float(input("Ingrese la 3ra nota: "))
    n4 = float(input("Ingrese la 4ta nota: "))
    # Proceso: weighted sum of the four grades.
    notaFinal = n1 * 0.2 + n2 * 0.15 + n3 * 0.15 + n4 * 0.5
    # Datos de salida
    print("La nota final del curso es:", notaFinal)


def ejercicio02MCM():
    """Ejercicio 02: bonus as a fraction of the minimum salary, by points.

    50-100 -> 10 %, 101-150 -> 40 %, 151+ -> 70 %; below 50 a message is
    printed instead of an amount (same behaviour as the original code).
    """
    print("--> EJERCICIO 02 <--")
    # Datos de entrada
    puntos = int(input("Ingrese los puntos: "))
    salariomin = int(input("Ingrese el salario minimo: "))
    # Proceso: one if/elif chain instead of the original overlapping ifs,
    # which re-tested ranges after already assigning a value.
    if puntos >= 151:
        bono = salariomin * 0.70
    elif puntos >= 101:
        bono = salariomin * 0.40
    elif puntos >= 50:
        bono = salariomin * 0.10
    else:
        bono = "Nada, sera para la proxima"
    # Datos de salida
    print("El bono que recibira es:", bono)


def ejercicio03MCM():
    """Ejercicio 03: vaccine type by age (applies to both sexes unless noted).

    >70 -> Tipo C; 16-69 -> Tipo B (mujer) / Tipo A (hombre); <16 -> Tipo A.
    """
    print("--> EJERCICIO 03 <--")
    vacuna = ""
    # Datos de entrada
    edad = int(input("Ingrese la edad: "))
    sexo = input("Ingrese sexo: ")
    # Proceso. BUG FIX: the original wrote
    #   if sexo=="mujer" or sexo=="hombre" and edad>70: ...
    # where `and` binds tighter than `or`, so "mujer" matched every branch
    # regardless of age and a woman's result was always overwritten by the
    # last if (she ended up with "Tipo A" at any age).
    if edad > 70:
        vacuna = "Tipo C"
    elif 16 <= edad <= 69:
        vacuna = "Tipo B" if sexo == "mujer" else "Tipo A"
    elif edad < 16:
        vacuna = "Tipo A"
    # NOTE(review): edad == 70 matches no branch and leaves `vacuna` empty,
    # mirroring the original ranges — confirm whether 70 should count as >70.
    # Datos de salida
    print("Recibira la vacuna :", vacuna)


def ejercicio04MCM():
    """Ejercicio 04: apply the arithmetic operation named by the user to
    two integers; unknown operators print the default result 0."""
    print("--> EJERCICIO 04 <--")
    resultado = 0
    # Datos de entrada
    operador = input("Ingrese el operador aritmetico: ")
    n1 = int(input("Ingrese el 1er numero: "))
    n2 = int(input("Ingrese el 2do numero: "))
    # Proceso: a single if/elif chain (the original mixed independent ifs,
    # which happened to be equivalent but was harder to read).
    if operador == "suma" or operador == "+":
        resultado = n1 + n2
    elif operador == "resta" or operador == "-":
        resultado = n1 - n2
    elif operador == "division" or operador == "/":
        # NOTE(review): division by zero raises, as in the original.
        resultado = n1 / n2
    elif operador == "multiplicacion" or operador == "*":
        resultado = n1 * n2
    elif operador == "potencia" or operador == "^":
        resultado = n1 ** n2
    # Datos de salida
    print("Los resultados son:", resultado)


# Run the exercises only when executed as a script, so importing this
# module no longer blocks on input().
if __name__ == "__main__":
    ejercicio01MCM()
    print("")
    ejercicio02MCM()
    print("")
    ejercicio03MCM()
    print("")
    ejercicio04MCM()
    print("")
nilq/baby-python
python
from __future__ import absolute_import

from django.test import RequestFactory
from exam import fixture
from mock import patch

from sentry.middleware.stats import RequestTimingMiddleware, add_request_metric_tags
from sentry.testutils import TestCase
from sentry.testutils.helpers.faux import Mock


class RequestTimingMiddlewareTest(TestCase):
    """Verifies that RequestTimingMiddleware emits a `view.response` metric
    tagged with the request method, status code and any per-request tags."""

    middleware = fixture(RequestTimingMiddleware)
    factory = fixture(RequestFactory)

    def _build_request(self):
        # Minimal GET request carrying the view path the middleware tags on.
        req = self.factory.get('/')
        req._view_path = '/'
        return req

    def _assert_metric_reported(self, incr, req, **extra_tags):
        # All tests expect the same base tags plus any endpoint extras.
        expected_tags = {'method': 'GET', 'status_code': 200}
        expected_tags.update(extra_tags)
        incr.assert_called_with(
            'view.response',
            instance=req._view_path,
            tags=expected_tags,
            skip_internal=False,
        )

    @patch('sentry.utils.metrics.incr')
    def test_records_default_api_metrics(self, incr):
        req = self._build_request()
        self.middleware.process_response(req, Mock(status_code=200))
        self._assert_metric_reported(incr, req)

    @patch('sentry.utils.metrics.incr')
    def test_records_endpoint_specific_metrics(self, incr):
        req = self._build_request()
        # Tags stashed on the request by an endpoint must reach the metric.
        req._metric_tags = {'a': 'b'}
        self.middleware.process_response(req, Mock(status_code=200))
        self._assert_metric_reported(incr, req, a='b')

    @patch('sentry.utils.metrics.incr')
    def test_add_request_metric_tags(self, incr):
        req = self._build_request()
        # Same as above, but via the public helper instead of the attribute.
        add_request_metric_tags(req, foo='bar')
        self.middleware.process_response(req, Mock(status_code=200))
        self._assert_metric_reported(incr, req, foo='bar')
nilq/baby-python
python
# URL routing for the "quiklash" REST API (Django).
from django.conf.urls import url  # NOTE(review): `url` appears unused here — confirm before removing.
from django.urls import path
from rest.quiklash import views
from rest.push_the_buttons.views import PushTheButtonView  # used only by the disabled routes below

urlpatterns = [
    # Start a new quiklash game session.
    path('api/qa/game/start', views.QuicklashMainGame.as_view()),
    # Fetch a new question for the current game.
    path('api/qa/question/new', views.QuiklashQuestionListView.as_view()),
    # Submit an answer to the current question.
    path('api/qa/question/answer', views.QuiklashQuestionAnswer.as_view()),
    # Voting endpoints kept for reference; currently disabled.
    # path('api/qa/voting', PushTheButtonView.as_view()),
    # path('api/qa/vote', views.PlayerView.as_view()),
]
nilq/baby-python
python
# coding: utf-8 from __future__ import annotations from datetime import date, datetime # noqa: F401 import re # noqa: F401 from typing import Any, Dict, List, Optional # noqa: F401 from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401 from acapy_wrapper.models.indy_proof_requested_proof_predicate import ( IndyProofRequestedProofPredicate, ) from acapy_wrapper.models.indy_proof_requested_proof_revealed_attr import ( IndyProofRequestedProofRevealedAttr, ) from acapy_wrapper.models.indy_proof_requested_proof_revealed_attr_group import ( IndyProofRequestedProofRevealedAttrGroup, ) class IndyProofRequestedProof(BaseModel): """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. IndyProofRequestedProof - a model defined in OpenAPI predicates: The predicates of this IndyProofRequestedProof [Optional]. revealed_attr_groups: The revealed_attr_groups of this IndyProofRequestedProof [Optional]. revealed_attrs: The revealed_attrs of this IndyProofRequestedProof [Optional]. self_attested_attrs: The self_attested_attrs of this IndyProofRequestedProof [Optional]. unrevealed_attrs: The unrevealed_attrs of this IndyProofRequestedProof [Optional]. """ predicates: Optional[Dict[str, IndyProofRequestedProofPredicate]] = None revealed_attr_groups: Optional[ Dict[str, IndyProofRequestedProofRevealedAttrGroup] ] = None revealed_attrs: Optional[Dict[str, IndyProofRequestedProofRevealedAttr]] = None self_attested_attrs: Optional[Dict[str, Any]] = None unrevealed_attrs: Optional[Dict[str, Any]] = None IndyProofRequestedProof.update_forward_refs()
nilq/baby-python
python
# Basic training configuration file from pathlib import Path from torchvision.transforms import RandomVerticalFlip, RandomHorizontalFlip, CenterCrop from torchvision.transforms import RandomApply, RandomAffine from torchvision.transforms import ToTensor, Normalize from common.dataset import get_test_data_loader SEED = 12345 DEBUG = True OUTPUT_PATH = "output" dataset_path = Path("/home/fast_storage/imaterialist-challenge-furniture-2018/") SAMPLE_SUBMISSION_PATH = dataset_path / "sample_submission_randomlabel.csv" TEST_TRANSFORMS = [ RandomApply( [RandomAffine(degrees=45, translate=(0.1, 0.1), scale=(0.7, 1.2), resample=2), ], p=0.5 ), CenterCrop(size=350), RandomHorizontalFlip(p=0.5), RandomVerticalFlip(p=0.5), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ] N_CLASSES = 128 BATCH_SIZE = 32 NUM_WORKERS = 8 TEST_LOADER = get_test_data_loader( dataset_path=dataset_path / "test_400x400", test_data_transform=TEST_TRANSFORMS, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=True) MODEL = (Path(OUTPUT_PATH) / "training_FurnitureSqueezeNet350_20180414_1610" / "model_FurnitureSqueezeNet350_47_val_loss=0.8795085.pth").as_posix() N_TTA = 10
nilq/baby-python
python
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2021 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The module file for nxos_bgp_global """ from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = """ module: nxos_bgp_global short_description: BGP Global resource module. description: - This module manages global BGP configuration on devices running Cisco NX-OS. version_added: 1.4.0 notes: - Tested against NX-OS 9.3.6. - Unsupported for Cisco MDS - This module works with connection C(network_cli) and C(httpapi). author: Nilashish Chakraborty (@NilashishC) options: running_config: description: - This option is used only with state I(parsed). - The value of this option should be the output received from the NX-OS device by executing the command B(show running-config | section '^router bgp'). - The state I(parsed) reads the configuration from C(running_config) option and transforms it into Ansible structured data as per the resource module's argspec and the value is then returned in the I(parsed) key within the result. type: str config: description: A list of BGP process configuration. type: dict suboptions: as_number: description: Autonomous System Number of the router. type: str affinity_group: description: Configure an affinity group. type: dict suboptions: group_id: description: Affinity Group ID. type: int bestpath: &bestpath description: Define the default bestpath selection algorithm. type: dict suboptions: always_compare_med: description: Compare MED on paths from different AS. type: bool as_path: description: AS-Path. type: dict suboptions: ignore: description: Ignore AS-Path during bestpath selection. type: bool multipath_relax: description: Relax AS-Path restriction when choosing multipaths. type: bool compare_neighborid: description: When more paths are available than max path config, use neighborid as tie-breaker. 
type: bool compare_routerid: description: Compare router-id for identical EBGP paths. type: bool cost_community_ignore: description: Ignore cost communities in bestpath selection. type: bool igp_metric_ignore: description: Ignore IGP metric for next-hop during bestpath selection. type: bool med: description: MED type: dict suboptions: confed: description: Compare MED only from paths originated from within a confederation. type: bool missing_as_worst: description: Treat missing MED as highest MED. type: bool non_deterministic: description: Not always pick the best-MED path among paths from same AS. type: bool cluster_id: &cluster_id description: Configure Route Reflector Cluster-ID. type: str confederation: &confederation description: AS confederation parameters. type: dict suboptions: identifier: description: Set routing domain confederation AS. type: str peers: description: Peer ASs in BGP confederation. type: list elements: str disable_policy_batching: description: Disable batching evaluation of outbound policy for a peer. type: dict suboptions: set: description: Set policy batching. type: bool ipv4: description: IPv4 address-family settings. type: dict suboptions: prefix_list: description: Name of prefix-list to apply. type: str ipv6: description: IPv6 address-family settings. type: dict suboptions: prefix_list: description: Name of prefix-list to apply. type: str nexthop: description: Batching based on nexthop. type: bool dynamic_med_interval: description: Sets the interval for dampening of med changes. type: int enforce_first_as: description: Enforce neighbor AS is the first AS in AS-PATH attribute (EBGP). type: bool enhanced_error: description: Enable BGP Enhanced error handling. type: bool fabric_soo: description: Fabric site of origin. type: str fast_external_fallover: description: Immediately reset the session if the link to a directly connected BGP peer goes down. type: bool flush_routes: description: Flush routes in RIB upon controlled restart. 
type: bool graceful_restart: &graceful_restart description: Configure Graceful Restart functionality. type: dict suboptions: set: description: Enable graceful-restart. type: bool restart_time: description: Maximum time for restart advertised to peers. type: int stalepath_time: description: Maximum time to keep a restarting peer's stale routes. type: int helper: description: Configure Graceful Restart Helper mode functionality. type: bool graceful_shutdown: description: Graceful-shutdown for BGP protocol. type: dict suboptions: activate: description: Send graceful-shutdown community on all routes. type: dict suboptions: set: description: Activiate graceful-shutdown. type: bool route_map: description: Apply route-map to modify attributes for outbound. type: str aware: description: Lower preference of routes carrying graceful-shutdown community. type: bool isolate: description: Isolate this router from BGP perspective. type: dict suboptions: set: description: Withdraw remote BGP routes to isolate this router. type: bool include_local: description: Withdraw both local and remote BGP routes. type: bool log_neighbor_changes: &log_nbr description: Log a message for neighbor up/down event. type: bool maxas_limit: &maxas_limit description: Allow AS-PATH attribute from EBGP neighbor imposing a limit on number of ASes. type: int neighbors: &nbr description: Configure BGP neighbors. type: list elements: dict suboptions: neighbor_address: description: IP address/Prefix of the neighbor or interface. type: str required: True bfd: description: Bidirectional Fast Detection for the neighbor. type: dict suboptions: set: description: Set BFD for this neighbor. type: bool singlehop: description: Single-hop session. type: bool multihop: description: Multihop session. type: dict suboptions: set: description: Set BFD multihop. type: bool interval: description: Configure BFD session interval parameters. type: dict suboptions: tx_interval: description: TX interval in milliseconds. 
type: int min_rx_interval: description: Minimum RX interval. type: int multiplier: description: Detect Multiplier. type: int neighbor_affinity_group: description: Configure an affinity group. type: dict suboptions: group_id: description: Affinity Group ID. type: int bmp_activate_server: description: Specify server ID for activating BMP monitoring for the peer. type: int capability: description: Capability. type: dict suboptions: suppress_4_byte_as: description: Suppress 4-byte AS Capability. type: bool description: description: Neighbor specific descripion. type: str disable_connected_check: description: Disable check for directly connected peer. type: bool dont_capability_negotiate: description: Don't negotiate capability with this neighbor. type: bool dscp: description: Set dscp value for tcp transport. type: str dynamic_capability: description: Dynamic Capability type: bool ebgp_multihop: description: Specify multihop TTL for remote peer. type: int graceful_shutdown: description: Graceful-shutdown for this neighbor. type: dict suboptions: activate: description: Send graceful-shutdown community. type: dict suboptions: set: description: Set activate. type: bool route_map: description: Apply route-map to modify attributes for outbound. type: str inherit: description: Inherit a template. type: dict suboptions: peer: description: Peer template to inherit. type: str peer_session: description: Peer-session template to inherit. type: str local_as: description: Specify the local-as number for the eBGP neighbor. type: str log_neighbor_changes: description: Log message for neighbor up/down event. type: dict suboptions: set: description: - Set log-neighbor-changes. type: bool disable: description: - Disable logging of neighbor up/down event. type: bool low_memory: description: Behaviour in low memory situations. type: dict suboptions: exempt: description: Do not shutdown this peer when under memory pressure. 
type: bool password: description: Configure a password for neighbor. type: dict suboptions: encryption: description: - 0 specifies an UNENCRYPTED neighbor password. - 3 specifies an 3DES ENCRYPTED neighbor password will follow. - 7 specifies a Cisco type 7 ENCRYPTED neighbor password will follow. type: int key: description: Authentication password. type: str path_attribute: description: BGP path attribute optional filtering. type: list elements: dict suboptions: action: description: Action. type: str choices: ["discard", "treat-as-withdraw"] type: description: Path attribute type type: int range: description: Path attribute range. type: dict suboptions: start: description: Path attribute range start value. type: int end: description: Path attribute range end value. type: int peer_type: description: Neighbor facing type: str choices: ["fabric-border-leaf", "fabric-external"] remote_as: description: Specify Autonomous System Number of the neighbor. type: str remove_private_as: description: Remove private AS number from outbound updates. type: dict suboptions: set: description: Remove private AS. type: bool replace_as: description: Replace. type: bool all: description: All. type: bool shutdown: description: Administratively shutdown this neighbor. type: bool timers: description: Configure keepalive and hold timers. type: dict suboptions: keepalive: description: Keepalive interval (seconds). type: int holdtime: description: Holdtime (seconds). type: int transport: description: BGP transport connection. type: dict suboptions: connection_mode: description: Specify type of connection. type: dict suboptions: passive: description: Allow passive connection setup only. type: bool ttl_security: description: Enable TTL Security Mechanism. type: dict suboptions: hops: description: Specify hop count for remote peer. type: int update_source: description: Specify source of BGP session and updates. 
type: str neighbor_down: &nbr_down description: Handle BGP neighbor down event, due to various reasons. type: dict suboptions: fib_accelerate: description: Accelerate the hardware updates for IP/IPv6 adjacencies for neighbor. type: bool nexthop: description: Nexthop resolution options. type: dict suboptions: suppress_default_resolution: description: Prohibit use of default route for nexthop address resolution. type: bool rd: description: Secondary Route Distinguisher for vxlan multisite border gateway. type: dict suboptions: dual: description: Generate Secondary RD for all VRFs and L2VNIs. type: bool id: description: Specify 2 byte value for ID. type: int reconnect_interval: &reconn_intv description: Configure connection reconnect interval. type: int router_id: &rtr_id description: Specify the IP address to use as router-id. type: str shutdown: &shtdwn description: Administratively shutdown BGP protocol. type: bool suppress_fib_pending: &suppr description: Advertise only routes that are programmed in hardware to peers. type: bool timers: &timers description: Configure bgp related timers. type: dict suboptions: bestpath_limit: description: Configure timeout for first bestpath after restart. type: dict suboptions: timeout: description: Bestpath timeout (seconds). type: int always: description: Configure update-delay-always option. type: bool bgp: description: Configure different bgp keepalive and holdtimes. type: dict suboptions: keepalive: description: Keepalive interval (seconds). type: int holdtime: description: Holdtime (seconds). type: int prefix_peer_timeout: description: Prefix Peer timeout (seconds). type: int prefix_peer_wait: description: Configure wait timer for a prefix peer. type: int vrfs: description: Virtual Router Context configurations. type: list elements: dict suboptions: vrf: description: VRF name. type: str allocate_index: description: Configure allocate-index. 
type: int bestpath: *bestpath cluster_id: *cluster_id confederation: *confederation graceful_restart: *graceful_restart local_as: description: Specify the local-as for this vrf. type: str log_neighbor_changes: *log_nbr maxas_limit: *maxas_limit neighbors: *nbr neighbor_down: *nbr_down reconnect_interval: *reconn_intv router_id: *rtr_id timers: *timers state: description: - The state the configuration should be left in. - State I(purged) removes all the BGP configurations from the target device. Use caution with this state. - State I(deleted) only removes BGP attributes that this modules manages and does not negate the BGP process completely. Thereby, preserving address-family related configurations under BGP context. - Running states I(deleted) and I(replaced) will result in an error if there are address-family configuration lines present under a neighbor, or a vrf context that is to be removed. Please use the M(cisco.nxos.nxos_bgp_af) or M(cisco.nxos.nxos_bgp_neighbor_af) modules for prior cleanup. - States I(merged) and I(replaced) will result in a failure if BGP is already configured with a different ASN than what is provided in the task. In such cases, please use state I(purged) to remove the existing BGP process and proceed further. - Refer to examples for more details. 
type: str choices: - merged - replaced - deleted - purged - parsed - gathered - rendered default: merged """ EXAMPLES = """ # Using merged # Before state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # Nexus9000v# - name: Merge the provided configuration with the existing running configuration cisco.nxos.nxos_bgp_global: config: as_number: 65563 router_id: 192.168.1.1 bestpath: as_path: multipath_relax: True compare_neighborid: True cost_community_ignore: True confederation: identifier: 42 peers: - 65020 - 65030 - 65040 log_neighbor_changes: True maxas_limit: 20 neighbors: - neighbor_address: 192.168.1.100 neighbor_affinity_group: group_id: 160 bmp_activate_server: 1 remote_as: 65563 description: NBR-1 low_memory: exempt: True - neighbor_address: 192.168.1.101 remote_as: 65563 password: encryption: 7 key: 12090404011C03162E neighbor_down: fib_accelerate: True vrfs: - vrf: site-1 allocate_index: 5000 local_as: 200 log_neighbor_changes: True neighbors: - neighbor_address: 198.51.100.1 description: site-1-nbr-1 password: encryption: 3 key: 13D4D3549493D2877B1DC116EE27A6BE remote_as: 65562 - neighbor_address: 198.51.100.2 remote_as: 65562 description: site-1-nbr-2 - vrf: site-2 local_as: 300 log_neighbor_changes: True neighbors: - neighbor_address: 203.0.113.2 description: site-2-nbr-1 password: encryption: 3 key: AF92F4C16A0A0EC5BDF56CF58BC030F6 remote_as: 65568 neighbor_down: fib_accelerate: True # Task output # ------------- # before: {} # # commands: # - router bgp 65563 # - bestpath as-path multipath-relax # - bestpath compare-neighborid # - bestpath cost-community ignore # - confederation identifier 42 # - log-neighbor-changes # - maxas-limit 20 # - neighbor-down fib-accelerate # - router-id 192.168.1.1 # - confederation peers 65020 65030 65040 # - neighbor 192.168.1.100 # - remote-as 65563 # - affinity-group 160 # - bmp-activate-server 1 # - description NBR-1 # - low-memory exempt # - neighbor 192.168.1.101 # - remote-as 65563 # - 
password 7 12090404011C03162E # - vrf site-1 # - allocate-index 5000 # - local-as 200 # - log-neighbor-changes # - neighbor 198.51.100.1 # - remote-as 65562 # - description site-1-nbr-1 # - password 3 13D4D3549493D2877B1DC116EE27A6BE # - neighbor 198.51.100.2 # - remote-as 65562 # - description site-1-nbr-2 # - vrf site-2 # - local-as 300 # - log-neighbor-changes # - neighbor-down fib-accelerate # - neighbor 203.0.113.2 # - remote-as 65568 # - description site-2-nbr-1 # - password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 # # after: # as_number: '65563' # bestpath: # as_path: # multipath_relax: true # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65040' # log_neighbor_changes: true # maxas_limit: 20 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # - neighbor_address: 192.168.1.101 # password: # encryption: 7 # key: 12090404011C03162E # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - allocate_index: 5000 # local_as: '200' # log_neighbor_changes: true # neighbors: # - description: site-1-nbr-1 # neighbor_address: 198.51.100.1 # password: # encryption: 3 # key: 13D4D3549493D2877B1DC116EE27A6BE # remote_as: '65562' # - description: site-1-nbr-2 # neighbor_address: 198.51.100.2 # remote_as: '65562' # vrf: site-1 # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - description: site-2-nbr-1 # neighbor_address: 203.0.113.2 # password: # encryption: 3 # key: AF92F4C16A0A0EC5BDF56CF58BC030F6 # remote_as: '65568' # vrf: site-2 # After state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65040 # bestpath as-path multipath-relax # 
bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 20 # log-neighbor-changes # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # neighbor 192.168.1.101 # remote-as 65563 # password 7 12090404011C03162E # vrf site-1 # local-as 200 # log-neighbor-changes # allocate-index 5000 # neighbor 198.51.100.1 # remote-as 65562 # description site-1-nbr-1 # password 3 13D4D3549493D2877B1DC116EE27A6BE # neighbor 198.51.100.2 # remote-as 65562 # description site-1-nbr-2 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # remote-as 65568 # description site-2-nbr-1 # password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 # Using replaced # Before state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65040 # bestpath as-path multipath-relax # bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 20 # log-neighbor-changes # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # neighbor 192.168.1.101 # remote-as 65563 # password 7 12090404011C03162E # vrf site-1 # local-as 200 # log-neighbor-changes # allocate-index 5000 # neighbor 198.51.100.1 # remote-as 65562 # description site-1-nbr-1 # password 3 13D4D3549493D2877B1DC116EE27A6BE # neighbor 198.51.100.2 # remote-as 65562 # description site-1-nbr-2 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # remote-as 65568 # description site-2-nbr-1 # password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 - name: Replace BGP configuration with provided configuration cisco.nxos.nxos_bgp_global: config: as_number: 65563 router_id: 192.168.1.1 bestpath: 
compare_neighborid: True cost_community_ignore: True confederation: identifier: 42 peers: - 65020 - 65030 - 65050 maxas_limit: 40 neighbors: - neighbor_address: 192.168.1.100 neighbor_affinity_group: group_id: 160 bmp_activate_server: 1 remote_as: 65563 description: NBR-1 low_memory: exempt: True neighbor_down: fib_accelerate: True vrfs: - vrf: site-2 local_as: 300 log_neighbor_changes: True neighbors: - neighbor_address: 203.0.113.2 password: encryption: 7 key: 12090404011C03162E neighbor_down: fib_accelerate: True state: replaced # Task output # ------------- # before: # as_number: '65563' # bestpath: # as_path: # multipath_relax: true # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65040' # log_neighbor_changes: true # maxas_limit: 20 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # - neighbor_address: 192.168.1.101 # password: # encryption: 7 # key: 12090404011C03162E # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - allocate_index: 5000 # local_as: '200' # log_neighbor_changes: true # neighbors: # - description: site-1-nbr-1 # neighbor_address: 198.51.100.1 # password: # encryption: 3 # key: 13D4D3549493D2877B1DC116EE27A6BE # remote_as: '65562' # - description: site-1-nbr-2 # neighbor_address: 198.51.100.2 # remote_as: '65562' # vrf: site-1 # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - description: site-2-nbr-1 # neighbor_address: 203.0.113.2 # password: # encryption: 3 # key: AF92F4C16A0A0EC5BDF56CF58BC030F6 # remote_as: '65568' # vrf: site-2 # # commands: # - router bgp 65563 # - no bestpath as-path multipath-relax # - no log-neighbor-changes # - maxas-limit 40 # - no confederation peers 65020 65030 65040 # - confederation peers 
65020 65030 65050 # - no neighbor 192.168.1.101 # - vrf site-2 # - neighbor 203.0.113.2 # - no remote-as 65568 # - no description site-2-nbr-1 # - password 7 12090404011C03162E # - no vrf site-1 # after: # as_number: '65563' # bestpath: # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65050' # maxas_limit: 40 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - neighbor_address: 203.0.113.2 # password: # encryption: 7 # key: 12090404011C03162E # vrf: site-2 # # After state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65050 # bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 40 # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # password 7 12090404011C03162E # Using deleted # Before state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65040 # bestpath as-path multipath-relax # bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 20 # log-neighbor-changes # address-family ipv4 unicast # default-metric 400 # suppress-inactive # default-information originate # address-family ipv6 multicast # wait-igp-convergence # redistribute 
eigrp eigrp-1 route-map site-1-rmap # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # neighbor 192.168.1.101 # remote-as 65563 # password 7 12090404011C03162E # vrf site-1 # local-as 200 # log-neighbor-changes # allocate-index 5000 # address-family ipv4 multicast # maximum-paths 40 # dampen-igp-metric 1200 # neighbor 198.51.100.1 # remote-as 65562 # description site-1-nbr-1 # password 3 13D4D3549493D2877B1DC116EE27A6BE # neighbor 198.51.100.2 # remote-as 65562 # description site-1-nbr-2 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # remote-as 65568 # description site-1-nbr-1 # password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 - name: Delete BGP configurations handled by this module cisco.nxos.nxos_bgp_global: state: deleted # Task output # ------------- # before: # as_number: '65563' # bestpath: # as_path: # multipath_relax: true # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65040' # log_neighbor_changes: true # maxas_limit: 20 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # - neighbor_address: 192.168.1.101 # password: # encryption: 7 # key: 12090404011C03162E # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - allocate_index: 5000 # local_as: '200' # log_neighbor_changes: true # neighbors: # - description: site-1-nbr-1 # neighbor_address: 198.51.100.1 # password: # encryption: 3 # key: 13D4D3549493D2877B1DC116EE27A6BE # remote_as: '65562' # - description: site-1-nbr-2 # neighbor_address: 198.51.100.2 # remote_as: '65562' # vrf: site-1 # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - description: site-1-nbr-1 
# neighbor_address: 203.0.113.2 # password: # encryption: 3 # key: AF92F4C16A0A0EC5BDF56CF58BC030F6 # remote_as: '65568' # vrf: site-2 # # commands: # - router bgp 65563 # - no bestpath as-path multipath-relax # - no bestpath compare-neighborid # - no bestpath cost-community ignore # - no confederation identifier 42 # - no log-neighbor-changes # - no maxas-limit 20 # - no neighbor-down fib-accelerate # - no router-id 192.168.1.1 # - no confederation peers 65020 65030 65040 # - no neighbor 192.168.1.100 # - no neighbor 192.168.1.101 # - no vrf site-1 # - no vrf site-2 # # after: # as_number: '65563' # # After state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # address-family ipv4 unicast # default-metric 400 # suppress-inactive # default-information originate # address-family ipv6 multicast # wait-igp-convergence # redistribute eigrp eigrp-1 route-map site-1-rmap # # Using purged # Before state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65040 # bestpath as-path multipath-relax # bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 20 # log-neighbor-changes # address-family ipv4 unicast # default-metric 400 # suppress-inactive # default-information originate # address-family ipv6 multicast # wait-igp-convergence # redistribute eigrp eigrp-1 route-map site-1-rmap # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # neighbor 192.168.1.101 # remote-as 65563 # password 7 12090404011C03162E # vrf site-1 # local-as 200 # log-neighbor-changes # allocate-index 5000 # address-family ipv4 multicast # maximum-paths 40 # dampen-igp-metric 1200 # neighbor 198.51.100.1 # remote-as 65562 # description site-1-nbr-1 # password 3 13D4D3549493D2877B1DC116EE27A6BE # 
neighbor 198.51.100.2 # remote-as 65562 # description site-1-nbr-2 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # remote-as 65568 # description site-1-nbr-1 # password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 - name: Purge all BGP configurations from the device cisco.nxos.nxos_bgp_global: state: purged # Task output # ------------- # before: # as_number: '65563' # bestpath: # as_path: # multipath_relax: true # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65040' # log_neighbor_changes: true # maxas_limit: 20 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # - neighbor_address: 192.168.1.101 # password: # encryption: 7 # key: 12090404011C03162E # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - allocate_index: 5000 # local_as: '200' # log_neighbor_changes: true # neighbors: # - description: site-1-nbr-1 # neighbor_address: 198.51.100.1 # password: # encryption: 3 # key: 13D4D3549493D2877B1DC116EE27A6BE # remote_as: '65562' # - description: site-1-nbr-2 # neighbor_address: 198.51.100.2 # remote_as: '65562' # vrf: site-1 # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - description: site-1-nbr-1 # neighbor_address: 203.0.113.2 # password: # encryption: 3 # key: AF92F4C16A0A0EC5BDF56CF58BC030F6 # remote_as: '65568' # vrf: site-2 # # commands: # - no router bgp 65563 # # after: {} # # After state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # Nexus9000v# # Using rendered - name: Render platform specific configuration lines (without connecting to the device) cisco.nxos.nxos_bgp_global: config: as_number: 65563 router_id: 192.168.1.1 bestpath: as_path: multipath_relax: 
True compare_neighborid: True cost_community_ignore: True confederation: identifier: 42 peers: - 65020 - 65030 - 65040 log_neighbor_changes: True maxas_limit: 20 neighbors: - neighbor_address: 192.168.1.100 neighbor_affinity_group: group_id: 160 bmp_activate_server: 1 remote_as: 65563 description: NBR-1 low_memory: exempt: True - neighbor_address: 192.168.1.101 remote_as: 65563 password: encryption: 7 key: 12090404011C03162E neighbor_down: fib_accelerate: True vrfs: - vrf: site-1 allocate_index: 5000 local_as: 200 log_neighbor_changes: True neighbors: - neighbor_address: 198.51.100.1 description: site-1-nbr-1 password: encryption: 3 key: 13D4D3549493D2877B1DC116EE27A6BE remote_as: 65562 - neighbor_address: 198.51.100.2 remote_as: 65562 description: site-1-nbr-2 - vrf: site-2 local_as: 300 log_neighbor_changes: True neighbors: - neighbor_address: 203.0.113.2 description: site-1-nbr-1 password: encryption: 3 key: AF92F4C16A0A0EC5BDF56CF58BC030F6 remote_as: 65568 neighbor_down: fib_accelerate: True # Task Output (redacted) # ----------------------- # rendered: # - router bgp 65563 # - bestpath as-path multipath-relax # - bestpath compare-neighborid # - bestpath cost-community ignore # - confederation identifier 42 # - log-neighbor-changes # - maxas-limit 20 # - neighbor-down fib-accelerate # - router-id 192.168.1.1 # - confederation peers 65020 65030 65040 # - neighbor 192.168.1.100 # - remote-as 65563 # - affinity-group 160 # - bmp-activate-server 1 # - description NBR-1 # - low-memory exempt # - neighbor 192.168.1.101 # - remote-as 65563 # - password 7 12090404011C03162E # - vrf site-1 # - allocate-index 5000 # - local-as 200 # - log-neighbor-changes # - neighbor 198.51.100.1 # - remote-as 65562 # - description site-1-nbr-1 # - password 3 13D4D3549493D2877B1DC116EE27A6BE # - neighbor 198.51.100.2 # - remote-as 65562 # - description site-1-nbr-2 # - vrf site-2 # - local-as 300 # - log-neighbor-changes # - neighbor-down fib-accelerate # - neighbor 203.0.113.2 # - 
remote-as 65568 # - description site-1-nbr-1 # - password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 # Using parsed # parsed.cfg # ------------ # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65040 # bestpath as-path multipath-relax # bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 20 # log-neighbor-changes # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # neighbor 192.168.1.101 # remote-as 65563 # password 7 12090404011C03162E # vrf site-1 # local-as 200 # log-neighbor-changes # allocate-index 5000 # neighbor 198.51.100.1 # remote-as 65562 # description site-1-nbr-1 # password 3 13D4D3549493D2877B1DC116EE27A6BE # neighbor 198.51.100.2 # remote-as 65562 # description site-1-nbr-2 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # remote-as 65568 # description site-1-nbr-1 # password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6 - name: Parse externally provided BGP config cisco.nxos.nxos_bgp_global: running_config: "{{ lookup('file', 'parsed.cfg') }}" state: parsed # Task output (redacted) # ----------------------- # parsed: # as_number: '65563' # bestpath: # as_path: # multipath_relax: true # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65040' # log_neighbor_changes: true # maxas_limit: 20 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # - neighbor_address: 192.168.1.101 # password: # encryption: 7 # key: 12090404011C03162E # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - allocate_index: 5000 # local_as: '200' # log_neighbor_changes: true # neighbors: # - 
description: site-1-nbr-1 # neighbor_address: 198.51.100.1 # password: # encryption: 3 # key: 13D4D3549493D2877B1DC116EE27A6BE # remote_as: '65562' # - description: site-1-nbr-2 # neighbor_address: 198.51.100.2 # remote_as: '65562' # vrf: site-1 # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - description: site-1-nbr-1 # neighbor_address: 203.0.113.2 # password: # encryption: 3 # key: AF92F4C16A0A0EC5BDF56CF58BC030F6 # remote_as: '65568' # vrf: site-2 # Using gathered # existing config # # Nexus9000v# show running-config | section "^router bgp" # router bgp 65563 # router-id 192.168.1.1 # confederation identifier 42 # confederation peers 65020 65030 65050 # bestpath cost-community ignore # bestpath compare-neighborid # neighbor-down fib-accelerate # maxas-limit 40 # neighbor 192.168.1.100 # low-memory exempt # bmp-activate-server 1 # remote-as 65563 # description NBR-1 # affinity-group 160 # vrf site-1 # vrf site-2 # local-as 300 # neighbor-down fib-accelerate # log-neighbor-changes # neighbor 203.0.113.2 # password 7 12090404011C03162E - name: Gather BGP facts using gathered cisco.nxos.nxos_bgp_global: state: gathered # Task output (redacted) # ----------------------- # gathered: # as_number: '65563' # bestpath: # compare_neighborid: true # cost_community_ignore: true # confederation: # identifier: '42' # peers: # - '65020' # - '65030' # - '65050' # maxas_limit: 40 # neighbor_down: # fib_accelerate: true # neighbors: # - bmp_activate_server: 1 # description: NBR-1 # low_memory: # exempt: true # neighbor_address: 192.168.1.100 # neighbor_affinity_group: # group_id: 160 # remote_as: '65563' # router_id: 192.168.1.1 # vrfs: # - vrf: site-1 # - local_as: '300' # log_neighbor_changes: true # neighbor_down: # fib_accelerate: true # neighbors: # - neighbor_address: 203.0.113.2 # password: # encryption: 7 # key: 12090404011C03162E # vrf: site-2 # Remove a neighbor having AF configurations with state replaced (will 
fail) # Before state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65536 # log-neighbor-changes # maxas-limit 20 # router-id 198.51.100.2 # neighbor 203.0.113.2 # address-family ipv4 unicast # next-hop-self # remote-as 65538 # affinity-group 160 # description NBR-1 # low-memory exempt # neighbor 192.0.2.1 # remote-as 65537 # password 7 12090404011C03162E - name: Remove a neighbor having AF configurations (should fail) cisco.nxos.nxos_bgp_global: config: as_number: 65536 router_id: 198.51.100.2 maxas_limit: 20 log_neighbor_changes: True neighbors: - neighbor_address: 192.0.2.1 remote_as: 65537 password: encryption: 7 key: 12090404011C03162E state: replaced # Task output (redacted) # ----------------------- # fatal: [Nexus9000v]: FAILED! => changed=false # msg: Neighbor 203.0.113.2 has address-family configurations. # Please use the nxos_bgp_neighbor_af module to remove those first. # Remove a VRF having AF configurations with state replaced (will fail) # Before state: # ------------- # Nexus9000v# show running-config | section "^router bgp" # router bgp 65536 # log-neighbor-changes # maxas-limit 20 # router-id 198.51.100.2 # neighbor 192.0.2.1 # remote-as 65537 # password 7 12090404011C03162E # vrf site-1 # address-family ipv4 unicast # default-information originate # neighbor 203.0.113.2 # remote-as 65538 # affinity-group 160 # description NBR-1 # low-memory exempt # vrf site-2 # neighbor-down fib-accelerate - name: Remove a VRF having AF configurations (should fail) cisco.nxos.nxos_bgp_global: config: as_number: 65536 router_id: 198.51.100.2 maxas_limit: 20 log_neighbor_changes: True neighbors: - neighbor_address: 192.0.2.1 remote_as: 65537 password: encryption: 7 key: 12090404011C03162E vrfs: - vrf: site-2 neighbor_down: fib_accelerate: True state: replaced # Task output (redacted) # ----------------------- # fatal: [Nexus9000v]: FAILED! => changed=false # msg: VRF site-1 has address-family configurations. 
# Please use the nxos_bgp_af module to remove those first. """ RETURN = """ before: description: The configuration prior to the model invocation. returned: always type: dict sample: > The configuration returned will always be in the same format of the parameters above. after: description: The resulting configuration model invocation. returned: when changed type: dict sample: > The configuration returned will always be in the same format of the parameters above. commands: description: The set of commands pushed to the remote device. returned: always type: list sample: - router bgp 65563 - maxas-limit 20 - router-id 192.168.1.1 - confederation peers 65020 65030 65040 - neighbor 192.168.1.100 - remote-as 65563 - affinity-group 160 - bmp-activate-server 1 - description NBR-1 - low-memory exempt - vrf site-1 - log-neighbor-changes - neighbor 198.51.100.1 - remote-as 65562 - description site-1-nbr-1 - password 3 13D4D3549493D2877B1DC116EE27A6BE """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bgp_global.bgp_global import ( Bgp_globalArgs, ) from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.config.bgp_global.bgp_global import ( Bgp_global, ) def main(): """ Main entry point for module execution :returns: the result form module invocation """ module = AnsibleModule( argument_spec=Bgp_globalArgs.argument_spec, mutually_exclusive=[["config", "running_config"]], required_if=[ ["state", "merged", ["config"]], ["state", "replaced", ["config"]], ["state", "rendered", ["config"]], ["state", "parsed", ["running_config"]], ], supports_check_mode=True, ) result = Bgp_global(module).execute_module() module.exit_json(**result) if __name__ == "__main__": main()
nilq/baby-python
python
# Emit the greeting "merhaba" (Turkish for "hello") four times.
for _ in range(4):
    print("merhaba")
nilq/baby-python
python
r""" Base class for polyhedra, part 6 Define methods related to plotting including affine hull projection. """ # **************************************************************************** # Copyright (C) 2008-2012 Marshall Hampton <hamptonio@gmail.com> # Copyright (C) 2011-2015 Volker Braun <vbraun.name@gmail.com> # Copyright (C) 2012-2018 Frederic Chapoton # Copyright (C) 2013 Andrey Novoseltsev # Copyright (C) 2014-2017 Moritz Firsching # Copyright (C) 2014-2019 Thierry Monteil # Copyright (C) 2015 Nathann Cohen # Copyright (C) 2015-2017 Jeroen Demeyer # Copyright (C) 2015-2017 Vincent Delecroix # Copyright (C) 2015-2018 Dima Pasechnik # Copyright (C) 2015-2020 Jean-Philippe Labbe <labbe at math.huji.ac.il> # Copyright (C) 2015-2021 Matthias Koeppe # Copyright (C) 2016-2019 Daniel Krenn # Copyright (C) 2017 Marcelo Forets # Copyright (C) 2017-2018 Mark Bell # Copyright (C) 2019 Julian Ritter # Copyright (C) 2019-2020 Laith Rastanawi # Copyright (C) 2019-2020 Sophia Elia # Copyright (C) 2019-2021 Jonathan Kliem <jonathan.kliem@fu-berlin.de> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # https://www.gnu.org/licenses/ # **************************************************************************** from sage.misc.cachefunc import cached_method from sage.modules.vector_space_morphism import linear_transformation from sage.matrix.constructor import matrix from sage.modules.free_module_element import vector from sage.rings.qqbar import AA from sage.geometry.convex_set import AffineHullProjectionData from .base5 import Polyhedron_base5 class Polyhedron_base6(Polyhedron_base5): r""" Methods related to plotting including affine hull projection. 
TESTS:: sage: from sage.geometry.polyhedron.base6 import Polyhedron_base6 sage: P = polytopes.cube() sage: Polyhedron_base6.plot(P) Graphics3d Object sage: Polyhedron_base6.tikz(P) \begin{tikzpicture}% [x={(1.000000cm, 0.000000cm)}, y={(-0.000000cm, 1.000000cm)}, z={(0.000000cm, -0.000000cm)}, scale=1.000000, back/.style={loosely dotted, thin}, edge/.style={color=blue!95!black, thick}, facet/.style={fill=blue!95!black,fill opacity=0.800000}, vertex/.style={inner sep=1pt,circle,draw=green!25!black,fill=green!75!black,thick}] % % %% This TikZ-picture was produced with Sagemath version ... %% with the command: ._tikz_3d_in_3d and parameters: %% view = [0, 0, 1] %% angle = 0 %% scale = 1 %% edge_color = blue!95!black %% facet_color = blue!95!black %% opacity = 0.8 %% vertex_color = green %% axis = False <BLANKLINE> %% Coordinate of the vertices: %% \coordinate (1.00000, -1.00000, -1.00000) at (1.00000, -1.00000, -1.00000); \coordinate (1.00000, 1.00000, -1.00000) at (1.00000, 1.00000, -1.00000); \coordinate (1.00000, 1.00000, 1.00000) at (1.00000, 1.00000, 1.00000); \coordinate (1.00000, -1.00000, 1.00000) at (1.00000, -1.00000, 1.00000); \coordinate (-1.00000, -1.00000, 1.00000) at (-1.00000, -1.00000, 1.00000); \coordinate (-1.00000, -1.00000, -1.00000) at (-1.00000, -1.00000, -1.00000); \coordinate (-1.00000, 1.00000, -1.00000) at (-1.00000, 1.00000, -1.00000); \coordinate (-1.00000, 1.00000, 1.00000) at (-1.00000, 1.00000, 1.00000); %% %% %% Drawing edges in the back %% \draw[edge,back] (1.00000, -1.00000, -1.00000) -- (1.00000, 1.00000, -1.00000); \draw[edge,back] (1.00000, -1.00000, -1.00000) -- (1.00000, -1.00000, 1.00000); \draw[edge,back] (1.00000, -1.00000, -1.00000) -- (-1.00000, -1.00000, -1.00000); \draw[edge,back] (1.00000, 1.00000, -1.00000) -- (1.00000, 1.00000, 1.00000); \draw[edge,back] (1.00000, 1.00000, -1.00000) -- (-1.00000, 1.00000, -1.00000); \draw[edge,back] (-1.00000, -1.00000, 1.00000) -- (-1.00000, -1.00000, -1.00000); \draw[edge,back] 
(-1.00000, -1.00000, -1.00000) -- (-1.00000, 1.00000, -1.00000); \draw[edge,back] (-1.00000, 1.00000, -1.00000) -- (-1.00000, 1.00000, 1.00000); %% %% %% Drawing vertices in the back %% \node[vertex] at (1.00000, -1.00000, -1.00000) {}; \node[vertex] at (1.00000, 1.00000, -1.00000) {}; \node[vertex] at (-1.00000, 1.00000, -1.00000) {}; \node[vertex] at (-1.00000, -1.00000, -1.00000) {}; %% %% %% Drawing the facets %% \fill[facet] (-1.00000, 1.00000, 1.00000) -- (1.00000, 1.00000, 1.00000) -- (1.00000, -1.00000, 1.00000) -- (-1.00000, -1.00000, 1.00000) -- cycle {}; %% %% %% Drawing edges in the front %% \draw[edge] (1.00000, 1.00000, 1.00000) -- (1.00000, -1.00000, 1.00000); \draw[edge] (1.00000, 1.00000, 1.00000) -- (-1.00000, 1.00000, 1.00000); \draw[edge] (1.00000, -1.00000, 1.00000) -- (-1.00000, -1.00000, 1.00000); \draw[edge] (-1.00000, -1.00000, 1.00000) -- (-1.00000, 1.00000, 1.00000); %% %% %% Drawing the vertices in the front %% \node[vertex] at (1.00000, 1.00000, 1.00000) {}; \node[vertex] at (1.00000, -1.00000, 1.00000) {}; \node[vertex] at (-1.00000, -1.00000, 1.00000) {}; \node[vertex] at (-1.00000, 1.00000, 1.00000) {}; %% %% \end{tikzpicture} sage: Q = polytopes.hypercube(4) sage: Polyhedron_base6.show(Q) sage: Polyhedron_base6.schlegel_projection(Q) The projection of a polyhedron into 3 dimensions sage: R = polytopes.simplex(5) sage: Polyhedron_base6.affine_hull(R) A 5-dimensional polyhedron in ZZ^6 defined as the convex hull of 1 vertex and 5 lines sage: Polyhedron_base6.affine_hull_projection(R) A 5-dimensional polyhedron in ZZ^5 defined as the convex hull of 6 vertices """ def plot(self, point=None, line=None, polygon=None, # None means unspecified by the user wireframe='blue', fill='green', position=None, orthonormal=True, # whether to use orthonormal projections **kwds): r""" Return a graphical representation. INPUT: - ``point``, ``line``, ``polygon`` -- Parameters to pass to point (0d), line (1d), and polygon (2d) plot commands. 
Allowed values are: * A Python dictionary to be passed as keywords to the plot commands. * A string or triple of numbers: The color. This is equivalent to passing the dictionary ``{'color':...}``. * ``False``: Switches off the drawing of the corresponding graphics object - ``wireframe``, ``fill`` -- Similar to ``point``, ``line``, and ``polygon``, but ``fill`` is used for the graphics objects in the dimension of the polytope (or of dimension 2 for higher dimensional polytopes) and ``wireframe`` is used for all lower-dimensional graphics objects (default: 'green' for ``fill`` and 'blue' for ``wireframe``) - ``position`` -- positive number; the position to take the projection point in Schlegel diagrams. - ``orthonormal`` -- Boolean (default: True); whether to use orthonormal projections. - ``**kwds`` -- optional keyword parameters that are passed to all graphics objects. OUTPUT: A (multipart) graphics object. EXAMPLES:: sage: square = polytopes.hypercube(2) sage: point = Polyhedron([[1,1]]) sage: line = Polyhedron([[1,1],[2,1]]) sage: cube = polytopes.hypercube(3) sage: hypercube = polytopes.hypercube(4) By default, the wireframe is rendered in blue and the fill in green:: sage: square.plot() # optional - sage.plot Graphics object consisting of 6 graphics primitives sage: point.plot() # optional - sage.plot Graphics object consisting of 1 graphics primitive sage: line.plot() # optional - sage.plot Graphics object consisting of 2 graphics primitives sage: cube.plot() # optional - sage.plot Graphics3d Object sage: hypercube.plot() # optional - sage.plot Graphics3d Object Draw the lines in red and nothing else:: sage: square.plot(point=False, line='red', polygon=False) # optional - sage.plot Graphics object consisting of 4 graphics primitives sage: point.plot(point=False, line='red', polygon=False) # optional - sage.plot Graphics object consisting of 0 graphics primitives sage: line.plot(point=False, line='red', polygon=False) # optional - sage.plot Graphics object 
consisting of 1 graphics primitive sage: cube.plot(point=False, line='red', polygon=False) # optional - sage.plot Graphics3d Object sage: hypercube.plot(point=False, line='red', polygon=False) # optional - sage.plot Graphics3d Object Draw points in red, no lines, and a blue polygon:: sage: square.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot Graphics object consisting of 2 graphics primitives sage: point.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot Graphics object consisting of 1 graphics primitive sage: line.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot Graphics object consisting of 1 graphics primitive sage: cube.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot Graphics3d Object sage: hypercube.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot Graphics3d Object If we instead use the ``fill`` and ``wireframe`` options, the coloring depends on the dimension of the object:: sage: square.plot(fill='green', wireframe='red') # optional - sage.plot Graphics object consisting of 6 graphics primitives sage: point.plot(fill='green', wireframe='red') # optional - sage.plot Graphics object consisting of 1 graphics primitive sage: line.plot(fill='green', wireframe='red') # optional - sage.plot Graphics object consisting of 2 graphics primitives sage: cube.plot(fill='green', wireframe='red') # optional - sage.plot Graphics3d Object sage: hypercube.plot(fill='green', wireframe='red') # optional - sage.plot Graphics3d Object It is possible to draw polyhedra up to dimension 4, no matter what the ambient dimension is:: sage: hcube = polytopes.hypercube(5) sage: facet = hcube.facets()[0].as_polyhedron();facet A 4-dimensional polyhedron in ZZ^5 defined as the convex hull of 16 vertices sage: facet.plot() # optional - sage.plot Graphics3d Object TESTS:: sage: for p in square.plot(): # optional - sage.plot ....: print("{} 
{}".format(p.options()['rgbcolor'], p)) blue Point set defined by 4 point(s) blue Line defined by 2 points blue Line defined by 2 points blue Line defined by 2 points blue Line defined by 2 points green Polygon defined by 4 points sage: for p in line.plot(): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) blue Point set defined by 2 point(s) green Line defined by 2 points sage: for p in point.plot(): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) green Point set defined by 1 point(s) Draw the lines in red and nothing else:: sage: for p in square.plot(point=False, line='red', polygon=False): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) red Line defined by 2 points red Line defined by 2 points red Line defined by 2 points red Line defined by 2 points Draw vertices in red, no lines, and a blue polygon:: sage: for p in square.plot(point={'color':'red'}, line=False, polygon=(0,0,1)): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) red Point set defined by 4 point(s) (0, 0, 1) Polygon defined by 4 points sage: for p in line.plot(point={'color':'red'}, line=False, polygon=(0,0,1)): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) red Point set defined by 2 point(s) sage: for p in point.plot(point={'color':'red'}, line=False, polygon=(0,0,1)): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) red Point set defined by 1 point(s) Draw in red without wireframe:: sage: for p in square.plot(wireframe=False, fill="red"): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) red Polygon defined by 4 points sage: for p in line.plot(wireframe=False, fill="red"): # optional - sage.plot ....: print("{} {}".format(p.options()['rgbcolor'], p)) red Line defined by 2 points sage: for p in point.plot(wireframe=False, fill="red"): # optional - sage.plot ....: print("{} 
{}".format(p.options()['rgbcolor'], p)) red Point set defined by 1 point(s) We try to draw the polytope in 2 or 3 dimensions:: sage: type(Polyhedron(ieqs=[(1,)]).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(polytopes.hypercube(1).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(polytopes.hypercube(2).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(polytopes.hypercube(3).plot()) # optional - sage.plot <class 'sage.plot.plot3d.base.Graphics3dGroup'> In 4d a projection to 3d is used:: sage: type(polytopes.hypercube(4).plot()) # optional - sage.plot <class 'sage.plot.plot3d.base.Graphics3dGroup'> sage: type(polytopes.hypercube(5).plot()) # optional - sage.plot Traceback (most recent call last): ... NotImplementedError: plotting of 5-dimensional polyhedra not implemented If the polyhedron is not full-dimensional, the :meth:`affine_hull_projection` is used if necessary:: sage: type(Polyhedron([(0,), (1,)]).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(Polyhedron([(0,0), (1,1)]).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(Polyhedron([(0,0,0), (1,1,1)]).plot()) # optional - sage.plot <class 'sage.plot.plot3d.base.Graphics3dGroup'> sage: type(Polyhedron([(0,0,0,0), (1,1,1,1)]).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(Polyhedron([(0,0,0,0,0), (1,1,1,1,1)]).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> sage: type(Polyhedron([(0,0,0,0), (1,1,1,1), (1,0,0,0)]).plot()) # optional - sage.plot <class 'sage.plot.graphics.Graphics'> TESTS: Check that :trac:`30015` is fixed:: sage: fcube = polytopes.hypercube(4) sage: tfcube = fcube.face_truncation(fcube.faces(0)[0]) sage: sp = tfcube.schlegel_projection() sage: for face in tfcube.faces(2): ....: vertices = face.ambient_Vrepresentation() ....: indices = [sp.coord_index_of(vector(x)) for x in vertices] ....: 
projected_vertices = [sp.transformed_coords[i] for i in indices]
            ....:     assert Polyhedron(projected_vertices).dim() == 2
        """
        def merge_options(*opts):
            # Merge the per-dimension wireframe/fill option, the
            # user-supplied point/line/polygon option and the global
            # keywords.  ``False`` anywhere disables that graphics
            # object entirely; a bare color (string/list/tuple) is
            # shorthand for ``{'color': ...}``; dicts are merged
            # left-to-right so the later, more specific options win.
            merged = dict()
            for i in range(len(opts)):
                opt = opts[i]
                if opt is None:
                    continue
                elif opt is False:
                    return False
                elif isinstance(opt, (str, list, tuple)):
                    merged['color'] = opt
                else:
                    merged.update(opt)
            return merged

        # One option set per object dimension 0..2: ``wireframe``
        # styles everything below the polytope's dimension, ``fill``
        # styles the top-dimensional objects (capped at dimension 2).
        d = min(self.dim(), 2)
        opts = [wireframe] * d + [fill] + [False] * (2-d)
        # The point/line/polygon options take precedence over wireframe/fill
        opts = [merge_options(opt1, opt2, kwds)
                for opt1, opt2 in zip(opts, [point, line, polygon])]

        def project(polyhedron, ortho):
            # Reduce to a projection that can be drawn in <= 3
            # dimensions: identity when the ambient dimension already
            # allows it, affine-hull projection for low-dimensional
            # polyhedra in high ambient dimension, Schlegel diagram for
            # 4-polytopes.  Anything else falls through and triggers
            # the NotImplementedError below via the missing ``plot``.
            if polyhedron.ambient_dim() <= 3:
                return polyhedron.projection()
            elif polyhedron.dim() <= 3:
                if ortho:
                    return polyhedron.affine_hull_projection(orthonormal=True, extend=True).projection()
                else:
                    return polyhedron.affine_hull_projection().projection()
            elif polyhedron.dimension() == 4:
                # For 4d-polyhedron, we can use schlegel projections:
                return polyhedron.schlegel_projection(position=position)
            else:
                return polyhedron.projection()

        projection = project(self, orthonormal)
        try:
            plot_method = projection.plot
        except AttributeError:
            # Projections of > 4-dimensional polyhedra expose no ``plot``.
            raise NotImplementedError('plotting of {0}-dimensional polyhedra not implemented'
                                      .format(self.ambient_dim()))
        return plot_method(*opts)

    def show(self, **kwds):
        r"""
        Display graphics immediately

        This method attempts to display the graphics immediately,
        without waiting for the currently running code (if any) to
        return to the command line. Be careful, calling it from within
        a loop will potentially launch a large number of external
        viewer programs.

        INPUT:

        - ``kwds`` -- optional keyword arguments. See :meth:`plot` for
          the description of available options.

        OUTPUT:

        This method does not return anything. Use :meth:`plot` if you
        want to generate a graphics object that can be saved or further
        transformed.
EXAMPLES:: sage: square = polytopes.hypercube(2) sage: square.show(point='red') # optional - sage.plot """ self.plot(**kwds).show() def tikz(self, view=[0, 0, 1], angle=0, scale=1, edge_color='blue!95!black', facet_color='blue!95!black', opacity=0.8, vertex_color='green', axis=False): r""" Return a string ``tikz_pic`` consisting of a tikz picture of ``self`` according to a projection ``view`` and an angle ``angle`` obtained via the threejs viewer. INPUT: - ``view`` - list (default: [0,0,1]) representing the rotation axis (see note below). - ``angle`` - integer (default: 0) angle of rotation in degree from 0 to 360 (see note below). - ``scale`` - integer (default: 1) specifying the scaling of the tikz picture. - ``edge_color`` - string (default: 'blue!95!black') representing colors which tikz recognize. - ``facet_color`` - string (default: 'blue!95!black') representing colors which tikz recognize. - ``vertex_color`` - string (default: 'green') representing colors which tikz recognize. - ``opacity`` - real number (default: 0.8) between 0 and 1 giving the opacity of the front facets. - ``axis`` - Boolean (default: False) draw the axes at the origin or not. OUTPUT: - LatexExpr -- containing the TikZ picture. .. NOTE:: This is a wrapper of a method of the projection object `self.projection()`. See :meth:`~sage.geometry.polyhedron.plot.Projection.tikz` for more detail. The inputs ``view`` and ``angle`` can be obtained by visualizing it using ``.show(aspect_ratio=1)``. This will open an interactive view in your default browser, where you can rotate the polytope. Once the desired view angle is found, click on the information icon in the lower right-hand corner and select *Get Viewpoint*. This will copy a string of the form '[x,y,z],angle' to your local clipboard. Go back to Sage and type ``Img = P.tikz([x,y,z],angle)``. 
The inputs ``view`` and ``angle`` can also be obtained from the viewer Jmol:: 1) Right click on the image 2) Select ``Console`` 3) Select the tab ``State`` 4) Scroll to the line ``moveto`` It reads something like:: moveto 0.0 {x y z angle} Scale The ``view`` is then [x,y,z] and ``angle`` is angle. The following number is the scale. Jmol performs a rotation of ``angle`` degrees along the vector [x,y,z] and show the result from the z-axis. EXAMPLES:: sage: co = polytopes.cuboctahedron() sage: Img = co.tikz([0,0,1], 0) sage: print('\n'.join(Img.splitlines()[:9])) \begin{tikzpicture}% [x={(1.000000cm, 0.000000cm)}, y={(0.000000cm, 1.000000cm)}, z={(0.000000cm, 0.000000cm)}, scale=1.000000, back/.style={loosely dotted, thin}, edge/.style={color=blue!95!black, thick}, facet/.style={fill=blue!95!black,fill opacity=0.800000}, vertex/.style={inner sep=1pt,circle,draw=green!25!black,fill=green!75!black,thick}] sage: print('\n'.join(Img.splitlines()[12:21])) %% with the command: ._tikz_3d_in_3d and parameters: %% view = [0, 0, 1] %% angle = 0 %% scale = 1 %% edge_color = blue!95!black %% facet_color = blue!95!black %% opacity = 0.8 %% vertex_color = green %% axis = False sage: print('\n'.join(Img.splitlines()[22:26])) %% Coordinate of the vertices: %% \coordinate (-1.00000, -1.00000, 0.00000) at (-1.00000, -1.00000, 0.00000); \coordinate (-1.00000, 0.00000, -1.00000) at (-1.00000, 0.00000, -1.00000); """ return self.projection().tikz(view, angle, scale, edge_color, facet_color, opacity, vertex_color, axis) def _rich_repr_(self, display_manager, **kwds): r""" Rich Output Magic Method See :mod:`sage.repl.rich_output` for details. 
        EXAMPLES::

            sage: from sage.repl.rich_output import get_display_manager
            sage: dm = get_display_manager()
            sage: polytopes.hypercube(2)._rich_repr_(dm)
            OutputPlainText container

        The ``supplemental_plot`` preference lets us control whether this
        object is shown as text or picture+text::

            sage: dm.preferences.supplemental_plot
            'never'
            sage: del dm.preferences.supplemental_plot
            sage: polytopes.hypercube(3)
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices (use the .plot() method to plot)
            sage: dm.preferences.supplemental_plot = 'never'
        """
        prefs = display_manager.preferences
        is_small = (self.ambient_dim() <= 2)
        can_plot = (prefs.supplemental_plot != 'never')
        # Plot either when the user always wants plots, or when the
        # polyhedron is low-dimensional enough that plotting is cheap.
        plot_graph = can_plot and (prefs.supplemental_plot == 'always' or is_small)
        # Under certain circumstances we display the plot as graphics
        if plot_graph:
            plot_kwds = dict(kwds)
            plot_kwds.setdefault('title', repr(self))
            output = self.plot(**plot_kwds)._rich_repr_(display_manager)
            if output is not None:
                return output
        # create text for non-graphical output
        if can_plot:
            text = '{0} (use the .plot() method to plot)'.format(repr(self))
        else:
            text = repr(self)
        # latex() produces huge tikz environment, override
        tp = display_manager.types
        if (prefs.text == 'latex' and
                tp.OutputLatex in display_manager.supported_output()):
            return tp.OutputLatex(r'\text{{{0}}}'.format(text))
        return tp.OutputPlainText(text)

    @cached_method
    def gale_transform(self):
        r"""
        Return the Gale transform of a polytope as described in the
        reference below.

        OUTPUT:

        A tuple of vectors, the Gale transform.  The dimension is the
        dimension of the affine dependencies of the vertices of the
        polytope.

        EXAMPLES:

        This is from the reference, for a triangular prism::

            sage: p = Polyhedron(vertices = [[0,0],[0,1],[1,0]])
            sage: p2 = p.prism()
            sage: p2.gale_transform()
            ((-1, 0), (0, -1), (1, 1), (-1, -1), (1, 0), (0, 1))

        REFERENCES:

        Lectures in Geometric Combinatorics, R.R.Thomas, 2006, AMS Press.

        .. SEEALSO::

            :func:`~sage.geometry.polyhedron.library.gale_transform_to_polytope`.

        TESTS::

            sage: P = Polyhedron(rays=[[1,0,0]])
            sage: P.gale_transform()
            Traceback (most recent call last):
            ...
            ValueError: not a polytope

        Check that :trac:`29073` is fixed::

            sage: P = polytopes.icosahedron(exact=False)
            sage: sum(P.gale_transform()).norm() < 1e-15
            True
        """
        if not self.is_compact():
            raise ValueError('not a polytope')

        # Rows are the homogenized vertices (1, v); the Gale transform is a
        # basis of the right kernel of the transposed matrix.
        A = matrix(self.n_vertices(),
                   [[1]+x for x in self.vertex_generator()])
        A = A.transpose()
        A_ker = A.right_kernel_matrix(basis='computed')
        return tuple(A_ker.columns())

    def _test_gale_transform(self, tester=None, **options):
        r"""
        Run tests on the method :meth:`.gale_transform` and its inverse
        :meth:`~sage.geometry.polyhedron.library.gale_transform_to_polytope`.

        TESTS::

            sage: polytopes.cross_polytope(3)._test_gale_transform()
        """
        if tester is None:
            tester = self._tester(**options)

        if not self.is_compact():
            # Unbounded polyhedra must refuse the Gale transform.
            with tester.assertRaises(ValueError):
                self.gale_transform()
            return

        # Check :trac:`29073`.
        if not self.base_ring().is_exact() and self.ambient_dim() > 0:
            g = self.gale_transform()
            tester.assertTrue(sum(g).norm() < 1e-10 or sum(g).norm()/matrix(g).norm() < 1e-13)
            return

        # Prevent very long doctests.
        if self.n_vertices() + self.n_rays() > 50 or self.n_facets() > 50:
            return

        if not self.is_empty():
            # ``gale_transform_to_polytope`` needs at least one vertex to work.
            from sage.geometry.polyhedron.library import gale_transform_to_polytope
            g = self.gale_transform()
            P = gale_transform_to_polytope(g, base_ring=self.base_ring(), backend=self.backend())

            try:
                import sage.graphs.graph
            except ImportError:
                pass
            else:
                tester.assertTrue(self.is_combinatorially_isomorphic(P))

    def projection(self, projection=None):
        r"""
        Return a projection object.

        INPUT:

        - ``projection`` -- a projection function

        OUTPUT:

        The identity projection. This is useful for plotting
        polyhedra.

        .. SEEALSO::

            :meth:`~sage.geometry.polyhedron.base.Polyhedron_base.schlegel_projection` for a more
            interesting projection.
        EXAMPLES::

            sage: p = polytopes.hypercube(3)
            sage: proj = p.projection()
            sage: proj
            The projection of a polyhedron into 3 dimensions
        """
        from .plot import Projection
        # NOTE(review): the assignment below *replaces* this bound method
        # with a ``Projection`` instance attribute of the same name on this
        # instance, caching the result; confirm no caller relies on invoking
        # ``self.projection()`` as a method twice on the same instance.
        if projection is not None:
            self.projection = Projection(self, projection)
        else:
            self.projection = Projection(self)
        return self.projection

    def render_solid(self, **kwds):
        r"""
        Return a solid rendering of a 2- or 3-d polytope.

        EXAMPLES::

            sage: p = polytopes.hypercube(3)
            sage: p_solid = p.render_solid(opacity = .7)
            sage: type(p_solid)
            <class 'sage.plot.plot3d.index_face_set.IndexFaceSet'>
        """
        proj = self.projection()
        if self.ambient_dim() == 3:
            return proj.render_solid_3d(**kwds)
        if self.ambient_dim() == 2:
            return proj.render_fill_2d(**kwds)
        raise ValueError("render_solid is only defined for 2 and 3 dimensional polyhedra")

    def render_wireframe(self, **kwds):
        r"""
        For polytopes in 2 or 3 dimensions, return the edges as a list of
        lines.

        EXAMPLES::

            sage: p = Polyhedron([[1,2,],[1,1],[0,0]])
            sage: p_wireframe = p.render_wireframe()
            sage: p_wireframe._objects
            [Line defined by 2 points, Line defined by 2 points, Line defined by 2 points]
        """
        proj = self.projection()
        if self.ambient_dim() == 3:
            return proj.render_wireframe_3d(**kwds)
        if self.ambient_dim() == 2:
            return proj.render_outline_2d(**kwds)
        raise ValueError("render_wireframe is only defined for 2 and 3 dimensional polyhedra")

    def schlegel_projection(self, facet=None, position=None):
        r"""
        Return the Schlegel projection.

        * The facet is orthonormally transformed into its affine hull.

        * The position specifies a point coming out of the barycenter of the
          facet from which the other vertices will be projected into the
          facet.

        INPUT:

        - ``facet`` -- a PolyhedronFace. The facet into which the Schlegel
          diagram is created. The default is the first facet.

        - ``position`` -- a positive number. Determines a relative distance
          from the barycenter of ``facet``. A value close to 0 will place the
          projection point close to the facet and a large value further away.
          Default is `1`. If the given value is too large, an error is
          returned.

        OUTPUT:

        A :class:`~sage.geometry.polyhedron.plot.Projection` object.

        EXAMPLES::

            sage: p = polytopes.hypercube(3)
            sage: sch_proj = p.schlegel_projection()
            sage: schlegel_edge_indices = sch_proj.lines
            sage: schlegel_edges = [sch_proj.coordinates_of(x) for x in schlegel_edge_indices]
            sage: len([x for x in schlegel_edges if x[0][0] > 0])
            8

        The Schlegel projection preserves the convexity of facets,
        see :trac:`30015`::

            sage: fcube = polytopes.hypercube(4)
            sage: tfcube = fcube.face_truncation(fcube.faces(0)[0])
            sage: tfcube.facets()[-1]
            A 3-dimensional face of a Polyhedron in QQ^4 defined as the convex hull of 8 vertices
            sage: sp = tfcube.schlegel_projection(tfcube.facets()[-1])
            sage: sp.plot()                                          # optional - sage.plot
            Graphics3d Object

        The same truncated cube but see inside the tetrahedral facet::

            sage: tfcube.facets()[4]
            A 3-dimensional face of a Polyhedron in QQ^4 defined as the convex hull of 4 vertices
            sage: sp = tfcube.schlegel_projection(tfcube.facets()[4])
            sage: sp.plot()                                          # optional - sage.plot
            Graphics3d Object

        Different values of ``position`` change the projection::

            sage: sp = tfcube.schlegel_projection(tfcube.facets()[4],1/2)
            sage: sp.plot()                                          # optional - sage.plot
            Graphics3d Object
            sage: sp = tfcube.schlegel_projection(tfcube.facets()[4],4)
            sage: sp.plot()                                          # optional - sage.plot
            Graphics3d Object

        A value which is too large gives a projection point that sees more
        than one facet, resulting in an error::

            sage: sp = tfcube.schlegel_projection(tfcube.facets()[4],5)
            Traceback (most recent call last):
            ...
            ValueError: the chosen position is too large
        """
        proj = self.projection()
        return proj.schlegel(facet, position)

    def affine_hull(self, *args, **kwds):
        r"""
        Return the affine hull of ``self`` as a polyhedron.
        EXAMPLES::

            sage: half_plane_in_space = Polyhedron(ieqs=[(0,1,0,0)], eqns=[(0,0,0,1)])
            sage: half_plane_in_space.affine_hull().Hrepresentation()
            (An equation (0, 0, 1) x + 0 == 0,)

            sage: polytopes.cube().affine_hull().is_universe()
            True
        """
        if args or kwds:
            raise TypeError("the method 'affine_hull' does not take any parameters; perhaps you meant 'affine_hull_projection'")
        if not self.inequalities():
            # No inequalities: ``self`` is already an affine subspace.
            return self
        self_as_face = self.faces(self.dimension())[0]
        return self_as_face.affine_tangent_cone()

    @cached_method
    def _affine_hull_projection(self, *,
                                as_convex_set=True, as_affine_map=True, as_section_map=True,
                                orthogonal=False, orthonormal=False,
                                extend=False, minimal=False):
        r"""
        Return ``self`` projected into its affine hull.

        INPUT:

        See :meth:`affine_hull_projection`.

        OUTPUT:

        An instance of :class:`~sage.geometry.convex_set.AffineHullProjectionData`.
        See :meth:`affine_hull_projection` for details.

        TESTS:

        Check that :trac:`23355` is fixed::

            sage: P = Polyhedron([[7]]); P
            A 0-dimensional polyhedron in ZZ^1 defined as the convex hull of 1 vertex
            sage: P.affine_hull_projection()
            A 0-dimensional polyhedron in ZZ^0 defined as the convex hull of 1 vertex
            sage: P.affine_hull_projection(orthonormal='True')
            A 0-dimensional polyhedron in QQ^0 defined as the convex hull of 1 vertex
            sage: P.affine_hull_projection(orthogonal='True')
            A 0-dimensional polyhedron in QQ^0 defined as the convex hull of 1 vertex

        Check that :trac:`24047` is fixed::

            sage: P1 = Polyhedron(vertices=([[-1, 1], [0, -1], [0, 0], [-1, -1]]))
            sage: P2 = Polyhedron(vertices=[[1, 1], [1, -1], [0, -1], [0, 0]])
            sage: P = P1.intersection(P2)
            sage: A, b = P.affine_hull_projection(as_affine_map=True, orthonormal=True, extend=True)    # optional - sage.rings.number_field

            sage: Polyhedron([(2,3,4)]).affine_hull_projection()
            A 0-dimensional polyhedron in ZZ^0 defined as the convex hull of 1 vertex

        Check that backend is preserved::

            sage: polytopes.simplex(backend='field').affine_hull_projection().backend()
            'field'
            sage: P = Polyhedron(vertices=[[0,0], [1,0]], backend='field')
            sage: P.affine_hull_projection(orthogonal=True, orthonormal=True, extend=True).backend()  # optional - sage.rings.number_field
            'field'

        Check that :trac:`29116` is fixed::

            sage: V =[
            ....:    [1, 0, -1, 0, 0],
            ....:    [1, 0, 0, -1, 0],
            ....:    [1, 0, 0, 0, -1],
            ....:    [1, 0, 0, +1, 0],
            ....:    [1, 0, 0, 0, +1],
            ....:    [1, +1, 0, 0, 0]
            ....:     ]
            sage: P = Polyhedron(V)
            sage: P.affine_hull_projection()
            A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 6 vertices
            sage: P.affine_hull_projection(orthonormal=True)
            Traceback (most recent call last):
            ...
            ValueError: the base ring needs to be extended; try with "extend=True"
            sage: P.affine_hull_projection(orthonormal=True, extend=True)   # optional - sage.rings.number_field
            A 4-dimensional polyhedron in AA^4 defined as the convex hull of 6 vertices
        """
        result = AffineHullProjectionData()

        if self.is_empty():
            raise ValueError('affine hull projection of an empty polyhedron is undefined')

        # handle trivial full-dimensional case
        if self.ambient_dim() == self.dim():
            if as_convex_set:
                result.image = self
            if as_affine_map:
                # The projection (and its section) is the identity map.
                identity = linear_transformation(matrix(self.base_ring(),
                                                        self.dim(),
                                                        self.dim(),
                                                        self.base_ring().one()))
                result.projection_linear_map = result.section_linear_map = identity
                result.projection_translation = result.section_translation = self.ambient_space().zero()
        elif orthogonal or orthonormal:
            # see TODO
            if not self.is_compact():
                raise NotImplementedError('"orthogonal=True" and "orthonormal=True" work only for compact polyhedra')
            affine_basis = self.an_affine_basis()
            v0 = affine_basis[0].vector()
            # We implicitly translate the first vertex of the affine basis to zero.
            vi = tuple(v.vector() - v0 for v in affine_basis[1:])
            M = matrix(self.base_ring(), self.dim(), self.ambient_dim(), vi)

            # Switch base_ring to AA if necessary,
            # since gram_schmidt needs to be able to take square roots.
            # Pick orthonormal basis and transform all vertices accordingly
            # if the orthonormal transform makes it necessary, change base ring.
            try:
                A, G = M.gram_schmidt(orthonormal=orthonormal)
            except TypeError:
                if not extend:
                    raise ValueError('the base ring needs to be extended; try with "extend=True"')
                M = matrix(AA, M)
                A = M.gram_schmidt(orthonormal=orthonormal)[0]
                if minimal:
                    from sage.rings.qqbar import number_field_elements_from_algebraics
                    new_ring = number_field_elements_from_algebraics(A.list(), embedded=True, minimal=True)[0]
                    A = A.change_ring(new_ring)
            L = linear_transformation(A, side='right')
            ambient_translation = -vector(A.base_ring(), affine_basis[0])
            image_translation = A * ambient_translation
            # Note the order. We compute ``A*self`` and then translate the image.
            # ``A*self`` uses the incidence matrix and we avoid recomputation.
            # Also, if the new base ring is ``AA``, we want to avoid computing the incidence matrix in that ring.
            # ``convert=True`` takes care of the case, where there might be no coercion (``AA`` and quadratic field).
            if as_convex_set:
                result.image = self.linear_transformation(A, new_base_ring=A.base_ring()) + image_translation
            if as_affine_map:
                result.projection_linear_map = L
                result.projection_translation = image_translation
            if as_section_map:
                # The Moore-Penrose style right inverse A^T (A A^T)^{-1}.
                L_dagger = linear_transformation(A.transpose() * (A * A.transpose()).inverse(), side='right')
                result.section_linear_map = L_dagger
                result.section_translation = v0.change_ring(A.base_ring())
        else:
            # translate one vertex to the origin
            v0 = self.vertices()[0].vector()
            gens = []
            for v in self.vertices()[1:]:
                gens.append(v.vector() - v0)
            for r in self.rays():
                gens.append(r.vector())
            for l in self.lines():
                gens.append(l.vector())

            # Pick subset of coordinates to coordinatize the affine span
            M = matrix(gens)
            pivots = M.pivots()

            # Projection onto the pivot coordinates.
            A = matrix(self.base_ring(), len(pivots), self.ambient_dim(),
                       [[1 if j == i else 0 for j in range(self.ambient_dim())] for i in pivots])
            if as_affine_map:
                image_translation = vector(self.base_ring(), self.dim())
                L = linear_transformation(A, side='right')
                result.projection_linear_map = L
                result.projection_translation = image_translation
            if as_convex_set:
                result.image = A*self
            if as_section_map:
                # NOTE(review): this branch reads ``L`` and ``image_translation``,
                # which are only assigned when ``as_affine_map`` is true; calling
                # with ``as_section_map=True, as_affine_map=False`` would raise
                # NameError -- confirm that no caller does this.
                if self.dim():
                    B = M.transpose()/(A*M.transpose())
                else:
                    B = matrix(self.ambient_dim(), 0)
                L_section = linear_transformation(B, side='right')
                result.section_linear_map = L_section
                result.section_translation = v0 - L_section(L(v0) + image_translation)

        return result

    def affine_hull_projection(self,
                               as_polyhedron=None, as_affine_map=False,
                               orthogonal=False, orthonormal=False,
                               extend=False, minimal=False,
                               return_all_data=False,
                               *, as_convex_set=None):
        r"""
        Return the polyhedron projected into its affine hull.

        Each polyhedron is contained in some smallest affine subspace
        (possibly the entire ambient space) -- its affine hull.  We provide
        an affine linear map that projects the ambient space of the
        polyhedron to the standard Euclidean space of dimension of the
        polyhedron, which restricts to a bijection from the affine hull.
        The projection map is not unique; some parameters control the choice
        of the map.  Other parameters control the output of the function.

        INPUT:

        - ``as_polyhedron`` (or ``as_convex_set``) -- (boolean or the default
          ``None``) and

        - ``as_affine_map`` -- (boolean, default ``False``) control the output

          The default ``as_polyhedron=None`` translates to
          ``as_polyhedron=not as_affine_map``,
          therefore to ``as_polyhedron=True`` if nothing is specified.

          If exactly one of either ``as_polyhedron`` or ``as_affine_map`` is
          set, then either a polyhedron or the affine transformation
          is returned. The affine transformation
          sends the embedded polytope to a full-dimensional one.
          It is given as a pair ``(A, b)``, where ``A`` is a linear
          transformation and ``b`` is a vector, and the affine transformation
          sends ``v`` to ``A(v)+b``.

          If both ``as_polyhedron`` and ``as_affine_map`` are set, then
          both are returned, encapsulated in an instance of
          :class:`~sage.geometry.convex_set.AffineHullProjectionData`.

        - ``return_all_data`` -- (boolean, default ``False``)

          If set, then ``as_polyhedron`` and ``as_affine_map`` will be set
          (possibly overridden) and additional (internal) data concerning
          the transformation is returned. Everything is encapsulated in an
          instance of
          :class:`~sage.geometry.convex_set.AffineHullProjectionData` in
          this case.

        - ``orthogonal`` -- boolean (default: ``False``); if ``True``,
          provide an orthogonal transformation.

        - ``orthonormal`` -- boolean (default: ``False``); if ``True``,
          provide an orthonormal transformation. If the base ring does not
          provide the necessary square roots, the extend parameter
          needs to be set to ``True``.

        - ``extend`` -- boolean (default: ``False``); if ``True``,
          allow base ring to be extended if necessary. This becomes
          relevant when requiring an orthonormal transformation.

        - ``minimal`` -- boolean (default: ``False``); if ``True``,
          when doing an extension, it computes the minimal base ring of the
          extension, otherwise the base ring is ``AA``.

        OUTPUT:

        A full-dimensional polyhedron or an affine transformation,
        depending on the parameters ``as_polyhedron`` and ``as_affine_map``,
        or an instance of
        :class:`~sage.geometry.convex_set.AffineHullProjectionData`
        containing all data (parameter ``return_all_data``).

        If the output is an instance of
        :class:`~sage.geometry.convex_set.AffineHullProjectionData`, the
        following fields may be set:

        - ``image`` -- the projection of the original polyhedron

        - ``projection_map`` -- the affine map as a pair whose first component
          is a linear transformation and its second component a shift;
          see above.

        - ``section_map`` -- an affine map as a pair whose first component
          is a linear transformation and its second component a shift.
          It maps the codomain of ``affine_map`` to the affine hull of
          ``self``.  It is a right inverse of ``projection_map``.

        Note that all of these data are compatible.

        .. TODO::

            - make the parameters ``orthogonal`` and ``orthonormal`` work
              with unbounded polyhedra.

        EXAMPLES::

            sage: triangle = Polyhedron([(1,0,0), (0,1,0), (0,0,1)]);  triangle
            A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
            sage: triangle.affine_hull_projection()
            A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices

            sage: half3d = Polyhedron(vertices=[(3,2,1)], rays=[(1,0,0)])
            sage: half3d.affine_hull_projection().Vrepresentation()
            (A ray in the direction (1), A vertex at (3))

        The resulting affine hulls depend on the parameter ``orthogonal`` and
        ``orthonormal``::

            sage: L = Polyhedron([[1,0],[0,1]]); L
            A 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: A = L.affine_hull_projection(); A
            A 1-dimensional polyhedron in ZZ^1 defined as the convex hull of 2 vertices
            sage: A.vertices()
            (A vertex at (0), A vertex at (1))
            sage: A = L.affine_hull_projection(orthogonal=True); A
            A 1-dimensional polyhedron in QQ^1 defined as the convex hull of 2 vertices
            sage: A.vertices()
            (A vertex at (0), A vertex at (2))
            sage: A = L.affine_hull_projection(orthonormal=True)                 # optional - sage.rings.number_field
            Traceback (most recent call last):
            ...
            ValueError: the base ring needs to be extended; try with "extend=True"
            sage: A = L.affine_hull_projection(orthonormal=True, extend=True); A   # optional - sage.rings.number_field
            A 1-dimensional polyhedron in AA^1 defined as the convex hull of 2 vertices
            sage: A.vertices()                                                   # optional - sage.rings.number_field
            (A vertex at (1.414213562373095?), A vertex at (0.?e-18))

        More generally::

            sage: S = polytopes.simplex(); S
            A 3-dimensional polyhedron in ZZ^4 defined as the convex hull of 4 vertices
            sage: S.vertices()
            (A vertex at (0, 0, 0, 1),
             A vertex at (0, 0, 1, 0),
             A vertex at (0, 1, 0, 0),
             A vertex at (1, 0, 0, 0))
            sage: A = S.affine_hull_projection(); A
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
            sage: A.vertices()
            (A vertex at (0, 0, 0),
             A vertex at (0, 0, 1),
             A vertex at (0, 1, 0),
             A vertex at (1, 0, 0))
            sage: A = S.affine_hull_projection(orthogonal=True); A
            A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
            sage: A.vertices()
            (A vertex at (0, 0, 0),
             A vertex at (2, 0, 0),
             A vertex at (1, 3/2, 0),
             A vertex at (1, 1/2, 4/3))
            sage: A = S.affine_hull_projection(orthonormal=True, extend=True); A
            A 3-dimensional polyhedron in AA^3 defined as the convex hull of 4 vertices
            sage: A.vertices()
            (A vertex at (0.7071067811865475?, 0.4082482904638630?, 1.154700538379252?),
             A vertex at (0.7071067811865475?, 1.224744871391589?, 0.?e-18),
             A vertex at (1.414213562373095?, 0.?e-18, 0.?e-18),
             A vertex at (0.?e-18, 0.?e-18, 0.?e-18))

        With the parameter ``minimal`` one can get a minimal base ring::

            sage: s = polytopes.simplex(3)
            sage: s_AA = s.affine_hull_projection(orthonormal=True, extend=True)
            sage: s_AA.base_ring()
            Algebraic Real Field
            sage: s_full = s.affine_hull_projection(orthonormal=True, extend=True, minimal=True)
            sage: s_full.base_ring()
            Number Field in a with defining polynomial y^4 - 4*y^2 + 1 with a = 0.5176380902050415?

        More examples with the ``orthonormal`` parameter::

            sage: P = polytopes.permutahedron(3); P                   # optional - sage.combinat  # optional - sage.rings.number_field
            A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
            sage: set([F.as_polyhedron().affine_hull_projection(orthonormal=True, extend=True).volume() for F in P.affine_hull_projection().faces(1)]) == {1, sqrt(AA(2))}   # optional - sage.combinat  # optional - sage.rings.number_field
            True
            sage: set([F.as_polyhedron().affine_hull_projection(orthonormal=True, extend=True).volume() for F in P.affine_hull_projection(orthonormal=True, extend=True).faces(1)]) == {sqrt(AA(2))}   # optional - sage.combinat  # optional - sage.rings.number_field
            True

            sage: D = polytopes.dodecahedron()                                   # optional - sage.rings.number_field
            sage: F = D.faces(2)[0].as_polyhedron()                              # optional - sage.rings.number_field
            sage: F.affine_hull_projection(orthogonal=True)                      # optional - sage.rings.number_field
            A 2-dimensional polyhedron in (Number Field in sqrt5 with defining polynomial x^2 - 5 with sqrt5 = 2.236067977499790?)^2 defined as the convex hull of 5 vertices
            sage: F.affine_hull_projection(orthonormal=True, extend=True)        # optional - sage.rings.number_field
            A 2-dimensional polyhedron in AA^2 defined as the convex hull of 5 vertices

            sage: K.<sqrt2> = QuadraticField(2)                                  # optional - sage.rings.number_field
            sage: P = Polyhedron([2*[K.zero()],2*[sqrt2]]); P                    # optional - sage.rings.number_field
            A 1-dimensional polyhedron in (Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?)^2 defined as the convex hull of 2 vertices
            sage: P.vertices()                                                   # optional - sage.rings.number_field
            (A vertex at (0, 0), A vertex at (sqrt2, sqrt2))
            sage: A = P.affine_hull_projection(orthonormal=True); A              # optional - sage.rings.number_field
            A 1-dimensional polyhedron in (Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?)^1 defined as the convex hull of 2 vertices
            sage: A.vertices()                                                   # optional - sage.rings.number_field
            (A vertex at (0), A vertex at (2))

            sage: K.<sqrt3> = QuadraticField(3)                                  # optional - sage.rings.number_field
            sage: P = Polyhedron([2*[K.zero()],2*[sqrt3]]); P                    # optional - sage.rings.number_field
            A 1-dimensional polyhedron in (Number Field in sqrt3 with defining polynomial x^2 - 3 with sqrt3 = 1.732050807568878?)^2 defined as the convex hull of 2 vertices
            sage: P.vertices()                                                   # optional - sage.rings.number_field
            (A vertex at (0, 0), A vertex at (sqrt3, sqrt3))
            sage: A = P.affine_hull_projection(orthonormal=True)                 # optional - sage.rings.number_field
            Traceback (most recent call last):
            ...
            ValueError: the base ring needs to be extended; try with "extend=True"
            sage: A = P.affine_hull_projection(orthonormal=True, extend=True); A    # optional - sage.rings.number_field
            A 1-dimensional polyhedron in AA^1 defined as the convex hull of 2 vertices
            sage: A.vertices()                                                   # optional - sage.rings.number_field
            (A vertex at (0), A vertex at (2.449489742783178?))
            sage: sqrt(6).n()                                                    # optional - sage.rings.number_field
            2.44948974278318

        The affine hull is combinatorially equivalent to the input::

            sage: P.is_combinatorially_isomorphic(P.affine_hull_projection())    # optional - sage.rings.number_field
            True
            sage: P.is_combinatorially_isomorphic(P.affine_hull_projection(orthogonal=True))   # optional - sage.rings.number_field
            True
            sage: P.is_combinatorially_isomorphic(P.affine_hull_projection(orthonormal=True, extend=True))   # optional - sage.rings.number_field
            True

        The ``orthonormal=True`` parameter preserves volumes;
        it provides an isometric copy of the polyhedron::

            sage: Pentagon = polytopes.dodecahedron().faces(2)[0].as_polyhedron()    # optional - sage.rings.number_field
            sage: P = Pentagon.affine_hull_projection(orthonormal=True, extend=True)    # optional - sage.rings.number_field
            sage: _, c = P.is_inscribed(certificate=True)                        # optional - sage.rings.number_field
            sage: c                                                              # optional - sage.rings.number_field
            (0.4721359549995794?, 0.6498393924658126?)
            sage: circumradius = (c-vector(P.vertices()[0])).norm()              # optional - sage.rings.number_field
            sage: p = polytopes.regular_polygon(5)                               # optional - sage.rings.number_field
            sage: p.volume()                                                     # optional - sage.rings.number_field
            2.377641290737884?
            sage: P.volume()                                                     # optional - sage.rings.number_field
            1.53406271079097?
            sage: p.volume()*circumradius^2                                      # optional - sage.rings.number_field
            1.534062710790965?
            sage: P.volume() == p.volume()*circumradius^2                        # optional - sage.rings.number_field
            True

        One can also use ``orthogonal`` parameter to calculate volumes;
        in this case we don't need to switch base rings. One has to divide
        by the square root of the determinant of the linear part of the
        affine transformation times its transpose::

            sage: Pentagon = polytopes.dodecahedron().faces(2)[0].as_polyhedron()    # optional - sage.rings.number_field
            sage: Pnormal = Pentagon.affine_hull_projection(orthonormal=True, extend=True)    # optional - sage.rings.number_field
            sage: Pgonal = Pentagon.affine_hull_projection(orthogonal=True)      # optional - sage.rings.number_field
            sage: A, b = Pentagon.affine_hull_projection(orthogonal=True, as_affine_map=True)    # optional - sage.rings.number_field
            sage: Adet = (A.matrix().transpose()*A.matrix()).det()               # optional - sage.rings.number_field
            sage: Pnormal.volume()                                               # optional - sage.rings.number_field
            1.53406271079097?
            sage: Pgonal.volume()/Adet.sqrt(extend=True)                         # optional - sage.rings.number_field
            -80*(55*sqrt(5) - 123)/sqrt(-6368*sqrt(5) + 14240)
            sage: Pgonal.volume()/AA(Adet).sqrt().n(digits=20)                   # optional - sage.rings.number_field
            1.5340627107909646813
            sage: AA(Pgonal.volume()^2) == (Pnormal.volume()^2)*AA(Adet)         # optional - sage.rings.number_field
            True

        Another example with ``as_affine_map=True``::

            sage: P = polytopes.permutahedron(4)                                 # optional - sage.combinat  # optional - sage.rings.number_field
            sage: A, b = P.affine_hull_projection(orthonormal=True, as_affine_map=True, extend=True)   # optional - sage.combinat  # optional - sage.rings.number_field
            sage: Q = P.affine_hull_projection(orthonormal=True, extend=True)    # optional - sage.combinat  # optional - sage.rings.number_field
            sage: Q.center()                                                     # optional - sage.combinat  # optional - sage.rings.number_field
            (0.7071067811865475?, 1.224744871391589?, 1.732050807568878?)
            sage: A(P.center()) + b == Q.center()                                # optional - sage.combinat  # optional - sage.rings.number_field
            True

        For unbounded, non full-dimensional polyhedra, the ``orthogonal=True``
        and ``orthonormal=True`` is not implemented::

            sage: P = Polyhedron(ieqs=[[0, 1, 0], [0, 0, 1], [0, 0, -1]]); P
            A 1-dimensional polyhedron in QQ^2 defined as the convex hull of 1 vertex and 1 ray
            sage: P.is_compact()
            False
            sage: P.is_full_dimensional()
            False
            sage: P.affine_hull_projection(orthogonal=True)
            Traceback (most recent call last):
            ...
            NotImplementedError: "orthogonal=True" and "orthonormal=True" work only for compact polyhedra
            sage: P.affine_hull_projection(orthonormal=True)
            Traceback (most recent call last):
            ...
            NotImplementedError: "orthogonal=True" and "orthonormal=True" work only for compact polyhedra

        Setting ``as_affine_map`` to ``True``
        without ``orthogonal`` or ``orthonormal`` set to ``True``::

            sage: S = polytopes.simplex()
            sage: S.affine_hull_projection(as_affine_map=True)
            (Vector space morphism represented by the matrix:
             [1 0 0]
             [0 1 0]
             [0 0 1]
             [0 0 0]
             Domain: Vector space of dimension 4 over Rational Field
             Codomain: Vector space of dimension 3 over Rational Field,
             (0, 0, 0))

        If the polyhedron is full-dimensional, it is returned::

            sage: polytopes.cube().affine_hull_projection()
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices
            sage: polytopes.cube().affine_hull_projection(as_affine_map=True)
            (Vector space morphism represented by the matrix:
             [1 0 0]
             [0 1 0]
             [0 0 1]
             Domain: Vector space of dimension 3 over Rational Field
             Codomain: Vector space of dimension 3 over Rational Field,
             (0, 0, 0))

        Return polyhedron and affine map::

            sage: S = polytopes.simplex(2)
            sage: data = S.affine_hull_projection(orthogonal=True,
            ....:                                 as_polyhedron=True,
            ....:                                 as_affine_map=True); data
            AffineHullProjectionData(image=A 2-dimensional polyhedron in QQ^2
                defined as the convex hull of 3 vertices,
                projection_linear_map=Vector space morphism represented by the matrix:
                [  -1 -1/2]
                [   1 -1/2]
                [   0    1]
                Domain: Vector space of dimension 3 over Rational Field
                Codomain: Vector space of dimension 2 over Rational Field,
                projection_translation=(1, 1/2),
                section_linear_map=None,
                section_translation=None)

        Return all data::

            sage: data = S.affine_hull_projection(orthogonal=True, return_all_data=True); data
            AffineHullProjectionData(image=A 2-dimensional polyhedron in QQ^2
                defined as the convex hull of 3 vertices,
                projection_linear_map=Vector space morphism represented by the matrix:
                [  -1 -1/2]
                [   1 -1/2]
                [   0    1]
                Domain: Vector space of dimension 3 over Rational Field
                Codomain: Vector space of dimension 2 over Rational Field,
                projection_translation=(1, 1/2),
                section_linear_map=Vector space morphism represented by the matrix:
                [-1/2  1/2    0]
                [-1/3 -1/3  2/3]
                Domain: Vector space of dimension 2 over Rational Field
                Codomain: Vector space of dimension 3 over Rational Field,
                section_translation=(1, 0, 0))

        The section map is a right inverse of the projection map::

            sage: data.image.linear_transformation(data.section_linear_map.matrix().transpose()) + data.section_translation == S
            True

        Same without ``orthogonal=True``::

            sage: data = S.affine_hull_projection(return_all_data=True); data
            AffineHullProjectionData(image=A 2-dimensional polyhedron in ZZ^2
                defined as the convex hull of 3 vertices,
                projection_linear_map=Vector space morphism represented by the matrix:
                [1 0]
                [0 1]
                [0 0]
                Domain: Vector space of dimension 3 over Rational Field
                Codomain: Vector space of dimension 2 over Rational Field,
                projection_translation=(0, 0),
                section_linear_map=Vector space morphism represented by the matrix:
                [ 1  0 -1]
                [ 0  1 -1]
                Domain: Vector space of dimension 2 over Rational Field
                Codomain: Vector space of dimension 3 over Rational Field,
                section_translation=(0, 0, 1))
            sage: data.image.linear_transformation(data.section_linear_map.matrix().transpose()) + data.section_translation == S
            True

        ::

            sage: P0 = Polyhedron(
            ....:     ieqs=[(0, -1, 0, 1, 1, 1), (0, 1, 1, 0, -1, -1), (0, -1, 1, 1, 0, 0),
            ....:           (0, 1, 0, 0, 0, 0), (0, 0, 1, 1, -1, -1), (0, 0, 0, 0, 0, 1),
            ....:           (0, 0, 0, 0, 1, 0), (0, 0, 0, 1, 0, -1), (0, 0, 1, 0, 0, 0)])
            sage: P = P0.intersection(Polyhedron(eqns=[(-1, 1, 1, 1, 1, 1)]))
            sage: P.dim()
            4
            sage: P.affine_hull_projection(orthogonal=True, as_affine_map=True)[0]
            Vector space morphism represented by the matrix:
            [    0     0     0   1/3]
            [ -2/3  -1/6     0 -1/12]
            [  1/3  -1/6   1/2 -1/12]
            [    0   1/2     0 -1/12]
            [  1/3  -1/6  -1/2 -1/12]
            Domain: Vector space of dimension 5 over Rational Field
            Codomain: Vector space of dimension 4 over Rational Field
        """
        # ``as_polyhedron`` is the historical name of the keyword; the generic
        # implementation in the convex-set base class calls it ``as_convex_set``.
        if as_polyhedron is not None:
            as_convex_set = as_polyhedron
        return super().affine_hull_projection(
            as_convex_set=as_convex_set,
            as_affine_map=as_affine_map,
            orthogonal=orthogonal, orthonormal=orthonormal,
            extend=extend, minimal=minimal,
            return_all_data=return_all_data)

    def _test_affine_hull_projection(self, tester=None, verbose=False, **options):
        r"""
        Run tests on the method :meth:`.affine_hull_projection`.

        TESTS::

            sage: D = polytopes.dodecahedron()                                   # optional - sage.rings.number_field
            sage: D.facets()[0].as_polyhedron()._test_affine_hull_projection()   # optional - sage.rings.number_field
        """
        if tester is None:
            tester = self._tester(**options)

        if self.is_empty():
            # Undefined, nothing to test
            return

        if self.n_vertices() > 30 or self.n_facets() > 30 or self.dim() > 6:
            # Avoid very long doctests.
            return

        # Exercise the default projection plus, for compact polyhedra, the
        # orthogonal / orthonormal / minimal-extension variants.
        data_sets = [None]*4
        data_sets[0] = self.affine_hull_projection(return_all_data=True)
        if self.is_compact():
            data_sets[1] = self.affine_hull_projection(return_all_data=True,
                                                       orthogonal=True,
                                                       extend=True)
            data_sets[2] = self.affine_hull_projection(return_all_data=True,
                                                       orthonormal=True,
                                                       extend=True)
            data_sets[3] = self.affine_hull_projection(return_all_data=True,
                                                       orthonormal=True,
                                                       extend=True,
                                                       minimal=True)
        else:
            data_sets = data_sets[:1]

        for i, data in enumerate(data_sets):
            if verbose:
                print("Running test number {}".format(i))
            # The projection map must send ``self`` onto the image.
            M = data.projection_linear_map.matrix().transpose()
            tester.assertEqual(self.linear_transformation(M, new_base_ring=M.base_ring())
                               + data.projection_translation,
                               data.image)

            # The section map must send the image back onto ``self``
            # (extended to AA when the section lives there).
            M = data.section_linear_map.matrix().transpose()
            if M.base_ring() is AA:
                self_extend = self.change_ring(AA)
            else:
                self_extend = self
            tester.assertEqual(data.image.linear_transformation(M)
                               + data.section_translation,
                               self_extend)
            if i == 0:
                tester.assertEqual(data.image.base_ring(), self.base_ring())
            else:
                # Test whether the map is orthogonal.
                M = data.projection_linear_map.matrix()
                tester.assertTrue((M.transpose() * M).is_diagonal())
                if i > 1:
                    # Test whether the map is orthonormal.
                    tester.assertTrue((M.transpose() * M).is_one())
            if i == 3:
                # Test that the extension is indeed minimal.
if self.base_ring() is not AA: tester.assertIsNot(data.image.base_ring(), AA) def affine_hull_manifold(self, name=None, latex_name=None, start_index=0, ambient_space=None, ambient_chart=None, names=None, **kwds): r""" Return the affine hull of ``self`` as a manifold. If ``self`` is full-dimensional, it is just the ambient Euclidean space. Otherwise, it is a Riemannian submanifold of the ambient Euclidean space. INPUT: - ``ambient_space`` -- a :class:`~sage.manifolds.differentiable.examples.euclidean.EuclideanSpace` of the ambient dimension (default: the manifold of ``ambient_chart``, if provided; otherwise, a new instance of ``EuclideanSpace``). - ``ambient_chart`` -- a chart on ``ambient_space``. - ``names`` -- names for the coordinates on the affine hull. - optional arguments accepted by :meth:`affine_hull_projection`. The default chart is determined by the optional arguments of :meth:`affine_hull_projection`. EXAMPLES:: sage: triangle = Polyhedron([(1,0,0), (0,1,0), (0,0,1)]); triangle A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices sage: A = triangle.affine_hull_manifold(name='A'); A 2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3 sage: A.embedding().display() A → E^3 (x0, x1) ↦ (x, y, z) = (t0 + x0, t0 + x1, t0 - x0 - x1 + 1) sage: A.embedding().inverse().display() E^3 → A (x, y, z) ↦ (x0, x1) = (x, y) sage: A.adapted_chart() [Chart (E^3, (x0_E3, x1_E3, t0_E3))] sage: A.normal().display() n = 1/3*sqrt(3) e_x + 1/3*sqrt(3) e_y + 1/3*sqrt(3) e_z sage: A.induced_metric() # Need to call this before volume_form Riemannian metric gamma on the 2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3 sage: A.volume_form() 2-form eps_gamma on the 2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3 Orthogonal version:: sage: A = triangle.affine_hull_manifold(name='A', orthogonal=True); A 2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3 sage: 
A.embedding().display() A → E^3 (x0, x1) ↦ (x, y, z) = (t0 - 1/2*x0 - 1/3*x1 + 1, t0 + 1/2*x0 - 1/3*x1, t0 + 2/3*x1) sage: A.embedding().inverse().display() E^3 → A (x, y, z) ↦ (x0, x1) = (-x + y + 1, -1/2*x - 1/2*y + z + 1/2) Arrangement of affine hull of facets:: sage: D = polytopes.dodecahedron() # optional - sage.rings.number_field sage: E3 = EuclideanSpace(3) # optional - sage.rings.number_field sage: submanifolds = [ # optional - sage.rings.number_field ....: F.as_polyhedron().affine_hull_manifold(name=f'F{i}', orthogonal=True, ambient_space=E3) ....: for i, F in enumerate(D.facets())] sage: sum(FM.plot({}, srange(-2, 2, 0.1), srange(-2, 2, 0.1), opacity=0.2) # not tested # optional - sage.plot # optional - sage.rings.number_field ....: for FM in submanifolds) + D.plot() Graphics3d Object Full-dimensional case:: sage: cube = polytopes.cube(); cube A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices sage: cube.affine_hull_manifold() Euclidean space E^3 """ if ambient_space is None: if ambient_chart is not None: ambient_space = ambient_chart.manifold() else: from sage.manifolds.differentiable.examples.euclidean import EuclideanSpace ambient_space = EuclideanSpace(self.ambient_dim(), start_index=start_index) if ambient_space.dimension() != self.ambient_dim(): raise ValueError('ambient_space and ambient_chart must match the ambient dimension') if self.is_full_dimensional(): return ambient_space if ambient_chart is None: ambient_chart = ambient_space.default_chart() CE = ambient_chart from sage.manifolds.manifold import Manifold if name is None: name, latex_name = self._affine_hull_name_latex_name() H = Manifold(self.dim(), name, ambient=ambient_space, structure="Riemannian", latex_name=latex_name, start_index=start_index) if names is None: names = tuple(f'x{i}' for i in range(self.dim())) CH = H.chart(names=names) data = self.affine_hull_projection(return_all_data=True, **kwds) projection_matrix = 
data.projection_linear_map.matrix().transpose() projection_translation_vector = data.projection_translation section_matrix = data.section_linear_map.matrix().transpose() section_translation_vector = data.section_translation from sage.symbolic.ring import SR # We use the slacks of the (linear independent) equations as the foliation parameters foliation_parameters = vector(SR.var(f't{i}') for i in range(self.ambient_dim() - self.dim())) normal_matrix = matrix(equation.A() for equation in self.equation_generator()).transpose() slack_matrix = normal_matrix.pseudoinverse() phi = H.diff_map(ambient_space, {(CH, CE): (section_matrix * vector(CH._xx) + section_translation_vector + normal_matrix * foliation_parameters).list()}) phi_inv = ambient_space.diff_map(H, {(CE, CH): (projection_matrix * vector(CE._xx) + projection_translation_vector).list()}) foliation_scalar_fields = {parameter: ambient_space.scalar_field({CE: slack_matrix.row(i) * (vector(CE._xx) - section_translation_vector)}) for i, parameter in enumerate(foliation_parameters)} H.set_embedding(phi, inverse=phi_inv, var=list(foliation_parameters), t_inverse=foliation_scalar_fields) return H def _affine_hull_name_latex_name(self, name=None, latex_name=None): r""" Return the default name of the affine hull. EXAMPLES:: sage: polytopes.cube()._affine_hull_name_latex_name('C', r'\square') ('aff_C', '\\mathop{\\mathrm{aff}}(\\square)') sage: Polyhedron(vertices=[[0, 1], [1, 0]])._affine_hull_name_latex_name() ('aff_P', '\\mathop{\\mathrm{aff}}(P)') """ if name is None: name = 'P' if latex_name is None: latex_name = name operator = 'aff' aff_name = f'{operator}_{name}' aff_latex_name = r'\mathop{\mathrm{' + operator + '}}(' + latex_name + ')' return aff_name, aff_latex_name
nilq/baby-python
python