text
stringlengths
38
1.54M
#!/usr/bin/env python
"""Dagor tests 0.2

Usage:
  test.py (ha | de) transmission from <angle_0> to <angle_1> speed <S> increment <I>
  test.py [-h | --help | help]
  test.py --version

Commands:
  transmission  Test motor pulse to IK220 encoder ratio.

Options:
  -h --help      Show this screen.
  --quiet        Only data on stdout
  --space=<file> File defining allowed space region [default: space.txt]

"""
from __future__ import division

from time import sleep
from docopt import docopt
import sys
import math

from common import exit_, wait_for_stop, EnterAbort
import position


def _init():
    # Placeholder for one-time initialisation; currently nothing to do.
    pass


def _transmition(axes, start, finish, speed, increment):
    # NOTE: "_transmition" is a [sic] spelling kept for compatibility.
    #
    # Drive the selected axis ('ha' or 'de') from `start` to `finish` in
    # steps of `increment`, printing a tab-separated table of
    # (mean internal angle, motor-pulse / encoder-angle ratio) to stdout.
    #
    # Raises ValueError for an unknown axis name.
    import motors
    motors.init()
    if axes == 'ha':
        motor = motors._ha
    elif axes == 'de':
        motor = motors._de
    else:
        raise ValueError('Invalid axes!')
    current = position.get_internal()[axes]
    # If start > finish we sweep downwards: flip the sign of the step and of
    # the gear-alignment move below.
    reverse = start > finish
    # Motor pulses used to take up gear backlash before measuring.
    # (units: motor pulses — TODO confirm)
    gear_deadzone = -500000
    increment = math.fabs(increment)
    if reverse:
        increment *= -1
        gear_deadzone *= -1
    delta = start - current
    # Fast slew speed for the initial positioning move.
    motor.Task1_Speed = 3000
    sys.stdout.write("TRANSMISSION TEST\n")
    sys.stdout.write("Press Enter to abort\n")
    sys.stdout.write("moving to start position")
    sys.stdout.flush()
    motor.move_delta(delta, case=axes)
    wait_for_stop(motor, dots=True, after=1, skip_dots=5, enter_abort=True)
    # Move away and back by the deadzone so the gears are loaded in the
    # direction of the sweep (backlash take-up).
    sys.stdout.write("\naligning gears")
    sys.stdout.flush()
    motor.move_delta(-gear_deadzone)
    wait_for_stop(motor, dots=True, after=1, skip_dots=5, enter_abort=True)
    motor.move_delta(gear_deadzone)
    wait_for_stop(motor, dots=True, after=1, skip_dots=5, enter_abort=True)
    # Header of the measurement table.
    sys.stdout.write("\ntesting!\n")
    sys.stdout.write("tested_axes\t{}\n".format(axes))
    sys.stdout.write("increment\t{}\n".format(increment))
    sys.stdout.write("testing_speed\t{}\n".format(speed))
    sys.stdout.write("mean_angle\tratio\n")
    sys.stdout.flush()
    previous_angle = position.get_internal()[axes]
    previous_raw = position.get_normalised()[axes]
    # Switch to the requested measurement speed for the sweep itself.
    motor.Task1_Speed = speed
    while True:
        motor.move_delta(increment, case=axes)
        wait_for_stop(motor, after=1, enter_abort=True)
        current_angle = position.get_internal()[axes]
        current_raw = position.get_normalised()[axes]
        # motor.Position: pulses issued for the last move — presumably reset
        # per move by the controller; TODO confirm.
        pulses = motor.Position
        ratio = pulses / (current_raw - previous_raw)
        mean_angle = (previous_angle + current_angle) / 2
        sys.stdout.write("%s\t%s\n" % (mean_angle, ratio))
        sys.stdout.flush()
        # Stop once we've crossed `finish` in the sweep direction.
        if (current_angle > finish) != reverse:
            break
        sleep(0.2)
        previous_angle = current_angle
        previous_raw = current_raw


# Run as CLI client
def _main(args):
    # Dispatch the parsed docopt arguments to the transmission test.
    _init()
    if args['transmission']:
        if args['ha']:
            _transmition('ha', float(args['<angle_0>']), float(args['<angle_1>']),
                         int(args['<S>']), float(args['<I>']))
        if args['de']:
            _transmition('de', float(args['<angle_0>']), float(args['<angle_1>']),
                         int(args['<S>']), float(args['<I>']))


if __name__ == '__main__':
    args = docopt(__doc__, version=__doc__.split("\n")[0], options_first=True)
    if len(sys.argv) == 1 or args['help']:
        print __doc__.strip()
        exit(0)
    try:
        _main(args)
        exit_('OK')
    except EnterAbort:
        # User pressed Enter during a move; report and exit cleanly.
        exit_('ENTER_ABORT')
    except:
        raise
        #exit_('ERROR')
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model, password_validation
from django.contrib.auth.forms import PasswordResetForm, AuthenticationForm
from django.template import loader
from django.utils.translation import ugettext_lazy as _

from core import string_constants
from utils.send_mail import send_mail


class UserCreationForm(forms.ModelForm):
    """Base model form for user creation.

    Collects an email plus a password entered twice; password strength is
    validated only when ``settings.ENV`` is production.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match.")
    }
    password1 = forms.CharField(
        label=_("Password"),
        strip=False,
        widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
        help_text=password_validation.password_validators_help_text_html(),
    )
    password2 = forms.CharField(
        label=_("Password confirmation"),
        widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
        strip=False,
        help_text=_("Enter the same password as before, for verification."),
    )

    class Meta:
        model = get_user_model()
        fields = ('email',)
        field_classes = {'email': forms.EmailField}

    def __init__(self, *args, **kwargs):
        super(UserCreationForm, self).__init__(*args, **kwargs)
        # No ":" suffix after labels; autofocus the username (email) field.
        self.label_suffix = ""
        if self._meta.model.USERNAME_FIELD in self.fields:
            self.fields[self._meta.model.USERNAME_FIELD] \
                .widget.attrs['autofocus'] = True

    def clean_password2(self):
        """Ensure both password entries match; return the confirmed password."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2

    def _post_clean(self):
        super()._post_clean()
        # Run the configured password validators against the model instance,
        # but only in production (local/dev environments skip strength rules).
        password = self.cleaned_data.get('password2')
        if password and settings.ENV == string_constants.ENV_PRODUCTION:
            try:
                password_validation.validate_password(password, self.instance)
            except forms.ValidationError as error:
                self.add_error('password2', error)

    def save(self, commit=True):
        """Create the user with a properly hashed password."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user


class SignUpForm(UserCreationForm):
    """Sign up form which extends user creation form.

    Used with the registration view in order to register a user.
    """
    first_name = forms.CharField(max_length=100, required=False)
    last_name = forms.CharField(max_length=100, required=False)

    class Meta:
        model = get_user_model()
        fields = ('email', 'first_name', 'last_name', 'password1', 'password2')

    def __init__(self, *args, **kwargs):
        super(SignUpForm, self).__init__(*args, **kwargs)
        self.label_suffix = ""


class CustomPasswordResetForm(PasswordResetForm):
    """Custom password reset form passed to django's PasswordResetView class."""

    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email, html_email_template_name=None):
        """Render subject/body templates and deliver via the project mailer."""
        subject = loader.render_to_string(subject_template_name, context)
        # Subject must not contain newlines (header-injection safeguard).
        subject = ''.join(subject.splitlines())
        body = loader.render_to_string(email_template_name, context)
        send_mail(subject, body, [to_email], sender=from_email)

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(PasswordResetForm, self).__init__,
        # which skips PasswordResetForm itself in the MRO. Name this class so
        # the whole chain (including PasswordResetForm) is initialised.
        super(CustomPasswordResetForm, self).__init__(*args, **kwargs)
        self.label_suffix = ""


class LoginForm(AuthenticationForm):
    """Extended AuthenticationForm in order to remove label suffix."""

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        self.label_suffix = ""


class TwoFactorForm(forms.Form):
    # Single-field form for entering a 2FA token.
    token = forms.CharField(label=_('Authentication token'), max_length=12,
                            label_suffix='')
import logging
import os
from logging.config import fileConfig

from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.utils import get_openapi
from square_auth.auth import Auth

from model_manager.app.core.config import settings
from model_manager.app.core.event_handlers import start_app_handler, stop_app_handler
from model_manager.app.routers.api import api_router

# Keycloak-backed auth helper; currently unused because the auth dependency
# is commented out in get_app() below.
auth = Auth(
    keycloak_base_url=os.getenv("KEYCLOAK_BASE_URL", "https://square.ukp-lab.de")
)

logger = logging.getLogger(__name__)


def get_app() -> FastAPI:
    """Build and configure the FastAPI application.

    Loads logging config (best-effort), installs permissive CORS, mounts the
    API router under settings.API_PREFIX, and wires startup/shutdown handlers.
    """
    # Set logging config.
    try:
        fileConfig("logging.conf", disable_existing_loggers=False)
    except Exception:
        # Missing/invalid logging.conf is non-fatal; fall back to defaults.
        logger.info(
            "Failed to load 'logging.conf'. Continuing without configuring the server logger"
        )
    fast_app = FastAPI(
        title=settings.APP_NAME,
        version=settings.APP_VERSION,
        openapi_url=settings.OPENAPI_URL,
        # dependencies=[Depends(auth)],  # removing auth for now
    )
    # CORS: wide open (any origin/method/header, with credentials).
    fast_app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    fast_app.include_router(api_router, prefix=settings.API_PREFIX)
    # NOTE(review): the handler factories are *called* here, so they must
    # return the actual callbacks — confirm against event_handlers module.
    fast_app.add_event_handler("startup", start_app_handler())
    fast_app.add_event_handler("shutdown", stop_app_handler())
    return fast_app


def custom_openapi():
    """Change api paths as per the end-user requirements.

    Rebuilds the OpenAPI schema with every path getting the API_PREFIX
    segment inserted after the leading slash (e.g. /x/y -> /<prefix>/x/y).
    The result is cached on app.openapi_schema.
    """
    if app.openapi_schema:
        return app.openapi_schema
    openapi_schema = get_openapi(
        title=settings.APP_NAME,
        version=settings.APP_VERSION,
        description="API reference for model management.",
        routes=app.routes,
    )
    replaced_keys = dict()
    # NOTE(review): prefix comes from the API_PREFIX env var here but from
    # settings.API_PREFIX in get_app(); presumably these agree — confirm.
    prefix = os.getenv("API_PREFIX", "models")
    for api in openapi_schema["paths"].keys():
        # Insert the prefix as the second path segment: "/a/b" -> "/prefix/a/b".
        api_split = list(api.split("/"))
        api_split.insert(2, prefix)
        api_mod = "/".join(api_split)
        replaced_keys[api] = api_mod
    new_openapi_paths = {
        replaced_keys[k]: v for k, v in openapi_schema["paths"].items()
    }
    openapi_schema["paths"] = new_openapi_paths
    app.openapi_schema = openapi_schema
    return app.openapi_schema


app = get_app()
# Pre-compute and cache the rewritten OpenAPI schema at import time.
app.openapi_schema = custom_openapi()

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8004)
import json

# BUG FIX: the "flask.ext.*" namespace was removed in Flask 1.0; the module
# has always been importable directly as flask_sqlalchemy.
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()
db = SQLAlchemy()


class BaseModel(object):
    """Mixin adding JSON/dict serialization of public attributes.

    Subclasses must define a ``TAG`` class attribute (used as the top-level
    JSON key) and may define ``_child_classes``, a tuple of BaseModel
    subclasses that should be recursively serialized via ``to_dict()``.

    NOTE: this module targets Python 2 (it references ``long``/``unicode``).
    """

    def to_json(self):
        """Serialize to a JSON string ``{TAG: {...public attrs...}}``.

        Raises NotImplementedError if the subclass did not define TAG.
        """
        if not hasattr(self, 'TAG'):
            raise NotImplementedError
        return json.dumps({self.TAG: self.to_dict()}, indent=2)

    def to_dict(self):
        """Return the public attributes as a plain dict."""
        return self.get_pub_vars()

    def get_pub_vars(self):
        """Collect serializable public attributes.

        Skips None values, callables, names starting with '_', and the TAG
        marker itself.
        """
        pub_vars = {}
        ignore_vars = ['TAG']
        attrs = dir(self)
        for attr_name in attrs:
            attr = getattr(self, attr_name)
            if attr is None or attr_name in ignore_vars or \
                    hasattr(attr, '__call__'):
                continue
            if attr_name[0] != '_':
                attr_value = self._attribute_to_dict(attr_name)
                if attr_value is not None:
                    pub_vars[attr_name] = attr_value
        return pub_vars

    def _attribute_to_dict(self, attr_name):
        """Convert one attribute to a JSON-friendly value, or None to skip it.

        Values of types in ``_child_classes`` (and such items inside lists)
        are recursed via ``to_dict()``; other BaseModel instances are skipped
        to avoid serializing unrelated ORM objects.
        """
        ret = None
        accepted_types = (int, long, float, str, unicode, bool, dict, list,
                          BaseModel)
        if hasattr(self, '_child_classes'):
            recurse_classes = self._child_classes
        else:
            recurse_classes = ()
        attr = getattr(self, attr_name)
        if isinstance(attr, accepted_types):
            if isinstance(attr, list):
                dict_list = []
                for item in attr:
                    if isinstance(item, accepted_types):
                        if isinstance(item, recurse_classes):
                            dict_list.append(item.to_dict())
                        else:
                            dict_list.append(item)
                ret = dict_list
            elif isinstance(attr, recurse_classes):
                ret = attr.to_dict()
            elif not isinstance(attr, BaseModel):
                ret = attr
        return ret
import unittest
from flask import Flask
from flask_testing import TestCase
from app import app, db_session_users
from schemas import base_users
from migrations.setup_api import add_plans
from models import *
from settings import SECRET_JWT
import jwt
import datetime as dt
import json
import os
import time
from random import shuffle
import factories
import schemas.jurasticsearch as jsearch
import migrations.setup_api as setup_db
from helpers.agency_helper import DefaultAgencies
import logging

this_folder = os.path.dirname(os.path.realpath(__file__))

# hack to get before all test suite behavior, surely a better way than this
global SETUP_RUN
SETUP_RUN = False

# turn down the log level for some noisy libraries when unit tests fail
# as far as i can tell it isn't super useful, and removing these lines would bring it back,
# if we ever found otherwise
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('factory').setLevel(logging.ERROR)


class AppTest(TestCase):
    """Shared base class for API tests: seeds the users DB and the
    Elasticsearch test index once per suite, and creates a standard cast of
    users (admin, qa, suspended, internal, contributor, ...) per test."""

    def create_app(self):
        # flask_testing hook: return the app under test in TESTING mode.
        app.config['TESTING'] = True
        return app

    # helper method to create a user and return the login token
    def create_user_and_return_id_and_token(self, roles=[], suspended=None,
                                            is_internal_user=None, enabled=None):
        # NOTE(review): mutable default for `roles` — safe here because it is
        # only assigned, never mutated, but a None default would be cleaner.
        # create a user to login with
        user = factories.UserFactory(password='foobar')
        user.roles = roles
        if suspended is not None:
            user.suspended = suspended
        if is_internal_user is not None:
            user.is_internal_user = is_internal_user
        if enabled is not None:
            user.enabled = enabled
        db_session_users.add(user)
        db_session_users.flush()
        db_session_users.refresh(user)

        # create a token to use for authorization for all api calls for the user created above
        seconds_until_expiration = 60 * 60 * 24 * 14  # 14 days
        expiration_datetime = dt.datetime.utcnow() + dt.timedelta(seconds=seconds_until_expiration)
        return user, jwt.encode({'user_id': user.id, 'exp': expiration_datetime}, SECRET_JWT)

    @classmethod
    def setUpClass(cls):
        # break off here if we've run the setup once for before all suite behavior, since this gets called once
        # per subclass of AppTest.
        # if we ever truly want class-agnostic per-class steps, we could name this method something else and call
        # it from here
        if SETUP_RUN:
            return

        cls.maxDiff = None

        # Drop any leftover state, then recreate schema and search index.
        cls.tearDownClassForRealz()
        base_users.BaseUsers.metadata.create_all(base_users.engine_users)
        jsearch.setup_test_index()
        time.sleep(0.2)  ## avoid race conditions

        # Load JSON fixtures and keep only the default agencies.
        fixtures = json.loads(open(this_folder + '/fixtures/fixtures_201712.json').read())
        all_agencies = json.loads(open(this_folder + '/fixtures/agencies_20160721.json').read())['agencies']
        default_agency_lookup = set(DefaultAgencies)
        cls.agencies = [a for a in all_agencies if a['id'] in default_agency_lookup]
        cls.all_documents = fixtures['documents']
        cls.acts = fixtures['acts']
        cls.regulations = fixtures['named_regulations']
        cls.concept_mentions = fixtures['concepts']
        cls.jurisdictions = fixtures['jurisdictions']
        cls.banks = fixtures['banks']
        cls.document_citations = fixtures['document_citations']
        cls.topics = fixtures['topics']
        cls.all_topics = fixtures['all_topics']
        cls.news_sources = fixtures['news_sources']

        # Seed a handful of queued topic judgments against the first document.
        topic_judgments = []
        topic = {"id": 1, "name": "General Provisions"}
        for i in range(0, 5):
            topic_judgments.append({
                'topic_id': topic['id'],
                'topic_name': topic['name'],
                'doc_id': cls.all_documents[0]['id'],
                'status': 'queued'
            })
        db_session_users.add_all([base_users.TopicJudgment(x) for x in topic_judgments])
        db_session_users.commit()

        # once everything is shoved into the db that we need to read, index it once!
        ## TODO: not yet covered
        # indexer.index_concepts()
        # indexer.index_dockets()
        for agency in cls.agencies:
            jsearch.index_jsearch_dict(agency, 'agencies')
        for act in cls.acts:
            jsearch.index_jsearch_dict(act, 'acts')
        for reg in cls.regulations:
            jsearch.index_jsearch_dict(reg, 'named_regulations')
        for doc in cls.all_documents:
            jsearch.index_jsearch_dict(doc, 'documents')
        for cm in cls.concept_mentions:
            jsearch.index_jsearch_dict(cm, 'concepts')
        for j in cls.jurisdictions:
            jsearch.index_jsearch_dict(j, 'jurisdictions')
        for b in cls.banks:
            jsearch.index_jsearch_dict(b, 'banks')
        for dc in cls.document_citations:
            jsearch.index_jsearch_dict(dc, 'document_citations')
        for t in cls.topics:
            jsearch.index_jsearch_dict(t, 'topics')
        for t in cls.all_topics:
            jsearch.index_jsearch_dict(t, 'all_topics')
        for n in cls.news_sources:
            jsearch.index_jsearch_dict(n, 'news_sources')
        time.sleep(1.0)  ## avoid race conditions

        # make sure we note that we've run this method once
        global SETUP_RUN
        SETUP_RUN = True

    def setUp(self):
        # Standard cast of users/tokens used across the suite.
        self.user, self.token = self.create_user_and_return_id_and_token()
        # create a second user/token so the user updates tests are independent of all else
        self.new_user, self.new_user_token = self.create_user_and_return_id_and_token()
        self.admin_user, self.admin_user_token = self.create_user_and_return_id_and_token(roles=['admin'])
        self.qa_user, self.qa_user_token = self.create_user_and_return_id_and_token(roles=['qa'])
        self.suspended_user, self.suspended_user_token = self.create_user_and_return_id_and_token(suspended=True)
        self.internal_user, self.internal_user_token = self.create_user_and_return_id_and_token(is_internal_user=True)
        self.contributor_user, self.contributor_user_token = self.create_user_and_return_id_and_token(
            roles=['contributor'])
        self.unenabled_user, self.unenabled_user_token = self.create_user_and_return_id_and_token(enabled=False)

        # A fresh enabled API key for key-authenticated endpoints.
        api_key = ApiKey({'enabled': True})
        api_key.gen_token()
        db_session_users.add(api_key)
        db_session_users.commit()
        db_session_users.refresh(api_key)
        self.api_key = api_key

        # for the sake of testing populate plans table
        add_plans()

        # n.b. this should be removed when mainstream news category is launched
        os.environ["MAINSTREAM_NEWS_ENABLED"] = "true"

    # hackery to ensure register tests still work, multiple tests depended on the same email address, which
    # in a before/all world must be made unique for each independent test
    def tearDown(self):
        foobar_user = db_session_users.query(User).filter_by(email="foobar@example.com").first()
        if foobar_user:
            # Delete dependent rows first to satisfy FK constraints.
            db_session_users.query(UserAgency).filter_by(user_id=foobar_user.id).delete()
            db_session_users.commit()
            db_session_users.delete(foobar_user)
            db_session_users.commit()

    @classmethod
    def tearDownClassForRealz(cls):
        # Full teardown: drop DB schema and the ES test index.
        db_session_users.remove()
        base_users.BaseUsers.metadata.drop_all(base_users.engine_users)
        # remove the testing index if it exists on elasticsearch
        # FIXME: this is a total hack since setUp and tearDown run on each test, but we don't want indexing
        # to run on every single test
        try:
            jsearch.client.indices.delete(index='testing')
        except:
            pass
# Generated by Django 2.1 on 2018-09-17 14:32 from django.db import migrations, models import django.utils.timezone import model_utils.fields class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20180904_1503'), ] operations = [ migrations.CreateModel( name='PromoCode', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('start', models.DateTimeField(blank=True, null=True, verbose_name='start')), ('end', models.DateTimeField(blank=True, null=True, verbose_name='end')), ('percentage', models.FloatField(default=0.0)), ('code', models.CharField(max_length=10)), ], options={ 'abstract': False, }, ), ]
import numpy as np
import tensorflow as tf
import re
import time

########## PART 1: DATA PREPROCESSING ##########

# Importing the datasets (Cornell movie-dialogs corpus files).
line = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conversation = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')

# Map each line id (e.g. 'L1045') to its utterance text.
id2line = {}
for lines in line:
    _lines = lines.split(' +++$+++ ')
    if len(_lines) == 5:
        id2line[_lines[0]] = _lines[4]

# Creating a list of all the conversations: each entry is a list of line ids.
conversations_ids = []
for conversations in conversation[:-1]:
    # Last field looks like "['L1', 'L2', ...]" — strip brackets/quotes/spaces.
    _conversations = conversations.split(' +++$+++ ')[-1][1:-1].replace("'", "").replace(" ", "")
    conversations_ids.append(_conversations.split(','))

# Build parallel question/answer lists from consecutive lines of a conversation.
questions = []
answers = []
for conversation in conversations_ids:
    for i in range(len(conversation) - 1):
        questions.append(id2line[conversation[i]])
        answers.append(id2line[conversation[i + 1]])


def clean_text(text):
    """Lower-case text, expand common English contractions and strip
    punctuation, returning the cleaned string."""
    text = text.lower()
    text = re.sub(r"i'm", "i am", text)
    text = re.sub(r"he's", "he is", text)
    text = re.sub(r"she's", "she is", text)
    text = re.sub(r"that's", "that is", text)
    text = re.sub(r"what's", "what is", text)
    text = re.sub(r"where's", "where is", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"\'ve", " have", text)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
    text = re.sub(r"won't", "will not", text)
    text = re.sub(r"can't", "can not", text)
    text = re.sub(r"[-()\"#/;:{}+|.?,]", "", text)
    return text


# Cleaning the questions and answers.
clean_questions = [clean_text(question) for question in questions]
clean_answers = [clean_text(answer) for answer in answers]

# Count occurrences of every word across questions and answers.
word2count = {}
for question in clean_questions:
    for word in question.split():
        word2count[word] = word2count.get(word, 0) + 1
for answer in clean_answers:
    for word in answer.split():
        word2count[word] = word2count.get(word, 0) + 1

# Two dictionaries mapping question words and answer words to unique integers;
# words below the frequency threshold are dropped (later replaced by <OUT>).
threshold = 20
questionswords2int = {}
word_number = 0
for word, count in word2count.items():
    if count >= threshold:
        questionswords2int[word] = word_number
        word_number += 1
answerswords2int = {}
word_number = 0
for word, count in word2count.items():
    if count >= threshold:
        answerswords2int[word] = word_number
        word_number += 1

# Adding the special tokens to both dictionaries.
# NOTE(review): `len(dict) + 1` leaves one unused id between the vocabulary
# and the tokens; kept as-is since downstream sizes use "+ 1" consistently.
tokens = ['<PAD>', '<EOS>', '<OUT>', '<SOS>', ]
for token in tokens:
    questionswords2int[token] = len(questionswords2int) + 1
for token in tokens:
    answerswords2int[token] = len(answerswords2int) + 1

# Inverse dictionary of answerswords2int (int -> word), used at inference.
answersints2word = {w_i: w for w, w_i in answerswords2int.items()}

# Append the End-Of-String token to every answer.
for i in range(len(clean_answers)):
    clean_answers[i] += ' <EOS>'

# Translate all questions and answers into integers,
# replacing filtered-out words by <OUT>.
questions_into_int = []
for question in clean_questions:
    ints = []
    for word in question.split():
        if word not in questionswords2int:
            ints.append(questionswords2int['<OUT>'])
        else:
            ints.append(questionswords2int[word])
    questions_into_int.append(ints)
answers_into_int = []
for answer in clean_answers:
    ints = []
    for word in answer.split():
        if word not in answerswords2int:
            ints.append(answerswords2int['<OUT>'])
        else:
            ints.append(answerswords2int[word])
    answers_into_int.append(ints)

# Sort question/answer pairs by question length (1..25) to speed up training.
sorted_clean_questions = []
sorted_clean_answers = []
for length in range(1, 25 + 1):
    for index, question_ints in enumerate(questions_into_int):
        if len(question_ints) == length:
            sorted_clean_questions.append(questions_into_int[index])
            # BUG FIX: the original appended questions_into_int[index] here,
            # making the "answers" a copy of the questions.
            sorted_clean_answers.append(answers_into_int[index])

########## PART 2: BUILDING THE SEQ2SEQ MODEL ##########


def model_inputs():
    """Create placeholders for inputs, targets, learning rate and dropout
    keep-probability; returns (inputs, targets, lr, keep_prob)."""
    # BUG FIX: tensor name typo 'intput' -> 'input'.
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='target')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return inputs, targets, lr, keep_prob


def preprocess_targets(targets, word2int, batch_size):
    """Prepend <SOS> and drop the last token of each target sequence, giving
    the decoder its shifted input pattern."""
    left_side = tf.fill([batch_size, 1], word2int['<SOS>'])
    right_side = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
    preprocessed_targets = tf.concat([left_side, right_side], 1)
    return preprocessed_targets


def encoder_rnn(rnn_inputs, rnn_size, num_layers, keep_prob, sequence_length):
    """Stacked LSTM encoder with dropout; returns the encoder state."""
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob=keep_prob)
    encoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
    # BUG FIX: tf.nn.bidirectional_rnn does not exist in TF 1.x; the dynamic
    # variant matches this call signature (tensor inputs + sequence_length).
    encoder_output, encoder_state = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=encoder_cell,
        cell_bw=encoder_cell,
        sequence_length=sequence_length,
        inputs=rnn_inputs,
        dtype=tf.float32)
    return encoder_state


def decode_training_set(encoder_state, decoder_cell, decoder_embedded_input,
                        sequence_length, decoding_scope, output_function,
                        keep_prob, batch_size):
    """Attention decoder for training; returns logits via output_function."""
    attention_states = tf.zeros([batch_size, 1, decoder_cell.output_size])
    attention_keys, attention_values, attention_score_function, attention_construct_function = \
        tf.contrib.seq2seq.prepare_attention(attention_states,
                                             attention_option='bahdanau',
                                             num_units=decoder_cell.output_size)
    training_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_train(
        encoder_state[0],
        attention_keys,
        attention_values,
        attention_score_function,
        attention_construct_function,
        name="attn_dec_train")
    decoder_output, decoder_final_state, decoder_final_context_state = \
        tf.contrib.seq2seq.dynamic_rnn_decoder(decoder_cell,
                                               training_decoder_function,
                                               decoder_embedded_input,
                                               sequence_length,
                                               scope=decoding_scope)
    decoder_output_dropout = tf.nn.dropout(decoder_output, keep_prob)
    return output_function(decoder_output_dropout)


def decode_test_set(encoder_state, decoder_cell, decoder_embeddeding_matrix,
                    sos_id, eos_id, maximum_length, num_words,
                    sequence_length, decoding_scope, output_function,
                    keep_prob, batch_size):
    """Attention decoder for inference (greedy); returns test predictions."""
    attention_states = tf.zeros([batch_size, 1, decoder_cell.output_size])
    attention_keys, attention_values, attention_score_function, attention_construct_function = \
        tf.contrib.seq2seq.prepare_attention(attention_states,
                                             attention_option='bahdanau',
                                             num_units=decoder_cell.output_size)
    test_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_inference(
        output_function,
        encoder_state[0],
        attention_keys,
        attention_values,
        attention_score_function,
        attention_construct_function,
        decoder_embeddeding_matrix,
        sos_id,
        eos_id,
        maximum_length,
        num_words,
        name="attn_dec_inf")
    test_predictions, decoder_final_state, decoder_final_context_state = \
        tf.contrib.seq2seq.dynamic_rnn_decoder(decoder_cell,
                                               test_decoder_function,
                                               scope=decoding_scope)
    return test_predictions


def decoder_rnn(decoder_embedded_input, decoder_embeddings_matrix,
                encoder_state, num_words, sequence_length, rnn_size,
                num_layers, word2int, keep_prob, batch_size):
    """Full decoder: builds the cell + output layer, then training and test
    prediction graphs; returns (training_predictions, test_predictions)."""
    with tf.variable_scope("decoding") as decoding_scope:
        lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
        lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob=keep_prob)
        decoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
        weights = tf.truncated_normal_initializer(stddev=0.1)
        biases = tf.zeros_initializer()
        # BUG FIX: keyword typo 'weights_intitializer' -> 'weights_initializer'.
        output_function = lambda x: tf.contrib.layers.fully_connected(
            x,
            num_words,
            None,
            scope=decoding_scope,
            weights_initializer=weights,
            biases_initializer=biases)
        training_predictions = decode_training_set(encoder_state,
                                                   decoder_cell,
                                                   decoder_embedded_input,
                                                   sequence_length,
                                                   decoding_scope,
                                                   output_function,
                                                   keep_prob,
                                                   batch_size)
        # BUG FIX: method typo 'reuse_ariables' -> 'reuse_variables'.
        decoding_scope.reuse_variables()
        # BUG FIXES: use the actual parameter name (decoder_embeddings_matrix,
        # not the undefined decoder_embeddeding_matrix) and pass the missing
        # sequence_length argument required by decode_test_set's signature.
        test_predictions = decode_test_set(encoder_state,
                                           decoder_cell,
                                           decoder_embeddings_matrix,
                                           word2int['<SOS>'],
                                           word2int['<EOS>'],
                                           sequence_length - 1,
                                           num_words,
                                           sequence_length,
                                           decoding_scope,
                                           output_function,
                                           keep_prob,
                                           batch_size)
    return training_predictions, test_predictions


def seq2seq(inputs, targets, keep_prob, batch_size, sequence_length,
            answers_num_words, questions_num_words, encoder_embedding_size,
            decoder_embedding_size, rnn_size, num_layers, questionswords2int):
    """Assemble the full seq2seq graph; returns
    (training_predictions, test_predictions)."""
    encoder_embedded_input = tf.contrib.layers.embed_sequence(
        inputs,
        answers_num_words + 1,
        initializer=tf.random_uniform_initializer(0, 1))
    encoder_state = encoder_rnn(encoder_embedded_input, rnn_size, num_layers,
                                keep_prob, sequence_length)
    preprocessed_targets = preprocess_targets(targets, questionswords2int, batch_size)
    decoder_embeddings_matrix = tf.Variable(
        tf.random_uniform([questions_num_words + 1, decoder_embedding_size], 0, 1))
    decoder_embedded_input = tf.nn.embedding_lookup(decoder_embeddings_matrix,
                                                    preprocessed_targets)
    training_predictions, test_predictions = decoder_rnn(decoder_embedded_input,
                                                         decoder_embeddings_matrix,
                                                         encoder_state,
                                                         questions_num_words,
                                                         sequence_length,
                                                         rnn_size,
                                                         num_layers,
                                                         questionswords2int,
                                                         keep_prob,
                                                         batch_size)
    # BUG FIX: the original built the graph but never returned it.
    return training_predictions, test_predictions


########## PART 3: TRAINING SETUP ##########

# Setting the hyperparameters.
epochs = 100
batch_size = 64
rnn_layers = 3
encoding_embedding_size = 512
decoding_embedding_size = 512
learning_rate_decay = 0.9
min_learning_rate = 0.0001
keep_probability = 0.5

# Defining a session.
tf.reset_default_graph()
session = tf.InteractiveSession()

# Loading the model inputs.
inputs, targets, lr, keep_prob = model_inputs()

# Setting the sequence length (default 25, matching the sort cutoff above).
sequence_length = tf.placeholder_with_default(25, None, name='sequence_length')

# Getting the shape of the inputs tensor.
input_shape = tf.shape(inputs)
# %%
# import libraries
import os
import csv
from pathlib import Path

# Repository root (parent of the directory containing this script).
ROOT_PATH = Path(os.path.dirname(__file__)).parent

# %%
# read mimic data
procedures_data_path = f'{ROOT_PATH}/data/mimic/PROCEDURES_ICD.csv'
admission_data_path = f'{ROOT_PATH}/data/mimic/ADMISSIONS.csv'

procedures_columns = []
procedures = []
with open(procedures_data_path, newline='') as f:
    procedures = list(csv.reader(f))
    procedures_columns = procedures.pop(0)  # remove first row (column names)

admission_columns = []
admissions = []
with open(admission_data_path, newline='') as f:
    admissions = list(csv.reader(f))
    admission_columns = admissions.pop(0)

# %%
# extract and write data
# create procedure dictionary with adm_id as key, list of procedure_icd codes as values
procedures_dict = {}
for p in procedures:
    adm_id = p[2]    # HADM_ID column of PROCEDURES_ICD
    proc_icd = p[4]  # ICD9_CODE column of PROCEDURES_ICD
    if adm_id in procedures_dict:
        procedures_dict[adm_id].append(proc_icd)
    else:
        procedures_dict[adm_id] = [proc_icd]

#assert len(procedures_dict.keys()) == len(admissions)

# %%
# Emit one (patient, relation, procedure, date) quadruple per procedure of
# each admission.
patient_to_procedure = []
for adm in admissions:
    patient_id = adm[1]
    adm_id = adm[2]
    adm_time = adm[3].split()[0]  # get only date without time
    if adm_id not in procedures_dict:
        continue
    # BUG FIX: the original rebound the module-level `procedures` list here,
    # silently shadowing/clobbering the raw CSV rows.
    adm_procedures = procedures_dict[adm_id]
    for procedure in adm_procedures:
        quadruple = [patient_id, 'patient_to_procedure', procedure, adm_time]
        patient_to_procedure.append(quadruple)

# remove duplicates
# (join/split dedup assumes '|' never appears in any field — TODO confirm)
unique_patient_to_procedure = list(set(['|'.join(pd) for pd in patient_to_procedure]))
patient_to_procedure = [upd.split('|') for upd in unique_patient_to_procedure]

# %%
# write data
write_path = f'{ROOT_PATH}/data/kg/patient_to_procedure.tsv'
# BUG FIX: csv writers require newline='' on the file handle (otherwise rows
# get doubled line endings on Windows, per the csv module docs).
with open(write_path, 'w', newline='') as outfile:
    writer = csv.writer(outfile, delimiter='\t')
    writer.writerows(patient_to_procedure)
import os

from google.cloud import pubsub_v1

from jrdb.client import JRDBClient
from jrdb import urlcodec


def main(data, context):
    """Cloud Function entry point: fetch the latest JRDB URLs, compress them,
    and publish the result to the 'jrdb-urls' Pub/Sub topic.

    `data` and `context` are the standard (unused) trigger arguments.
    """
    credentials = (os.environ['JRDB_ID'], os.environ['JRDB_PW'])
    client = JRDBClient(credentials)

    latest_urls = client.fetch_latest_urls()
    print(f"Extracted urls: {latest_urls}")

    encoded_urls = urlcodec.encode(latest_urls)
    print(f"Compressed urls: {encoded_urls}")

    publisher_client = pubsub_v1.PublisherClient()
    topic = publisher_client.topic_path('otomarukanta-a', 'jrdb-urls')
    publisher_client.publish(topic, encoded_urls)
#!/usr/bin/env python import MySQLdb # INFORMATION TO YOUR DATABASE MUST BE ENTERED HERE mydb = MySQLdb.connect(host='localhost', user='root', passwd='lobster', db='mydb') cursor = mydb.cursor() try: cursor.execute("DROP TABLE IF EXISTS m;") except MySQLdb.OperationalError, MySQLdb.Warning: print "Table never existed in the first place" try: # Don't need PK cuz we need to count our dupes here cursor.execute("CREATE TABLE m (addr VARCHAR(255) NOT NULL);") print "Success" except MySQLdb.OperationalError: print "Error creating 'm' table" # now we create the m table so I can use it def getrows(filename): with open(filename) as f: for line in f: yield line for item in getrows('dummy.txt'): item = item.rstrip() query = 'INSERT INTO m VALUES ("%s");' % item cursor.execute(query) mydb.commit() cursor.close() print "Done, successfully created 'm' table"
from ftw.bumblebee.mimetypes import is_mimetype_supported
from opengever.bumblebee import is_bumblebee_feature_enabled
from opengever.document.behaviors import IBaseDocument
from opengever.document.document import IDocumentSchema
from opengever.document.interfaces import ICheckinCheckoutManager
from opengever.document.interfaces import IFileActions
from opengever.officeconnector.helpers import is_officeconnector_attach_feature_enabled  # noqa
from opengever.officeconnector.helpers import is_officeconnector_checkout_feature_enabled  # noqa
from opengever.trash.trash import ITrashable
from opengever.trash.trash import TrashError
from plone import api
from zope.component import adapter
from zope.component import getMultiAdapter
from zope.interface import implementer
from zope.interface import Interface


@implementer(IFileActions)
@adapter(IBaseDocument, Interface)
class BaseDocumentFileActions(object):
    """Define availability for actions and action sets on IBaseDocument.

    Methods should return availability for one single action.

    Conservative base adapter: everything checkout/checkin related is
    disabled here and only enabled by subclasses (see DocumentFileActions).
    """

    def __init__(self, context, request):
        # context: the IBaseDocument object; request: the current request.
        self.context = context
        self.request = request

    def is_versioned(self):
        # Base documents never represent a historical version.
        return False

    def is_edit_metadata_action_available(self):
        # Plain permission check; subclasses add checkout/lock conditions.
        return api.user.has_permission(
            'Modify portal content', obj=self.context)

    def is_any_checkout_or_edit_available(self):
        return False

    def is_oc_direct_checkout_action_available(self):
        return False

    def is_oc_direct_edit_action_available(self):
        return False

    def is_oc_zem_checkout_action_available(self):
        return False

    def is_oc_zem_edit_action_available(self):
        return False

    def is_oc_unsupported_file_checkout_action_available(self):
        return False

    def is_checkin_without_comment_available(self):
        return False

    def is_checkin_with_comment_available(self):
        return False

    def is_cancel_checkout_action_available(self):
        return False

    def is_download_copy_action_available(self):
        # Downloading requires an actual file on the document.
        return self.context.has_file()

    def is_attach_to_email_action_available(self):
        # Needs both the OfficeConnector attach feature and a file.
        return (
            is_officeconnector_attach_feature_enabled()
            and self.context.has_file())

    def is_oneoffixx_retry_action_available(self):
        return False

    def is_open_as_pdf_action_available(self):
        # PDF preview requires the bumblebee feature, a file, and a
        # mimetype that the bumblebee converter supports.
        if not is_bumblebee_feature_enabled():
            return False
        if not self.context.has_file():
            return False
        mime_type_item = self.context.get_mimetype()
        if not mime_type_item:
            return False
        # get_mimetype() returns a sequence; the first item is the type name.
        return is_mimetype_supported(mime_type_item[0])

    def is_revert_to_version_action_available(self):
        return False

    def is_trash_document_available(self):
        # Delegate to the trash behavior; no exception is raised here.
        trasher = ITrashable(self.context)
        return trasher.verify_may_trash(raise_on_violations=False)

    def is_untrash_document_available(self):
        trasher = ITrashable(self.context)
        return trasher.verify_may_untrash(raise_on_violations=False)


@implementer(IFileActions)
@adapter(IDocumentSchema, Interface)
class DocumentFileActions(BaseDocumentFileActions):
    """File-action availability for real documents (IDocumentSchema).

    Adds checkout/checkin, OfficeConnector and versioning awareness on
    top of the base adapter.
    """

    def is_versioned(self):
        # A request showing a historical version carries a numeric
        # 'version_id' parameter (Python 2: basestring covers str/unicode).
        version_id = self.request.get('version_id', '')
        if isinstance(version_id, basestring):
            return version_id.isdigit()
        try:
            int(version_id)
        except ValueError:
            return False
        else:
            return True

    def is_any_checkout_or_edit_available(self):
        # Checkout/edit never applies to historical versions or file-less
        # documents.
        return (
            not self.is_versioned()
            and self.context.has_file()
            and self.context.is_checkout_and_edit_available())

    def is_edit_metadata_action_available(self):
        manager = getMultiAdapter(
            (self.context, self.request), ICheckinCheckoutManager)
        # Additionally require: not a version view, not locked, and not
        # checked out by somebody else.
        return (
            super(DocumentFileActions, self).is_edit_metadata_action_available()
            and not self.is_versioned()
            and not manager.is_locked()
            and not manager.is_checked_out_by_another_user())

    def is_oc_direct_checkout_action_available(self):
        # OfficeConnector checkout feature on, file editable via OC, and
        # not yet checked out.
        return (self.is_any_checkout_or_edit_available()
                and self.context.is_office_connector_editable()
                and not self.context.is_checked_out()
                and is_officeconnector_checkout_feature_enabled())

    def is_oc_direct_edit_action_available(self):
        # Same as above, but for an already checked out document.
        return (self.is_any_checkout_or_edit_available()
                and self.context.is_office_connector_editable()
                and self.context.is_checked_out()
                and is_officeconnector_checkout_feature_enabled())

    def is_oc_zem_checkout_action_available(self):
        # ZEM variant is the fallback when the OC checkout feature is OFF.
        return (self.is_any_checkout_or_edit_available()
                and self.context.is_office_connector_editable()
                and not self.context.is_checked_out()
                and not is_officeconnector_checkout_feature_enabled())

    def is_oc_zem_edit_action_available(self):
        return (self.is_any_checkout_or_edit_available()
                and self.context.is_office_connector_editable()
                and self.context.is_checked_out()
                and not is_officeconnector_checkout_feature_enabled())

    def is_oc_unsupported_file_checkout_action_available(self):
        # Checkout for file types OfficeConnector cannot edit.
        return (self.is_any_checkout_or_edit_available()
                and not self.context.is_office_connector_editable()
                and not self.context.is_checked_out())

    def is_checkin_without_comment_available(self):
        # Locked documents may only be checked in with a comment -- TODO
        # confirm this asymmetry with is_checkin_with_comment_available
        # is intentional.
        return (not self.is_versioned()
                and self.is_checkin_with_comment_available()
                and not self.context.is_locked())

    def is_checkin_with_comment_available(self):
        return (not self.is_versioned()
                and self.context.has_file()
                and self.context.is_checkin_allowed())

    def is_cancel_checkout_action_available(self):
        if not self.context.has_file():
            return False

        manager = getMultiAdapter(
            (self.context, self.request), ICheckinCheckoutManager)
        return manager.is_cancel_allowed()

    def is_download_copy_action_available(self):
        """Disable downloading copies when the document is checked out
        by another user.
        """
        manager = getMultiAdapter(
            (self.context, self.request), ICheckinCheckoutManager)
        return (
            super(DocumentFileActions, self).is_download_copy_action_available()
            and not manager.is_checked_out_by_another_user())

    def is_attach_to_email_action_available(self):
        manager = getMultiAdapter(
            (self.context, self.request), ICheckinCheckoutManager)
        return (
            super(DocumentFileActions, self).is_attach_to_email_action_available()
            and not manager.is_checked_out_by_another_user())

    def is_oneoffixx_retry_action_available(self):
        return self.context.is_oneoffixx_creatable()

    def is_revert_to_version_action_available(self):
        # Reverting only makes sense while viewing a version, and requires
        # that a checkout would currently be allowed.
        manager = getMultiAdapter(
            (self.context, self.request), ICheckinCheckoutManager)
        return (self.is_versioned()
                and self.context.has_file()
                and not self.context.is_checked_out()
                and manager.is_checkout_allowed())
from unittest import TestCase

from chat_transformer.commands import Command, InvalidActionError


class CommandTests(TestCase):
    """Unit tests for chat_transformer.commands.Command.

    run_action() returns a response object exposing `value` (the OSC value
    to send, or None when nothing should be sent) and `irc_message`.
    """

    def test_str_representation(self):
        """
        String representation of an OSCCommand should be its `name`
        """
        command = Command(
            name='My Command',
        )
        self.assertEqual(str(command), 'My Command')

    def test_invalid_actions_raises_invalid_action_error(self):
        """
        Passing an action not in `allowed_actions` to `run_action`
        should raise an `InvalidActionError`
        """
        command = Command(
            name='My Command',
            allowed_actions=['set', 'get'],
        )
        with self.assertRaises(InvalidActionError) as error:
            command.run_action('increment')

        # Note: the error message upper-cases the command name.
        self.assertEqual(
            str(error.exception),
            '"increment" is not a valid action for command "MY COMMAND"',
        )

    def test_increment_inside_range(self):
        """
        Running the "increment" action should add the command's
        `delta` to the current value. Repeated runs should continue
        to increase the value
        """
        command = Command(
            name='My Command',
            initial=0.5,
            delta=0.07,
            max=1.0,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('increment')
        self.assertAlmostEqual(response.value, 0.57)
        self.assertAlmostEqual(command.current, 0.57)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 0.57 (Max 1.0)'
        )

        response = command.run_action('increment')
        self.assertAlmostEqual(response.value, 0.64)
        self.assertAlmostEqual(command.current, 0.64)

        response = command.run_action('increment')
        self.assertAlmostEqual(response.value, 0.71)
        self.assertAlmostEqual(command.current, 0.71)

    def test_increment_overlapping_range(self):
        """
        if the delta + current is greater than the max, the `increment`
        value should be clamped down to max, but an osc_value should
        still be sent
        """
        command = Command(
            name='My Command',
            initial=0.97,
            delta=0.07,
            max=1.0,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('increment')
        self.assertAlmostEqual(response.value, 1.0)
        self.assertAlmostEqual(command.current, 1.0)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 1.0 (Max 1.0)'
        )

    def test_increment_already_max(self):
        """
        If the `current` is already at `max`, no OSC values should be
        sent from "increment", but the IRC message should still be created
        """
        command = Command(
            name='My Command',
            initial=1.0,
            delta=0.07,
            max=1.0,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('increment')
        self.assertAlmostEqual(command.current, 1.0)
        # value is None => nothing is sent over OSC.
        self.assertIsNone(response.value)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 1.0 (Max 1.0)'
        )

    def test_decrement_inside_range(self):
        """
        Running the "decrement" action should subtract the command's
        `delta` to the current value. Repeated runs should continue
        to decrease the value
        """
        command = Command(
            name='My Command',
            initial=0.5,
            delta=0.07,
            min=0.0,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('decrement')
        self.assertAlmostEqual(response.value, 0.43)
        self.assertAlmostEqual(command.current, 0.43)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 0.43 (Min 0.0)'
        )

        response = command.run_action('decrement')
        self.assertAlmostEqual(response.value, 0.36)
        self.assertAlmostEqual(command.current, 0.36)

        response = command.run_action('decrement')
        self.assertAlmostEqual(response.value, 0.29)
        self.assertAlmostEqual(command.current, 0.29)

    def test_decrement_overlapping_range(self):
        """
        if the delta + current is less than the min, the `decrement`
        value should be clamped up to min, but an osc_value should
        still be sent
        """
        command = Command(
            name='My Command',
            initial=0.03,
            delta=0.07,
            min=0.0,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('decrement')
        self.assertAlmostEqual(response.value, 0.0)
        self.assertAlmostEqual(command.current, 0.0)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 0.0 (Min 0.0)'
        )

    def test_decrement_already_min(self):
        """
        If the `current` is already at `min`, no OSC values
        should be sent from "decrement", but the IRC message should
        still be created
        """
        command = Command(
            name='My Command',
            initial=0.0,
            delta=0.07,
            min=0.0,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('decrement')
        self.assertAlmostEqual(command.current, 0.0)
        self.assertIsNone(response.value)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 0.0 (Min 0.0)'
        )

    def test_set_in_range(self):
        """
        If the passed value is within the range, the `current` value
        should be updated and the message should be generated based on
        the direction that the value moved
        """
        command = Command(
            name='My Command',
            initial=0.5,
            min=0.0,
            max=1.0,
            delta=0.07,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        # Moving upward reports the Max bound...
        response = command.run_action('set', 0.65)
        self.assertAlmostEqual(response.value, 0.65)
        self.assertAlmostEqual(command.current, 0.65)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 0.65 (Max 1.0)'
        )

        # ...moving downward reports the Min bound.
        response = command.run_action('set', 0.43)
        self.assertAlmostEqual(response.value, 0.43)
        self.assertAlmostEqual(command.current, 0.43)
        self.assertEqual(
            response.irc_message,
            'MY COMMAND is at 0.43 (Min 0.0)'
        )

    def test_set_outside_range(self):
        """
        If the set value is outside the min or max, the out_of_bounds
        message should be sent to IRC, and no osc_value should be sent
        """
        command = Command(
            name='My Command',
            initial=0.5,
            min=0.1,
            max=1.0,
            delta=0.07,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('set', 1.1)
        self.assertAlmostEqual(command.current, 0.5)
        self.assertIsNone(response.value)
        self.assertEqual(
            response.irc_message,
            '1.1 is out of bounds for MY COMMAND (Min 0.1, Max 1.0)'
        )

        response = command.run_action('set', 0.05)
        self.assertAlmostEqual(command.current, 0.5)
        self.assertIsNone(response.value)
        self.assertEqual(
            response.irc_message,
            '0.05 is out of bounds for MY COMMAND (Min 0.1, Max 1.0)'
        )

    def test_invalid_value(self):
        """
        If a non-floatable is passed to a `run_action`,
        it should send a `not valid value` error
        """
        command = Command(
            name='My Command',
            initial=0.5,
            min=0.1,
            max=1.0,
            delta=0.07,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        response = command.run_action('set', 'bar')
        self.assertEqual(
            response.irc_message,
            '"MY COMMAND set" requires a number value between 0.1 and 1.0',
        )
        self.assertIsNone(response.value)

    def test_increment_extra_value(self):
        """
        Passing an extra value with increment into `run_action` should
        have no effect
        """
        command = Command(
            name='My Command',
            initial=0.5,
            min=0.1,
            max=1.0,
            delta=0.07,
            allowed_actions=['get', 'set', 'increment', 'decrement'],
        )
        # The '.75' argument is ignored; delta is applied as usual.
        response = command.run_action('increment', '.75')
        self.assertAlmostEqual(response.value, 0.57)
        self.assertAlmostEqual(command.current, 0.57)

    def test_echo(self):
        """
        Calling `run_action` with no action should call the
        associated "echo" value
        """
        command = Command(
            name='My Command',
            echo="This is a description of my command"
        )
        response = command.run_action()
        self.assertEqual(
            response.irc_message,
            "This is a description of my command",
        )
        self.assertIsNone(response.value)
# Converts XML back to MKV
# Licence = MIT, 2012, Vitaly "_Vi" Shukela
# the whole parser has been retrieved from: https://github.com/vi/mkvparse
# xml > mkv: cat filename.xml | ./xml2mkv > filename.mkv
# get permission on ubuntu: chmod +x scriptname.extension

import sys
from xml.sax import make_parser, handler
from struct import pack
import binascii

# Python 2 / Python 3 compatibility shims: normalise string<->bytes handling
# so the rest of the code can call maybe_encode / maybe_encode_utf8 / chr
# uniformly on both interpreters.
if sys.version < '3':
    reload(sys)
    sys.setdefaultencoding('utf-8')
    maybe_encode=lambda x:x
    range = xrange
    maybe_encode_utf8=lambda x:x
else:
    def maybe_encode(x):
        # On Python 3, hex strings must become ASCII bytes for unhexlify.
        if type(x) == str:
            return x.encode("ascii")
        else:
            return x
    # Redefine chr() to produce a single byte, matching Python 2 behaviour.
    chr=lambda x:bytes((x,))
    maybe_encode_utf8=lambda x:x.encode("UTF-8")


# More information can be obtained from https://matroska.org/technical/specs/notes.html#Table_Columns
# or more detailed https://github.com/Matroska-Org/ebml-specification/blob/master/specification.markdown
class EBMLTypes:
    VOID = 0        # used to overwrite damaged data or reserve space within "Master" for later use
    MASTER = 1      # contains 0, 1, or other EBML elements
    UNSIGNED = 2    # stores positive or 0 integer. Since it is limited to 8 octets then the length is
                    # from 0 to 18,446,744,073,709,551,615
    SIGNED = 3
    TEXTA = 4
    TEXTU = 5
    BINARY = 6      # declares a length in octets from zero to VINTMAX
    FLOAT = 7       # stores a floating-point number as defined in [@!IEEE.754.1985]
    DATE = 8        # stores an integer in the same format as SIGNED that expresses a point in time
                    # referenced in nanoseconds from the precise beginning of the third millennium. Provides
                    # expression of time from 1708-09-11T00:12:44.854775808 UTC to 2293-04-11T11:47:16.854775807 UTC
    PROCEED = 10


# Maps every known Matroska element name to (raw EBML element ID, type).
table_element_name = {
    # EBML header
    "EBML": (b"\x1A\x45\xDF\xA3", EBMLTypes.MASTER),
    "EBMLVersion": (b"\x42\x86", EBMLTypes.UNSIGNED),
    "EBMLReadVersion": (b"\x42\xF7", EBMLTypes.UNSIGNED),
    "EBMLMaxIDLength": (b"\x42\xF2", EBMLTypes.UNSIGNED),
    "EBMLMaxSizeLength": (b"\x42\xF3", EBMLTypes.UNSIGNED),
    "DocType": (b"\x42\x82", EBMLTypes.TEXTA),
    "DocTypeVersion": (b"\x42\x87", EBMLTypes.UNSIGNED),
    "DocTypeReadVersion": (b"\x42\x85", EBMLTypes.UNSIGNED),
    "Void": (b"\xEC", EBMLTypes.BINARY),
    "CRC-32": (b"\xBF", EBMLTypes.BINARY),
    "SignatureSlot": (b"\x1B\x53\x86\x67", EBMLTypes.MASTER),
    "SignatureAlgo": (b"\x7E\x8A", EBMLTypes.UNSIGNED),
    "SignatureHash": (b"\x7E\x9A", EBMLTypes.UNSIGNED),
    "SignaturePublicKey": (b"\x7E\xA5", EBMLTypes.BINARY),
    "Signature": (b"\x7E\xB5", EBMLTypes.BINARY),
    "SignatureElements": (b"\x7E\x5B", EBMLTypes.MASTER),
    "SignatureElementList": (b"\x7E\x7B", EBMLTypes.MASTER),
    "SignedElement": (b"\x65\x32", EBMLTypes.BINARY),
    # Segment / meta seek
    "Segment": (b"\x18\x53\x80\x67", EBMLTypes.PROCEED),
    "SeekHead": (b"\x11\x4D\x9B\x74", EBMLTypes.MASTER),
    "Seek": (b"\x4D\xBB", EBMLTypes.MASTER),
    "SeekID": (b"\x53\xAB", EBMLTypes.BINARY),
    "SeekPosition": (b"\x53\xAC", EBMLTypes.UNSIGNED),
    # Segment info
    "Info": (b"\x15\x49\xA9\x66", EBMLTypes.MASTER),
    "SegmentUID": (b"\x73\xA4", EBMLTypes.BINARY),
    "SegmentFilename": (b"\x73\x84", EBMLTypes.TEXTU),
    "PrevUID": (b"\x3C\xB9\x23", EBMLTypes.BINARY),
    "PrevFilename": (b"\x3C\x83\xAB", EBMLTypes.TEXTU),
    "NextUID": (b"\x3E\xB9\x23", EBMLTypes.BINARY),
    "NextFilename": (b"\x3E\x83\xBB", EBMLTypes.TEXTU),
    "SegmentFamily": (b"\x44\x44", EBMLTypes.BINARY),
    "ChapterTranslate": (b"\x69\x24", EBMLTypes.MASTER),
    "ChapterTranslateEditionUID": (b"\x69\xFC", EBMLTypes.UNSIGNED),
    "ChapterTranslateCodec": (b"\x69\xBF", EBMLTypes.UNSIGNED),
    "ChapterTranslateID": (b"\x69\xA5", EBMLTypes.BINARY),
    "TimecodeScale": (b"\x2A\xD7\xB1", EBMLTypes.UNSIGNED),
    "Duration": (b"\x44\x89", EBMLTypes.FLOAT),
    "DateUTC": (b"\x44\x61", EBMLTypes.DATE),
    "Title": (b"\x7B\xA9", EBMLTypes.TEXTU),
    "MuxingApp": (b"\x4D\x80", EBMLTypes.TEXTU),
    "WritingApp": (b"\x57\x41", EBMLTypes.TEXTU),
    # Cluster / block structure
    "Cluster": (b"\x1F\x43\xB6\x75", EBMLTypes.PROCEED),
    "Timecode": (b"\xE7", EBMLTypes.UNSIGNED),
    "SilentTracks": (b"\x58\x54", EBMLTypes.MASTER),
    "SilentTrackNumber": (b"\x58\xD7", EBMLTypes.UNSIGNED),
    "Position": (b"\xA7", EBMLTypes.UNSIGNED),
    "PrevSize": (b"\xAB", EBMLTypes.UNSIGNED),
    "SimpleBlock": (b"\xA3", EBMLTypes.BINARY),
    "BlockGroup": (b"\xA0", EBMLTypes.MASTER),
    "Block": (b"\xA1", EBMLTypes.BINARY),
    "BlockVirtual": (b"\xA2", EBMLTypes.BINARY),
    "BlockAdditions": (b"\x75\xA1", EBMLTypes.MASTER),
    "BlockMore": (b"\xA6", EBMLTypes.MASTER),
    "BlockAddID": (b"\xEE", EBMLTypes.UNSIGNED),
    "BlockAdditional": (b"\xA5", EBMLTypes.BINARY),
    "BlockDuration": (b"\x9B", EBMLTypes.UNSIGNED),
    "ReferencePriority": (b"\xFA", EBMLTypes.UNSIGNED),
    "ReferenceBlock": (b"\xFB", EBMLTypes.SIGNED),
    "ReferenceVirtual": (b"\xFD", EBMLTypes.SIGNED),
    "CodecState": (b"\xA4", EBMLTypes.BINARY),
    "Slices": (b"\x8E", EBMLTypes.MASTER),
    "TimeSlice": (b"\xE8", EBMLTypes.MASTER),
    "LaceNumber": (b"\xCC", EBMLTypes.UNSIGNED),
    "FrameNumber": (b"\xCD", EBMLTypes.UNSIGNED),
    "BlockAdditionID": (b"\xCB", EBMLTypes.UNSIGNED),
    "Delay": (b"\xCE", EBMLTypes.UNSIGNED),
    "SliceDuration": (b"\xCF", EBMLTypes.UNSIGNED),
    "ReferenceFrame": (b"\xC8", EBMLTypes.MASTER),
    "ReferenceOffset": (b"\xC9", EBMLTypes.UNSIGNED),
    "ReferenceTimeCode": (b"\xCA", EBMLTypes.UNSIGNED),
    "EncryptedBlock": (b"\xAF", EBMLTypes.BINARY),
    # Tracks
    "Tracks": (b"\x16\x54\xAE\x6B", EBMLTypes.MASTER),
    "TrackEntry": (b"\xAE", EBMLTypes.MASTER),
    "TrackNumber": (b"\xD7", EBMLTypes.UNSIGNED),
    "TrackUID": (b"\x73\xC5", EBMLTypes.UNSIGNED),
    "TrackType": (b"\x83", EBMLTypes.UNSIGNED),
    "FlagEnabled": (b"\xB9", EBMLTypes.UNSIGNED),
    "FlagDefault": (b"\x88", EBMLTypes.UNSIGNED),
    "FlagForced": (b"\x55\xAA", EBMLTypes.UNSIGNED),
    "FlagLacing": (b"\x9C", EBMLTypes.UNSIGNED),
    "MinCache": (b"\x6D\xE7", EBMLTypes.UNSIGNED),
    "MaxCache": (b"\x6D\xF8", EBMLTypes.UNSIGNED),
    "DefaultDuration": (b"\x23\xE3\x83", EBMLTypes.UNSIGNED),
    "TrackTimecodeScale": (b"\x23\x31\x4F", EBMLTypes.FLOAT),
    "TrackOffset": (b"\x53\x7F", EBMLTypes.SIGNED),
    "MaxBlockAdditionID": (b"\x55\xEE", EBMLTypes.UNSIGNED),
    "Name": (b"\x53\x6E", EBMLTypes.TEXTU),
    "Language": (b"\x22\xB5\x9C", EBMLTypes.TEXTA),
    "CodecID": (b"\x86", EBMLTypes.TEXTA),
    "CodecPrivate": (b"\x63\xA2", EBMLTypes.BINARY),
    "CodecName": (b"\x25\x86\x88", EBMLTypes.TEXTU),
    "AttachmentLink": (b"\x74\x46", EBMLTypes.UNSIGNED),
    "CodecSettings": (b"\x3A\x96\x97", EBMLTypes.TEXTU),
    "CodecInfoURL": (b"\x3B\x40\x40", EBMLTypes.TEXTA),
    "CodecDownloadURL": (b"\x26\xB2\x40", EBMLTypes.TEXTA),
    "CodecDecodeAll": (b"\xAA", EBMLTypes.UNSIGNED),
    "TrackOverlay": (b"\x6F\xAB", EBMLTypes.UNSIGNED),
    "TrackTranslate": (b"\x66\x24", EBMLTypes.MASTER),
    "TrackTranslateEditionUID": (b"\x66\xFC", EBMLTypes.UNSIGNED),
    "TrackTranslateCodec": (b"\x66\xBF", EBMLTypes.UNSIGNED),
    "TrackTranslateTrackID": (b"\x66\xA5", EBMLTypes.BINARY),
    # Video settings
    "Video": (b"\xE0", EBMLTypes.MASTER),
    "FlagInterlaced": (b"\x9A", EBMLTypes.UNSIGNED),
    "StereoMode": (b"\x53\xB8", EBMLTypes.UNSIGNED),
    "OldStereoMode": (b"\x53\xB9", EBMLTypes.UNSIGNED),
    "PixelWidth": (b"\xB0", EBMLTypes.UNSIGNED),
    "PixelHeight": (b"\xBA", EBMLTypes.UNSIGNED),
    "PixelCropBottom": (b"\x54\xAA", EBMLTypes.UNSIGNED),
    "PixelCropTop": (b"\x54\xBB", EBMLTypes.UNSIGNED),
    "PixelCropLeft": (b"\x54\xCC", EBMLTypes.UNSIGNED),
    "PixelCropRight": (b"\x54\xDD", EBMLTypes.UNSIGNED),
    "DisplayWidth": (b"\x54\xB0", EBMLTypes.UNSIGNED),
    "DisplayHeight": (b"\x54\xBA", EBMLTypes.UNSIGNED),
    "DisplayUnit": (b"\x54\xB2", EBMLTypes.UNSIGNED),
    "AspectRatioType": (b"\x54\xB3", EBMLTypes.UNSIGNED),
    "ColourSpace": (b"\x2E\xB5\x24", EBMLTypes.BINARY),
    "GammaValue": (b"\x2F\xB5\x23", EBMLTypes.FLOAT),
    "FrameRate": (b"\x23\x83\xE3", EBMLTypes.FLOAT),
    # Audio settings
    "Audio": (b"\xE1", EBMLTypes.MASTER),
    "SamplingFrequency": (b"\xB5", EBMLTypes.FLOAT),
    "OutputSamplingFrequency": (b"\x78\xB5", EBMLTypes.FLOAT),
    "Channels": (b"\x9F", EBMLTypes.UNSIGNED),
    "ChannelPositions": (b"\x7D\x7B", EBMLTypes.BINARY),
    "BitDepth": (b"\x62\x64", EBMLTypes.UNSIGNED),
    # Track operations / trick tracks
    "TrackOperation": (b"\xE2", EBMLTypes.MASTER),
    "TrackCombinePlanes": (b"\xE3", EBMLTypes.MASTER),
    "TrackPlane": (b"\xE4", EBMLTypes.MASTER),
    "TrackPlaneUID": (b"\xE5", EBMLTypes.UNSIGNED),
    "TrackPlaneType": (b"\xE6", EBMLTypes.UNSIGNED),
    "TrackJoinBlocks": (b"\xE9", EBMLTypes.MASTER),
    "TrackJoinUID": (b"\xED", EBMLTypes.UNSIGNED),
    "TrickTrackUID": (b"\xC0", EBMLTypes.UNSIGNED),
    "TrickTrackSegmentUID": (b"\xC1", EBMLTypes.BINARY),
    "TrickTrackFlag": (b"\xC6", EBMLTypes.UNSIGNED),
    "TrickMasterTrackUID": (b"\xC7", EBMLTypes.UNSIGNED),
    "TrickMasterTrackSegmentUID": (b"\xC4", EBMLTypes.BINARY),
    # Content encoding (compression / encryption)
    "ContentEncodings": (b"\x6D\x80", EBMLTypes.MASTER),
    "ContentEncoding": (b"\x62\x40", EBMLTypes.MASTER),
    "ContentEncodingOrder": (b"\x50\x31", EBMLTypes.UNSIGNED),
    "ContentEncodingScope": (b"\x50\x32", EBMLTypes.UNSIGNED),
    "ContentEncodingType": (b"\x50\x33", EBMLTypes.UNSIGNED),
    "ContentCompression": (b"\x50\x34", EBMLTypes.MASTER),
    "ContentCompAlgo": (b"\x42\x54", EBMLTypes.UNSIGNED),
    "ContentCompSettings": (b"\x42\x55", EBMLTypes.BINARY),
    "ContentEncryption": (b"\x50\x35", EBMLTypes.MASTER),
    "ContentEncAlgo": (b"\x47\xE1", EBMLTypes.UNSIGNED),
    "ContentEncKeyID": (b"\x47\xE2", EBMLTypes.BINARY),
    "ContentSignature": (b"\x47\xE3", EBMLTypes.BINARY),
    "ContentSigKeyID": (b"\x47\xE4", EBMLTypes.BINARY),
    "ContentSigAlgo": (b"\x47\xE5", EBMLTypes.UNSIGNED),
    "ContentSigHashAlgo": (b"\x47\xE6", EBMLTypes.UNSIGNED),
    # Cueing data
    "Cues": (b"\x1C\x53\xBB\x6B", EBMLTypes.MASTER),
    "CuePoint": (b"\xBB", EBMLTypes.MASTER),
    "CueTime": (b"\xB3", EBMLTypes.UNSIGNED),
    "CueTrackPositions": (b"\xB7", EBMLTypes.MASTER),
    "CueTrack": (b"\xF7", EBMLTypes.UNSIGNED),
    "CueClusterPosition": (b"\xF1", EBMLTypes.UNSIGNED),
    "CueBlockNumber": (b"\x53\x78", EBMLTypes.UNSIGNED),
    "CueCodecState": (b"\xEA", EBMLTypes.UNSIGNED),
    "CueReference": (b"\xDB", EBMLTypes.MASTER),
    "CueRefTime": (b"\x96", EBMLTypes.UNSIGNED),
    "CueRefCluster": (b"\x97", EBMLTypes.UNSIGNED),
    "CueRefNumber": (b"\x53\x5F", EBMLTypes.UNSIGNED),
    "CueRefCodecState": (b"\xEB", EBMLTypes.UNSIGNED),
    # Attachments
    "Attachments": (b"\x19\x41\xA4\x69", EBMLTypes.MASTER),
    "AttachedFile": (b"\x61\xA7", EBMLTypes.MASTER),
    "FileDescription": (b"\x46\x7E", EBMLTypes.TEXTU),
    "FileName": (b"\x46\x6E", EBMLTypes.TEXTU),
    "FileMimeType": (b"\x46\x60", EBMLTypes.TEXTA),
    "FileData": (b"\x46\x5C", EBMLTypes.BINARY),
    "FileUID": (b"\x46\xAE", EBMLTypes.UNSIGNED),
    "FileReferral": (b"\x46\x75", EBMLTypes.BINARY),
    "FileUsedStartTime": (b"\x46\x61", EBMLTypes.UNSIGNED),
    "FileUsedEndTime": (b"\x46\x62", EBMLTypes.UNSIGNED),
    # Chapters
    "Chapters": (b"\x10\x43\xA7\x70", EBMLTypes.MASTER),
    "EditionEntry": (b"\x45\xB9", EBMLTypes.MASTER),
    "EditionUID": (b"\x45\xBC", EBMLTypes.UNSIGNED),
    "EditionFlagHidden": (b"\x45\xBD", EBMLTypes.UNSIGNED),
    "EditionFlagDefault": (b"\x45\xDB", EBMLTypes.UNSIGNED),
    "EditionFlagOrdered": (b"\x45\xDD", EBMLTypes.UNSIGNED),
    "ChapterAtom": (b"\xB6", EBMLTypes.MASTER),
    "ChapterUID": (b"\x73\xC4", EBMLTypes.UNSIGNED),
    "ChapterTimeStart": (b"\x91", EBMLTypes.UNSIGNED),
    "ChapterTimeEnd": (b"\x92", EBMLTypes.UNSIGNED),
    "ChapterFlagHidden": (b"\x98", EBMLTypes.UNSIGNED),
    "ChapterFlagEnabled": (b"\x45\x98", EBMLTypes.UNSIGNED),
    "ChapterSegmentUID": (b"\x6E\x67", EBMLTypes.BINARY),
    "ChapterSegmentEditionUID": (b"\x6E\xBC", EBMLTypes.UNSIGNED),
    "ChapterPhysicalEquiv": (b"\x63\xC3", EBMLTypes.UNSIGNED),
    "ChapterTrack": (b"\x8F", EBMLTypes.MASTER),
    "ChapterTrackNumber": (b"\x89", EBMLTypes.UNSIGNED),
    "ChapterDisplay": (b"\x80", EBMLTypes.MASTER),
    "ChapString": (b"\x85", EBMLTypes.TEXTU),
    "ChapLanguage": (b"\x43\x7C", EBMLTypes.TEXTA),
    "ChapCountry": (b"\x43\x7E", EBMLTypes.TEXTA),
    "ChapProcess": (b"\x69\x44", EBMLTypes.MASTER),
    "ChapProcessCodecID": (b"\x69\x55", EBMLTypes.UNSIGNED),
    "ChapProcessPrivate": (b"\x45\x0D", EBMLTypes.BINARY),
    "ChapProcessCommand": (b"\x69\x11", EBMLTypes.MASTER),
    "ChapProcessTime": (b"\x69\x22", EBMLTypes.UNSIGNED),
    "ChapProcessData": (b"\x69\x33", EBMLTypes.BINARY),
    # Tagging
    "Tags": (b"\x12\x54\xC3\x67", EBMLTypes.MASTER),
    "Tag": (b"\x73\x73", EBMLTypes.MASTER),
    "Targets": (b"\x63\xC0", EBMLTypes.MASTER),
    "TargetTypeValue": (b"\x68\xCA", EBMLTypes.UNSIGNED),
    "TargetType": (b"\x63\xCA", EBMLTypes.TEXTA),
    "TagTrackUID": (b"\x63\xC5", EBMLTypes.UNSIGNED),
    "TagEditionUID": (b"\x63\xC9", EBMLTypes.UNSIGNED),
    "TagChapterUID": (b"\x63\xC4", EBMLTypes.UNSIGNED),
    "TagAttachmentUID": (b"\x63\xC6", EBMLTypes.UNSIGNED),
    "SimpleTag": (b"\x67\xC8", EBMLTypes.MASTER),
    "TagName": (b"\x45\xA3", EBMLTypes.TEXTU),
    "TagLanguage": (b"\x44\x7A", EBMLTypes.TEXTA),
    "TagDefault": (b"\x44\x84", EBMLTypes.UNSIGNED),
    "TagString": (b"\x44\x87", EBMLTypes.TEXTU),
    "TagBinary": (b"\x44\x85", EBMLTypes.BINARY),
    # Later additions (WebM / Matroska v4, colour metadata etc.)
    "CodecDelay": (b"\x56\xAA", EBMLTypes.UNSIGNED),
    "SeekPreRoll": (b"\x56\xBB", EBMLTypes.UNSIGNED),
    "CueRelativePosition": (b"\xF0", EBMLTypes.UNSIGNED),
    "AlphaMode": (b"\x53\xC0", EBMLTypes.UNSIGNED),
    "BitsPerChannel": (b"\x55\xB2", EBMLTypes.UNSIGNED),
    "CbSubsamplingHorz": (b"\x55\xB5", EBMLTypes.UNSIGNED),
    "CbSubsamplingVert": (b"\x55\xB6", EBMLTypes.UNSIGNED),
    "ChapterStringUID": (b"\x56\x54", EBMLTypes.TEXTU),
    "ChromaSitingHorz": (b"\x55\xB7", EBMLTypes.UNSIGNED),
    "ChromaSitingVert": (b"\x55\xB8", EBMLTypes.UNSIGNED),
    "ChromaSubsamplingHorz": (b"\x55\xB3", EBMLTypes.UNSIGNED),
    "ChromaSubsamplingVert": (b"\x55\xB4", EBMLTypes.UNSIGNED),
    "Colour": (b"\x55\xB0", EBMLTypes.MASTER),
    "DefaultDecodedFieldDuration": (b"\x23\x4E\x7A", EBMLTypes.UNSIGNED),
    "DiscardPadding": (b"\x75\xA2", EBMLTypes.SIGNED),
    "FieldOrder": (b"\x9D", EBMLTypes.UNSIGNED),
    "LuminanceMax": (b"\x55\xD9", EBMLTypes.FLOAT),
    "LuminanceMin": (b"\x55\xDA", EBMLTypes.FLOAT),
    "MasteringMetadata": (b"\x55\xD0", EBMLTypes.MASTER),
    "MatrixCoefficients": (b"\x55\xB1", EBMLTypes.UNSIGNED),
    "MaxCLL": (b"\x55\xBC", EBMLTypes.UNSIGNED),
    "MaxFALL": (b"\x55\xBD", EBMLTypes.UNSIGNED),
    "Primaries": (b"\x55\xBB", EBMLTypes.UNSIGNED),
    "PrimaryBChromaticityX": (b"\x55\xD5", EBMLTypes.FLOAT),
    "PrimaryBChromaticityY": (b"\x55\xD6", EBMLTypes.FLOAT),
    "PrimaryGChromaticityX": (b"\x55\xD3", EBMLTypes.FLOAT),
    "PrimaryGChromaticityY": (b"\x55\xD4", EBMLTypes.FLOAT),
    "PrimaryRChromaticityX": (b"\x55\xD1", EBMLTypes.FLOAT),
    "PrimaryRChromaticityY": (b"\x55\xD2", EBMLTypes.FLOAT),
    "Range": (b"\x55\xB9", EBMLTypes.UNSIGNED),
    "TransferCharacteristics": (b"\x55\xBA", EBMLTypes.UNSIGNED),
    "WhitePointChromaticityX": (b"\x55\xD7", EBMLTypes.FLOAT),
    "WhitePointChromaticityY": (b"\x55\xD8", EBMLTypes.FLOAT),
}


def get_largest_byte(byte, signed=False):
    # Big-endian, minimal-length integer encoding (EBML element payloads).
    # Negative values are first converted to two's complement of the
    # smallest sufficient width; recursion emits higher-order bytes first.
    if byte < 0:
        x = 0x100
        while (byte + x) < (x / 2):
            x <<= 8  # >> means i / 2**1; << means i * 2**1
        byte += x
        if byte < 0x100:
            return chr(byte)
        signed = False
    elif (byte < 0x100 and not signed) or (byte < 0x80):
        # Fits in one byte (signed values additionally need the top bit free).
        return chr(byte)
    return get_largest_byte(byte >> 8, signed) + chr(byte & 0xFF)


def encode_embl(byte):
    # Encode an EBML variable-length integer ("vint"): the number of
    # leading zero bits of the first byte encodes the total width.
    # -1 is the special all-ones value meaning "unknown size".
    def move_bits(rest_of_byte, bits):
        # Emit the low `bits` bits of the value as big-endian bytes.
        if bits==8:
            return chr(rest_of_byte & 0xFF);
        else:
            return move_bits(rest_of_byte >> 8, bits - 8) + chr(rest_of_byte & 0xFF)
    if byte == -1:
        return chr(0xFF)
    if byte < 2**7 - 1:
        return chr(byte | 0x80)
    if byte < 2**14 - 1:
        return chr(0x40 | (byte >> 8)) + move_bits(byte, 8)
    if byte < 2**21 - 1:
        return chr(0x20 | (byte >> 16)) + move_bits(byte, 16)
    if byte < 2**28 - 1:
        return chr(0x10 | (byte >> 24)) + move_bits(byte, 24)
    if byte < 2**35 - 1:
        return chr(0x08 | (byte >> 32)) + move_bits(byte, 32)
    if byte < 2**42 - 1:
        return chr(0x04 | (byte >> 40)) + move_bits(byte, 40)
    if byte < 2**49 - 1:
        return chr(0x02 | (byte >> 48)) + move_bits(byte, 48)
    if byte < 2**56 - 1:
        return chr(0x01) + move_bits(byte, 56)
    raise Exception("FOUND BYTE IS TOO BIG")


def get_ebml_element(element_id, element_data, element_length = None):
    # Serialise one complete EBML element: raw ID + vint length + payload.
    if element_length == None:
        element_length = len(element_data)
    return get_largest_byte(element_id) + \
encode_embl(element_length) + element_data class ConvertXMLtoMKV(handler.ContentHandler): def __init__(self): self.stack_table = [] self.pump_stack = [] self.timecodeScale = 1000000 self.check_duration = None self.last_block_duration = 0 self.compress_header = {} self.current_track = None self.current_compiling_algorithm = None self.current_character_character_content_compiling_settings = None pass def get_duration(self, duration): if not self.check_duration: self.check_duration = duration else: if duration != self.check_duration: sys.stderr.write("Duration has been ignored due to being edited\n") sys.stderr.write("<duration> is meaningful in <block>s that are used in\n") sys.stderr.write("\"nocluster\" mode (mkv2xml -C)\n") def startElement(self, name, attributes): self.element_name = name self.element_data= [] if name == "mkv2xml": pass elif name == "Segment": sys.stdout.write(b"\x18\x53\x80\x67"+b"\xFF") sys.stdout.write(b"\xEC\x40\x80" + (b"\x00" * 128)) pass elif name == "track" or name == "duration" or name == "timecode": pass elif name == "data": self.character_content_of_the_frame = False if "encoding" in attributes: if attributes["encoding"] == "text": self.character_content_of_the_frame = True elif name == "discardable": self.discardable_frame = True elif name == "keyframe": self.keyframe_frame = True elif name == "invisible": self.invisible_frame = True else: if name in table_element_name: (element_id, element_type) = table_element_name[name] self.current_element_type = element_type self.stack_table.append((name, attributes)) self.pump_stack.append(b"") if "encoding" in attributes: if attributes["encoding"] == "text": self.current_element_type = EBMLTypes.TEXTA if name == "SimpleBlock" or name == "Block" or name == "block": self.discardable_frame = False self.invisible_frame = False self.keyframe_frame = False self.current_pumped_frame = "" self.frame_buffering = [] self.duration_of_the_frame = None self.current_element_type = None # prevent usual binary 
processing if name == "BlockGroup": self.check_duration = None def characters(self, character_content): self.element_data.append(character_content) def characters_old(self, character_content): if self.element_name == "data": self.current_pumped_frame += character_content return if not character_content.isspace(): pump = "" if self.element_name == "track": self.track_of_the_frame = int(character_content) return elif self.element_name == "timecode": self.timecode_of_the_frame = float(character_content) return elif self.element_name == "duration": self.duration_of_the_frame = float(character_content) return elif self.current_element_type == EBMLTypes.TEXTA: pump = str(character_content).encode("ascii") elif self.current_element_type == EBMLTypes.TEXTU: pump = str(character_content).encode("UTF-8") elif self.current_element_type == EBMLTypes.UNSIGNED: pump = get_largest_byte(int(character_content)) if self.element_name == "TimecodeScale": self.timecodeScale = int(character_content) elif self.element_name == "Timecode": self.last_cluster = int(character_content) elif self.element_name == "BlockDuration": self.get_duration(int(character_content)) elif self.current_element_type == EBMLTypes.SIGNED: pump = get_largest_byte(int(character_content), True) elif self.current_element_type == EBMLTypes.BINARY: character_content = character_content.replace("\n","").replace("\r","").replace(" ",""); pump = binascii.unhexlify(maybe_encode(character_content)) pass elif self.current_element_type == EBMLTypes.DATE: actual_duration = float(character_content) actual_duration -= 978300000 actual_duration *= 1000000000.0; pump = get_largest_byte(int(actual_duration), True) elif self.current_element_type == EBMLTypes.FLOAT: pump = pack(">d", float(character_content)) self.pump_stack[-1]+=pump def endElement(self, name): character_content = "".join(self.element_data) if character_content: self.characters_old(character_content) self.element_data=[] if name == "track" or name == "timecode" or 
name == "discardable" or name == "keyframe" or \ name == "invisible" or name == "duration": return elif name == "data": if self.character_content_of_the_frame: self.frame_buffering.append(maybe_encode_utf8(str(self.current_pumped_frame))) else: text = self.current_pumped_frame.replace("\n", "").replace("\t", "").replace(" ", "") self.frame_buffering.append(binascii.unhexlify(maybe_encode(text))) self.current_pumped_frame="" return if name != "block": if not name in table_element_name: if not name == "mkv2xml": sys.stderr.write("Element is not known %s\n"%name) return if name=="TrackEntry": if self.current_compiling_algorithm == 3: self.compress_header[self.current_track] = binascii.unhexlify(maybe_encode(self.current_character_character_content_compiling_settings)) self.current_character_character_content_compiling_settings = None self.current_compiling_algorithm = None sys.stderr.write("Using header compression for track "+str(self.current_track)+"\n"); if name=="TrackNumber": self.current_track = int(character_content) if name=="ContentCompAlgo": self.current_compiling_algorithm = int(character_content) if name=="ContentCompSettings": self.current_character_character_content_compiling_settings = character_content if name=="Segment": return (element_id, element_type) = (None, None) if name != "block": (element_id, element_type) = table_element_name[name]; if (name == "SimpleBlock") or (name == "Block") or (name == "block"): exact_timecode = int(self.timecode_of_the_frame * 1000000000 / self.timecodeScale) similar_timecode = 0 if name != "block": similar_timecode = exact_timecode - self.last_cluster if self.duration_of_the_frame: scale_down_the_duration = int(self.duration_of_the_frame * 1000000000 / self.timecodeScale) self.get_duration(scale_down_the_duration) if (similar_timecode < -0x8000) or (similar_timecode > 0x7FFF): sys.stderr.write("Block timecode is too far from outer Cluster's timecode\n") sys.stderr.write("Use no-cluster mode (mkv2xml -C) with <block> 
elements\n") similar_timecode = 0; if similar_timecode < 0: similar_timecode+=0x10000 flags = 0x00 if self.keyframe_frame: flags |= 0x80 if self.discardable_frame: flags |= 0x01 if self.invisible_frame: flags |= 0x08 length_of_the_XIPH=[] character_content = b"" header = None if self.track_of_the_frame in self.compress_header: header = self.compress_header[self.track_of_the_frame] for j in self.frame_buffering: if header is not None: if j[0:len(header)] != header: sys.stderr.write("Unable to apply header compression here\n") else: j=j[len(header):] character_content += j length_of_the_XIPH.append(len(j)) laced_XIPH=b"" laced_XIPH_count = len(length_of_the_XIPH)-1 for j in range(0,laced_XIPH_count): length = length_of_the_XIPH[j] while length >= 255: laced_XIPH += b"\xFF" length -= 255 laced_XIPH += chr(length) current_frame = None if len(length_of_the_XIPH) <= 1: current_frame = encode_embl(self.track_of_the_frame) + \ chr(similar_timecode >> 8) + chr(similar_timecode & 0xFF) + chr(flags) + character_content else: flags |= 0x02 # Xiph lacing current_frame = encode_embl(self.track_of_the_frame) + chr(similar_timecode >> 8) + chr(similar_timecode & 0xFF) + \ chr(flags) + chr(laced_XIPH_count) + laced_XIPH + character_content if name == "block": if not self.duration_of_the_frame: # Cluster + Timecode + simpleblock cluster = get_ebml_element(0x1F43B675, b"" + get_ebml_element(0xE7, get_largest_byte(exact_timecode)) + get_ebml_element(0xA3, current_frame)) else: scale_down_the_duration = int(self.duration_of_the_frame * 1000000000 / self.timecodeScale) # Cluster + Timecode + BlockGroup + Block Duration + Block cluster = get_ebml_element(0x1F43B675, b"" + get_ebml_element(0xE7, get_largest_byte(exact_timecode)) + get_ebml_element(0xA0, b"" + get_ebml_element(0x9B, get_largest_byte(scale_down_the_duration)) + get_ebml_element(0xA1, current_frame))) sys.stdout.write(cluster) return else: self.pump_stack[-1] = current_frame if not len(self.stack_table): return (_, 
attributes) = self.stack_table.pop() pump = self.pump_stack.pop() if name == "WritingApp" or name == "MuxingApp": if pump.find(b"xml2mkv") == -1: pump+=b"; xml2mkv" if len(self.pump_stack): self.pump_stack[-1] += element_id + encode_embl(len(pump)) + pump else: sys.stdout.write(element_id) sys.stdout.write(encode_embl(len(pump))) sys.stdout.write(pump) pass if sys.version >= '3': sys.stdout = sys.stdout.detach() parser = make_parser() parser.setContentHandler(ConvertXMLtoMKV()) parser.parse(sys.stdin)
import pickle

import torch

# Reproduction script: load a pickled kwargs dict from a fixture file and
# feed it to torch.nn.functional.adaptive_avg_pool2d.
#
# SECURITY NOTE(review): pickle.load executes arbitrary code embedded in the
# pickle stream -- only run this against trusted fixture files.

_FIXTURE = '0ae94cff1c998450d76df87ebe81dc91a0da20ae.p'

# Use a context manager so the file handle is closed deterministically
# (the original left the handle open until garbage collection).
with open(_FIXTURE, 'rb') as fh:
    data = pickle.load(fh)

# `data` is presumably a dict of keyword arguments for the pooling call
# (e.g. {'input': tensor, 'output_size': ...}) -- TODO confirm against fixture.
torch.nn.functional.adaptive_avg_pool2d(**data)
#
"""
Python tools for creating the MIRI MRS dither sequences for a given set of
distortion files.  These functions will be called from the associated
notebook front-end.

Beta dithers: Ch1 long offset is 5.5 times the Ch1 width because that will
be half-integer for all other channels.  Ch 2/3/4 are odd multiples of the
5.5 times slice width offset so that it will similarly be half-integer for
all channels.  Short beta dithers are 1.5x integer in the local channel
because we're trying to optimize for this channel and using larger than
0.5x offset will significantly help simultaneous imaging by having a
larger throw.

Alpha dithers both long and short offsets are defined related to the
channel-specific pixel size because we're optimizing for that channel.
However, alpha/beta axes in Ch1 aren't quite aligned with other channels,
and alpha sampling changes discontinuously between slices, so attempts to
do half-integer alpha while changing slices aren't perfect.  Assess
performance using dedicated simulation code.

Add additional tweak-ups so that the pattern is centered for a given
channel.

Author: David R. Law (dlaw@stsci.edu)

REVISION HISTORY:
15-Apr-2019  Adapt from old IDL routines (D. Law; dlaw@stsci.edu)
05-Aug-2020  Remove dither flips for mirisim per ticket MIRI-677 (D. Law)
08-Jun-2021  Fold JDox figure creation into these functions instead of notebook (D. Law)
27-May-2022  Update for FLT-1 distortion (D. Law)
"""

import matplotlib as mpl
import matplotlib.pyplot as plt
import os as os
import math
import numpy as np
import datetime
from astropy.io import fits
import csv
import getpass
import socket
import pdb

import miricoord.mrs.mrs_tools as mrst
import miricoord.mrs.makesiaf.makesiaf_mrs as makesiaf

#############################

# Global record of pixel sizes so that we only need to calculate once


def setsizes(**kwargs):
    """Populate module globals `pixsize` and `slicewidth` for Ch1-Ch4.

    Values are taken from the 'A' sub-band of each channel.  Pass
    verbose=<anything> to print the derived values.
    """
    global pixsize, slicewidth
    slicewidth = np.array([mrst.slicewidth('1A'), mrst.slicewidth('2A'),
                           mrst.slicewidth('3A'), mrst.slicewidth('4A')])
    pixsize = np.array([mrst.pixsize('1A'), mrst.pixsize('2A'),
                        mrst.pixsize('3A'), mrst.pixsize('4A')])
    if ('verbose' in kwargs):
        print('Slice widths: ', slicewidth)
        print('Pixel sizes: ', pixsize)
    return

#############################

# Generate the 8-position point source pattern
# given a set of input long and short offsets
# in the alpha and beta directions.  Note that positions 3 and 4 (plus 7 and 8)
# are swapped relative to the IDT original pattern.


def makepattern_generic(astart, along, ashort, bstart, blong, bshort):
    """Return (alpha, beta) arrays for the generic 8-point dither pattern."""
    pattern_alpha = np.array([0, along, ashort, along + ashort,
                              along + ashort, ashort, along, 0]) + astart
    # The pattern is shifted so position 4 lands at bstart.
    pattern_beta = np.array([blong + bshort, bshort, blong, 0,
                             blong + bshort, bshort, blong, 0]) - blong - bshort + bstart
    return pattern_alpha, pattern_beta

#############################

# A routine to return rough maximum PSF FWHM in arcsec for a given channel


def maxfwhm(channel):
    """Return a rough maximum PSF FWHM (arcsec) for MRS channel 1-4.

    Uses the maximum wavelength (microns) of the channel scaled by the
    0.31 arcsec FWHM at 8 microns; returns 0.0 for unknown channels.
    """
    # Maximum wavelength in microns for the channel
    wave = 0.
    if (channel == 1):
        wave = 8.0
    if (channel == 2):
        wave = 12.0
    if (channel == 3):
        wave = 18.0
    if (channel == 4):
        wave = 29.0
    return 0.31 * wave / 8.0

#############################

# A routine to recenter a given dither pattern within a particular channel FOV
# Must be passed SIAF structures for all 3 bands within a channel


def recenterFOV(pat_v2, pat_v3, siafA, siafB, siafC):
    """Shift a (v2, v3) pattern so its mean lands on the mean field center
    of the three bands of a channel."""
    # Average the three mean field positions
    v2_fieldmean = (siafA['inscr_v2ref'] + siafB['inscr_v2ref'] + siafC['inscr_v2ref']) / 3.
    v3_fieldmean = (siafA['inscr_v3ref'] + siafB['inscr_v3ref'] + siafC['inscr_v3ref']) / 3.
    v2_mean = np.mean(pat_v2)
    v3_mean = np.mean(pat_v3)
    newpat_v2 = pat_v2 - v2_mean + v2_fieldmean
    newpat_v3 = pat_v3 - v3_mean + v3_fieldmean
    return newpat_v2, newpat_v3

#############################

# A routine to recenter a given dither pattern with respect to a given channel reference point
# (which is not quite the same thing as centering wrt the FOV)
# Must be passed SIAF structure for a given band


def recenterRP(pat_v2, pat_v3, siaf):
    """Shift a (v2, v3) pattern so its mean lands on a band's reference point."""
    v2ref, v3ref = siaf['inscr_v2ref'], siaf['inscr_v3ref']
    v2_mean = np.mean(pat_v2)
    v3_mean = np.mean(pat_v3)
    newpat_v2 = pat_v2 - v2_mean + v2ref
    newpat_v3 = pat_v3 - v3_mean + v3ref
    return newpat_v2, newpat_v3

#############################

# Generate the commissioning Ch1 point-source patterns
# SIAF structures for the 3 bands must be passed in


def makepattern_ch1(siafA, siafB, siafC):
    """Return the (v2, v3) 8-point Ch1 point-source dither pattern."""
    # See if the pixel and slice sizes have already been calculated
    try:
        slicewidth
        pixsize
    # If not, calculate them.  Only NameError can be raised by the bare
    # name references above; the original bare `except:` hid real errors.
    except NameError:
        setsizes()

    pixsiz1 = pixsize[0]  # Ch1
    pixsiz = pixsize[0]  # Ch1
    slicesiz1 = slicewidth[0]  # Ch1
    slicesiz = slicewidth[0]  # Ch1

    along = 10.5 * pixsiz
    ashort = 1.5 * pixsiz
    astart = 0
    blong = 5.5 * slicesiz1
    bshort = 1.5 * slicesiz
    bstart = 0

    pat_a, pat_b = makepattern_generic(astart, along, ashort, bstart, blong, bshort)

    # Transform assuming input in Ch1A alpha-beta
    pat_v2, pat_v3 = mrst.abtov2v3(pat_a, pat_b, '1A')

    # Recenter the pattern within the field
    pat_v2, pat_v3 = recenterFOV(pat_v2, pat_v3, siafA, siafB, siafC)

    return pat_v2, pat_v3

#############################

# Generate the commissioning Ch2 point-source patterns
# SIAF structures for the 3 bands must be passed in


def makepattern_ch2(siafA, siafB, siafC):
    """Return the (v2, v3) 8-point Ch2 point-source dither pattern."""
    # See if the pixel and slice sizes have already been calculated
    try:
        slicewidth
        pixsize
    # If not, calculate them (NameError is the only failure mode here)
    except NameError:
        print('Recalculating pixel/slice sizes')
        setsizes()

    pixsiz1 = pixsize[0]  # Ch1
    pixsiz = pixsize[1]  # Ch2
    slicesiz1 = slicewidth[0]  # Ch1
    slicesiz = slicewidth[1]  # Ch2

    along = 10.5 * pixsiz
    ashort = 1.5 * pixsiz
    astart = 0
    blong = 8.5 * slicesiz
    bshort = 1.5 * slicesiz
    bstart = 0

    pat_a, pat_b = makepattern_generic(astart, along, ashort, bstart, blong, bshort)

    # Transform assuming input pattern in Ch2A alpha-beta
    pat_v2, pat_v3 = mrst.abtov2v3(pat_a, pat_b, '2A')

    # Recenter the pattern within the field
    pat_v2, pat_v3 = recenterFOV(pat_v2, pat_v3, siafA, siafB, siafC)

    return pat_v2, pat_v3

#############################

# Generate the commissioning Ch3 point-source patterns
# SIAF structures for the 3 bands must be passed in


def makepattern_ch3(siafA, siafB, siafC):
    """Return the (v2, v3) 8-point Ch3 point-source dither pattern."""
    # See if the pixel and slice sizes have already been calculated
    try:
        slicewidth
        pixsize
    # If not, calculate them (NameError is the only failure mode here)
    except NameError:
        setsizes()

    pixsiz1 = pixsize[0]  # Ch1
    pixsiz = pixsize[2]  # Ch3
    slicesiz1 = slicewidth[0]  # Ch1
    slicesiz = slicewidth[2]  # Ch3

    along = 10.5 * pixsiz
    ashort = 1.5 * pixsiz
    astart = 0
    # Long beta throw is defined in Ch1 slice widths (see module docstring)
    blong = 16.5 * slicesiz1
    bshort = 1.5 * slicesiz
    bstart = 0

    pat_a, pat_b = makepattern_generic(astart, along, ashort, bstart, blong, bshort)

    # Transform assuming input in Ch3A alpha-beta
    pat_v2, pat_v3 = mrst.abtov2v3(pat_a, pat_b, '3A')

    # Recenter the pattern within the field
    pat_v2, pat_v3 = recenterFOV(pat_v2, pat_v3, siafA, siafB, siafC)

    return pat_v2, pat_v3

#############################

# Generate the commissioning Ch4 point-source patterns
# SIAF structures for the 3 bands must be passed in


def makepattern_ch4(siafA, siafB, siafC):
    """Return the (v2, v3) 8-point Ch4 point-source dither pattern."""
    # See if the pixel and slice sizes have already been calculated
    try:
        slicewidth
        pixsize
    # If not, calculate them (NameError is the only failure mode here)
    except NameError:
        setsizes()

    pixsiz1 = pixsize[0]  # Ch1
    pixsiz = pixsize[3]  # Ch4
    slicesiz1 = slicewidth[0]  # Ch1
    slicesiz = slicewidth[3]  # Ch4

    along = 12.5 * pixsiz
    ashort = 1.5 * pixsiz
    astart = 0
    blong = 5.5 * slicesiz
    bshort = 1.5 * slicesiz
    bstart = 0

    pat_a, pat_b = makepattern_generic(astart, along, ashort, bstart, blong, bshort)

    # Transform assuming input in Ch4A alpha-beta
    pat_v2, pat_v3 = mrst.abtov2v3(pat_a, pat_b, '4A')

    # Recenter the pattern within the field
    pat_v2, pat_v3 = recenterFOV(pat_v2, pat_v3, siafA, siafB, siafC)

    return pat_v2, pat_v3

#############################

# Routine to generate the
extended-source pattern optimized for ALL channels def makepattern_ext_all(siafA,siafB,siafC): # See if the pixel and slice sizes have already been calculated try: slicewidth pixsize # If not, calculate them except: setsizes() # Ch3 and Ch4 are well-sampled in the pixel direction already, so optimize the along-slice # offset to be half-integer in Ch1 and Ch2 da=pixsize[0]*3/2.# Ch1 # Use the mathematically related slice widths in each channel to construct a half-integer # offset for all channels db=slicewidth[0]*5.5# Ch1 pat_a=np.array([-da/2.,da/2.,da/2.,-da/2.]) pat_b=np.array([db/2.,-db/2.,db/2.,-db/2.]) # Transform assuming input in Ch1A alpha-beta pat_v2,pat_v3=mrst.abtov2v3(pat_a,pat_b,'1A') # Recenter the pattern within the field pat_v2,pat_v3=recenterFOV(pat_v2,pat_v3,siafA,siafB,siafC) return pat_v2,pat_v3 ############################# # Routine to generate the extended-source pattern optimized for a given channel def makepattern_ext_ChX(ptpat_v2,ptpat_v3,siaf): # First dither pair; pull out short-dithers with parity = 1 temp1_v2=ptpat_v2[[0,2]] temp1_v3=ptpat_v3[[0,2]] # Recenter within field temp1_v2,temp1_v3=recenterRP(temp1_v2,temp1_v3,siaf) # Second dither pair; pull out short-dithers with parity = -1 temp2_v2=ptpat_v2[[4,6]] temp2_v3=ptpat_v3[[4,6]] # Recenter within field temp2_v2,temp2_v3=recenterRP(temp2_v2,temp2_v3,siaf) # Combine the dither pairs pat_v2,pat_v3=np.append(temp1_v2,temp2_v2),np.append(temp1_v3,temp2_v3) # And recenter the combined dithers pat_v2,pat_v3=recenterRP(pat_v2,pat_v3,siaf) return pat_v2,pat_v3 ############################# # Routine to convert fixed v2,v3 dither points into actual xideal,yideal offsets # relative to the fiducial reference point for a given channel # Must be passed the siafRP structure containing the reference point to be used, # and optionally the siaf1A structure used to define Ideal coordinates def compute_dxdyideal(pat_v2,pat_v3,siaf,**kwargs): v2ref,v3ref=siaf['inscr_v2ref'],siaf['inscr_v3ref'] # 
Ideal coordinate of the dither position x,y=mrst.v2v3_to_xyideal(pat_v2,pat_v3,**kwargs) # Ideal coordinate of the fiducial (undithered) point xref,yref=mrst.v2v3_to_xyideal(v2ref,v3ref,**kwargs) # Delta offsets dxidl=x-xref dyidl=y-yref return dxidl,dyidl ############################# # Routine to write results to a file formatted for the PRD def writeresults_prd(dxidl,dyidl,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') relfile='dithers/temp/MiriMrsDithers.txt' outfile=os.path.join(data_dir,relfile) else: relfile='MiriMrsDithers.txt' outfile=os.path.join(outdir,relfile) now=datetime.datetime.now() thisfile=__file__ _,thisfile=os.path.split(thisfile) # No header information is allowed, and specific names must be given for each set of points # which makes the file quite fragile print('CHANNEL1-POINT_SOURCE-NEGATIVE',file=open(outfile,"w")) for ii in range(0,4): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii+1,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL1-POINT_SOURCE-POSITIVE',file=open(outfile,"a")) for ii in range(4,8): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-3,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL2-POINT_SOURCE-NEGATIVE',file=open(outfile,"a")) for ii in range(8,12): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-7,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL2-POINT_SOURCE-POSITIVE',file=open(outfile,"a")) for ii in range(12,16): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-11,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL3-POINT_SOURCE-NEGATIVE',file=open(outfile,"a")) for ii in range(16,20): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-15,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) 
print('CHANNEL3-POINT_SOURCE-POSITIVE',file=open(outfile,"a")) for ii in range(20,24): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-19,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL4-POINT_SOURCE-NEGATIVE',file=open(outfile,"a")) for ii in range(24,28): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-23,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL4-POINT_SOURCE-POSITIVE',file=open(outfile,"a")) for ii in range(28,32): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-27,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('ALL-EXTENDED_SOURCE',file=open(outfile,"a")) for ii in range(32,36): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-31,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL1-EXTENDED_SOURCE',file=open(outfile,"a")) for ii in range(36,40): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-35,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL2-EXTENDED_SOURCE',file=open(outfile,"a")) for ii in range(40,44): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-39,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL3-EXTENDED_SOURCE',file=open(outfile,"a")) for ii in range(44,48): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-43,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('CHANNEL4-EXTENDED_SOURCE',file=open(outfile,"a")) for ii in range(48,52): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-47,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('DEDICATED-BG',file=open(outfile,"a")) for ii in range(52,56): print("{0:<3}{1:>10.6f} {2:>10.6f}".format(ii-51,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('SCAN-CALIBRATION',file=open(outfile,"a")) for ii in range(56,len(dxidl)): print("{0:<3}{1:>10.6f} 
{2:>10.6f}".format(ii-55,dxidl[ii],dyidl[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) # Log the operation is complete print('Successfully wrote PRD dithers file to ',relfile) ############################# # Routine to write results to a file formatted for mirisim def writeresults_mirisim(ch,v2,v3,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') relfile='dithers/temp/mrs_recommended_dither.dat' outfile=os.path.join(data_dir,relfile) else: relfile='mrs_recommended_dither.dat' outfile=os.path.join(outdir,relfile) now=datetime.datetime.now() thisfile=__file__ _,thisfile=os.path.split(thisfile) # Mirisim specifies dithers in alpha-beta, so we need to convert to that frame from v2-v3 # It looks like we should always use the 1A reference point and coordinate system, # even though this is strictly ill-defed outside 1A footprint ndither=len(v2) dalpha,dbeta=np.zeros(ndither),np.zeros(ndither) band=["" for ii in range(ndither)] for ii in range(0,ndither): thisch=ch[ii] # Reference point of this channel if (thisch == 1): band[ii]='1A' if (thisch == 2): band[ii]='1A' if (thisch == 3): band[ii]='1A' if (thisch == 4): band[ii]='1A' dalpha[ii],dbeta[ii]=mrst.v2v3toab(v2[ii],v3[ii],band[ii]) # As of mirisim 2.3.0 (ticket MIRI-677) we no longer need to invert dithers # to work properly with mirisim # Write header information to the output text file print('# Default MIRISim dither pattern for MRS.',file=open(outfile,"w")) print('#',file=open(outfile,"a")) print('# Created ',now.isoformat(),file=open(outfile,"a")) print('# Using program miricoord.',thisfile,file=open(outfile,"a")) print('#',file=open(outfile,"a")) print('# Offsets are defined in the MRS 1A channel-band field-of-view,',file=open(outfile,"a")) print('# and are tabulated as (alpha, beta) coordinates (in units of arcsec)',file=open(outfile,"a")) print('# relative to initial pointing center at (0, 
0).',file=open(outfile,"a")) print('#',file=open(outfile,"a")) print('',file=open(outfile,"a")) # Mirisim doesn't use column names, they are taken as a given # It also add comments before each set, so we'll need to break them up # Note that this makes the code less robust against changes! print('# Optimized for channel 1 point sources.',file=open(outfile,"a")) for ii in range(0,8): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for channel 2 point sources.',file=open(outfile,"a")) for ii in range(8,16): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for channel 3 point sources.',file=open(outfile,"a")) for ii in range(16,24): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for channel 4 point sources.',file=open(outfile,"a")) for ii in range(24,32): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for ALL channel extended sources.',file=open(outfile,"a")) for ii in range(32,36): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for channel 1 extended sources.',file=open(outfile,"a")) for ii in range(36,40): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for channel 2 extended sources.',file=open(outfile,"a")) for ii in range(40,44): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) print('# Optimized for channel 3 extended sources.',file=open(outfile,"a")) for ii in range(44,48): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) print('',file=open(outfile,"a")) 
print('# Optimized for channel 4 extended sources.',file=open(outfile,"a")) for ii in range(48,52): print("{0:>7.4f}, {1:>7.4f}".format(dalpha[ii],dbeta[ii]),file=open(outfile,"a")) # Log the operation is complete print('Successfully wrote mirisim dithers file to ',relfile) ############################# # Routine to write full results to a file def writeresults_full(index,ch,v2,v3,dxidl,dyidl,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') relfile='dithers/temp/mrs_dithers.txt' outfile=os.path.join(data_dir,relfile) else: relfile='mrs_dithers.txt' outfile=os.path.join(outdir,relfile) now=datetime.datetime.now() thisfile=__file__ _,thisfile=os.path.split(thisfile) # Save info in alpha-beta too ndither=len(v2) dalpha,dbeta=np.zeros(ndither),np.zeros(ndither) band=["" for ii in range(ndither)] for ii in range(0,ndither): thisch=ch[ii] # Reference point of this channel if (thisch == 1): band[ii]='1A' if (thisch == 2): band[ii]='2A' if (thisch == 3): band[ii]='3A' if (thisch == 4): band[ii]='4A' dalpha[ii],dbeta[ii]=mrst.v2v3toab(v2[ii],v3[ii],band[ii]) # Write header information to the output text file print('# Created ',now.isoformat(),file=open(outfile,"w")) print('# Using program',thisfile,file=open(outfile,"a")) # Column names print("{:<10} {:<8} {:<10} {:<10} {:<10} {:<10} {:<15} {:<15}".format('PosnIndex','Band','alpha','beta','V2','V3','dXIdeal','dYIdeal'),file=open(outfile,"a")) for i in range(0,len(index)): # Write information to a text file print("{0:<10} {1:<8} {2:<10.5f} {3:<10.5f} {4:<10.5f} {5:<10.5f} {6:<15.5f} {7:<15.5f}".format(index[i],band[i],dalpha[i],dbeta[i],v2[i],v3[i],dxidl[i],dyidl[i]),file=open(outfile,"a")) # Log the operation is complete print('Successfully wrote full dithers file to ',relfile) ############################# # Make assorted plots for JDox def make_jdox(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir='./'): 
qaplot_ptsourceloc(v2_all,v3_all,allsiaf,vertxt,outdir) qaplot_extsourceloc(v2_all,v3_all,allsiaf,vertxt,outdir) qaplot_ps4all(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) qaplot_ps2ch4(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) qaplot_ext2all(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) qaplot_ext4all(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) qaplot_ext2ch3(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) qaplot_ext4ch3(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) qaplot_bg(v2_all,v3_all,dx_all,dy_all,allsiaf,vertxt,outdir) ############################# # Plot showing the location of the point-source dithers def qaplot_ptsourceloc(v2,v3,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_pt.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(-497.4,-509.4) plt.ylim(-325.8,-313.8) plt.plot(siaf1A['inscr_v2_corners'],siaf1A['inscr_v3_corners'],color='b',linewidth=1.5,label='Ch1') plt.plot(siaf1B['inscr_v2_corners'],siaf1B['inscr_v3_corners'],color='b',linewidth=1.5,linestyle='--') plt.plot(siaf1C['inscr_v2_corners'],siaf1C['inscr_v3_corners'],color='b',linewidth=1.5,linestyle=':') plt.plot(siaf2A['inscr_v2_corners'],siaf2A['inscr_v3_corners'],color='g',linewidth=1.5,label='Ch2') plt.plot(siaf2B['inscr_v2_corners'],siaf2B['inscr_v3_corners'],color='g',linewidth=1.5,linestyle='--') plt.plot(siaf2C['inscr_v2_corners'],siaf2C['inscr_v3_corners'],color='g',linewidth=1.5,linestyle=':') plt.plot(siaf3A['inscr_v2_corners'],siaf3A['inscr_v3_corners'],color='gold',linewidth=1.5,label='Ch3') plt.plot(siaf3B['inscr_v2_corners'],siaf3B['inscr_v3_corners'],color='gold',linewidth=1.5,linestyle='--') plt.plot(siaf3C['inscr_v2_corners'],siaf3C['inscr_v3_corners'],color='gold',linewidth=1.5,linestyle=':') plt.plot(siaf4A['inscr_v2_corners'],siaf4A['inscr_v3_corners'],color='r',linewidth=1.5,label='Ch4') plt.plot(siaf4B['inscr_v2_corners'],siaf4B['inscr_v3_corners'],color='r',linewidth=1.5,linestyle='--') plt.plot(siaf4C['inscr_v2_corners'],siaf4C['inscr_v3_corners'],color='r',linewidth=1.5,linestyle=':') plt.plot(v2[0:8],v3[0:8],'+',color='b',linewidth=1.5) plt.plot(v2[8:16],v3[8:16],'+',color='g') plt.plot(v2[16:24],v3[16:24],'+',color='gold') plt.plot(v2[24:32],v3[24:32],'+',color='r') plt.xlabel('V2 (arcsec)') plt.ylabel('V3 (arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing the location of the extended-source dithers def qaplot_extsourceloc(v2,v3,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output 
filename filename=os.path.join(outdir,'dithers_ext.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) plt.minorticks_on() plt.xlim(-497.4,-509.4) plt.ylim(-325.8,-313.8) plt.plot(siaf1A['inscr_v2_corners'],siaf1A['inscr_v3_corners'],color='b',linewidth=1.5,label='Ch1') plt.plot(siaf1B['inscr_v2_corners'],siaf1B['inscr_v3_corners'],color='b',linewidth=1.5,linestyle='--') plt.plot(siaf1C['inscr_v2_corners'],siaf1C['inscr_v3_corners'],color='b',linewidth=1.5,linestyle=':') plt.plot(siaf2A['inscr_v2_corners'],siaf2A['inscr_v3_corners'],color='g',linewidth=1.5,label='Ch2') plt.plot(siaf2B['inscr_v2_corners'],siaf2B['inscr_v3_corners'],color='g',linewidth=1.5,linestyle='--') plt.plot(siaf2C['inscr_v2_corners'],siaf2C['inscr_v3_corners'],color='g',linewidth=1.5,linestyle=':') plt.plot(siaf3A['inscr_v2_corners'],siaf3A['inscr_v3_corners'],color='gold',linewidth=1.5,label='Ch3') plt.plot(siaf3B['inscr_v2_corners'],siaf3B['inscr_v3_corners'],color='gold',linewidth=1.5,linestyle='--') plt.plot(siaf3C['inscr_v2_corners'],siaf3C['inscr_v3_corners'],color='gold',linewidth=1.5,linestyle=':') plt.plot(siaf4A['inscr_v2_corners'],siaf4A['inscr_v3_corners'],color='r',linewidth=1.5,label='Ch4') plt.plot(siaf4B['inscr_v2_corners'],siaf4B['inscr_v3_corners'],color='r',linewidth=1.5,linestyle='--') plt.plot(siaf4C['inscr_v2_corners'],siaf4C['inscr_v3_corners'],color='r',linewidth=1.5,linestyle=':') plt.plot(v2[32:36],v3[32:36],'+',color='black',linewidth=1.5) plt.plot(v2[36:40],v3[36:40],'+',color='b',linewidth=1.5) 
plt.plot(v2[40:44],v3[40:44],'+',color='g') plt.plot(v2[44:48],v3[44:48],'+',color='gold') plt.plot(v2[48:52],v3[48:52],'+',color='r') plt.xlabel('V2 (arcsec)') plt.ylabel('V3 (arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 4-pt ALL point-source dither def qaplot_ps4all(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_ps4ALL.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf1A['inscr_v2ref'],siaf1A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(6,-6) plt.ylim(-6,6) plt.plot(v2corn_1A+dx[0],v3corn_1A-dy[0],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[1],v3corn_1A-dy[1],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[2],v3corn_1A-dy[2],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[3],v3corn_1A-dy[3],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[0],v3corn_2A-dy[0],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[1],v3corn_2A-dy[1],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[2],v3corn_2A-dy[2],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[3],v3corn_2A-dy[3],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[0],v3corn_3A-dy[0],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[1],v3corn_3A-dy[1],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[2],v3corn_3A-dy[2],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[3],v3corn_3A-dy[3],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[0],v3corn_4A-dy[0],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[1],v3corn_4A-dy[1],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[2],v3corn_4A-dy[2],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[3],v3corn_4A-dy[3],color='r',linewidth=1.2) plt.plot(0,0,'x',linewidth=1.5,color='black') circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) plt.plot(v2[0:4]-v2ref,v3[0:4]-v3ref,'+',color='b',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. 
(arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(1,5,'ALL, 4-PT, point source') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 2-pt Ch4 point-source dither def qaplot_ps2ch4(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_ps2ch4.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf4A['inscr_v2ref'],siaf4A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(8,-8) plt.ylim(-8,8) plt.plot(v2corn_1A+dx[24],v3corn_1A-dy[24],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[25],v3corn_1A-dy[25],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[24],v3corn_2A-dy[24],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[25],v3corn_2A-dy[25],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[24],v3corn_3A-dy[24],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[25],v3corn_3A-dy[25],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[24],v3corn_4A-dy[24],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[25],v3corn_4A-dy[25],color='r',linewidth=1.2) plt.plot(0,0,'x',linewidth=1.5,color='black') circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) plt.plot(v2[24:26]-v2ref,v3[24:26]-v3ref,'+',color='r',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. 
(arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(7,7,'Ch4, 2-PT, point source') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 2-pt ALL extended-source dither def qaplot_ext2all(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_ext2all.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf1A['inscr_v2ref'],siaf1A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(6,-6) plt.ylim(-6,6) plt.plot(v2corn_1A+dx[32],v3corn_1A-dy[32],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[33],v3corn_1A-dy[33],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[32],v3corn_2A-dy[32],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[33],v3corn_2A-dy[33],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[32],v3corn_3A-dy[32],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[33],v3corn_3A-dy[33],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[32],v3corn_4A-dy[32],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[33],v3corn_4A-dy[33],color='r',linewidth=1.2) plt.plot(0,0,'x',linewidth=1.5,color='black') circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) plt.plot(v2[32:34]-v2ref,v3[32:34]-v3ref,'+',color='b',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. 
(arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(5,5,'ALL, 2-PT, extended source') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 4-pt ALL extended-source dither def qaplot_ext4all(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_ext4all.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf1A['inscr_v2ref'],siaf1A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(6,-6) plt.ylim(-6,6) plt.plot(v2corn_1A+dx[32],v3corn_1A-dy[32],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[33],v3corn_1A-dy[33],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[34],v3corn_1A-dy[34],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[35],v3corn_1A-dy[35],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[32],v3corn_2A-dy[32],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[33],v3corn_2A-dy[33],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[34],v3corn_2A-dy[34],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[35],v3corn_2A-dy[35],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[32],v3corn_3A-dy[32],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[33],v3corn_3A-dy[33],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[34],v3corn_3A-dy[34],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[35],v3corn_3A-dy[35],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[32],v3corn_4A-dy[32],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[33],v3corn_4A-dy[33],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[34],v3corn_4A-dy[34],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[35],v3corn_4A-dy[35],color='r',linewidth=1.2) plt.plot(0,0,'x',linewidth=1.5,color='black') circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) plt.plot(v2[32:36]-v2ref,v3[32:36]-v3ref,'+',color='b',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. 
(arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(5,5,'ALL, 4-PT, extended source') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 2-pt Ch3 extended-source dither def qaplot_ext2ch3(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_ext2ch3.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf3A['inscr_v2ref'],siaf3A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(6,-6) plt.ylim(-6,6) plt.plot(v2corn_1A+dx[44],v3corn_1A-dy[44],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[45],v3corn_1A-dy[45],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[44],v3corn_2A-dy[44],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[45],v3corn_2A-dy[45],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[44],v3corn_3A-dy[44],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[45],v3corn_3A-dy[45],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[44],v3corn_4A-dy[44],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[45],v3corn_4A-dy[45],color='r',linewidth=1.2) plt.plot(0,0,'x',linewidth=1.5,color='black') circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) plt.plot(v2[44:46]-v2ref,v3[44:46]-v3ref,'+',color='gold',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. 
(arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(5,5,'Ch3, 2-PT, extended source') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 4-pt Ch3 extended-source dither def qaplot_ext4ch3(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_ext4ch3.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf3A['inscr_v2ref'],siaf3A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(6,-6) plt.ylim(-6,6) plt.plot(v2corn_1A+dx[44],v3corn_1A-dy[44],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[45],v3corn_1A-dy[45],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[46],v3corn_1A-dy[46],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[47],v3corn_1A-dy[47],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[44],v3corn_2A-dy[44],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[45],v3corn_2A-dy[45],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[46],v3corn_2A-dy[46],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[47],v3corn_2A-dy[47],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[44],v3corn_3A-dy[44],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[45],v3corn_3A-dy[45],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[46],v3corn_3A-dy[46],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[47],v3corn_3A-dy[47],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[44],v3corn_4A-dy[44],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[45],v3corn_4A-dy[45],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[46],v3corn_4A-dy[46],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[47],v3corn_4A-dy[47],color='r',linewidth=1.2) plt.plot(0,0,'x',linewidth=1.5,color='black') circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) ax.add_artist(circle1) plt.plot(v2[44:48]-v2ref,v3[44:48]-v3ref,'+',color='gold',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. 
(arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(5,5,'Ch3, 4-PT, extended source') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot showing field coverage of a 4-pt Background dither def qaplot_bg(v2,v3,dx,dy,allsiaf,vertxt,outdir=''): # Set the default output data directory if it was not provided if (outdir == ''): data_dir=os.path.expandvars('$MIRICOORD_DATA_DIR') outdir=os.path.join(data_dir,'dithers/temp/') # Set the output filename filename=os.path.join(outdir,'dithers_bg.pdf') now=datetime.datetime.now() nowstring=now.ctime() nowstring=nowstring[4:8]+nowstring[20:24] # Field locations siaf1A=allsiaf[0] siaf1B=allsiaf[1] siaf1C=allsiaf[2] siaf2A=allsiaf[3] siaf2B=allsiaf[4] siaf2C=allsiaf[5] siaf3A=allsiaf[6] siaf3B=allsiaf[7] siaf3C=allsiaf[8] siaf4A=allsiaf[9] siaf4B=allsiaf[10] siaf4C=allsiaf[11] # Recenter everything to be based around zero v2ref,v3ref=siaf3A['inscr_v2ref'],siaf3A['inscr_v3ref'] v2corn_1A=siaf1A['inscr_v2_corners']-v2ref v3corn_1A=siaf1A['inscr_v3_corners']-v3ref v2corn_2A=siaf2A['inscr_v2_corners']-v2ref v3corn_2A=siaf2A['inscr_v3_corners']-v3ref v2corn_3A=siaf3A['inscr_v2_corners']-v2ref v3corn_3A=siaf3A['inscr_v3_corners']-v3ref v2corn_4A=siaf4A['inscr_v2_corners']-v2ref v3corn_4A=siaf4A['inscr_v3_corners']-v3ref # Plot thickness mpl.rcParams['axes.linewidth'] = 1.5 plt.figure(figsize=(5,5),dpi=150) ax = plt.gca() plt.tick_params(axis='both',direction='in',which='both',top=True,right=True,width=2.) 
plt.minorticks_on() plt.xlim(15,-15) plt.ylim(-15,15) plt.plot(v2corn_1A+dx[52],v3corn_1A-dy[52],color='b',linewidth=1.2,label='Ch1') plt.plot(v2corn_1A+dx[53],v3corn_1A-dy[53],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[54],v3corn_1A-dy[54],color='b',linewidth=1.2) plt.plot(v2corn_1A+dx[55],v3corn_1A-dy[55],color='b',linewidth=1.2) plt.plot(v2corn_2A+dx[52],v3corn_2A-dy[52],color='g',linewidth=1.2,label='Ch2') plt.plot(v2corn_2A+dx[53],v3corn_2A-dy[53],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[54],v3corn_2A-dy[54],color='g',linewidth=1.2) plt.plot(v2corn_2A+dx[55],v3corn_2A-dy[55],color='g',linewidth=1.2) plt.plot(v2corn_3A+dx[52],v3corn_3A-dy[52],color='gold',linewidth=1.2,label='Ch3') plt.plot(v2corn_3A+dx[53],v3corn_3A-dy[53],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[54],v3corn_3A-dy[54],color='gold',linewidth=1.2) plt.plot(v2corn_3A+dx[55],v3corn_3A-dy[55],color='gold',linewidth=1.2) plt.plot(v2corn_4A+dx[52],v3corn_4A-dy[52],color='r',linewidth=1.2,label='Ch4') plt.plot(v2corn_4A+dx[53],v3corn_4A-dy[53],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[54],v3corn_4A-dy[54],color='r',linewidth=1.2) plt.plot(v2corn_4A+dx[55],v3corn_4A-dy[55],color='r',linewidth=1.2) #plt.plot(0,0,'x',linewidth=1.5,color='black') #circle1 = mpl.patches.Circle((0., 0.), maxfwhm(1),linewidth=1,edgecolor='b', facecolor=(0, 0, 0, .0125)) #ax.add_artist(circle1) #circle1 = mpl.patches.Circle((0., 0.), maxfwhm(4),linewidth=1,edgecolor='r', facecolor=(0, 0, 0, .0125)) #ax.add_artist(circle1) #plt.plot(v2[52:56]-v2ref,v3[52:56]-v3ref,'+',color='blue',linewidth=1.5) plt.xlabel('$\Delta$ R.A. (arcsec)') plt.ylabel('$\Delta$ Decl. (arcsec)') plt.title('MRS Dithers: Flight '+vertxt+' ('+nowstring+')') plt.text(14,12,'Background') plt.legend() plt.savefig(filename) #plt.show() plt.close() ############################# # Plot the pt-source points for a given channel with field bound in ab space # Plot points in v2/v3 space
"""Beginner Python tutorial: variables, conditionals, and a small function."""


def compare(n1="x", n2="x"):
    """Compare two numbers and report which is smaller.

    Any argument left at its sentinel default "x" is read interactively
    from stdin and converted to float.

    Returns:
        "less" if n1 < n2, "more" if n1 > n2, "idk bruh" if equal,
        or None when the input could not be interpreted as a number.
        (The verdict is also printed, preserving the original behavior.)
    """
    try:
        # Prompt only for the values the caller did not supply.
        if n1 == "x" and n2 == "x":
            n1 = float(input("enter a number: "))
            n2 = float(input("enter a number: "))
        elif n1 == "x":
            n1 = float(input("enter a number: "))
        elif n2 == "x":
            n2 = float(input("enter a number: "))
        print('n1 =', type(n1))
        print('n2 =', type(n2))
        if n1 < n2:
            result = "less"
        elif n1 > n2:
            result = "more"
        else:
            result = "idk bruh"
        print(result)
        return result
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
    # and genuine bugs): ValueError covers a failed float() conversion,
    # TypeError covers comparing an unconvertible non-"x" argument.
    except (ValueError, TypeError):
        print("that's not a number retard")
        return None


## A string (str) is anything in quotes: "this is a string".
## An int is a whole signed number (1, -2); a float has a decimal point.
## All input() results are strings, so they must be converted before comparing.

if __name__ == '__main__':
    # Demo code is guarded so importing this module does not read stdin.
    print('kajfkldj')

    ## Name the variable, then set it with =
    myName = "ava"
    up = 2
    low = 2.4
    isTrue = True

    ## `=` means "set this to"; `==` means "are these two equal".
    ## The indented body runs only when the condition evaluates true.
    if myName == "ava":
        print('My name is Ava')
        print('isTrue =', isTrue)
    if low < up:
        isTrue = False
        print('isTrue =', isTrue)

    n3 = 5
    n4 = 10
    compare()
    compare(n3)
    compare(n3, n4)
from ..models import Snippet, Vote
import tornado.web

from addons import route
from apps.base import BaseHandler


@route('/snippets/vote/(?P<guid>[^/]+)')
class SnippetVoteHandler(BaseHandler):
    """Record the current user's vote on a snippet identified by its GUID."""

    def get(self, guid=None):
        # Only authenticated users may vote; hide the endpoint with a 404
        # rather than a 403 for anonymous requests.
        if not self.current_user:
            raise tornado.web.HTTPError(404)

        # NOTE(review): get_argument returns a string; 'vote' is stored
        # without conversion — confirm Vote.value accepts/coerces strings.
        vote = self.get_argument('vote', 0)
        snippet = Snippet.objects.get(guid=guid)

        # One vote per (snippet, user): fetch an existing Vote or create one.
        obj, created = Vote.objects.get_or_create(snippet=snippet, user=self.current_user)
        # NOTE(review): when the Vote was just created, `value` is never set
        # to the submitted vote — presumably get_or_create's default applies;
        # verify this is intentional and not a lost first vote.
        if not created:
            obj.value = vote
            obj.save()

        # Respond with the snippet's updated aggregate vote count.
        self.finish({'count': obj.snippet.vote_count });
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args


class CrystalToolkitScene(Component):
    """A CrystalToolkitScene component.

    Keyword arguments:
    - children (optional): First child will be rendered as the settings panel.
      Second child will be rendered as the bottom panel (legend).
    - id (optional): The ID used to identify this component in Dash callbacks.
    - animation (optional): Animation type.
    - axisView (optional): Where is the axis displayed
      ('NW' / 'NE' / 'SE' / 'SW' / 'HIDDEN').
    - className (optional): Class name that will wrap around the whole scene
      component. When enlarged, this class name is applied to the
      modal-content element.
    - currentCameraState (optional): THIS PROP IS SET AUTOMATICALLY
      Object that maintains the current state of the camera. e.g.
      { position: {x: 0, y: 0, z: 0}, quarternion: {x: 0, y: 0, z: 0, w: 0},
      zoom: 1, setByComponentId: "1", following: True }.
    - customCameraState (optional): Object for setting the scene to a custom
      camera state. When modified, the camera will update to new custom state.
      e.g. { position: {x: 0, y: 0, z: 0},
      quarternion: {x: 0, y: 0, z: 0, w: 0}, (optional) zoom: 1 (optional) }.
    - data (optional): Simple3DScene JSON; the easiest way to generate this is
      to use the Scene class in crystal_toolkit.core.scene and its to_json
      method.
    - debug (optional): Display a debug view.
    - fileOptions (optional): List of options to show in file download
      dropdown.
    - fileTimestamp (optional): THIS PROP IS SET AUTOMATICALLY
      Date string that represents the time fileType was set. Use this prop in
      dash callbacks to trigger file downloads.
    - fileType (optional): THIS PROP IS SET AUTOMATICALLY
      The last file type clicked in the file download menu.
    - imageData (optional): THIS PROP IS SET AUTOMATICALLY
      Data string for the image generated on image button click.
    - imageDataTimestamp (optional): THIS PROP IS SET AUTOMATICALLY
      Date string that represents the time imageData was set. Use this prop in
      dash callbacks to trigger downloads of imageData.
    - imageType (optional): File type to be downloaded as an image. Either png
      or dae.
    - inletPadding (optional): Padding of the axis view.
    - inletSize (optional): Size of the axis view.
    - sceneSize (optional): Width / Height of the displayed component.
    - selectedObject (optional): This points to the last clicked objects. Use
      it in your dash callback to know which objects are currently selected in
      the scene.
    - setProps (optional): Dash-assigned callback that should be called
      whenever any of the properties change.
    - settings (optional): Options used for generating scene. Supported
      options and their defaults are given as follows:
      {
        antialias: True, // set to False to improve performance
        renderer: 'webgl', // 'svg' also an option, used for unit testing
        transparentBackground: False, // transparent background
        background: '#ffffff', // background color if not transparent,
        sphereSegments: 32, // decrease to improve performance
        cylinderSegments: 16, // decrease to improve performance
        staticScene: True, // disable if animation required
        defaultZoom: 1, // 1 will zoom to fit object exactly, <1 will add
                        // padding between object and box bounds
        zoomToFit2D: False, // if True, will zoom to fit object only along
                            // the X and Y axes (not Z)
        extractAxis: False, // will remove the axis from the main scene
        isMultiSelectionEnabled: False, // allow to use shift to select,
        secondaryObjectView: True, // show the selected object in a detail view
        animation: 'play' | 'slider' | 'none' // choose which style of
                                              // animation is used
      }
      There are several additional options used for debugging and testing;
      please consult the source code directly for these.
    - showControls (optional)
    - showExpandButton (optional)
    - showExportButton (optional)
    - showImageButton (optional)
    - showPositionButton (optional)
    - toggleVisibility (optional): Hide/show nodes in scene by its name (key),
      value is 1 to show the node and 0 to hide it."""
    # Dash component metadata: no child-component props, standard 'children'
    # node, and the JS namespace/type this Python class is generated from.
    _children_props = []
    _base_nodes = ['children']
    _namespace = 'dash_mp_components'
    _type = 'CrystalToolkitScene'

    @_explicitize_args
    def __init__(self, children=None, id=Component.UNDEFINED, className=Component.UNDEFINED, data=Component.UNDEFINED, settings=Component.UNDEFINED, toggleVisibility=Component.UNDEFINED, imageType=Component.UNDEFINED, imageData=Component.UNDEFINED, imageDataTimestamp=Component.UNDEFINED, fileOptions=Component.UNDEFINED, fileType=Component.UNDEFINED, fileTimestamp=Component.UNDEFINED, selectedObject=Component.UNDEFINED, sceneSize=Component.UNDEFINED, axisView=Component.UNDEFINED, inletSize=Component.UNDEFINED, inletPadding=Component.UNDEFINED, debug=Component.UNDEFINED, animation=Component.UNDEFINED, currentCameraState=Component.UNDEFINED, customCameraState=Component.UNDEFINED, showControls=Component.UNDEFINED, showExpandButton=Component.UNDEFINED, showImageButton=Component.UNDEFINED, showExportButton=Component.UNDEFINED, showPositionButton=Component.UNDEFINED, **kwargs):
        # Names of all props this component accepts, in the order Dash expects.
        self._prop_names = ['children', 'id', 'animation', 'axisView', 'className', 'currentCameraState', 'customCameraState', 'data', 'debug', 'fileOptions', 'fileTimestamp', 'fileType', 'imageData', 'imageDataTimestamp', 'imageType', 'inletPadding', 'inletSize', 'sceneSize', 'selectedObject', 'setProps', 'settings', 'showControls', 'showExpandButton', 'showExportButton', 'showImageButton', 'showPositionButton', 'toggleVisibility']
        self._valid_wildcard_attributes = []
        self.available_properties = ['children', 'id', 'animation', 'axisView', 'className', 'currentCameraState', 'customCameraState', 'data', 'debug', 'fileOptions', 'fileTimestamp', 'fileType', 'imageData', 'imageDataTimestamp', 'imageType', 'inletPadding', 'inletSize', 'sceneSize', 'selectedObject', 'setProps', 'settings', 'showControls', 'showExpandButton', 'showExportButton', 'showImageButton', 'showPositionButton', 'toggleVisibility']
        self.available_wildcard_properties = []
        # _explicitize_args records which keyword args the caller actually
        # passed; only those are forwarded to the base Component.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs and excess named props
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}

        # This component declares no required props, so the loop is empty;
        # it is kept by the generator for uniformity across components.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')

        super(CrystalToolkitScene, self).__init__(children=children, **args)
import i2c_lib
import lcddriver
import sys


def main():
    """Read multi-line text from stdin and display it on an I2C LCD.

    Loops forever: prompts the user, reads lines until EOF (Ctrl+D),
    clears the display, then writes each line to successive LCD rows
    (rows are 1-indexed by the lcddriver API).
    """
    lcd = lcddriver.lcd()
    while True:
        print("Escribe un texto por líneas de 20 caracteres y pulsa Enter seguido de Ctrl+D")
        msg = sys.stdin.readlines()
        lcd.lcd_clear()
        # enumerate replaces the range(len(...)) index loop; rows start at 1.
        for row, line in enumerate(msg, start=1):
            lcd.lcd_display_string(line.replace("\n", ""), row)


if __name__ == '__main__':
    main()
from mnmt.inputter import ModuleArgsFeeder
import torch
from typing import List


class ArgsFeeder:
    """Bundle of all training/inference hyperparameters and I/O settings.

    Pure configuration container: the constructor only stores its arguments
    (plus the auto-detected torch device) as attributes for later use.
    """

    def __init__(self, encoder_args_feeder: ModuleArgsFeeder, decoder_args_feeders: List[ModuleArgsFeeder],
                 batch_size: int, src_pad_idx: int, trg_pad_idx: int,
                 optim_choice: str, learning_rate: float, decay_patience: int, lr_decay_factor: float,
                 valid_criterion: str, early_stopping_patience: int,
                 total_epochs: int, report_interval: int, exp_num: int,
                 multi_task_ratio, valid_out_path, test_out_path, data_container, src_lang, trg_lang,
                 auxiliary_name=None, quiet_translate=True, beam_size=1, trg_eos_idx=None):
        """
        Args:
            encoder_args_feeder (ModuleArgsFeeder): encoder module configuration
            decoder_args_feeders (List[ModuleArgsFeeder]): one configuration per decoder
            batch_size (int): number of samples in a batch
            src_pad_idx (int): padding token index in the source vocabulary
            trg_pad_idx (int): padding token index in the target vocabulary
            optim_choice (str): "Adam", "SGD", etc.
            learning_rate (float): initial optimizer learning rate
            decay_patience (int): epochs without improvement before LR decay
            lr_decay_factor (float): multiplicative LR decay factor
            valid_criterion (str): "ACC" or "LOSS"
            early_stopping_patience (int): epochs without improvement before stopping
            total_epochs (int): maximum number of training epochs
            report_interval (int): batches between progress reports
            exp_num (int): experiment number (used for bookkeeping)
            multi_task_ratio: weighting between main and auxiliary tasks
            valid_out_path: path for validation translation output
            test_out_path: path for test translation output
            data_container: object holding the datasets/iterators
            src_lang: source language field/name
            trg_lang: target language field/name
            auxiliary_name: name of the auxiliary task, if any
            quiet_translate (bool): suppress per-sample translation printing
            beam_size (int): beam width for decoding (1 = greedy)
            trg_eos_idx: end-of-sequence index, used to end beam search early
        """
        self.encoder_args_feeder = encoder_args_feeder
        self.decoder_args_feeders = decoder_args_feeders
        # Prefer GPU when available; all modules should be moved to this device.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print("The current device for PyTorch is {}".format(self.device))
        self.batch_size = batch_size
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        # For the optimizer
        self.learning_rate = learning_rate
        self.decay_patience = decay_patience
        self.optim_choice = optim_choice
        self.lr_decay_factor = lr_decay_factor
        self.valid_criterion = valid_criterion
        # For early stopping
        self.early_stopping_patience = early_stopping_patience
        # General settings
        self.total_epochs = total_epochs
        self.report_interval = report_interval
        self.exp_num = exp_num
        self.multi_task_ratio = multi_task_ratio
        self.valid_out_path = valid_out_path
        self.test_out_path = test_out_path
        # For data container
        self.data_container = data_container
        self.src_lang = src_lang
        self.trg_lang = trg_lang
        self.auxiliary_name = auxiliary_name
        self.quiet_translate = quiet_translate
        # For beam-search
        self.beam_size = beam_size
        # For early ending of searching
        self.trg_eos_idx = trg_eos_idx
"""Insert default meeting. Revision ID: 1d70ecd3db0d Revises: 29ecac35d8b2 Create Date: 2014-12-16 18:24:44.906930 """ # revision identifiers, used by Alembic. revision = '1d70ecd3db0d' down_revision = '29ecac35d8b2' from alembic import op from datetime import date from mrt.models import Meeting, MeetingType, Translation translation_table = Translation.__table__ meeting_type_table = MeetingType.__table__ meeting_table = Meeting.__table__ def upgrade(): conn = op.get_bind() res = conn.execute( translation_table.insert().values({'english': 'Default Meeting'}) ) [title_id] = res.inserted_primary_key res = conn.execute( meeting_type_table.insert().values( {'slug': 'def', 'label': 'Default Meeting', 'default': True}) ) [meeting_type] = res.inserted_primary_key conn.execute( meeting_table.insert().values({ 'title_id': title_id, 'badge_header_id': title_id, 'acronym': op.inline_literal('DEFAULT'), 'date_start': op.inline_literal(date.today().isoformat()), 'date_end': op.inline_literal(date.today().isoformat()), 'venue_city_id': op.inline_literal(title_id), 'venue_country': op.inline_literal('RO'), 'meeting_type': op.inline_literal(meeting_type), }) ) def downgrade(): conn = op.get_bind() sel = ( meeting_type_table.select() .where(meeting_type_table.c.default == op.inline_literal(True)) .with_only_columns(['slug']) ) [default_meeting_type] = conn.execute(sel).fetchone() sel = ( meeting_table.select() .where(meeting_table.c.meeting_type == op.inline_literal(default_meeting_type)) .with_only_columns(['title_id']) ) [title_id] = conn.execute(sel).fetchone() conn.execute( meeting_table.delete() .where(meeting_table.c.meeting_type == op.inline_literal(default_meeting_type)) ) conn.execute( translation_table.delete() .where(translation_table.c.id == op.inline_literal(title_id)) ) conn.execute( meeting_type_table.delete() .where(meeting_type_table.c.slug == op.inline_literal(default_meeting_type)) )
import numpy as np
from scipy.spatial.transform import Rotation
import matplotlib.pyplot as plt
from scipy.spatial import distance_matrix

# coordinates = np.zeros((10,3))
# coordinates[:,:2]= 30*np.random.randn(10,2)


def get_predicted(coordinates, noise=.2):
    """Recover point coordinates from a noise-perturbed Gram matrix.

    Translates the points so index 0 is the origin, adds symmetric Gaussian
    noise to the Gram matrix, reconstructs coordinates from the top
    eigenpairs, and aligns them back to the true frame.

    Returns (x_true, y_true, x_new, y_new, loss) where loss is the
    Rotation.align_vectors residual.
    """
    X = coordinates - coordinates[0]          # point 0 becomes the origin
    x_true, y_true, z_true = X.transpose()
    M = np.inner(X, X)                        # Gram matrix of the points
    perturbation = noise * np.random.randn(*M.shape)
    perturbation = (perturbation + perturbation.transpose()) / 2  # keep symmetric
    w, v = np.linalg.eigh(M + perturbation)
    size = len(w)
    nd = coordinates.shape[1]
    # eigh returns ascending eigenvalues; take the nd largest, descending.
    vevs = np.arange(size - nd, size)[::-1]
    x_v = v[:, vevs]
    sqrt_s = np.expand_dims(np.sqrt(w[vevs]), 0)
    x_v = sqrt_s * x_v
    R, loss = Rotation.align_vectors(x_v, X)
    x_new, y_new, z_new = R.apply(x_v, inverse=True).transpose()
    return x_true, y_true, x_new, y_new, loss


def get_predicted_distance(coordinates, noise=0):
    """Same recovery as get_predicted, but the Gram matrix is rebuilt from
    squared pairwise distances (double-centering against point 0).

    Returns (x_true, y_true, x_new, y_new, D, D2, loss).
    """
    X = coordinates - coordinates[0]
    x_true, y_true, z_true = X.transpose()
    D = distance_matrix(coordinates, coordinates)
    D2 = D ** 2
    # BUG FIX: the loops were hard-coded to range(10); use the actual number
    # of points so any input size works.
    n = len(coordinates)
    M = np.zeros_like(D2)
    for i in range(n):
        for j in range(n):
            M[i, j] = (D2[0, j] + D2[i, 0] - D2[i, j]) / 2
            M[j, i] = M[i, j]
    perturbation = noise * np.random.randn(*M.shape)
    perturbation = (perturbation + perturbation.transpose()) / 2
    w, v = np.linalg.eigh(M + perturbation)
    size = len(w)
    nd = coordinates.shape[1]
    vevs = np.arange(size - nd, size)[::-1]
    x_v = v[:, vevs]
    sqrt_s = np.expand_dims(np.sqrt(w[vevs]), 0)
    x_v = sqrt_s * x_v
    R, loss = Rotation.align_vectors(x_v, X)
    x_new, y_new, z_new = R.apply(x_v, inverse=True).transpose()
    return x_true, y_true, x_new, y_new, D, D2, loss


if __name__ == "__main__":
    # Sweep the noise level and plot mean alignment loss.
    noise_base = 30
    coordinates = np.zeros((10, 3))
    coordinates[:, :2] = noise_base * np.random.randn(10, 2)
    losses_a = []
    noise_vals = []
    increment = .2
    for i in range(200):
        losses_b = []
        # NOTE(review): values are divided by noise_base here AND by 30 again
        # at plot time below -- looks like a double normalization; confirm.
        noise_vals.append((i * increment) / noise_base)
        for _ in range(50):
            x_true, y_true, x_new, y_new, loss = get_predicted(coordinates, noise=i * increment)
            losses_b.append(loss)
        losses_a.append(np.mean(losses_b))
    plt.figure(figsize=(15, 15))
    plt.plot(np.array(noise_vals) / 30, losses_a)
    plt.show()

    # Single distance-based reconstruction, plotted against ground truth.
    noise_base = 30
    coordinates = np.zeros((10, 3))
    coordinates[:, :2] = noise_base * np.random.randn(10, 2)
    x_true, y_true, x_new, y_new, D, D2, loss = get_predicted_distance(coordinates, noise=10)
    fig, axes = plt.subplots(1, 3)
    fig.set_size_inches((12, 12))
    axes[0].scatter(x_new, y_new)
    axes[0].set_title("prediction")
    axes[1].set_title("true")
    axes[2].set_title("together")
    axes[1].scatter(x_true, y_true)
    axes[2].scatter(x_true, y_true)
    axes[2].scatter(x_new, y_new)
    axes[0].set_aspect(1)
    axes[1].set_aspect(1)
    axes[2].set_aspect(1)
    plt.show()
from make import Clear, getUSTVGO, replaceUStVicons, MakeCS, MakeEng, MakeMain, Git, pushbulletMode, remPYC, RemoveMode2
from Auth.auth import name, Email, gitToken, gitRepo
import time
import os

token = gitToken
repo = gitRepo
email = Email

# NOTE(review): this URL looks malformed ("https://github:<token><repo>");
# a token-authenticated remote is usually "https://<token>@github.com/<repo>".
# Left unchanged because the variable is never executed in this file -- confirm
# before using it.
origin = "sudo git remote set-url origin https://github:" + str(token) + str(repo)
config_mail = "sudo git config --global user.email " + email
config_name = "sudo git config --global user.name " + name


def echo(msg):
    """Echo *msg* through the system shell (visible in the service log)."""
    os.system('sudo echo "' + msg + '"')


auto = True

# BUG FIX: the original called int(timeoutTime) and discarded the result, so
# the value stayed a string; convert once, up front. Also close the file.
with open('Assets/Service/timeou.txt') as _f:
    timeoutTime = int(_f.read())
echo("Timeout time is: " + str(timeoutTime))


def Main():
    """Run one full scrape/build/publish cycle."""
    RemoveMode2()
    Clear()
    getUSTVGO()
    replaceUStVicons()
    MakeCS()
    MakeEng()
    MakeMain()
    Git()
    #pushbulletMode(5)
    remPYC()


# Rebuild forever, sleeping timeoutTime seconds between cycles.
while auto:
    Main()
    echo("Waiting " + str(timeoutTime) + "Seconds")
    time.sleep(timeoutTime)
from Ingresos import Ingresos
from Egresos import Egresos

# Shared module-level accounts used by the helper functions below.
IngresosObj = Ingresos()
EgresosObj = Egresos()


def Ingreso():
    """Register a new income on the shared Ingresos instance."""
    IngresosObj.NuevoIngreso()


def Egreso():
    """Register a new expense on the shared Egresos instance."""
    EgresosObj.NuevoEgreso()


def getIngreso():
    """Return the recorded income.

    BUG FIX: the original called the method but dropped its return value,
    so callers always received None.
    """
    return IngresosObj.getIngreso()


def getEgreso():
    """Return the recorded expenses (return value was also dropped before)."""
    return EgresosObj.getEgreso()


class Finanzas:
    """Simple console front-end over the shared income/expense accounts."""

    def __init__(self):
        pass

    def NuevaCuenta(self):
        """Prompt for an owner name and announce a fresh, empty account."""
        nombreCuenta = input("Ingresa tu nombre para poder abrir una nueva cuenta: ")
        print("")
        print("Esta cuenta pertenece a: " + nombreCuenta)
        print("Saldo actual: $0.00\n")

    def ReportarTransferencia(self):
        """Print both transfer totals."""
        ingresos = getIngreso()
        # BUG FIX: expenses were fetched with getIngreso() (income) by mistake.
        egresos = getEgreso()
        transferencias = {"Ingresos": ingresos, "Egresos": egresos}
        for x in transferencias:
            print(transferencias[x])

    def VerSaldo(self):
        """Print the combined total of the transfers."""
        ingresos = getIngreso()
        # BUG FIX: same copy-paste error as above -- use getEgreso().
        egresos = getEgreso()
        transferencias = {"Ingresos": ingresos, "Egresos": egresos}
        total = 0
        # BUG FIX: the original iterated the dict KEYS and did int("Ingresos"),
        # which raises ValueError; sum the values instead.
        # NOTE(review): a balance is usually income MINUS expenses -- confirm
        # the intended semantics with the original author.
        for monto in transferencias.values():
            total += int(monto)
        print(total)
import os
import multiprocessing


class FileOperator2:
    """Append crawled page data / URLs to size-capped text files in output_dir."""

    output_dir = os.path.abspath(os.path.join(os.getcwd(), "PageData"))
    tar_file_size = 100 * 1024 * 1024  # roll over output files at 100 MiB
    count = 0

    def __init__(self, output_dir=output_dir, tar_file_size=tar_file_size, count=count):
        self.output_dir = output_dir
        self.tar_file_size = tar_file_size
        self.count = count  # index of the current numbered output file

    def write_data(self, mode: str = 'a', data: dict = None):
        """Append one crawled page record, rolling to a new file when full."""
        if data is None:
            return
        tar_file = os.path.join(self.output_dir, "%07d.txt" % self.count)
        file_check(tar_file)
        data_size = os.path.getsize(tar_file)
        # Advance to the next numbered file until one has room left.
        while data_size > self.tar_file_size:
            self.count += 1
            tar_file = os.path.join(self.output_dir, "%07d.txt" % self.count)
            file_check(tar_file)
            data_size = os.path.getsize(tar_file)
        with open(tar_file, mode, encoding='utf8') as f:
            f.write("url:" + data.get("url") + '\n')
            f.write("title:" + data.get("title") + '\n')
            f.write("date:" + data.get("date") + '\n')
            f.write("page_content:\n" + data.get("page_content") + "\n")
        print("%s has writen one page(url:%s) into file: %s" % (
            multiprocessing.current_process(), data.get('url'), tar_file))

    def write_url(self, mode: str = 'a', url: str = None):
        """Append one URL to the shared url_list.txt file."""
        if url is None:
            return
        tar_file = os.path.join(self.output_dir, "url_list.txt")
        file_check(tar_file)
        with open(tar_file, mode, encoding='utf8') as f:
            f.write(url + '\n')
        # BUG FIX: current_process was printed unbound (missing call parens),
        # which printed the function object instead of the process.
        print("%s has writen one url:%s into url_list file" % (
            multiprocessing.current_process(), url))

    def read_url(self, mode: str = 'r'):
        """Return the saved URL list (one entry per line, newline included)."""
        url_list = []
        tar_file = os.path.join(self.output_dir, "url_list.txt")
        if not os.path.exists(tar_file):
            return url_list
        with open(tar_file, mode, encoding='utf8') as f:
            for url in f:
                url_list.append(url)
        return url_list


def file_check(file_path):
    """Ensure file_path and its parent directory exist (creates an empty file)."""
    file_dir = os.path.split(file_path)[0]
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    if not os.path.exists(file_path):
        # IMPROVED: portable replacement for the original os.system('touch ...')
        # -- no shell dependency and no command-injection surface.
        open(file_path, 'a', encoding='utf8').close()


if __name__ == "__main__":
    # BUG FIX: the class is named FileOperator2; FileOperator raised NameError.
    file_o = FileOperator2()
### Final Project Submission
### Students: Myles Novick & Ariel Camperi

from util import *


class CriminalState(object):
    """
    Configuration values to describe a criminal agent's state.
    """
    # Looking for a mall to rob.
    STEAL = 'steal'
    # Crime committed; heading for a haven.
    ESCAPE = 'escape'
    # Terminal: reached a haven.
    SAFE = 'safe'
    # Terminal: intercepted by a police agent.
    CAUGHT = 'caught'


class CriminalAgent(Agent):
    """
    This class encapsulates state data about a criminal agent, but also logic
    about what action to take given a simulation state, and about how to
    execute an action and properly update state data.
    Initialized with a position.
    """

    def __init__(self, pos):
        super(CriminalAgent, self).__init__(pos)
        self.state = CriminalState.STEAL
        # True only on the tick in which a mall was just robbed.
        self.justCommittedCrime = False

    def copy(self):
        """
        Creates a copy of the criminal agent, and copies over relevant properties.
        """
        copy = CriminalAgent((self.x, self.y))
        copy.state = self.state
        copy.justCommittedCrime = self.justCommittedCrime
        return copy

    def isActive(self):
        """
        Criminals remain active while they are in steal or escape modes.
        """
        return self.state not in [CriminalState.SAFE, CriminalState.CAUGHT]

    def getAction(self, simulationState):
        """
        Given a simulation state, generates a list of legal actions, and uses
        the evaluation function to determine the optimal successor state (depth 1).
        """
        # Terminal states never move again.
        if self.state == CriminalState.SAFE or self.state == CriminalState.CAUGHT:
            return Directions.STOP
        i = simulationState.criminalAgents.index(self)
        legalActions = simulationState.getLegalActionsForAgent(self)
        legalSuccessors = [simulationState.generateSuccessorForCriminalAction(action, i) for action in legalActions]
        # Greedy 1-ply lookahead: score each successor, take the argmax action.
        evals = [self.evaluationFunction(successor, successor.criminalAgents[i]) for successor in legalSuccessors]
        return legalActions[evals.index(max(evals))]

    def evaluationFunction(self, simulationState, agent):
        """
        While stealing, the criminal will favor proximity to malls, and want to
        stay away from nearby detected police agents. While escaping, the
        criminal will try to get to haven as quickly as possible, again while
        avoiding nearby detected police agents.
        """
        currPos = agent.getPos()
        currState = agent.state
        if currState == CriminalState.STEAL:
            # Reciprocal distance to the nearest mall; the epsilon avoids a
            # division by zero when standing exactly on a mall.
            closestMallDistance = 1. / (float(min([euclideanDistance(currPos, mallPos) for mallPos in simulationState.malls])) + 0.00001)
            # Only police within the criminal's sight radius are penalized.
            detectedPoliceAgents = [police for police in simulationState.policeAgents if euclideanDistance(currPos, police.getPos()) <= CRIMINAL_SIGHT_RADIUS]
            return closestMallDistance - float(len(detectedPoliceAgents))
        elif currState == CriminalState.ESCAPE:
            closestHavenDistance = 1. / (float(min([euclideanDistance(currPos, havenPos) for havenPos in simulationState.havens])) + 0.00001)
            detectedPoliceAgents = [police for police in simulationState.policeAgents if euclideanDistance(currPos, police.getPos()) <= CRIMINAL_SIGHT_RADIUS]
            # NOTE(review): the extra 1. biases escape scores upward relative
            # to steal scores -- looks intentional; confirm.
            return 1. + closestHavenDistance - float(len(detectedPoliceAgents))
        elif currState == CriminalState.CAUGHT:
            return -999999
        elif currState == CriminalState.SAFE:
            return 999999

    def executeAction(self, action, simulationState):
        """
        Calls super implementation to update position, then updates status
        based on surrounding conditions (e.g. collisions with police agents).
        """
        super(CriminalAgent, self).executeAction(action)
        currPos = self.getPos()
        if self.state == CriminalState.STEAL and currPos in simulationState.malls:
            # Reached a mall: the robbery happens on this tick.
            self.state = CriminalState.ESCAPE
            self.justCommittedCrime = True
        elif self.state == CriminalState.ESCAPE:
            self.justCommittedCrime = False
            if currPos in simulationState.havens:
                self.state = CriminalState.SAFE
            else:
                # Colliding with any police agent while escaping means capture.
                for policeAgent in simulationState.policeAgents:
                    if currPos == policeAgent.getPos():
                        self.state = CriminalState.CAUGHT
                        break
'''
mbinary
#########################################################################
# File : permute_back_track.py
# Author: mbinary
# Mail: zhuheqin1@gmail.com
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-11-25 12:32
# Description:
#########################################################################
'''


def permute(n):
    """Print every permutation of [0, 1, ..., n-1] using in-place swap backtracking."""

    def _backtrack(seq, pos):
        # All positions before `pos` are fixed; a full prefix means one
        # complete permutation, so emit it.
        if pos == n:
            print(seq)
            return
        for k in range(pos, n):
            seq[pos], seq[k] = seq[k], seq[pos]   # choose: place seq[k] at pos
            _backtrack(seq, pos + 1)              # explore the remainder
            seq[pos], seq[k] = seq[k], seq[pos]   # undo the swap

    _backtrack(list(range(n)), 0)


if __name__ == '__main__':
    permute(5)
import pymysql
import requests
from bs4 import BeautifulSoup
from abc import *

import crawling


class Yes24BookCrawling(crawling.Crawling, ABC):
    """Scrapes the Yes24 bestseller list and mirrors it into yes24_book_rank."""

    def __init__(self, main_url, db_host, db_port, db_user, db_pw, db_name, db_charset):
        super().__init__(main_url, db_host, db_port, db_user, db_pw, db_name, db_charset)

    def crawler(self):
        """Fetch the bestseller page, parse every ranked book, store each row."""
        try:
            url = super().MAIN_URL()
            req = requests.get(url)
            cont = req.content
            soup = BeautifulSoup(cont, 'lxml')
            # print(soup)
            # One <li> per ranked book.
            soup = soup.select("div#bestList > ol > li")
            # print(soup)
            for i in range(len(soup)):
                # The <p> tags of each list item: index 2 holds the title
                # link, index 3 holds "author | publisher | ..." -- assumed
                # from the indexing below; confirm against the live markup.
                soup[i] = soup[i].select("p")
                BOOK_TITLE = soup[i][2].find("a").get_text()
                BOOK_URL = "http://www.yes24.com" + soup[i][2].find("a")["href"]
                temp = soup[i][3].get_text().split("|");
                # Drops the last 3 chars of the author field -- presumably a
                # fixed suffix in the page text; TODO confirm.
                BOOK_AUTHOR = temp[0][0:len(temp[0]) - 3]
                BOOK_PUBLISHER = temp[1][1:]
                IMAGE_URL = self.get_image(BOOK_URL)
                self.connect_db(i, BOOK_TITLE, BOOK_URL, BOOK_AUTHOR, BOOK_PUBLISHER, IMAGE_URL, "", "")
                #print(str(i + 1) + " : " + BOOK_TITLE + " : " + BOOK_URL + " : " + BOOK_AUTHOR + " : " + BOOK_PUBLISHER)
            f = open("./../../manual_active_log.txt", "a")
            f.write("table : yes24_book_rank UPDATED" + "\n")
            print("table : yes24_book_rank UPDATED")
            f.close()
        except Exception as e:
            # Any scraping/DB failure is recorded via the base-class hook.
            super().error_logging(str(e))
            print("Error Detected")

    def get_image(self, URL):
        """Return the cover-image URL scraped from a book's detail page."""
        URL = URL
        header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}
        req = requests.get(URL, headers=header)  ## We will crawl the weekly chart
        cont = req.content
        soup = BeautifulSoup(cont, 'lxml')
        #print(soup)
        soup = soup.select("em.imgBdr")
        return soup[0].find("img")["src"]

    def connect_db(self, i, book_title, book_info_url, book_author, book_publisher, image_url, tmp7, tmp8):
        """Insert one ranked book row; rank 1 first truncates the table.

        tmp7/tmp8 are unused placeholder parameters kept for the shared
        crawler call signature.
        """
        rank_number = i + 1
        conn = pymysql.connect(host=super().DB_HOST(), port=int(super().DB_PORT()),
                               user=super().DB_USER(), password=super().DB_PW(),
                               db=super().DB_NAME(), charset=super().DB_CHARSET())
        curs = conn.cursor()
        if int(rank_number) == 1:
            # A fresh crawl starts at rank 1: clear out last run's rows first.
            sql = """delete from yes24_book_rank"""
            curs.execute(sql)
        sql = """insert into yes24_book_rank (rank, title, url, author, publisher, image_url) values (%s, %s, %s, %s, %s, %s)"""
        curs.execute(sql, (rank_number, book_title, book_info_url, book_author, book_publisher, image_url))
        '''
        sql = """select title from yes24_book_rank where rank = %s"""
        curs.execute(sql, rank_number)
        row = curs.fetchone()
        if row[0] == book_title:
            #print("same yes24")
            pass
        else:
            #print(str(rank_number) + " : " + book_title + " : " + " : " + book_author + " : " + book_publisher)
            sql = """update yes24_book_rank set title=%s, url=%s, author=%s, publisher=%s, image_url=%s where rank=%s"""
            curs.execute(sql, (book_title, book_info_url, book_author, book_publisher, image_url, rank_number))
        '''
        conn.commit()
        conn.close()
from django.contrib import admin

from .models import Answer

# Register your models here.
# Expose the Answer model in the Django admin using the default ModelAdmin.
admin.site.register(Answer)
import pandas as pd
from presidio_analyzer import AnalyzerEngine, RecognizerRegistry, PatternRecognizer
from presidio_analyzer.nlp_engine import NlpEngineProvider
from presidio_analyzer.pattern import Pattern
from presidio_anonymizer import AnonymizerEngine
from presidio_anonymizer.entities.engine import RecognizerResult, OperatorConfig


class Anonymizer:
    """
    Anonymizes a text using presidio
    https://microsoft.github.io/presidio/api/
    """

    def __init__(self, model_name="es_core_news_md", language=None, default_entities=None):
        """
        Parameters
        ----------
        model_name
            Spacy model name.
        language
            Supported language ("en", "es", "nl", etc)
        default_entities
            A list with the name of the default supported entities.
        """
        # Language defaults to the spaCy model prefix (e.g. "es" from
        # "es_core_news_md") unless given explicitly.
        self.language = model_name[:model_name.index("_")] if not language else language
        self.model_name = model_name
        self.nlp_engine = None
        self.registry = None
        # Names of every entity currently recognized (built-in + custom).
        self.entities = []
        # Per-entity anonymization operators (matched text -> entity name).
        self.anonymizers_config = {}
        self.analyzer = None
        self.anonymizer = None

        # Create configuration containing engine name and models
        configuration = {
            "nlp_engine_name": "spacy",
            "models": [{"lang_code": self.language, "model_name": self.model_name}]
        }

        # Create NLP engine based on configuration
        provider = NlpEngineProvider(nlp_configuration=configuration)
        self.nlp_engine = provider.create_engine()

        # Prepare base entities
        self.registry = RecognizerRegistry()
        if default_entities:
            self.registry.load_predefined_recognizers(languages=[self.language])
            self.entities = default_entities

        # Anonymizers mapping values: each detected entity span is replaced
        # by its entity name (e.g. "PERSON").
        for entity in self.entities:
            self.anonymizers_config[entity] = OperatorConfig("replace", {"new_value": entity})

        # Prepare analyzer
        self.analyzer = AnalyzerEngine(registry=self.registry,
                                       nlp_engine=self.nlp_engine,
                                       supported_languages=[self.language])

        # Prepare anonymizer
        self.anonymizer = AnonymizerEngine()

    def get_entities(self):
        """
        Returns
        -------
        List of entities used
        """
        return self.entities

    def add_recognizer_regex(self, regex, name):
        """
        Register a custom regex-based recognizer.

        Parameters
        ----------
        regex
            Regex to match in the text
        name
            Name assigned to the recognizer
        """
        pattern = Pattern(name=name, regex=regex, score=1)
        recognizer = PatternRecognizer(supported_entity=name, patterns=[pattern],
                                       supported_language=self.language)
        self.registry.add_recognizer(recognizer)
        self.entities.append(name)
        self.anonymizers_config[name] = OperatorConfig("replace", {"new_value": name})
        return None

    def add_recognizer_deny_list(self, deny_list, name):
        """
        Register a recognizer that matches any of a fixed list of tokens.

        Parameters
        ----------
        deny_list
            List of tokens to match in the text
        name
            Name assigned to the recognizer
        """
        # Approximate word boundaries around the deny-listed tokens.
        regex = r"(?<![a-zA-Z]{1})(" + "|".join(deny_list) + r")(?=[^a-zA-Z]+|$)"  # Improve?
        pattern = Pattern(name=name, regex=regex, score=1)
        recognizer = PatternRecognizer(supported_entity=name, patterns=[pattern],
                                       supported_language=self.language)
        self.registry.add_recognizer(recognizer)
        self.entities.append(name)
        self.anonymizers_config[name] = OperatorConfig("replace", {"new_value": name})
        return None

    def anonymize_dataset(self, dataset, column="text", save_path=None, preprocess=lambda x: x):
        """
        Parameters
        ----------
        dataset
            The dataset to anonymize.
        column
            Name of the column to anonymize.
        preprocess
            Optional function with only one parameter (string) that returns
            another string. This function will NOT modify the output text.
            In addition, this function expects the output string to have the
            same length as the input string. This can be useful to deal with
            accents, dieresis or special characters.
        save_path
            Path to save the anonymized dataset as csv

        Returns
        -------
        An anonymized pandas DataFrame
        """
        dataset = pd.DataFrame(dataset)
        if column not in dataset.columns:
            raise KeyError("Column '{}' not in dataset".format(column))

        # Preprocess in case there are NaNs
        dataset.dropna(how="all", axis=0, inplace=True)
        dataset.dropna(how="all", axis=1, inplace=True)
        dataset.fillna(value='nan', axis='columns', inplace=True)

        # Anonymize dataset: analysis runs on the preprocessed text, but the
        # replacement is applied to the original text (same-length contract).
        dataset_PII = dataset.copy()
        dataset_PII[column] = dataset_PII[column].astype("str").apply(
            lambda x: self.anonymizer.anonymize(
                text=x,
                analyzer_results=self.analyzer.analyze(preprocess(x), language=self.language,
                                                       entities=self.entities),
                operators=self.anonymizers_config
            ).text
        )

        # Whether or not the row was modified during anonymization
        dataset_PII["has_PII"] = dataset_PII[column].apply(
            lambda x: any([value in x for value in self.entities]))

        if save_path:
            dataset_PII.to_csv(save_path)
        return dataset_PII

    def anonymize_text(self, text, preprocess=lambda x: x):
        """
        Parameters
        ----------
        text
            The text to anonymize.
        preprocess
            Optional function with only one parameter (string) that returns
            another string. This function will NOT modify the output text.
            In addition, this function expects the output string to have the
            same length as the input string. This can be useful to deal with
            accents, dieresis or special characters.

        Returns
        -------
        The anonymized text string
        """
        anonymized_text = self.anonymizer.anonymize(
            text=text,
            analyzer_results=self.analyzer.analyze(preprocess(text), language=self.language,
                                                   entities=self.entities),
            operators=self.anonymizers_config
        ).text
        # True when any entity placeholder made it into the output text.
        has_PII = any([value in anonymized_text for value in self.entities])
        return (anonymized_text, has_PII)
from typing import List

from arg.perspectives.basic_analysis import load_data_point
from arg.perspectives.declaration import PerspectiveCandidate
from arg.perspectives.ranked_list_interface import StaticRankedListInterface
from galagos.query_runs_ids import Q_CONFIG_ID_BM25_10000


def show():
    """Per claim id, count how many (claim, perspective) data points are
    present in / missing from the ranked-list cache, then print the counts."""
    ci = StaticRankedListInterface(Q_CONFIG_ID_BM25_10000)
    all_data_points: List[PerspectiveCandidate] = load_data_point("dev")
    per_claim = []
    pre_cid = -1
    n_found = 0
    n_not_found = 0
    for x in all_data_points:
        if x.cid != pre_cid:
            # Flush the previous claim's counts.
            # BUG FIX: the original appended the (-1, 0, 0) sentinel as a
            # bogus first entry; skip the sentinel.
            if pre_cid != -1:
                per_claim.append((pre_cid, n_found, n_not_found))
            n_found = 0
            n_not_found = 0
            pre_cid = x.cid
        try:
            ci.fetch(x.cid, x.pid)
            n_found += 1
        except KeyError:
            n_not_found += 1
    # BUG FIX: the original never flushed the final claim, so its counts
    # were silently dropped.
    if pre_cid != -1:
        per_claim.append((pre_cid, n_found, n_not_found))
    per_claim.sort(key=lambda e: e[0])
    print("{} datapoints".format(len(all_data_points)))
    print("{} claims".format(len(per_claim)))
    for e in per_claim:
        print(e)


if __name__ == "__main__":
    show()
# -*- coding: utf-8 -*-
"""
Create a socket file in the Linux system
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:copyright: (c) 2019 by rgb-24bit.
:license: MIT, see LICENSE for more details.
"""

import argparse
import os
import socket

DESCRIPTION = 'Create a socket file in the Linux system'
VERSION = '1.0.0'


def makesocket(path):
    """Create a socket file, return True successfully, fail to return False.

    Returns False when the path already exists or the bind fails.
    """
    if os.path.exists(path):
        return False
    sock = socket.socket(socket.AF_UNIX)
    try:
        # bind() on an AF_UNIX socket creates the filesystem entry; the
        # entry persists after the socket object is closed.
        sock.bind(path)
    except OSError:
        return False
    finally:
        # BUG FIX: the original never closed the socket, leaking the fd.
        sock.close()
    return True


def parse_args():
    """Command line arguments parsing."""
    parser = argparse.ArgumentParser(prog='makesocket', description=DESCRIPTION)
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + VERSION)
    parser.add_argument('-p', '--path', action='store', dest='path', type=str,
                        default=None,
                        help='Specify the path to the socket file to be created')
    return parser.parse_args()


def cli():
    """Entry point: create the socket file given by --path and report the result."""
    args = parse_args()
    if args.path:
        if not makesocket(args.path):
            # BUG FIX: typo "faild" -> "failed" in the user-facing message.
            print('Create socket file failed.')
        else:
            print('Create socket file success.')


if __name__ == '__main__':
    cli()
from pygame.surface import Surface

from OpenGL.GL import *
from OpenGL.GLU import *


#-----------------------------------------------------------------------
def init() -> None:
    """Set up fixed-function GL state: depth test, flat shading, one light."""
    glEnable(GL_DEPTH_TEST)
    glClearColor(1., 1., 1., 0.)
    glShadeModel(GL_FLAT)
    glEnable(GL_COLOR_MATERIAL)
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    # BUG FIX: GL_POSTION was a typo (NameError at runtime);
    # GL_POSITION is the correct light parameter. w=0 makes it directional.
    glLight(GL_LIGHT0, GL_POSITION, (0, 1, 1, 0))


#-----------------------------------------------------------------------
def resize(screen: Surface, perspective: float = 60.) -> None:
    """Reset viewport and projection to match the current window size."""
    width, height = screen.get_size()
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(perspective, float(width) / height, 1., 10000.)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
'''
Created on Nov 8, 2017

@author: selyunin
'''
import numpy as np
from PIL import Image
import cv2
from sklearn.utils import shuffle

# Steering-angle correction applied to the side-camera images.
STEERING_CORRECTION = 0.115


def _augment_batch(batch_df):
    """Load one dataframe batch and build the augmented (X, y) arrays:
    center/left/right camera images plus horizontally flipped copies with
    negated steering angles."""
    # NOTE(review): the "center" images are read from the 'left_img' column,
    # exactly as in the original code; this looks like a copy-paste bug, but
    # the true column name is not visible here -- confirm the dataframe schema.
    X_center = np.array(list(map(lambda x: np.asarray(Image.open(x)), batch_df['left_img'])))
    y_center = np.array(batch_df['steering'])
    X_right = np.array(list(map(lambda x: np.asarray(Image.open(x)), batch_df['right_img'])))
    y_right = np.array(batch_df['steering']) - STEERING_CORRECTION
    X_left = np.array(list(map(lambda x: np.asarray(Image.open(x)), batch_df['left_img'])))
    y_left = np.array(batch_df['steering']) + STEERING_CORRECTION
    X_lrc = np.concatenate((X_center, X_left, X_right), axis=0)
    y_lrc = np.concatenate((y_center, y_left, y_right), axis=0)
    # Mirror every image horizontally and negate its steering angle.
    X_flip = np.array(list(map(lambda x: cv2.flip(x, 1), X_lrc)))
    y_flip = -1. * y_lrc
    # concatenate left, center, right and flipped img
    X_new = np.concatenate((X_lrc, X_flip), axis=0)
    y_new = np.concatenate((y_lrc, y_flip), axis=0)
    return X_new, y_new


def training_generator(df, batch_size=128):
    """Infinite Keras-style generator yielding shuffled augmented batches."""
    num_images = df.shape[0]
    while 1:  # Loop forever so the generator never terminates
        for offset in range(0, num_images, batch_size):
            # BUG FIX: .loc label slicing is end-INCLUSIVE, so the original
            # yielded batch_size + 1 rows per batch; .iloc yields exactly
            # batch_size (and is index-agnostic).
            batch_df = df.iloc[offset:offset + batch_size]
            X_train_new, y_train_new = _augment_batch(batch_df)
            yield shuffle(X_train_new, y_train_new)


def validation_generator(df, batch_size=32):
    """Same as training_generator with a smaller default batch size.

    IMPROVED: the duplicated augmentation body is now shared via
    _augment_batch instead of being copy-pasted.
    """
    num_images = df.shape[0]
    while 1:  # Loop forever so the generator never terminates
        for offset in range(0, num_images, batch_size):
            batch_df = df.iloc[offset:offset + batch_size]
            X_valid_new, y_valid_new = _augment_batch(batch_df)
            yield shuffle(X_valid_new, y_valid_new)
import sys
#sys.path.append('../vrep_api')
#sys.path.append('../toolkit')
try:
    import vrep
except Exception:
    print('--------------------------------------------------------------')
    print('"vrep.py" could not be imported. This means very probably that')
    print('either "vrep.py" or the remoteApi library could not be found.')
    print('Make sure both are in the same folder as this file,')
    print('or appropriately adjust the file "vrep.py"')
    print('--------------------------------------------------------------')
    print('')
#from toolkit import *


class VREPClient:
    """Thin wrapper around a V-REP (CoppeliaSim) remote API connection."""

    def __init__(self, host, port):
        """Connect to the remote API server at host:port.

        Raises Exception when the connection cannot be established.
        """
        self.host = host
        self.port = port
        self.client_id = -1   # -1 means "not connected"
        self.console_id = -1
        self.debug = False
        print("connecting to ", host, port)
        # BUG FIX: the port parameter was ignored and the connection was
        # hard-wired to 20000; honor the caller-supplied port.
        self.client_id = vrep.simxStart(self.host, self.port, True, True, 5000, 5)
        print("connected!")
        if not self.is_connected():
            if self.debug:
                # NOTE(review): err_print comes from the commented-out
                # `from toolkit import *`; this branch would raise NameError
                # as the file stands -- confirm the toolkit import.
                err_print(prefix="COMPONENT CREATION", message=["CANNOT CONNECT TO REMOTE API"])
            raise Exception("CANNOT CONNECT TO REMOTE API")

    def is_debug_mode(self):
        """Return whether debug output is enabled."""
        return self.debug

    def set_debug(self, mode):
        """Enable/disable debug output."""
        self.debug = mode

    def is_connected(self):
        """True when simxStart returned a valid client id."""
        return self.client_id != -1

    def init_terminal(self):
        """Open an auxiliary console in the simulator; return (result, errors)."""
        err_list = []
        res, self.console_id = vrep.simxAuxiliaryConsoleOpen(
            self.client_id, "CONSOLA", 4, 5, None, None, None, None,
            vrep.simx_opmode_blocking)
        if res != 0:
            # NOTE(review): parse_error is also provided by the commented-out
            # toolkit import -- confirm it is in scope at runtime.
            err_list = parse_error(res)
        return res, err_list

    def write_on_terminal(self, mess):
        """Print *mess* on the auxiliary console; return (result, errors)."""
        res = vrep.simxAuxiliaryConsolePrint(self.client_id, self.console_id, mess,
                                             vrep.simx_opmode_blocking)
        return res, parse_error(res)
# PF-Prac-15
def check_22(num_list):
    """Return True when '22' occurs in the concatenated digits of num_list.

    Note: elements are concatenated as whole numbers, so [2, 2], [22] and
    [12, 21] (via "1221") all contain the substring '22'.
    """
    # IMPROVED: return the boolean test directly instead of the
    # if/else True/False pattern; generator avoids an intermediate list.
    return '22' in ''.join(str(i) for i in num_list)


# start writing your code here
print(check_22([3, 2, 5, 1, 2, 1, 2, 2]))
import numpy as np
import matplotlib.pyplot as plt
from dnn.deep_convnet import DeepConvNet
from dnn.common.functions import softmax
import math

# Pre-trained deep CNN loaded once at import time. The 28x28 reshape below
# suggests MNIST-style input -- TODO confirm.
network = DeepConvNet()
network.load_params("dnn/deep_convnet_params.pkl")


def predict(x):
    """Classify one 28x28 image; return (predicted_label, confidence_percent).

    The confidence is the softmax maximum, floored to one decimal place.
    """
    pre = network.predict(x.reshape(1, 1, 28, 28))
    pre_label = int(np.argmax(pre))
    pre_score = math.floor(max(softmax(pre[0])) * 1000) / 10
    print("predict", pre_label, pre_score, softmax(pre[0]))
    return pre_label, pre_score


def generate_adv(x, label, eps=0.3):
    """Build an FGSM adversarial example for *x* against *label*.

    Returns (perturbation, adversarial_image); the image is clipped to [0, 1].
    """
    d = np.zeros_like(x)
    # If no adversarial gradient was produced, retry until one is generated.
    while (d == np.zeros_like(x)).all():
        d, g = network.gradient_for_fgsm(x.reshape(1, 1, 28, 28), np.array([label]))
        d = d.reshape(28, 28)
    # FGSM step: perturb each pixel by eps in the direction of the gradient sign.
    p = eps * np.sign(d)
    adv = (x + p).clip(min=0, max=1)
    return p, adv
""" Ask the user to enter their first name by displaying the message: Please enter your first name: Display the message: <FirstName>, please enter a sentence. Enter Stop! to stop running the program: If the sentence ends with a full stop then count the number of spaces in the sentence and display the appropriate (grammatically correct) message from below: There are <SpaceCount> spaces in the sentence. There is <SpaceCount> space in the sentence. If the sentence does not end with a full stop then display the message: Sorry <FirstName>, a sentence must end with a full stop. and loop back to the section that displays the message Hi <FirstName>, please enter a sentence. Enter Stop! to stop running the program: If the user enters Stop! (case sensitive) then stop running the program. Hint. Loop while you haven't been told to Stop! Examples Please enter your first name: dave Dave, please enter a sentence. Enter Stop! to stop running the program: Short sentence. There is 1 space in the sentence. Dave, please enter a sentence. Enter Stop! to stop running the program: Stop! Please enter your first name: Dave Dave, please enter a sentence. Enter Stop! to stop running the program: This is a sentence. There are 3 spaces in the sentence. Dave, please enter a sentence. Enter Stop! to stop running the program: This is not a sentence Sorry Dave, a sentence must end with a full stop. Dave, please enter a sentence. Enter Stop! to stop running the program: This is a sentence Sorry Dave, a sentence must end with a full stop. Dave, please enter a sentence. Enter Stop! to stop running the program: Stop! """ # Auth: Michael Devenport. name_prompt = "Please enter your first name: " sentence_prompt = "{}, please enter a sentence. Enter Stop! to stop running the program: " return_statement = "There is {} space in the sentence." plural_return_statement = "There are {} spaces in the sentence." error = "Sorry {}, a sentence must end with a full stop." exit_loop = 'Stop!' 
first_name = input(name_prompt).capitalize() while first_name: sentence = input(sentence_prompt.format(first_name)) if sentence.endswith('.'): count_blank_space = sentence.count(' ') if count_blank_space == 1: print(return_statement.format(count_blank_space)) else: print(plural_return_statement.format(count_blank_space)) elif sentence == exit_loop: exit() else: print(error.format(first_name))
# This file makes use of the InferSent, SentEval and CoVe libraries, and may contain adapted code from the repositories # containing these libraries. Their licenses can be found in <this-repository>/Licenses. # # CoVe: # Copyright (c) 2017, Salesforce.com, Inc. All rights reserved. # Repository: https://github.com/salesforce/cove # Reference: McCann, Bryan, Bradbury, James, Xiong, Caiming, and Socher, Richard. Learned in translation: # Contextualized word vectors. In Advances in Neural Information Processing Systems 30, pp, 6297-6308. # Curran Associates, Inc., 2017. # # This code also makes use of TensorFlow: Martin Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig # Citro, Greg S. Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, # Geoffrey Irving, Michael Isard, Rafal Jozefowicz, Yangqing Jia, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dan # Mane, Mike Schuster, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Jonathon Shlens, Benoit Steiner, Ilya # Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viegas, Oriol Vinyals, Pete Warden, # Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on # heterogeneous systems, 2015. Software available from tensorflow.org. 
#
import sys
import os
import timeit
import gc
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub


# Helper for asserting that shapes match while allowing unknown ("None") dimensions.
# NB: in TF1, comparing two tf.Dimension objects yields None (not False) when either
# dimension is unknown -- that is what the "(d1 == d2) is None" clause accepts.
def dimensions_equal(dim1, dim2):
    return all([d1 == d2 or (d1 == d2) is None for d1, d2 in zip(dim1, dim2)])


class ELMOBCN:
    """Biattentive Classification Network (BCN, McCann et al. 2017) over ELMo embeddings.

    Builds a TF1 graph that embeds two (possibly duplicated) token sequences with the
    TF-Hub ELMo module, runs the BCN pipeline (feedforward -> BiLSTM encoder ->
    biattention -> BiLSTM integrate -> pooling -> MLP + softmax) and provides
    train / evaluate / test drivers around it.
    """

    def __init__(self, params, n_classes, max_sent_len, outputdir, weight_init=0.01, bias_init=0.01):
        self.params = params              # hyper-parameter dict (batch_size, dropout_ratio, ...)
        self.n_classes = n_classes        # number of output classes
        self.max_sent_len = max_sent_len  # all sentences are padded to this length
        self.outputdir = outputdir        # directory for model checkpoints
        self.W_init = weight_init         # half-width of the uniform weight initialiser
        self.b_init = bias_init           # constant bias initialiser
        self.embed_dim = 1024             # ELMo output dimension

    def create_model(self):
        """Build the BCN graph.

        Returns:
            (textinputs1, inputs1length, textinputs2, inputs2length, labels,
             is_training, predict, cost, train_step)
        """
        print("\nCreating BCN model...")
        elmo = hub.Module("https://tfhub.dev/google/elmo/2", trainable=True)

        # Two input token sequences plus their true lengths. The BCN is symmetric:
        # single-sentence tasks simply feed the same sequence to both sides.
        textinputs1 = tf.placeholder(tf.string, shape=[None, self.max_sent_len])
        inputs1length = tf.placeholder(tf.int32, shape=[None])
        textinputs2 = tf.placeholder(tf.string, shape=[None, self.max_sent_len])
        inputs2length = tf.placeholder(tf.int32, shape=[None])

        inputs1 = elmo(inputs={"tokens": textinputs1, "sequence_len": inputs1length},
                       signature="tokens", as_dict=True)["elmo"]
        inputs2 = elmo(inputs={"tokens": textinputs2, "sequence_len": inputs2length},
                       signature="tokens", as_dict=True)["elmo"]
        labels = tf.placeholder(tf.int32, [None])
        is_training = tf.placeholder(tf.bool)
        assert dimensions_equal(inputs1.shape, (self.params['batch_size'], self.max_sent_len, self.embed_dim))
        assert dimensions_equal(inputs2.shape, (self.params['batch_size'], self.max_sent_len, self.embed_dim))

        # Feedforward network with ReLU activation, applied per word embedding.
        feedforward_weight = tf.get_variable(
            "feedforward_weight", shape=[self.embed_dim, self.embed_dim],
            initializer=tf.random_uniform_initializer(-self.W_init, self.W_init))
        feedforward_bias = tf.get_variable(
            "feedforward_bias", shape=[self.embed_dim],
            initializer=tf.constant_initializer(self.b_init))
        with tf.variable_scope("feedforward"):
            feedforward_inputs1 = tf.layers.dropout(inputs1, rate=self.params['dropout_ratio'], training=is_training)
            feedforward_inputs2 = tf.layers.dropout(inputs2, rate=self.params['dropout_ratio'], training=is_training)

            def feedforward(feedforward_input):
                return tf.nn.relu6(tf.matmul(feedforward_input, feedforward_weight) + feedforward_bias)

            # map_fn applies the shared dense layer to each sentence in the batch.
            feedforward_outputs1 = tf.map_fn(feedforward, feedforward_inputs1)
            feedforward_outputs2 = tf.map_fn(feedforward, feedforward_inputs2)
        assert dimensions_equal(feedforward_outputs1.shape,
                                (self.params['batch_size'], self.max_sent_len, self.embed_dim))
        assert dimensions_equal(feedforward_outputs2.shape,
                                (self.params['batch_size'], self.max_sent_len, self.embed_dim))

        # BiLSTM encoder. Either one shared BiLSTM for both inputs (e.g. sentiment,
        # where both sides are the same sentence) or two separate BiLSTMs (e.g. SNLI).
        if self.params['same_bilstm_for_encoder']:
            with tf.variable_scope("bilstm_encoder_scope"):
                encoder_fw_cell = tf.contrib.rnn.LSTMCell(self.params['bilstm_encoder_n_hidden'],
                                                          forget_bias=self.params['bilstm_encoder_forget_bias'])
                encoder_bw_cell = tf.contrib.rnn.LSTMCell(self.params['bilstm_encoder_n_hidden'],
                                                          forget_bias=self.params['bilstm_encoder_forget_bias'])
                # Run both inputs through the shared BiLSTM as one doubled batch, then split.
                encoder_inputs = tf.concat((feedforward_outputs1, feedforward_outputs2), 0)
                encoder_raw_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
                    encoder_fw_cell, encoder_bw_cell, encoder_inputs, dtype=tf.float32)
                # FIX: variable was misspelled "emcoder_outputs1" in the original.
                encoder_outputs1, encoder_outputs2 = tf.split(
                    tf.concat([encoder_raw_outputs[0], encoder_raw_outputs[-1]], 2), 2, axis=0)
        else:
            with tf.variable_scope("bilstm_encoder_scope1"):
                encoder_fw_cell = tf.contrib.rnn.LSTMCell(self.params['bilstm_encoder_n_hidden'],
                                                          forget_bias=self.params['bilstm_encoder_forget_bias'])
                encoder_bw_cell = tf.contrib.rnn.LSTMCell(self.params['bilstm_encoder_n_hidden'],
                                                          forget_bias=self.params['bilstm_encoder_forget_bias'])
                encoder_raw_outputs1, _ = tf.nn.bidirectional_dynamic_rnn(
                    encoder_fw_cell, encoder_bw_cell, feedforward_outputs1, dtype=tf.float32)
                encoder_outputs1 = tf.concat([encoder_raw_outputs1[0], encoder_raw_outputs1[-1]], 2)
            with tf.variable_scope("bilstm_encoder_scope2"):
                encoder_fw_cell2 = tf.contrib.rnn.LSTMCell(self.params['bilstm_encoder_n_hidden'],
                                                           forget_bias=self.params['bilstm_encoder_forget_bias'])
                encoder_bw_cell2 = tf.contrib.rnn.LSTMCell(self.params['bilstm_encoder_n_hidden'],
                                                           forget_bias=self.params['bilstm_encoder_forget_bias'])
                encoder_raw_outputs2, _ = tf.nn.bidirectional_dynamic_rnn(
                    encoder_fw_cell2, encoder_bw_cell2, feedforward_outputs2, dtype=tf.float32)
                encoder_outputs2 = tf.concat([encoder_raw_outputs2[0], encoder_raw_outputs2[-1]], 2)
        assert dimensions_equal(encoder_outputs1.shape,
                                (self.params['batch_size'], self.max_sent_len,
                                 self.params['bilstm_encoder_n_hidden'] * 2))
        assert dimensions_equal(encoder_outputs2.shape,
                                (self.params['batch_size'], self.max_sent_len,
                                 self.params['bilstm_encoder_n_hidden'] * 2))

        # Biattention mechanism [Seo et al., 2017, Xiong et al., 2017]
        def biattention(biattention_input):
            X = biattention_input[0]
            Y = biattention_input[1]

            # Affinity matrix A = X Y^T
            A = tf.matmul(X, Y, transpose_b=True)
            assert dimensions_equal(A.shape, (self.max_sent_len, self.max_sent_len))

            # Column-wise normalisation to extract attention weights
            Ax = tf.nn.softmax(A)
            Ay = tf.nn.softmax(tf.transpose(A))
            assert dimensions_equal(Ax.shape, (self.max_sent_len, self.max_sent_len))
            assert dimensions_equal(Ay.shape, (self.max_sent_len, self.max_sent_len))

            # Context summaries: Cx = Ax^T X and Cy = Ay^T Y (McCann et al. 2017).
            Cx = tf.matmul(Ax, X, transpose_a=True)
            # FIX: the original computed Cy from X; per the BCN paper the second
            # summary conditions on Y.
            Cy = tf.matmul(Ay, Y, transpose_a=True)
            assert dimensions_equal(Cx.shape, (self.max_sent_len, self.params['bilstm_encoder_n_hidden'] * 2))
            assert dimensions_equal(Cy.shape, (self.max_sent_len, self.params['bilstm_encoder_n_hidden'] * 2))

            # Each side is concatenated with its difference from / product with the
            # other side's summary: [X; X - Cy; X * Cy] (and symmetrically for Y).
            biattention_output1 = tf.concat([X, X - Cy, tf.multiply(X, Cy)], 1)
            biattention_output2 = tf.concat([Y, Y - Cx, tf.multiply(Y, Cx)], 1)
            return biattention_output1, biattention_output2

        biattention_outputs1, biattention_outputs2 = tf.map_fn(biattention, (encoder_outputs1, encoder_outputs2))
        assert dimensions_equal(biattention_outputs1.shape,
                                (self.params['batch_size'], self.max_sent_len,
                                 self.params['bilstm_encoder_n_hidden'] * 2 * 3))
        assert dimensions_equal(biattention_outputs2.shape,
                                (self.params['batch_size'], self.max_sent_len,
                                 self.params['bilstm_encoder_n_hidden'] * 2 * 3))

        # Integrate with two separate one-layer BiLSTMs
        with tf.variable_scope("bilstm_integrate_scope1"):
            integrate_fw_cell = tf.contrib.rnn.LSTMCell(self.params['bilstm_integrate_n_hidden'],
                                                        forget_bias=self.params['bilstm_integrate_forget_bias'])
            integrate_bw_cell = tf.contrib.rnn.LSTMCell(self.params['bilstm_integrate_n_hidden'],
                                                        forget_bias=self.params['bilstm_integrate_forget_bias'])
            integrate_raw_outputs1, _ = tf.nn.bidirectional_dynamic_rnn(
                integrate_fw_cell, integrate_bw_cell, biattention_outputs1, dtype=tf.float32)
            integrate_outputs1 = tf.concat([integrate_raw_outputs1[0], integrate_raw_outputs1[-1]], 2)
        with tf.variable_scope("bilstm_integrate_scope2"):
            integrate_fw_cell2 = tf.contrib.rnn.LSTMCell(self.params['bilstm_integrate_n_hidden'],
                                                         forget_bias=self.params['bilstm_integrate_forget_bias'])
            integrate_bw_cell2 = tf.contrib.rnn.LSTMCell(self.params['bilstm_integrate_n_hidden'],
                                                         forget_bias=self.params['bilstm_integrate_forget_bias'])
            integrate_raw_outputs2, _ = tf.nn.bidirectional_dynamic_rnn(
                integrate_fw_cell2, integrate_bw_cell2, biattention_outputs2, dtype=tf.float32)
            integrate_outputs2 = tf.concat([integrate_raw_outputs2[0], integrate_raw_outputs2[-1]], 2)
        assert dimensions_equal(integrate_outputs1.shape,
                                (self.params['batch_size'], self.max_sent_len,
                                 self.params['bilstm_integrate_n_hidden'] * 2))
        assert dimensions_equal(integrate_outputs2.shape,
                                (self.params['batch_size'], self.max_sent_len,
                                 self.params['bilstm_integrate_n_hidden'] * 2))

        # Max, mean, min and self-attentive pooling
        with tf.variable_scope("pool"):
            self_pool_weight1 = tf.get_variable(
                "self_pool_weight1", shape=[self.params['bilstm_integrate_n_hidden'] * 2, 1],
                initializer=tf.random_uniform_initializer(-self.W_init, self.W_init))
            self_pool_bias1 = tf.get_variable(
                "self_pool_bias1", shape=[1], initializer=tf.constant_initializer(self.b_init))
            self_pool_weight2 = tf.get_variable(
                "self_pool_weight2", shape=[self.params['bilstm_integrate_n_hidden'] * 2, 1],
                initializer=tf.random_uniform_initializer(-self.W_init, self.W_init))
            self_pool_bias2 = tf.get_variable(
                "self_pool_bias2", shape=[1], initializer=tf.constant_initializer(self.b_init))

            def pool(pool_input):
                Xy = pool_input[0]
                Yx = pool_input[1]
                assert dimensions_equal(Xy.shape, (self.max_sent_len, self.params['bilstm_integrate_n_hidden'] * 2,))
                assert dimensions_equal(Yx.shape, (self.max_sent_len, self.params['bilstm_integrate_n_hidden'] * 2,))

                # Max pooling over the time dimension.
                max_Xy = tf.reduce_max(Xy, axis=0)
                max_Yx = tf.reduce_max(Yx, axis=0)
                assert dimensions_equal(max_Xy.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))
                assert dimensions_equal(max_Yx.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))

                # Mean pooling over the time dimension.
                mean_Xy = tf.reduce_mean(Xy, axis=0)
                mean_Yx = tf.reduce_mean(Yx, axis=0)
                assert dimensions_equal(mean_Xy.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))
                assert dimensions_equal(mean_Yx.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))

                # Min pooling over the time dimension.
                min_Xy = tf.reduce_min(Xy, axis=0)
                min_Yx = tf.reduce_min(Yx, axis=0)
                assert dimensions_equal(min_Xy.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))
                assert dimensions_equal(min_Yx.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))

                # Self-attentive pooling: learn a weight per timestep, then
                # take the attention-weighted sum of the sequence.
                Bx = tf.nn.softmax((tf.matmul(Xy, self_pool_weight1)) + self_pool_bias1)
                By = tf.nn.softmax((tf.matmul(Yx, self_pool_weight2)) + self_pool_bias2)
                assert dimensions_equal(Bx.shape, (self.max_sent_len, 1))
                assert dimensions_equal(By.shape, (self.max_sent_len, 1))
                x_self = tf.squeeze(tf.matmul(Xy, Bx, transpose_a=True))
                y_self = tf.squeeze(tf.matmul(Yx, By, transpose_a=True))
                assert dimensions_equal(x_self.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))
                assert dimensions_equal(y_self.shape, (self.params['bilstm_integrate_n_hidden'] * 2,))

                # Combine pooled representations
                pool_output1 = tf.concat([max_Xy, mean_Xy, min_Xy, x_self], 0)
                pool_output2 = tf.concat([max_Yx, mean_Yx, min_Yx, y_self], 0)
                return pool_output1, pool_output2

            pool_outputs1, pool_outputs2 = tf.map_fn(pool, (integrate_outputs1, integrate_outputs2))
        assert dimensions_equal(pool_outputs1.shape,
                                (self.params['batch_size'], self.params['bilstm_integrate_n_hidden'] * 2 * 4))
        assert dimensions_equal(pool_outputs2.shape,
                                (self.params['batch_size'], self.params['bilstm_integrate_n_hidden'] * 2 * 4))

        # FeedForward network (2 relu layers, followed by a softmax)
        with tf.variable_scope("output"):
            joined = tf.concat([pool_outputs1, pool_outputs2], 1)
            assert dimensions_equal(joined.shape,
                                    (self.params['batch_size'], self.params['bilstm_integrate_n_hidden'] * 2 * 4 * 2))

            # Output layer 1: Dropout, fully-connected layer, D->(D/output_reduction) relu
            dp1 = tf.layers.dropout(joined, rate=self.params['dropout_ratio'], training=is_training)
            assert dimensions_equal(dp1.shape,
                                    (self.params['batch_size'], self.params['bilstm_integrate_n_hidden'] * 2 * 4 * 2))
            output_dim1 = int((self.params['bilstm_integrate_n_hidden'] * 2 * 4 * 2) / self.params['output_reduction'])
            output_weight1 = tf.get_variable(
                "output_weight1", shape=[self.params['bilstm_integrate_n_hidden'] * 2 * 4 * 2, output_dim1],
                initializer=tf.random_uniform_initializer(-self.W_init, self.W_init))
            output_bias1 = tf.get_variable(
                "output_bias1", shape=[output_dim1], initializer=tf.constant_initializer(self.b_init))
            output_outputs1 = tf.nn.relu6((tf.matmul(dp1, output_weight1) + output_bias1))
            assert dimensions_equal(output_outputs1.shape, (self.params['batch_size'], output_dim1))

            # Output layer 2: Dropout, fully-connected layer, D->(D/2) relu
            dp2 = tf.layers.dropout(output_outputs1, rate=self.params['dropout_ratio'], training=is_training)
            assert dimensions_equal(dp2.shape, (self.params['batch_size'], output_dim1))
            output_dim2 = int(output_dim1 / 2)
            output_weight2 = tf.get_variable(
                "output_weight2", shape=[output_dim1, output_dim2],
                initializer=tf.random_uniform_initializer(-self.W_init, self.W_init))
            output_bias2 = tf.get_variable(
                "output_bias2", shape=[output_dim2], initializer=tf.constant_initializer(self.b_init))
            output_outputs2 = tf.nn.relu6((tf.matmul(dp2, output_weight2) + output_bias2))
            assert dimensions_equal(output_outputs2.shape, (self.params['batch_size'], output_dim2))

            # SoftMax layer: Dropout, D->n_classes fully-connected layer, SoftMax
            dp3 = tf.layers.dropout(output_outputs2, rate=self.params['dropout_ratio'], training=is_training)
            assert dimensions_equal(dp3.shape, (self.params['batch_size'], output_dim2))
            softmax_weight = tf.get_variable(
                "softmax_weight", shape=[output_dim2, self.n_classes],
                initializer=tf.random_uniform_initializer(-self.W_init, self.W_init))
            softmax_bias = tf.get_variable(
                "softmax_bias", shape=[self.n_classes], initializer=tf.constant_initializer(self.b_init))
            logits = (tf.matmul(dp3, softmax_weight) + softmax_bias)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
        cost = tf.reduce_mean(cross_entropy)
        if self.params['optimizer'] == "adam":
            train_step = tf.train.AdamOptimizer(self.params['learning_rate'],
                                                beta1=self.params['adam_beta1'],
                                                beta2=self.params['adam_beta2'],
                                                epsilon=self.params['adam_epsilon']).minimize(cost)
        elif self.params['optimizer'] == "gradientdescent":
            train_step = tf.train.GradientDescentOptimizer(self.params['learning_rate']).minimize(cost)
        else:
            print("ERROR: Invalid optimizer: \"" + self.params['optimizer'] + "\".")
            sys.exit(1)
        predict = tf.argmax(tf.nn.softmax(logits), axis=1)

        print("Successfully created BCN model.")
        return textinputs1, inputs1length, textinputs2, inputs2length, labels, is_training, predict, cost, train_step

    def dry_run(self):
        """Build the graph once (sanity check) without training or running it."""
        tf.reset_default_graph()
        with tf.Graph().as_default():
            return self.create_model()

    def train(self, dataset):
        """Train the model on `dataset`, saving the best-on-dev checkpoint.

        Uses early stopping: training ends after 7 consecutive epochs without
        a new best dev accuracy. Returns the best dev accuracy reached.
        """
        best_dev_accuracy = -1
        tf.reset_default_graph()
        with tf.Graph().as_default() as graph:
            (textinputs1, inputs1length, textinputs2, inputs2length,
             labels, is_training, predict, loss_op, train_op) = self.create_model()
            with tf.Session(graph=graph) as sess:
                print("\nTraining model...")
                sess.run(tf.global_variables_initializer())
                train_data_len = dataset.get_total_samples("train")
                total_train_batches = train_data_len // self.params['batch_size']
                # Progress markers printed at every 10% of an epoch.
                train_milestones = {int(total_train_batches * 0.1): "10%", int(total_train_batches * 0.2): "20%",
                                    int(total_train_batches * 0.3): "30%", int(total_train_batches * 0.4): "40%",
                                    int(total_train_batches * 0.5): "50%", int(total_train_batches * 0.6): "60%",
                                    int(total_train_batches * 0.7): "70%", int(total_train_batches * 0.8): "80%",
                                    int(total_train_batches * 0.9): "90%", total_train_batches: "100%"}
                best_epoch_number = 0
                epochs_since_last_save = 0
                for epoch in range(self.params['n_epochs']):
                    print("  ============== Epoch " + str(epoch + 1) + " of " + str(self.params['n_epochs'])
                          + " ==============")
                    epoch_start_time = timeit.default_timer()
                    done = 0
                    average_loss = 0
                    # Fresh random batch order each epoch.
                    indexes = np.random.permutation(train_data_len)
                    for i in range(total_train_batches):
                        batch_indexes = indexes[i * self.params['batch_size']: (i + 1) * self.params['batch_size']]
                        batch_X1, batchX1length, batch_X2, batch_X2length, batch_y = \
                            dataset.get_batch('train', batch_indexes)
                        _, loss = sess.run([train_op, loss_op],
                                           feed_dict={textinputs1: batch_X1, textinputs2: batch_X2,
                                                      inputs1length: batchX1length, inputs2length: batch_X2length,
                                                      labels: batch_y, is_training: True})
                        average_loss += (loss / total_train_batches)
                        done += 1
                        if done in train_milestones:
                            print("    " + train_milestones[done])
                    print("  Loss: " + str(average_loss))
                    print("  Computing dev accuracy...")
                    dev_accuracy = self.calculate_accuracy(dataset, sess, textinputs1, inputs1length, textinputs2,
                                                           inputs2length, labels, is_training, predict, set_name="dev")
                    print("  Dev accuracy:" + str(dev_accuracy))
                    print("  Epoch took %s seconds" % (timeit.default_timer() - epoch_start_time))
                    if dev_accuracy > best_dev_accuracy:
                        # If dev accuracy improved, save the model after this epoch
                        best_dev_accuracy = dev_accuracy
                        best_epoch_number = epoch
                        epochs_since_last_save = 0
                        tf.train.Saver().save(sess, os.path.join(self.outputdir, 'model'))
                    else:
                        # If dev accuracy got worse, don't save
                        epochs_since_last_save += 1
                    gc.collect()
                    if epochs_since_last_save >= 7:
                        # If dev accuracy keeps getting worse, stop training (early stopping)
                        break
                print("Finished training model after " + str(best_epoch_number + 1)
                      + " epochs. Model is saved in: " + self.outputdir)
                print("Best dev accuracy: " + str(best_dev_accuracy))
        return best_dev_accuracy

    def calculate_accuracy(self, dataset, sess, textinputs1, inputs1length, textinputs2, inputs2length, labels,
                           is_training, predict, set_name="test", verbose=False):
        """Run the predict op over the full `set_name` split and return accuracy in [0, 1]."""
        test_data_len = dataset.get_total_samples(set_name)
        total_test_batches = test_data_len // self.params['batch_size']
        test_milestones = {int(total_test_batches * 0.1): "10%", int(total_test_batches * 0.2): "20%",
                           int(total_test_batches * 0.3): "30%", int(total_test_batches * 0.4): "40%",
                           int(total_test_batches * 0.5): "50%", int(total_test_batches * 0.6): "60%",
                           int(total_test_batches * 0.7): "70%", int(total_test_batches * 0.8): "80%",
                           int(total_test_batches * 0.9): "90%", total_test_batches: "100%"}
        done = 0
        test_y = []
        predicted = []
        indexes = np.arange(test_data_len)
        for i in range(total_test_batches):
            batch_indexes = indexes[i * self.params['batch_size']: (i + 1) * self.params['batch_size']]
            batch_X1, batchX1length, batch_X2, batch_X2length, batch_y = dataset.get_batch(set_name, batch_indexes)
            for item in batch_y:
                test_y.append(item)
            batch_pred = list(sess.run(predict,
                                       feed_dict={textinputs1: batch_X1, textinputs2: batch_X2,
                                                  inputs1length: batchX1length, inputs2length: batch_X2length,
                                                  labels: batch_y, is_training: False}))
            for item in batch_pred:
                predicted.append(item)
            done += 1
            if verbose and done in test_milestones:
                print("    " + test_milestones[done])
        return sum([p == a for p, a in zip(predicted, test_y)]) / float(test_data_len)

    def test(self, dataset):
        """Restore the saved checkpoint and report accuracy on the test split."""
        tf.reset_default_graph()
        with tf.Graph().as_default() as graph:
            (textinputs1, inputs1length, textinputs2, inputs2length,
             labels, is_training, predict, _, _) = self.create_model()
            with tf.Session(graph=graph) as sess:
                print("\nComputing test accuracy...")
                sess.run(tf.global_variables_initializer())
                tf.train.Saver().restore(sess, os.path.join(self.outputdir, 'model'))
                accuracy = self.calculate_accuracy(dataset, sess, textinputs1, inputs1length, textinputs2,
                                                   inputs2length, labels, is_training, predict, verbose=True)
                print("Test accuracy: " + str(accuracy))
        return accuracy
# -*- coding: utf-8 -*- """ Created on Fri Jan 26 20:13:43 2018 @author: NANCUH """ # -*- coding: utf-8 -*- """ Created on Fri Jan 26 18:40:41 2018 @author: NANCUH """ import numpy as np from math import sqrt import pandas as pd import warnings import random from collections import Counter #dataset = {'k':[[1,2],[2,3],[3,1]],'r':[[6,5],[7,7],[8,6]]} #new_feature = [5,6] #for i in dataset: # for ii in dataset[i]: # plt.scatter(ii[0],ii[1],s=100,color=i) #plt.scatter(new_feature[0],new_feature[1],s=100,color='b') #plt.show() def k_nearest_neighbors(data,predict,k=3): if len(data)>=k or k%2 ==0 : warnings.warn('K is set less change greater than dataset types or k is not to be even!') distances = [] for group in data: for features in data[group]: euclidean_distance = np.linalg.norm(np.array(features) - np.array(predict)) distances.append([euclidean_distance,group]) votes = [i[1] for i in sorted((distances))[:k]] confidence = float(Counter(votes).most_common(1)[0][1])/float(k) vote_result = Counter(votes).most_common(1)[0][0] return vote_result,confidence #result = k_nearest_neighbors(dataset,new_feature,k=3) #print(result) #for i in dataset: # for ii in dataset[i]: # plt.scatter(ii[0],ii[1],s=100,color=i) #plt.scatter(new_feature[0],new_feature[1],s=100,color=result) #plt.show() accuracies = [] for i in range(25): df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999,inplace = True) df.drop(['id'],1,inplace=True) full_data = df.astype(float).values.tolist() random.shuffle(full_data) test_size = 0.4 train_set = {2:[],4:[]} test_set = {2:[],4:[]} train_data = full_data[:-int(test_size*len(full_data))] test_data = full_data[-int(test_size*len(full_data)):] for i in train_data: train_set[i[-1]].append(i[:-1]) for i in test_data: test_set[i[-1]].append(i[:-1]) correct = 0 total = 0 for group in test_set: for data in test_set[group]: vote,confidence = k_nearest_neighbors(train_set,data,k=5) if group == vote: correct+=1 #print(confidence) total+=1 
#print(correct,total) print('accuracy:',float(correct)/float(total)) accuracies.append(float(correct)/float(total)) print(sum(accuracies)/len(accuracies))
"""DabsetScript URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin

import backend.views

urlpatterns = [
    url(r'^admin/', admin.site.urls),

    # Backend
    url(r'^$', backend.views.home),
    url(r'^start_process/', backend.views.startProcess),

    # Raw Leads
    url(r'^leads/', backend.views.Leads),
    url(r'^mark_as_good/', backend.views.markAsGood),

    # Campaigns
    url(r'^campaigns/', backend.views.Campaigns),

    # Sent Leads
    url(r'^send_campaign/', backend.views.sendCampaign),

    # Templates
    url(r'^templates/', backend.views.templatesHome),
    url(r'^save_template/', backend.views.saveTemplate),

    # Logs
    url(r'^logs/', backend.views.logs),

    # Features
    # NOTE(review): route is spelled "tuncate_leads" (missing an "r"); the view
    # name suggests "truncate". Renaming would break existing links, so it is
    # flagged here rather than changed.
    url(r'^tuncate_leads/', backend.views.tuncateLeads),
]
import unittest

from process_changes_with_object import get_commits, read_file
from changesvisualise import initialSetup, loadDataFrame, getAuthorInfo, getAuthorLineCount, getAuthorDateLineInfo, getAuthorDateLineCounts, getAuthorSatSunLineCounts, getTimeLineInfo, getTimeLineCounts, getAuthorTimeLineInfo, getAuthorTimeLineCounts, getAuthorAddDelEditInfo, getAuthorAddEditDelCounts


class TestCommits(unittest.TestCase):
    """Tests for parsing the change log into commit objects.

    Expected values (line/commit counts, authors, comments, paths) are taken
    from the checked-in fixture 'changes_python.log'.
    """

    # read in test file
    def setUp(self):
        self.data = read_file('changes_python.log')

    def test_number_of_lines(self):
        self.assertEqual(5255, len(self.data))

    def test_number_of_commits(self):
        commits = get_commits(self.data)
        self.assertEqual(422, len(commits))
        self.assertEqual('Thomas', commits[0].author)
        self.assertEqual(['FTRPC-500: Frontier Android || Inconsistencey in My Activity screen', 'Client used systemAttribute name="Creation-Date" instead of versionCreated as version created.'], commits[24].comment)
        self.assertEqual(['M /cloud/personal/client-international/android/branches/android-15.2-solutions/libs/model/src/com/biscay/client/android/model/util/sync/dv/SyncAdapter.java'], commits[20].changed_path)


class TestVizualResult(unittest.TestCase):
    """Tests for the dataframe aggregation helpers in changesvisualise.

    Each test pairs one positive assertion with one negative assertion on
    the same value.
    """

    def setUp(self):
        self.viz = initialSetup()

    # load data frame in the class
    def test_loadDataFrame(self):
        df = loadDataFrame(self.viz)
        self.assertEqual(422, len(df))  # positive test
        self.assertNotEqual(423, len(df))  # negative test

    # Get dataframe with author and no_of_lines
    def test_getAuthorInfo(self):
        self.dfAuthor = getAuthorInfo(self.viz)
        self.assertEqual('Vincent' ,self.dfAuthor.loc[2]['author'])  # positive test
        self.assertNotEqual('Vincent' ,self.dfAuthor.loc[1]['author'])  # negative test

    # groups the dataframe by author and sums no_of_lines
    def test_getAuthorLineCount(self):
        self.dfAuthor = getAuthorInfo(self.viz)
        self.sumAuthors = getAuthorLineCount(self.viz,self.dfAuthor)
        self.assertEqual(234 ,self.sumAuthors.iloc[0]['no_of_lines'])  # positive test
        self.assertNotEqual(235 ,self.sumAuthors.iloc[0]['no_of_lines'])  # negative test

    # Get dataframe with author, date and no_of_lines
    def test_getAuthorDateLine(self):
        self.dfAuthorDate = getAuthorDateLineInfo(self.viz)
        self.assertEqual('Vincent' ,self.dfAuthorDate.loc[2]['author'])  # positive test
        self.assertNotEqual('Vincent' ,self.dfAuthorDate.loc[1]['author'])  # negative test

    # groups the dataframe by author and sums add/edit/delete counts
    def test_getAuthorAddEditDel(self):
        self.dfAuthorAddEditDel = getAuthorAddDelEditInfo(self.viz)
        self.dfAuthorAddEditDelCnt = getAuthorAddEditDelCounts(self.viz,self.dfAuthorAddEditDel)
        self.assertEqual(9 ,self.dfAuthorAddEditDelCnt.iloc[0]['add'])  # positive test
        self.assertNotEqual(7 ,self.dfAuthorAddEditDelCnt.iloc[0]['add'])  # negative test

    # groups the dataframe by author, date and sums no_of_lines
    def test_getAuthorDateLineCounts(self):
        self.dfAuthorDateCnt = getAuthorDateLineInfo(self.viz)
        self.sumAuthors = getAuthorDateLineCounts(self.viz,self.dfAuthorDateCnt)
        self.assertEqual(9 ,self.sumAuthors.iloc[0]['no_of_lines'])  # positive test
        self.assertNotEqual(10 ,self.sumAuthors.iloc[0]['no_of_lines'])  # negative test

    # groups weekend (Sat/Sun) activity by author and sums no_of_lines
    def test_getAuthorSatSunLineCounts(self):
        self.dfAuthorDate = getAuthorDateLineInfo(self.viz)
        self.dfAuthorSatSun = getAuthorSatSunLineCounts(self.viz,self.dfAuthorDate)
        self.assertEqual(1 ,self.dfAuthorSatSun.iloc[0]['no_of_lines'])  # positive test
        self.assertNotEqual(2 ,self.dfAuthorSatSun.iloc[0]['no_of_lines'])  # negative test

    # Get dataframe with hour and no_of_lines
    def test_getTimeLineInfo(self):
        self.dfAuthorTime = getTimeLineInfo(self.viz)
        self.assertEqual(9 ,self.dfAuthorTime.loc[2]['hour'])  # positive test
        self.assertNotEqual(10 ,self.dfAuthorTime.loc[1]['hour'])  # negative test

    # groups the dataframe by hour and sums no_of_lines
    def test_getTimeLineCounts(self):
        self.dfAuthorTime = getTimeLineInfo(self.viz)
        self.sumTimeCnt = getTimeLineCounts(self.viz,self.dfAuthorTime)
        self.assertEqual(5 ,self.sumTimeCnt.iloc[0]['no_of_lines'])  # positive test
        self.assertNotEqual(6 ,self.sumTimeCnt.iloc[0]['no_of_lines'])  # negative test

    # Get dataframe with hour, author, and no_of_lines
    def test_getAuthorTimeLineInfo(self):
        self.dfAuthorTime = getAuthorTimeLineInfo(self.viz)
        self.assertEqual(9 ,self.dfAuthorTime.loc[2]['hour'])  # positive test
        self.assertNotEqual(10 ,self.dfAuthorTime.loc[1]['hour'])  # negative test

    # groups the dataframe by hour, author and sums no_of_lines
    def test_getAuthorTimeLineCounts(self):
        self.dfAuthorTime = getAuthorTimeLineInfo(self.viz)
        self.sumTimeCnt = getAuthorTimeLineCounts(self.viz,self.dfAuthorTime)
        self.assertEqual(9 ,self.sumTimeCnt.iloc[0]['no_of_lines'])  # positive test
        self.assertNotEqual(10 ,self.sumTimeCnt.iloc[0]['no_of_lines'])  # negative test


if __name__ == '__main__':
    unittest.main()
from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _

from cms.extensions import PageExtension
from cms.extensions.extension_pool import extension_pool


class ArticlesPlugin(CMSPlugin):
    """django-cms plugin configuration for an article list."""

    # How many articles to show per rendered page of the plugin.
    limit = models.PositiveIntegerField(_('Articles per page'))


class TeaserExtension(PageExtension):
    """Optional teaser content (title, image, description) attached to a CMS page."""

    # All fields are optional (blank/null) so the extension can be partially filled.
    title = models.CharField(_('Title'), max_length=255, blank=True, null=True)
    image = models.ImageField(
        _('Image'), upload_to='teaser', blank=True, null=True)
    description = models.TextField(_('Description'), blank=True, null=True)


# Register the extension so it becomes editable from the CMS page admin.
extension_pool.register(TeaserExtension)
from picamera import PiCamera
import picamera.array
from threading import Thread
import time
from matplotlib import pyplot as plt
import numpy as np


class Stream:
    """Continuously captures BGR frames from a Raspberry Pi camera.

    Capture runs on a background daemon thread (startStreamAsThread); the most
    recent frame is always available via readImage(). Set stopRecord=True to
    end capture and release the camera resources.
    """

    def __init__(self):
        # Camera tuned for fast motion: short shutter, high ISO, sports mode.
        self.cam = PiCamera()
        self.cam.resolution = (320,240)
        self.cam.exposure_mode = "sports"
        self.cam.framerate = 30
        self.cam.brightness = 60
        #self.cam.contrast = 60
        self.cam.iso = 800
        self.cam.shutter_speed = 500
        #self.cam.hflip = 180
        time.sleep(2)  # give the sensor time to settle before first capture
        self.rawStream = picamera.array.PiRGBArray(self.cam)
        # capture_continuous yields frames into rawStream without restarting the camera.
        self.stream = self.cam.capture_continuous(self.rawStream, format='bgr', use_video_port=True)
        #print(self.stream.array)
        self.frame = None        # latest captured frame (numpy array), None until first capture
        self.stopRecord = False  # set True to stop the capture thread and close the camera

    def startStreamAsThread(self):
        """Start the capture loop on a daemon thread; returns self for chaining."""
        s = Thread(target=self.updateFrame)
        s.daemon = True
        s.start()
        return self

    def updateFrame(self):
        """Capture loop: publish each new frame to self.frame until stopRecord is set."""
        for i in self.stream:
            self.frame = i.array
            #print(self.frame)
            # The buffer must be cleared before the next capture can be written.
            self.rawStream.truncate(0)
            if(self.stopRecord):
                self.stream.close()
                self.rawStream.close()
                return

    def readImage(self):
        #return latest frame
        return self.frame

#s = Stream()
#s.startStreamAsThread()
#time.sleep(1)
#print(s.frame)
#p = np.array(s.readImage())
#s.stopRecord = True
#print(p)
#plt.imshow(p, interpolation='nearest')
#plt.show()
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import cv2 import time import json import requests import numpy as np from PIL import Image from crnn.keys import alphabetChinese from crnn.util import resizeNormalize, strLabelConverter # ---------------Params--------------- IMAGE_PATH = "./demo_images/test.jpg" CRNN_API_URL = "http://text_recognition:8501/v1/models/crnn:predict" # ---------------Alphabet--------------- alphabet = alphabetChinese nclass = len(alphabet) + 1 # ---------------Process image--------------- image = cv2.imread(IMAGE_PATH) image = Image.fromarray(image) image = image.convert('L') image = resizeNormalize(image, 32) image = image.astype(np.float32) image = np.array([image]) # ---------------Build post--------------- post_json = { "instances": [{ "input_image": image.tolist() }] } # ---------------Test--------------- t0 = time.time() response = requests.post(CRNN_API_URL, data=json.dumps(post_json)) print("forward time : {}".format(time.time() - t0)) response.raise_for_status() prediction = response.json()["predictions"] print(prediction) raw = strLabelConverter(prediction[0], alphabet) print(raw)
# rez package definition for the Ptex per-face texture mapping library.
name = "ptex"

version = "2.1.28"

build_requires = [
    'gcc-4.8.2+'
]

requires = [
]

# Single supported platform/arch/OS combination.
variants = [
    ["platform-linux", "arch-x86_64", "os-CentOS-7"]
]

uuid = "ptex"


def commands():
    # Executed by rez when the package is resolved into an environment.
    # `env`, `building` and `{root}` are injected by rez, not defined here.
    env.PATH.append("{root}/bin")
    if building:
        # Only needed at build time by packages that compile against ptex.
        env.PTEX_INCLUDE_DIR = '{root}/include'
        env.PTEX_LOCATION = '{root}'
    env.LD_LIBRARY_PATH.append('{root}/lib')
from math import log2, floor
from torch import nn, cat, add, Tensor
from torch.nn import init, Upsample, Conv2d, ReLU
from torch.nn.functional import interpolate


class Net(nn.Module):
    """VDSR-style super-resolution network.

    The input is first bicubically upsampled by `scale_factor`; a stack of
    3x3 conv+ReLU blocks then predicts a residual that is added back onto
    the upsampled image (global skip connection). All convolutions are
    bias-free and preserve spatial size (padding=1).
    """

    def __init__(self, scale_factor, num_channels=3, base_channels=64, num_residuals=20):
        super(Net, self).__init__()
        self.upscale_factor = scale_factor

        # Entry convolution: image channels -> feature channels.
        self.input_conv = nn.Sequential(
            nn.Conv2d(num_channels, base_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(),
        )

        # Body: num_residuals identical conv+ReLU blocks.
        self.residual_layers = nn.Sequential(*[
            nn.Sequential(
                nn.Conv2d(base_channels, base_channels, kernel_size=3, stride=1, padding=1, bias=False),
                nn.ReLU(),
            )
            for _ in range(num_residuals)
        ])

        # Exit convolution: feature channels -> residual image.
        self.output_conv = nn.Conv2d(base_channels, num_channels, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x: Tensor):
        # Upsample first, then learn only the high-frequency correction.
        upsampled = nn.functional.interpolate(
            x, scale_factor=self.upscale_factor, mode="bicubic", align_corners=True)
        residual = self.output_conv(self.residual_layers(self.input_conv(upsampled)))
        return upsampled + residual
from data.models import ContentType, ReadNum
# NOTE(review): exceptions are usually imported from django.core.exceptions;
# confirm that django.db.models.fields.exceptions resolves in this Django version.
from django.db.models.fields import exceptions


class ReadNumExtend():
    """Mixin for models that track how many times an object was read.

    Expects the inheriting model to have a ReadNum row keyed by its
    content type and primary key.
    """

    def get_read_num(self):
        """Return the stored read count for this object, or 0 if none exists yet."""
        try:
            ct = ContentType.objects.get_for_model(self)
            re = ReadNum.objects.get(content_type=ct, object_id=self.pk)
            return re.read_num
        except exceptions.ObjectDoesNotExist:
            # No counter row yet means the object has never been read.
            return 0
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 24 15:31:04 2014

@author: gabor
"""
import butools
from butools.mc import CheckProbMatrix
from butools.utils import SumMatrixList
import numpy as np
import scipy.linalg as la


def CheckDMAPRepresentation (D0, D1, prec=None):
    """Return True iff (D0, D1) defines a discrete-time MAP.

    Requires: D0 a transient probability matrix, D0 and D1 the same size
    with non-negative entries, and every row of D0+D1 summing to 1 (up to
    `prec`; defaults to butools.checkPrecision). Diagnostic messages are
    printed when butools.verbose is set.
    """
    if prec is None:
        prec = butools.checkPrecision

    if not CheckProbMatrix(D0, True, prec):
        if butools.verbose:
            print ("CheckDMAPRepresentation: D0 is not a transient probability matrix!")
        return False

    if D0.shape != D1.shape:
        if butools.verbose:
            print ("CheckDMAPRepresentation: D0 and D1 have different sizes!")
        return False

    if min(np.min(D0), np.min(D1)) < -prec:
        if butools.verbose:
            print ("CheckDMAPRepresentation: One of the matrices has negative element!")
        return False

    row_sums = np.sum(D0 + D1, 1)
    if np.any(np.abs(row_sums - 1) > prec):
        if butools.verbose:
            print ("CheckDMAPRepresentation: The rowsum of D0+D1 is not 1!")
        return False

    return True


def CheckDMMAPRepresentation (D,prec=None):
    """Return True iff the list of matrices D = [D0, D1, ..., DK] defines a
    discrete-time MMAP: all entries of D1..DK non-negative, and (D0, D1+...+DK)
    a valid DMAP (delegated to CheckDMAPRepresentation).
    """
    if prec is None:
        prec = butools.checkPrecision

    # Any negative entry anywhere disqualifies the representation.
    if np.min(np.hstack(D)) < -prec:
        if butools.verbose:
            print ("CheckDMMAPRepresentation: Some of the matrices D1 ... DM have negative elements!")
        return False

    return CheckDMAPRepresentation(D[0], SumMatrixList(D[1:]), prec)


def CheckDRAPRepresentation (D0, D1, prec=None):
    """Return True iff (D0, D1) defines a discrete-time RAP.

    Requires: D0 and D1 square and of equal size, every row of D0+D1 summing
    to 1 (up to `prec`), and the dominant eigenvalue of D0 real and not
    greater than 1. Also warns when the dominant eigenvalue is not unique
    in absolute value.
    """
    if prec is None:
        prec = butools.checkPrecision

    if D0.shape[0] != D0.shape[1]:
        if butools.verbose:
            print ("CheckDRAPRepresentation: D0 is not a quadratic matrix!")
        return False

    if D1.shape[0] != D1.shape[1]:
        if butools.verbose:
            print ("CheckDRAPRepresentation: D1 is not a quadratic matrix!")
        return False

    if D0.shape != D1.shape:
        if butools.verbose:
            print ("CheckDRAPRepresentation: D0 and D1 have different sizes!")
        return False

    # D0/D1 are numpy matrices here, hence the .A to get a plain ndarray.
    if np.any(np.abs(np.sum(D0 + D1, 1).A.flatten() - 1.0) > prec):
        if butools.verbose:
            print ("CheckDRAPRepresentation: A rowsum of D0+D1 is not 1!")
        return False

    # Dominant eigenvalue: largest by absolute value of the real part.
    ev = la.eigvals(D0)
    order = np.argsort(-np.abs(np.real(ev)))
    maxev = ev[order[0]]

    if not np.isreal(maxev):
        if butools.verbose:
            print("CheckDRAPRepresentation: The largest eigenvalue of matrix D0 is complex!")
        return False

    if maxev > 1.0 + prec:
        if butools.verbose:
            print("CheckDRAPRepresentation: The largest eigenvalue of matrix D0 is greater than 1!")
        return False

    if butools.verbose and np.sum(np.abs(ev) == abs(maxev)) > 1:
        print ("CheckDRAPRepresentation warning: There are more than one eigenvalue with the same absolute value as the largest eigenvalue!")

    return True


def CheckDMRAPRepresentation(H,prec=None):
    """Return True iff the list of matrices H = [H0, H1, ..., HK] defines a
    discrete-time MRAP: (H0, H1+...+HK) must be a valid DRAP (delegated to
    CheckDRAPRepresentation).
    """
    if prec is None:
        prec = butools.checkPrecision
    return CheckDRAPRepresentation(H[0], SumMatrixList(H[1:]), prec)
import sys
import logging
import os
from splunklib.modularinput import *
import ConfigParser
from SharedAPIs.SharedAPIs import *


def do_work(input_name, ew, symbol):
    """Emit one diagnostic event for `symbol` through the modular input.

    Parameters:
        input_name: stanza name of the modular input the event belongs to.
        ew: splunklib EventWriter used for both logging and event output.
        symbol: the `username` value configured on the input stanza.
    """
    EventWriter.log(ew, EventWriter.INFO, "JORDI %s" % symbol)
    data = symbol
    splunk_home = os.getenv("SPLUNK_HOME")
    myscript = sys.argv[0]
    basedir = os.path.dirname(os.path.dirname(sys.argv[0]))
    inputsfile = basedir + "/local/inputs.conf"
    mydata = "JORDI LOGS: PARAMETER:{} SPLUNK_HOME:{} MYSCRIPT:{} basedir:{} inputsfile:{}".format(data,splunk_home, myscript, basedir, inputsfile)
    message(mydata)
    EventWriter.log(ew, EventWriter.INFO, mydata)
    # BUG FIX: `event` was referenced without ever being created, which raised
    # a NameError at runtime.  Build a proper modular-input Event first.
    event = Event(stanza=input_name, data=mydata)
    ew.write_event(event)


class MyScript(Script):
    """Modular input that logs diagnostics and masquerades credentials
    stored in local/inputs.conf."""

    def get_scheme(self):
        """Declare the input scheme: two required string arguments,
        `username` and `password`."""
        scheme = Scheme("My test input")
        scheme.description = "My test input prototype"
        scheme.use_external_validation = True
        scheme.use_single_instance = False

        username_argument = Argument("username")
        username_argument.data_type = Argument.data_type_string
        username_argument.description = "username"
        username_argument.required_on_create = True
        scheme.add_argument(username_argument)

        password_argument = Argument("password")
        password_argument.data_type = Argument.data_type_string
        password_argument.description = "password"
        password_argument.required_on_create = True
        scheme.add_argument(password_argument)
        return scheme

    def validate_input(self, validation_definition):
        # External validation hook; currently only logs that it ran.
        message("lala: im in")

    def stream_events(self, inputs, ew):
        """Entry point invoked by Splunk: masquerade stored credentials,
        then emit one event per configured input stanza."""
        self.masqueradeConfig()
        message("JORDI executed")
        for input_name, input_item in inputs.inputs.iteritems():
            symbol = str(input_item["username"])
            do_work(input_name, ew, symbol)

    def masqueradeConfig(self):
        """Rewrite plaintext username/password values in local/inputs.conf.

        Values already starting with "+++" are treated as masqueraded and
        left alone; everything else is passed through masqueradeString()
        and written back via updateConfigFile().
        """
        message("masquerade config")
        self.SPLUNK_HOME = os.getenv("SPLUNK_HOME")
        self.BASEDIR = os.path.dirname(os.path.dirname(sys.argv[0]))
        self.INPUTSFILE = self.BASEDIR + "/local/inputs.conf"
        myconfig = ConfigParser.ConfigParser()
        myconfig.read(self.INPUTSFILE)
        for mysection in myconfig.sections():
            for (mykey, myvalue) in myconfig.items(mysection):
                message("section:{} key:{} value:{}".format(mysection, mykey, myvalue))
                if (mykey == "username" or mykey == "password") and not myvalue.startswith("+++"):
                    mynewvalue = masqueradeString(myvalue)
                    message("To masquerade key={0} value={1} into key={0} value={2}".format(mykey, myvalue, mynewvalue))
                    updateConfigFile(self.INPUTSFILE, mysection, mykey, mynewvalue)


def mytest():
    """Standalone smoke test of the masquerading pass (no Splunk runtime)."""
    splunk_home = os.getenv("SPLUNK_HOME")
    myscript = sys.argv[0]
    basedir = os.path.dirname(os.path.dirname(sys.argv[0]))
    inputsfile = basedir + "/local/inputs.conf"
    mydata = "JORDI LOGS: SPLUNK_HOME:{} MYSCRIPT:{} basedir:{} inputsfile:{}".format(splunk_home, myscript, basedir, inputsfile)
    message(str(mydata))
    myconfig = ConfigParser.ConfigParser()
    myconfig.read(inputsfile)
    for mysection in myconfig.sections():
        for (mykey, myvalue) in myconfig.items(mysection):
            message("section:{} key:{} value:{}".format(mysection,mykey,myvalue))
            if ( mykey == "username" or mykey == "password" ) and not myvalue.startswith("+++"):
                mynewvalue = masqueradeString(myvalue)
                message("To masquerade key={0} value={1} into key={0} value={2}".format(mykey, myvalue, mynewvalue))
                updateConfigFile(inputsfile, mysection, mykey, mynewvalue)


if __name__ == "__main__":
    # Toggle to mytest() for offline debugging of the masquerading pass.
    #mytest()
    MyScript().run(sys.argv)
def filtrarPalabras(n, cadena):
    """Return the words of `cadena` whose length is >= n.

    The result is a single string with each kept word followed by one
    space (so a non-empty result has a trailing space).

    Parameters:
        n: minimum word length (inclusive).
        cadena: input text; words are whitespace-separated.
    """
    palabras = cadena.split()
    nueva = ""
    for palabra in palabras:
        if len(palabra) >= n:
            nueva = nueva + palabra + " "
    return nueva


# PROGRAMA PRINCIPAL
frase = '''En un lugar de la Mancha de cuyo nombre no quiero acordarme
no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero
adarga antigua rocín flaco y galgo corredor'''

# BUG FIX: the interactive prompt previously ran at module level, so merely
# importing this file blocked on input().  Guarding it keeps script behavior
# identical while making the module importable.
if __name__ == "__main__":
    n = int(input("Ingrese un número entero: "))
    print("Palabras cuya longitud es mayor o igual a", n, "-", filtrarPalabras(n, frase))
from Interface import *
import pickle


class BankAccount:
    """A bank account with a balance and per-category spending totals.

    State is persisted between runs by pickling the whole account to
    data.txt (see writeBankAccountToFile / initialRead)."""

    def __init__(self, startingValue):
        self.balance = startingValue   # current available funds
        self.categories = []           # list of Category objects

    def categoryExistence(self, name):
        """Return True if a category with this name exists (case-insensitive)."""
        return any(category.name.lower() == name.lower()
                   for category in self.categories)

    def addCategory(self, newCategory, amount):
        """Create and track a new spending category seeded with `amount`."""
        self.categories.append(Category(newCategory, amount))

    def updateCategoryInBankAccount(self, name, amount):
        """Add `amount` to the running total of the matching category."""
        for category in self.categories:
            if category.name.lower() == name.lower():
                category.updateCategory(amount)

    def addTransaction(self, amount, category):
        """Record a spend: reduce the balance and tally it under `category`,
        creating the category on first use."""
        self.balance -= amount
        if self.categoryExistence(category):
            self.updateCategoryInBankAccount(category, amount)
        else:
            self.addCategory(category, amount)

    def deposit(self, amount):
        """Add funds to the balance."""
        self.balance += amount

    def checkBalance(self):
        """Return the current balance."""
        return self.balance

    def quit(self):
        """Persist the account to disk before exiting."""
        writeBankAccountToFile(self)


class Category:
    """A named spending category with a running amount."""

    def __init__(self, name, amount):
        self.name = name
        self.amount = amount

    def updateCategory(self, amount):
        """Add `amount` to this category's total."""
        self.amount += amount

    def getAmount(self):
        return self.amount

    def getName(self):
        return self.name


def writeBankAccountToFile(account):
    """Pickle `account` to data.txt, replacing any previous contents.

    BUG FIX: was open("data.txt", "r+") — text mode corrupts pickle data,
    "r+" fails when the file is missing, and it never truncated stale,
    longer content.  "wb" with a context manager fixes all three and
    guarantees the handle is closed."""
    with open("data.txt", "wb") as f:
        pickle.dump(account, f)


def initialRead():
    """Load the saved account, or interactively create a fresh one.

    BUG FIX: was open(..., "r+") (wrong mode for pickle, crashed when the
    file did not exist) and leaked the file handle on the EOFError path."""
    try:
        with open("data.txt", "rb") as f:
            return pickle.load(f)
    except (IOError, EOFError):
        # No (or empty) saved data yet: ask the user for a starting balance.
        amount = float(raw_input("What is the starting amount?"))
        return BankAccount(amount)


def main():
    account = initialRead()
    interface(account)


if __name__ == '__main__':
    main()
from rest_framework.response import Response
from rest_framework import serializers, viewsets
from django.shortcuts import get_object_or_404

from health_records.models import HealthProfile, HealthRecord, PhysActivity, EatingInfo


class PhysicalActivitySerializer(serializers.ModelSerializer):
    """Serializes every field of a PhysActivity row."""
    class Meta:
        model = PhysActivity
        fields = '__all__'


class EatingSerializer(serializers.ModelSerializer):
    """Serializes every field of an EatingInfo row."""
    class Meta:
        model = EatingInfo
        fields = '__all__'


class HealthRecordSerializer(serializers.ModelSerializer):
    """One health record plus its nested activity and eating entries."""
    # Nested read of the related collections.
    # NOTE(review): nested many=True serializers are read-only unless
    # create()/update() are implemented — confirm writes go elsewhere.
    physical_activity = PhysicalActivitySerializer(many=True)
    eating_info = EatingSerializer(many=True)

    class Meta:
        model = HealthRecord
        fields = ('activity_date', 'date_created', 'date_modified', 'weight',
                  'physical_activity', 'eating_info', )


# Serializers define the API representation.
class HealthProfileSerializer(serializers.ModelSerializer):
    """A profile with all of its health records nested inline."""
    records = HealthRecordSerializer(many=True)

    class Meta:
        model = HealthProfile
        fields = ('height', 'pk', 'records',)


class HealthProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for health profiles."""
    queryset = HealthProfile.objects.all()
    serializer_class = HealthProfileSerializer

    def retrieve(self, request, pk=None):
        # Custom retrieve: 404 on unknown pk and pass the request into the
        # serializer context.
        # NOTE(review): presumably the context is needed by hyperlinked /
        # request-aware fields — confirm; otherwise the default retrieve
        # would suffice.
        queryset = HealthProfile.objects.all()
        profile = get_object_or_404(queryset, pk=pk)
        serializer = self.serializer_class(profile, context={'request': request})
        return Response(serializer.data)
""" ---------------------------------- Minas Katsiokalis AM: 2011030054 email: minaskatsiokalis@gmail.com ---------------------------------- """ import crypto_1 import crypto_4 """ ----------------------------------------- Generation of AES key using SHA-256 ----------------------------------------- """ #generates a key for AES-128 based on the hashed password of user def generateHashedUserKey(password): aes = crypto_1.AES() sha256 = crypto_4.SHA() #hashing the password with SHA 256 str1 = ''.join(str(e) for e in password) sha_pass = sha256.sha256_lib(str1) #generate a key with the hash of password aes_pass = aes.generateUserKey(sha_pass[:16]) return aes_pass
from selenium.webdriver.common.by import By
from base.basepage import BasePage
from utilities.custom_logger import customLogger
import logging
import pytest


class LoginPage(BasePage):
    """Page object for the site's login flow.

    Wraps the BasePage element helpers with login-specific locators and
    provides high-level actions plus result verifications.
    """

    log = customLogger(logging.DEBUG)

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    # Locators (cleaned up: removed the superseded, commented-out
    # find_element helper methods that duplicated BasePage functionality)
    _login_link = "Login"               # link text
    _email_field = "user_email"         # id
    _password_field = "user_password"   # id
    _login_button = "commit"            # name

    def clickLoginLink(self):
        """Open the login form from the navigation bar."""
        self.elementClick("linktext", self._login_link)

    def enterEmail(self, email):
        """Type the email address into the login form."""
        self.elementSendKeys(email, "id", self._email_field)

    def enterPassword(self, password):
        """Type the password into the login form."""
        self.elementSendKeys(password, "id", self._password_field)

    def clickLoginButton(self):
        """Submit the login form."""
        self.elementClick("name", self._login_button)

    def Login(self, username="", password=""):
        """Full login sequence: open form, fill credentials, submit."""
        self.clickLoginLink()
        self.enterEmail(username)
        self.enterPassword(password)
        self.clickLoginButton()

    def verifyLoginSuccessful(self):
        """True when the logged-in user's name appears in the navbar."""
        # NOTE(review): "islementPresent" is the (misspelled) method name
        # exposed by BasePage — keep in sync if BasePage is ever renamed.
        result = self.islementPresent("xpath", "//*[@id='navbar']//span[text()='Test User']")
        return result

    def verifyLoginFailed(self):
        """True when the invalid-credentials error message is shown."""
        result = self.islementPresent("xpath", "//div[contains(text(), 'Invalid email or password')]")
        return result

    def verifyTitle(self):
        """Check the page title via the BasePage helper."""
        return self.verifyPageTitle("google")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# First draft of a port of our Django–centric module style fabfile so
# it's suitable for use in deploying Meteor applications.
#
# It looks to a python module (pointed to by HOST_ROLES env. var)
# for roles (groups of hosts) to work on.
#
# Examples:
# `HOST_ROLES=serverroles fab deploy`
#     will prompt for role selection from the serverroles python module.
# `HOST_ROLES=serverroles fab -R staging deploy`
#     would deploy to whatever staging servers are setup.
# `HOST_ROLES=serverroles fab -H mytests.local`
#     would deploy to the single host mytest.local.
# `HOST_ROLES=serverroles fab deploy`
#     will prompt for role selection before deploying.

import os
import subprocess
import sys

from fabric.api import abort, env, hide, local, task
from fabric.context_managers import cd, lcd
from fabric.contrib.console import confirm
from fabric.decorators import runs_once
from fabric.operations import prompt, run, sudo

# Campfire chat notifications are optional; degrade gracefully if the
# camplight package is not installed.
# NOTE(review): on success the module name `camplight` is rebound to the
# boolean True — confirm nothing else expects the module object.
try:
    from camplight import Request, Campfire
    print 'imported camplight'
    camplight = True
except ImportError:
    camplight = False

# defaults for conforming puppet-managed vhost instances
DEFAULT_VHOST_PATH = '/home/vhosts/'
DEFAULT_REPO_NAME = 'repo.git'  # a bare repo sent to with send-pack
DEFAULT_WORKTREE = 'code'
DEFAULT_METEOR = True
DEFAULT_RUNDIR = 'rundir'

# Which python module defines the roles; falls back to "roles".
try:
    host_roles = os.environ['HOST_ROLES']
except KeyError:
    host_roles = "roles"

# set env.vhosts from the python module…
try:
    fab_roles = __import__(host_roles)
except ImportError:
    raise RuntimeError("Couldn't import your project roles!")

vhosts = getattr(fab_roles, 'vhosts', None)

env.forward_agent = True

if vhosts != None:
    # Fill in per-vhost defaults for any key the roles module omitted.
    for vhost in vhosts.keys():
        vhosts[vhost]['vhostpath'] = \
            vhosts[vhost].get('vhostpath', DEFAULT_VHOST_PATH + vhost)
        vhosts[vhost]['reponame'] = \
            vhosts[vhost].get('reponame', DEFAULT_REPO_NAME)
        vhosts[vhost]['worktree'] = \
            vhosts[vhost].get('worktree', DEFAULT_WORKTREE)
        vhosts[vhost]['meteor'] = \
            vhosts[vhost].get('meteor', DEFAULT_METEOR)
        vhosts[vhost]['rundir'] = \
            vhosts[vhost].get('rundir', DEFAULT_RUNDIR)

    env.vhosts = vhosts

    # env.roledefs is used internally by Fabric, so preserve that behaviour
    for vhost in env.vhosts.keys():
        env.roledefs.update({
            vhost: env.vhosts[vhost]['hosts']
        })

# only prompt for a role later if we're not running a side-effect free
# command.
do_something = True
quick_cmds = ('-l', '--list', 'check_clean', 'listroles')
for arg in sys.argv:
    if arg in quick_cmds:
        do_something = False
        continue

# If Fabric is called without specifying either a *role* (group of
# predefined servers) or at least one *host* (via the -H argument),
# then prompt the user to choose a role from the predefined roledefs
# list. This way, the env.hosts list is constructed at script load
# time and all the functions can use it when they run (by the time
# fabric commands are run the environment should already be set up
# with all the required host information!).
if vhosts is None:
    pass
elif do_something and (not env.roles and not env.hosts):
    # Interactive path: no role/host given on the commandline.
    validgrp = prompt("Choose host group [%s]: " % \
                      ", ".join(env.roledefs.keys()),
                      validate=lambda x: x in env.roledefs.keys() and x)
    if not validgrp:
        abort('No such group of hosts.')
    if hasattr(env.roledefs[validgrp], '__call__'):
        # if the role definition value is callable, call it to get the
        # list of hosts.
        print "Retrieving list of hosts",
        sys.stdout.flush()
        rawhosts = env.roledefs[validgrp]()
        hosts = [host['address'] for host in rawhosts]
        hostnames = [host['name'] for host in rawhosts]
        print "OK"
    else:
        hostnames = hosts = env.roledefs[validgrp]
    env.hosts.extend(hosts)
    if not confirm("Acting on the following hosts: \n%s\nOK? " \
                   % "\n".join(hostnames)):
        abort('OK, aborting.')
    # env.roles used by Fabric internally
    env.roles = []
    env.vhost = validgrp
    env.hosts = hosts
elif len(env.roles) > 1:
    # simplifies host detection for now…
    abort('Sorry, I currently only operate on one role at a time')
elif env.roles:
    # A single role was given with -R: resolve its hosts.
    role = env.roles[0]
    print "Retrieving list of hosts for role %s" % role
    if hasattr(env.roledefs[role], '__call__'):
        # if the role definition value is callable, call it to get the
        # list of hosts.
        sys.stdout.flush()
        rawhosts = env.roledefs[role]()
        hosts = [host['address'] for host in rawhosts]
        hostnames = [host['name'] for host in rawhosts]
        env.roles = []
        env.vhost = role
        env.hosts = hosts
    else:
        hosts = env.roledefs[role]
        env.vhost = role
        env.hosts.extend(hosts)
    print "OK"
elif env.hosts:
    # hosts specified on the commandline…
    # makes things saner if we only allow hosts already declared in
    # our vhosts, since we need a vhostpath. And only hosts from a
    # single Role can be specified.
    print "Checking sanity of manual host selection",
    sys.stdout.flush()
    # make sure all hosts specified belong to a Role, and only one
    # Role. Since to do this we need to resolve all role
    # hostnames, it might take a little while…
    hostlist = {}
    for vhost in env.vhosts.keys():
        hostlist[vhost] = []
        if hasattr(env.vhosts[vhost]['hosts'], '__call__'):
            hostlist[vhost].extend(env.vhosts[vhost]['hosts']())
        else:
            hostlist[vhost].append({
                'address': env.vhosts[vhost]['hosts'][0],
                'name': env.vhosts[vhost]['hosts'][0]
            })
    # now check supplied hosts against list of all hosts from all roles
    role = None
    ##
    ## env.hosts might contain short names, like 'K3-App-1', so
    ## resolve those to their IP addresses; rewriting env.hosts
    ## accordingly.
    ##
    for i, host in enumerate(env.hosts):
        # hosts from commandline
        for vhost in hostlist.keys():
            for host_dict in hostlist[vhost]:
                if host in host_dict['address']:
                    # the role this host belongs to
                    if role is None:
                        role = vhost
                    elif role != vhost:
                        abort("Sorry, only hosts for a single role can be provided")
                    # we've got a role for the provided host
                    continue
                elif host in host_dict['name']:
                    env.hosts[i] = host_dict['address']
                    # the role this host belongs to
                    if role is None:
                        role = vhost
                    elif role != vhost:
                        abort("Sorry, only hosts for a single role can be provided")
                    # we've got a role for the provided host
                    continue
    if role is None:
        abort("Sorry, only hosts from a declared role can be provided")
    else:
        env.vhost = role
    print "OK"

#
## Commands start here
#

# used when checking for a clean local worktree
PROJECT_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))


@task
def kickpuppy():
    """Runs a 'service puppet restart'.
    """
    sudo('/usr/sbin/service puppet restart')


@task
def chownvhost():
    """Ensures various directories are owned by www-data:staff.
    """
    with hide('running'):
        sudo('/bin/chown -R www-data:staff %s/%s' %
             (env.vhosts[env.vhost]['vhostpath'],
              env.vhosts[env.vhost]['worktree']))
        sudo('/bin/chown -R www-data:staff %s/%s' %
             (env.vhosts[env.vhost]['vhostpath'],
              env.vhosts[env.vhost]['reponame']))
        sudo('/bin/chown -R www-data:staff %s/%s' %
             (env.vhosts[env.vhost]['vhostpath'],
              env.vhosts[env.vhost]['rundir']))


@task
def deploy():
    """Deploy code to hosts and restart services.
    """
    # check_clean()
    refspec = os.getenv('GIT_REFSPEC', False)
    revision = os.getenv('GIT_COMMIT', False)
    if not refspec or not revision:
        with lcd(PROJECT_PATH):
            # determine refspec and revision using git plumbing.
            refspec = local("git symbolic-ref HEAD", capture=True).strip()
            commit_msg = local("git log -n 1 --oneline", capture=True).strip()
    # NOTE(review): when GIT_REFSPEC/GIT_COMMIT are both set, `commit_msg`
    # is never assigned and the tell_campfire calls below raise NameError —
    # confirm and fix.
    chownvhost()
    light_a_campfire()
    tell_campfire('{} deploying {} ({}) to [{}]'.format(
        env.user, refspec, commit_msg, env.vhost))
    pull()
    chownvhost()
    mrt_deploy()
    chownvhost()
    restartservices()
    tell_campfire('{} deployed {} ({}) to [{}]'.format(
        env.user, refspec, commit_msg, env.vhost))


@task
def mrt_deploy():
    """Bundles & unbundles on the server.

    The stuff in the worktree ends up in the rundir, by means of
    meteorite bundling.
    """
    # bundle from the checked out code (worktree)
    with cd('%s/%s' % (env.vhosts[env.vhost]['vhostpath'],
                       env.vhosts[env.vhost]['worktree'])):
        with hide('running', 'stdout'):
            sudo('/bin/chown -R www-data:staff .')
            sudo('/bin/chmod -R g+w .')
        run('rm -f /tmp/bundle_%s' % env.vhost)
        # copy a couple of files so they make it into the bundle
        run('test -f ../secrets/server_local_settings.js && cp ../secrets/server_local_settings.js app/server/_local_settings.js || true')
        run('test -f ../secrets/lib_local_settings.js && cp ../secrets/lib_local_settings.js app/lib/_local_settings.js || true')
        with cd('app'):
            run('mrt bundle /tmp/bundle_%s.tar.gz' % env.vhost)
    # unbundle inside the rundir
    with cd('%s/%s' % (env.vhosts[env.vhost]['vhostpath'],
                       env.vhosts[env.vhost]['rundir'])):
        sudo('rm -rf bundle')
        run('tar xfz /tmp/bundle_%s.tar.gz' % env.vhost)
    # NOTE: the path to node_modules changes in Meteor 0.6.5
    # bundle/programs/server/node_modules
    with cd('%s/%s/bundle/programs/server/node_modules' % (
            env.vhosts[env.vhost]['vhostpath'],
            env.vhosts[env.vhost]['rundir'])):
        # reinstall fibers
        run('npm uninstall fibers')
        run('npm install fibers')
    #
    # End npm packaging hack.
    #
    with cd('%s/%s' % (env.vhosts[env.vhost]['vhostpath'],
                       env.vhosts[env.vhost]['rundir'])):
        with hide('running', 'stdout'):
            sudo('/bin/chown -R www-data:staff bundle')
            sudo('/bin/chmod -R g+w bundle')
    # delete the temp bundle .tar.gz
    run('rm -f /tmp/bundle_%s.tar.gz' % env.vhost)


@task
@runs_once
def check_clean():
    """Check for clean working tree.

    Uses “non-porcelain” Git commands (i.e. it uses “plumbing”
    commands), which are supposed to be much more stable than user
    interface commands.
    """
    print "Checking for a clean tree "
    # update the index first
    with hide('running'):
        with lcd(PROJECT_PATH):
            local('git update-index -q --ignore-submodules --refresh')
    # 1. check for unstaged changes in the working tree
    rtncode = subprocess.call(['git', 'diff-files', '--quiet',
                               '--ignore-submodules', '--'],
                              cwd=PROJECT_PATH)
    if rtncode:
        # Python < 2.7 doesn't have subprocess.check_call :(
        process = subprocess.Popen(['git', 'diff-files', '--name-status', '-r',
                                    '--ignore-submodules', '--'],
                                   stdout=subprocess.PIPE, cwd=PROJECT_PATH)
        output, err = process.communicate()
        print '\n\n%s' % output.strip()
        abort('Resolve your unstaged changes before deploying!')
    # 2. check for uncommitted changes in the index
    rtncode = subprocess.call(['git', 'diff-index', '--cached', '--quiet',
                               'HEAD', '--ignore-submodules', '--'],
                              cwd=PROJECT_PATH)
    if rtncode:
        # Python < 2.7 doesn't have subprocess.check_call :(
        process = subprocess.Popen(['git', 'diff-index', '--cached',
                                    '--name-status', '-r', '--ignore-submodules',
                                    'HEAD', '--'],
                                   stdout=subprocess.PIPE, cwd=PROJECT_PATH)
        output, err = process.communicate()
        print '\n\n%s' % output.strip()
        abort('Resolve your uncommitted changes before deploying!')
    # 3. check for untracked files in the working tree
    process = subprocess.Popen(['git', 'ls-files', '--others',
                                '--exclude-standard', '--error-unmatch', '--'],
                               stdout=subprocess.PIPE, cwd=PROJECT_PATH)
    output, err = process.communicate()
    if output:
        print '\n\n%s' % output.strip()
        abort('Resolve your untracked files before deploying!')
    # 4. check the refspec and commit to ensure it's on the origin
    # server (so can be pulled onto the deployment target)
    refspec = os.getenv('GIT_REFSPEC', False)
    revision = os.getenv('GIT_COMMIT', False)
    if not refspec or not revision:
        with lcd(PROJECT_PATH):
            # determine refspec and revision using git plumbing.
            refspec = local("git symbolic-ref HEAD", capture=True).strip()
            revision = local("git rev-parse --verify HEAD", capture=True).strip()
    print 'Fetching origin refs'
    local('git fetch origin')
    process = subprocess.Popen(['git', 'branch', '-r', '--contains', revision],
                               stdout=subprocess.PIPE, cwd=PROJECT_PATH)
    output, err = process.communicate()
    if not output:
        abort("The revision you're trying to deploy doesn't exist in the origin. You have to push.")
    print "OK"


@task
def checkouttag(tag):
    """Checks out a tag from the repository into the worktree.
    """
    with cd('%s/%s' % (env.vhosts[env.vhost]['vhostpath'],
                       env.vhosts[env.vhost]['reponame'])):
        run('git fetch --tags')
        # delete the old worktree before checking out fresh
        sudo('/bin/chmod -R g+w %s/%s/' % (env.vhosts[env.vhost]['vhostpath'],
                                           env.vhosts[env.vhost]['worktree']))
        run('rm -rf %s/%s/*' % (env.vhosts[env.vhost]['vhostpath'],
                                env.vhosts[env.vhost]['worktree']))
        sudo('/bin/chmod -R g+w .')
        sudo('/usr/bin/git checkout -f %s' % tag)
        sudo('/bin/chmod -R g+w %s/%s/' % (env.vhosts[env.vhost]['vhostpath'],
                                           env.vhosts[env.vhost]['worktree']))
    print "OK"


@task
def pull():
    """Fetch and checkout the revision from the repo.
    """
    with hide():
        refspec = os.getenv('GIT_REFSPEC', False)
        revision = os.getenv('GIT_COMMIT', False)
        if not refspec or not revision:
            with lcd(PROJECT_PATH):
                # determine refspec and revision using git plumbing.
                refspec = local("git symbolic-ref HEAD", capture=True).strip()
                revision = local("git rev-parse --verify HEAD", capture=True).strip()
        with cd('%s/%s' % (env.vhosts[env.vhost]['vhostpath'],
                           env.vhosts[env.vhost]['reponame'])):
            run('git fetch origin %s' % refspec)
            # delete the old worktree before checking out fresh
            sudo('/bin/chmod -R g+w %s/%s/' % (env.vhosts[env.vhost]['vhostpath'],
                                               env.vhosts[env.vhost]['worktree']))
            run('rm -rf %s/%s/*' % (env.vhosts[env.vhost]['vhostpath'],
                                    env.vhosts[env.vhost]['worktree']))
            sudo('/bin/chmod -R g+w .')
            sudo('/usr/bin/git checkout -f %s' % revision)
            sudo('/bin/chown -R www-data:staff %s/%s' % (
                env.vhosts[env.vhost]['vhostpath'],
                env.vhosts[env.vhost]['reponame']))
    print "OK"


@task
def restartservices():
    """Restart web workers.
    """
    with hide('running'):
        with cd(env.vhosts[env.vhost]['vhostpath']):
            print "Restarting %s " % env.host
            if env.vhosts[env.vhost]['meteor']:
                run("supervisorctl restart %s_meteor" % env.vhost)
    print "OK"


@task
def stopservices():
    """Stop services.
    """
    with hide('running'):
        with cd(env.vhosts[env.vhost]['vhostpath']):
            print "Stopping %s " % env.host
            if env.vhosts[env.vhost]['meteor']:
                run("supervisorctl stop %s_meteor" % env.vhost)
    print "OK"


@task
def startservices():
    """Start services.
    """
    with hide('running'):
        with cd(env.vhosts[env.vhost]['vhostpath']):
            print "Starting %s " % env.host
            if env.vhosts[env.vhost]['meteor']:
                run("supervisorctl start %s_meteor" % env.vhost)
    print "OK"


@task
def listroles():
    """Lists the roles defined in HOST_ROLES module.
    """
    print 'I know about the following roles: %s' % \
        ', '.join(env.vhosts.keys())


@task
@runs_once
def light_a_campfire():
    # Join the Campfire room once per run; sets the module-level flags
    # consumed by tell_campfire().
    if camplight:
        try:
            #
            # Light a campfire.
            #
            # XXX Hardcode values should be abstracted to settings
            # NOTE(review): the API token is hardcoded in source — rotate it
            # and move it to configuration.
            #
            campfire_request = Request(
                'https://commoncode.campfirenow.com',
                '6c1897e6a194951ea55c82c05e18b79a3562e1e6'
            )
            campfire = Campfire(campfire_request)
            account = campfire.account()
            rooms = campfire.rooms()
            global campfire_is_lit
            global campfire_room
            campfire_room = campfire.room('Mentorloop')
            campfire_room.join()
        except Exception, e:
            #
            # Log these for now. We're expecting a HttpError due to connection
            # problems, or a changed API token.
            #
            print 'Error: %s' % e
            campfire_is_lit = False
        else:
            #
            # The campfire is lit!
            #
            campfire_is_lit = True


def tell_campfire(msg):
    # Best-effort chat notification; silently does nothing when the
    # campfire was never lit.
    if camplight and campfire_is_lit:
        campfire_room.speak(msg)
# _*_ coding:utf-8 _*_
# @File  : passport.py
# @Time  : 2020-08-31 8:24
# @Author: zizle
""" User login and registration. """
import re
import time
import base64
from datetime import datetime
from fastapi import APIRouter, Form, File, UploadFile, Depends, Body, Query
from fastapi.encoders import jsonable_encoder
from fastapi.exception_handlers import HTTPException
from fastapi.responses import StreamingResponse
from pymysql.err import IntegrityError
from utils import verify
from db.redis_z import RedisZ
from db.mysql_z import MySqlZ
from configs import logger
from modules.basic.validate_models import ExchangeLibCN, VarietyGroupCN
from .models import JwtToken, User, UserInDB, ModuleItem, UserModuleAuthItem, UserClientAuthItem, UserVarietyAuthItem

passport_router = APIRouter()


class ClientNotFound(Exception):
    """ Raised when the requesting client is not registered. """


async def checked_image_code(input_code: str = Form(...), code_uuid: str = Form(...)):
    """ Dependency: validate the image captcha (case-insensitive). """
    with RedisZ() as r:
        real_image_code = r.get(code_uuid)  # fetch the real captcha stored in redis under code_uuid
        if not real_image_code or input_code.lower() != real_image_code.lower():
            return False
    return True


async def get_default_role(client_token: str = Form(...)):
    """ Dependency: derive the default user role from the client type. """
    with MySqlZ() as cursor:
        cursor.execute("SELECT `id`,`is_manager` FROM basic_client WHERE machine_uuid=%s;", client_token)
        client = cursor.fetchone()
        # NOTE(review): if no client row matches, `client` is None and the
        # subscript below raises TypeError — confirm callers guarantee a
        # valid client_token.
        if client["is_manager"]:
            return "research"
        else:
            return "normal"


@passport_router.post("/register/", summary="用户注册")
async def register(
        is_image_code_passed: bool = Depends(checked_image_code),
        role: str = Depends(get_default_role),
        phone: str = Form(...),
        username: str = Form(""),
        email: str = Form(""),
        password: str = Form(...),
        client_uuid: str = Form(...)
):
    # Register a new user and grant it access to the registering client.
    if not is_image_code_passed:
        return {"message": "验证码有误!", "user": {}}
    # NOTE(review): a blocking sleep inside an async handler stalls the whole
    # event loop for every request — confirm this throttle is intentional.
    time.sleep(3)
    # decode the base64-wrapped phone and password
    phone = base64.b64decode(phone.encode('utf-8')).decode('utf-8')
    password = base64.b64decode(password.encode('utf-8')).decode('utf-8')
    # manually validate the email and phone number
    # NOTE(review): email defaults to "" but an empty string fails this regex,
    # making email effectively mandatory — confirm.
    if not re.match(r'^([a-zA-Z0-9]+[_|\_|\.]?)*[a-zA-Z0-9]+@([a-zA-Z0-9]+[_|\_|\.]?)*[a-zA-Z0-9]+\.[a-zA-Z]{2,3}$', email):
        return {"message": "邮箱格式有误!", "user": {}}
    if not re.match(r'^[1][3-9][0-9]{9}$', phone):
        return {"message": "手机号格式有误!", "user": {}}
    # persist the user record
    user_to_save = UserInDB(
        user_code=verify.generate_user_unique_code(),
        username=username,
        phone=phone,
        email=email,
        role=role,
        password_hashed=verify.get_password_hash(password)  # hash the user's password
    )
    try:
        with MySqlZ() as cursor:
            cursor.execute(
                "INSERT INTO `user_user` (`user_code`,`username`,`phone`,`email`,`password_hashed`,`role`) "
                "VALUES (%(user_code)s,%(username)s,%(phone)s,%(email)s,%(password_hashed)s,%(role)s);",
                (jsonable_encoder(user_to_save))
            )
            # grant the new user login access on the registering client
            # NOTE(review): `_instance` is a private attribute of the cursor
            # wrapper — fragile; confirm there is no public lastrowid API.
            new_user_id = cursor._instance.insert_id()
            cursor.execute(
                "SELECT `id`,client_name FROM `basic_client` WHERE machine_uuid=%s;",
                client_uuid
            )
            client_info = cursor.fetchone()
            if not client_info:
                raise ClientNotFound("Client Not Found")
            cursor.execute(
                "INSERT INTO `user_user_client` (user_id,client_id,expire_date) "
                "VALUES (%s,%s,%s);",
                (new_user_id, client_info["id"], "3000-01-01")
            )
    except IntegrityError as e:
        logger.error("用户注册失败:{}".format(e))
        return {"message": "手机号已存在!", "user": {}}
    except ClientNotFound:
        return {"message": "无效客户端,无法注册!", "user": {}}
    back_user = User(
        user_code=user_to_save.user_code,
        username=user_to_save.username,
        phone=user_to_save.phone,
        email=user_to_save.email,
        role=user_to_save.role
    )
    return {"message": "注册成功!", "user": back_user}


async def get_user_in_db(
        phone: str = Form(...),
        password: str = Form(...),
        client_uuid: str = Form(...),
        user_code: str = Form("")
):
    # Dependency: authenticate the user against the database, returning the
    # User on success or None on unknown user / wrong password.
    # decode the base64-wrapped phone and password
    phone = base64.b64decode(phone.encode('utf-8')).decode('utf-8')
    password = base64.b64decode(password.encode('utf-8')).decode('utf-8')
    with MySqlZ() as cursor:
        cursor.execute(
            "SELECT `id`,`user_code`,`username`,`phone`,`email`,`password_hashed`,`role` "
            "FROM `user_user` WHERE (`phone`=%s OR `user_code`=%s) AND `is_active`=1;",
            (phone, user_code)
        )
        user_dict = cursor.fetchone()
        if not user_dict:  # no such (active) user
            return None
        # the user exists: stamp the most recent login time
        cursor.execute(
            "UPDATE `user_user` SET `recent_login`=%s WHERE `id`=%s;",
            (datetime.today(), user_dict["id"])
        )
        if not verify.verify_password(password, user_dict["password_hashed"]):  # check against the stored hash
            return None
        # password verified
        today_str = datetime.today().strftime("%Y-%m-%d")
        # everyone except superuser/operator must hold an unexpired grant
        # for this client
        if user_dict["role"] not in ["superuser", "operator"]:
            cursor.execute(
                "SELECT userclient.id, userclient.user_id FROM user_user_client AS userclient "
                "INNER JOIN basic_client AS clienttb "
                "ON userclient.client_id=clienttb.id AND userclient.user_id=%s AND clienttb.machine_uuid=%s "
                "AND userclient.expire_date>%s AND clienttb.is_active=1;",
                (user_dict["id"], client_uuid, today_str)
            )
            is_client_accessed = cursor.fetchone()
            if not is_client_accessed:
                raise HTTPException(status_code=403, detail="Can not login with the client.")
    return User(**user_dict)


@passport_router.post("/login/", response_model=JwtToken, summary="用户登录")
async def login_for_access_token(
        is_image_code_passed: bool = Depends(checked_image_code),
        user: User = Depends(get_user_in_db)
):
    # Issue a JWT for a user that passed both the captcha and the password check.
    if not is_image_code_passed:
        raise HTTPException(status_code=400, detail="Got an error image code.")
    if not user:
        raise HTTPException(status_code=401, detail="Incorrect username or password.")
    # the password checked out: sign and return the access token
    access_token = verify.create_access_token(data={"user_id": user.id, "user_code": user.user_code})
    show_username = user.username if user.username else user.phone
    return {"message": "登录成功!", "show_username": show_username, "access_token": access_token, "token_type": "bearer"}


@passport_router.get("/image_code/", summary="图片验证码")
async def image_code(code_uuid: str):
    # Stream a freshly generated captcha image keyed by code_uuid.
    response = StreamingResponse(verify.generate_code_image(code_uuid))
    return response


@passport_router.get("/user/module-authenticate/", summary="用户当前的模块权限情况")
async def user_module_authority(
        user_token: str = Depends(verify.oauth2_scheme),
        query_user: int = Query(...)
):
    # List the queried user's module permissions.
    operate_user, _ = verify.decipher_user_token(user_token)
    if not operate_user:
        return {"message": "登录已过期了,重新登录再进行操作!", "user": {}, "modules": []}
    # query the user's module permissions
    with MySqlZ() as cursor:
        cursor.execute(
            "SELECT id,username,user_code,role FROM user_user WHERE id=%s;",
            (query_user, )
        )
        user_info = cursor.fetchone()
        # NOTE(review): if query_user does not exist, user_info is None and
        # the subscript below raises — the client-authenticate handler guards
        # this case; confirm whether this one should too.
        cursor.execute(
            "SELECT id,user_id,module_id,module_text,expire_date "
            "FROM user_user_module WHERE user_id=%s;",
            (user_info["id"])
        )
        data = cursor.fetchall()
    return {"message": "查询用户模块权限成功!", "user": user_info, "modules": data}


@passport_router.post("/user/module-authenticate/", summary="用户模块权限认证")
async def user_authenticate_module(
        module_item: ModuleItem = Body(...)
):
    # Decide whether the token's user may enter the requested module.
    if not module_item.user_token:
        raise HTTPException(status_code=403, detail="您还未登录,登录后在进行操作!")  # 403 -> reply.error() = 201
    user_id, user_code = verify.decipher_user_token(module_item.user_token)  # parse the user info out of the token
    if not user_id:
        raise HTTPException(status_code=403, detail="您登录已过期,请重新登录后再进行操作!")
    # look up the client type and the user's identity
    with MySqlZ() as cursor:
        cursor.execute("SELECT `id`,is_manager,is_active FROM basic_client WHERE machine_uuid=%s;", (module_item.client_uuid, ))
        client_info = cursor.fetchone()
        cursor.execute("SELECT `id`,role,is_active FROM user_user WHERE id=%s;", (user_id, ))
        user_info = cursor.fetchone()
        if not client_info or not client_info["is_active"]:
            detail_message = "进入「{}」失败:\n无效客户端,无法进行这个操作!".format(module_item.module_text)
            raise HTTPException(status_code=401, detail=detail_message)  # 403 -> reply.error() = 204
        if not user_info or not user_info["is_active"]:
            detail_message = "进入「{}」失败:\n无效用户,无法进行这个操作!".format(module_item.module_text)
            raise HTTPException(status_code=401, detail=detail_message)
        # superuser/operator may enter everything
        if user_info["role"] in ["superuser", "operator"]:
            return {
                "message": "验证成功!",
                "authenticate": True,
                "module_id": module_item.module_id,
                "module_text": module_item.module_text,
            }
        # manager clients get a blanket pass
        # NOTE(review): `module_id >= "0"` is a *string* comparison —
        # presumably module_id is a str; confirm the intended semantics.
        if client_info["is_manager"] and module_item.module_id >= "0":
            return {
                "message": "验证成功!",
                "authenticate": True,
                "module_id": module_item.module_id,
                "module_text": module_item.module_text,
            }
        # otherwise the user needs an unexpired grant for this module
        today_str = datetime.today().strftime("%Y-%m-%d")
        cursor.execute(
            "SELECT `id`,user_id,module_id FROM user_user_module "
            "WHERE user_id=%s AND module_id=%s AND expire_date>%s;",
            (user_id, module_item.module_id, today_str)
        )
        is_accessed = cursor.fetchone()
        if not is_accessed:
            detail_message = "还没有「{}」的权限,请联系管理员进行开通!".format(module_item.module_text)
            raise HTTPException(status_code=401, detail=detail_message)
        else:
            return {
                "message": "验证成功!",
                "authenticate": True,
                "module_id": module_item.module_id,
                "module_text": module_item.module_text,
            }


@passport_router.put("/user/module-authenticate/", summary="修改用户的模块权限")
async def modify_module_authority(
        operate_token: str = Depends(verify.oauth2_scheme),
        modify_item: UserModuleAuthItem = Body(...)
):
    # Upsert a user's module grant (insert on first grant, update thereafter).
    operate_user, _ = verify.decipher_user_token(operate_token)
    if not operate_user:
        raise HTTPException(status_code=401, detail="您登录过期了,重新登录后再操作!")
    # validate the expire_date format (YYYY-MM-DD)
    try:
        datetime.strptime(modify_item.expire_date, '%Y-%m-%d')
    except Exception:
        raise HTTPException(status_code=400, detail="数据格式有误,修改失败!")
    with MySqlZ() as cursor:
        cursor.execute(
            "SELECT user_id,module_id FROM user_user_module WHERE module_id=%s AND user_id=%s;",
            (modify_item.module_id, modify_item.modify_user)
        )
        is_exist = cursor.fetchone()
        if is_exist:
            cursor.execute(
                "UPDATE user_user_module SET module_text=%(module_text)s,expire_date=%(expire_date)s "
                "WHERE user_id=%(modify_user)s AND module_id=%(module_id)s;",
                jsonable_encoder(modify_item)
            )
        else:
            cursor.execute(
                "INSERT INTO user_user_module (user_id, module_id, module_text, expire_date) "
                "VALUES (%(modify_user)s,%(module_id)s,%(module_text)s,%(expire_date)s);",
                jsonable_encoder(modify_item)
            )
    return {"message": "修改模块权限成功!"}


@passport_router.get("/user/client-authenticate/", summary="用户客户端登录权限情况")
async def user_module_authority(
        user_token: str = Depends(verify.oauth2_scheme),
        query_user: int = Query(...)
):
    # List the queried user's client-login permissions.
    # NOTE(review): this function name duplicates the module-authority GET
    # handler above, shadowing that module-level name — FastAPI routing still
    # works, but confirm the duplication is intentional.
    operate_user, _ = verify.decipher_user_token(user_token)
    if not operate_user:
        return {"message": "登录已过期了,重新登录再进行操作!", "user": {}, "clients": []}
    # query the user's client login permissions
    with MySqlZ() as cursor:
        cursor.execute(
            "SELECT id,username,user_code,role FROM user_user WHERE id=%s;",
            (query_user, )
        )
        user_info = cursor.fetchone()
        if not user_info:
            return {"message": "操作的用户不存在!", "user": {}, "clients": []}
        cursor.execute(
            "SELECT cliettb.id, cliettb.client_name,cliettb.machine_uuid,cliettb.is_manager,cliettb.is_active,uctb.expire_date "
            "FROM basic_client AS cliettb "
            "LEFT JOIN user_user_client AS uctb "
            "ON uctb.user_id=%s AND cliettb.id=uctb.client_id;",
            (user_info["id"], )
        )
        clients = cursor.fetchall()
        for client_item in clients:
            if user_info["role"] in ["superuser", "operator"]:  # superuser/operator may always log in
                client_item["expire_date"] = "3000-01-01"
    return {"message": "查询用户客户端登录权限成功!", "user": user_info, "clients": clients}


@passport_router.put("/user/client-authenticate/", summary="修改用户客户端登录权限")
async def modify_client_authority(
        operate_token: str = Depends(verify.oauth2_scheme),
        modify_item: UserClientAuthItem = Body(...)
): operate_user, _ = verify.decipher_user_token(operate_token) if not operate_user: raise HTTPException(status_code=401, detail="您登录过期了,重新登录后再操作!") # 验证expire_date try: datetime.strptime(modify_item.expire_date, '%Y-%m-%d') except Exception: raise HTTPException(status_code=400, detail="数据格式有误,修改失败!") with MySqlZ() as cursor: cursor.execute( "SELECT user_id,client_id FROM user_user_client WHERE client_id=%s AND user_id=%s;", (modify_item.client_id, modify_item.modify_user) ) is_exist = cursor.fetchone() if is_exist: cursor.execute( "UPDATE user_user_client SET expire_date=%(expire_date)s " "WHERE user_id=%(modify_user)s AND client_id=%(client_id)s;", jsonable_encoder(modify_item) ) else: cursor.execute( "INSERT INTO user_user_client (user_id, client_id, expire_date) " "VALUES (%(modify_user)s,%(client_id)s,%(expire_date)s);", jsonable_encoder(modify_item) ) return {"message": "修改用户客户端登录权限成功!"} @passport_router.get("/user/variety-authenticate/", summary="用户当前品种权限情况") async def user_variety_authority( user_token: str = Depends(verify.oauth2_scheme), query_user: int = Query(None) ): operate_user, _ = verify.decipher_user_token(user_token) if not operate_user: return {"message": "登录已过期了,重新登录再进行操作!", "user": {}, "varieties": []} if not query_user: # 查询自己有权限的品种 query_user = operate_user # 查询用户的品种权限 with MySqlZ() as cursor: cursor.execute( "SELECT id,username,user_code,role FROM user_user WHERE id=%s;", (query_user,) ) user_info = cursor.fetchone() if not user_info: return {"message": "操作的用户不存在!", "user": {}, "varieties": []} cursor.execute( "SELECT varietytb.id, varietytb.variety_name,varietytb.variety_en,varietytb.exchange_lib," "varietytb.is_active,varietytb.group_name,uvtb.expire_date " "FROM basic_variety AS varietytb " "LEFT JOIN user_user_variety AS uvtb " "ON uvtb.user_id=%s AND varietytb.id=uvtb.variety_id;", (user_info["id"],) ) varieties = cursor.fetchall() for variety_item in varieties: variety_item['exchange_lib'] = ExchangeLibCN[variety_item['exchange_lib']] 
variety_item['group_name'] = VarietyGroupCN[variety_item['group_name']] if user_info["role"] in ["superuser", "operator"]: # 超级管理员和运营员都有权限 variety_item["expire_date"] = "3000-01-01" return {"message": "查询用户品种权限成功!", "user": user_info, "varieties": varieties} @passport_router.put("/user/variety-authenticate/", summary="修改用户品种权限") async def modify_client_authority( operate_token: str = Depends(verify.oauth2_scheme), modify_item: UserVarietyAuthItem = Body(...) ): operate_user, _ = verify.decipher_user_token(operate_token) if not operate_user: raise HTTPException(status_code=401, detail="您登录过期了,重新登录后再操作!") # 验证expire_date try: datetime.strptime(modify_item.expire_date, '%Y-%m-%d') if not re.match(r'^[A-Z]{1,2}$', modify_item.variety_en): raise ValueError("INVALID VARIETY.") except Exception: raise HTTPException(status_code=400, detail="数据格式有误,修改失败!") with MySqlZ() as cursor: cursor.execute( "SELECT user_id,variety_id FROM user_user_variety WHERE variety_id=%s AND user_id=%s;", (modify_item.variety_id, modify_item.modify_user) ) is_exist = cursor.fetchone() if is_exist: cursor.execute( "UPDATE user_user_variety SET expire_date=%(expire_date)s " "WHERE user_id=%(modify_user)s AND variety_id=%(variety_id)s;", jsonable_encoder(modify_item) ) else: cursor.execute( "INSERT INTO user_user_variety (user_id, variety_id, variety_en, expire_date) " "VALUES (%(modify_user)s,%(variety_id)s,%(variety_en)s,%(expire_date)s);", jsonable_encoder(modify_item) ) return {"message": "修改用户品种权限成功!"} @passport_router.get("/user/token-login/", summary="使用TOKEN进行自动登录") async def user_token_logged( client: str = Query(..., min_length = 36, max_length = 36), token: str = Depends(verify.oauth2_scheme) ): user_id, user_code = verify.decipher_user_token(token) if not user_id: raise HTTPException(status_code=401, detail="登录失败!Token Expired!") # 查询用户的有效与能否在此客户端登录 with MySqlZ() as cursor: cursor.execute("SELECT `id`,role,username,note,is_active FROM user_user WHERE id=%s;", (user_id, )) user_info = 
cursor.fetchone() if not user_info: raise HTTPException(status_code=401, detail="登录失败!USER NOT FOUND!") cursor.execute("SELECT `id`,is_manager FROM basic_client WHERE machine_uuid=%s;", (client, )) client_info = cursor.fetchone() if not client_info: raise HTTPException(status_code=401, detail="登录失败!INVALID CLIENT!") today_str = datetime.today().strftime("%Y-%m-%d") # 1 创建今日在线的数据库 cursor.execute( "SELECT `id`,online_date FROM user_user_online WHERE `user_id`=%s AND online_date=%s;", (user_info["id"], today_str) ) is_today_online = cursor.fetchone() if not is_today_online: # 今日还未登录 cursor.execute( "INSERT INTO user_user_online (user_id,online_date,total_online) VALUES (%s,%s,%s);", (user_info["id"], today_str, 0) ) # 2 非超管和运营查询当前用户是否能在这个客户端登录 if user_info["role"] not in ["superuser", "operator"]: cursor.execute( "SELECT userclient.id, userclient.user_id FROM user_user_client AS userclient " "INNER JOIN basic_client AS clienttb " "ON userclient.client_id=clienttb.id AND userclient.user_id=%s AND clienttb.machine_uuid=%s " "AND userclient.expire_date>%s;", (user_info["id"], client, today_str) ) is_client_accessed = cursor.fetchone() if not is_client_accessed: raise HTTPException(status_code=403, detail="Can not login with the client.") return { "message": "token登录成功!", "show_username": user_info["username"], } @passport_router.post("/login/file/", summary="测试接口,上传文件") async def login_file( file_key: UploadFile = File(...), ): print(file_key.filename) return {"message": "上传文件"} @passport_router.get("/token_login/", summary="使用token进行登录") async def login_status_keeping( is_logged: bool = Depends(verify.is_user_logged_in), ): print("用户登录情况:", is_logged) return {"message": "用户登录"}
# -*- coding:utf-8 -*-
from jinja2 import Environment, FileSystemLoader
from fund.smtam import *
import inspect

# Jinja2 environment rooted at ./templates/ (UTF-8 template sources).
env = Environment(
    loader=FileSystemLoader(
        "./templates/",
        encoding="utf8",
    )
)


def create_report(isin_code: str):
    """Load the report definition for *isin_code* and render it to HTML.

    The template name and all render parameters come from the
    ``SmtamTemplate`` instance's public members.
    """
    report = SmtamTemplate(isin_code)
    template = env.get_template(report.template_file)
    context = dict(inspect.getmembers(report))
    return template.render(**context)


if __name__ == "__main__":
    # Sample ISIN codes; as in the original script, the last assignment wins.
    isin_code = "JP90C000H1T1"
    isin_code = "JP90C0003PR7"
    # isin_code = "JP90C0001530"

    # Generate the report (result intentionally discarded here).
    create_report(isin_code)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FIX: the shebang must be the very first line of the file to take effect;
# the original placed the coding cookie above it.
"""Minimal fudge example: declare an expectation, satisfy it, then verify."""

__author__ = 'XingHua'

import fudge

mock = fudge.Fake('mock')
# FIX: fudge's ``with_arg_count()`` takes a single positional count; to match
# concrete keyword arguments the correct call is ``with_args()``.
mock.expects('method').with_args(arg1=1, arg2='2').returns(True)
mock.method(arg1=1, arg2='2')
fudge.verify()
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

import io
import os
import subprocess
import sys
import tempfile
import unittest
from unittest.mock import MagicMock, Mock, call

from benchpress.lib.hook_factory import HookFactory
from benchpress.lib.parser import TestCaseResult, TestStatus
from benchpress.suites import Suite

# Replace the hook factory globally so no real hooks are constructed.
HookFactory.create = MagicMock()


class TestSuite(unittest.TestCase):
    """Unit tests for benchpress ``Suite`` using tiny shell binaries
    (echo/sh/true) as stand-in benchmark executables."""

    def setUp(self):
        # Fresh minimal config and mock hook/parser for every test.
        self.suite_config = {"name": "test", "description": "desc", "args": []}
        self.mock_hook = MagicMock()
        HookFactory.create.return_value = self.mock_hook
        Suite.parse = Mock()

    def test_arg_list(self):
        """Argument list is formatted correctly with lists or dicts"""
        self.assertListEqual(
            ["--output-format=json", "a"], Suite.arg_list(["--output-format=json", "a"])
        )

        expected = ["--output-format", "json", "--file"]
        actual = Suite.arg_list({"output-format": "json", "file": None})
        # items are the same regardless of order
        self.assertCountEqual(expected, actual)
        # '--output-format' comes immediately before 'json'
        self.assertEqual(actual.index("--output-format") + 1, actual.index("json"))

    def test_run_succeed(self):
        """Echo is able to run and be parsed correctly

        Run a suite to echo some json and make sure it can be parse and
        is exported correctly."""
        mock_data = '{"key": "hello"}'
        self.suite_config["args"] = [mock_data]
        self.suite_config["metrics"] = ["key"]
        self.suite_config["path"] = "echo"
        suite = Suite(self.suite_config)
        suite.parse = Mock(
            return_value=[TestCaseResult(name="key", status=TestStatus.PASSED)]
        )
        metrics = suite.run()
        # parse receives (stdout_lines, stderr_lines, returncode)
        suite.parse.assert_called_with([mock_data], [], 0)
        self.assertEqual(
            [TestCaseResult(name="key", status=TestStatus.PASSED)], metrics
        )

    def test_run_fail(self):
        """Exit 1 raises an exception"""
        self.suite_config["args"] = ["-c", 'echo "error" >&2; exit 1']
        self.suite_config["path"] = "sh"
        suite = Suite(self.suite_config)
        with self.assertRaises(subprocess.CalledProcessError) as e:
            suite.run()
        e = e.exception
        self.assertEqual("", e.stdout.strip())
        self.assertEqual("error", e.stderr.strip())

    def test_run_fail_no_check_returncode(self):
        """Bad return code doesn't fail when check_returncode is False"""
        self.suite_config["args"] = ["-c", 'echo "error" >&2; exit 1']
        self.suite_config["path"] = "sh"
        self.suite_config["check_returncode"] = False
        suite = Suite(self.suite_config)
        # suite.run won't raise an exception
        suite.run()

    def test_run_no_binary(self):
        """Nonexistent binary raises an error"""
        self.suite_config["path"] = "somethingthatdoesntexist"
        self.suite_config["metrics"] = []
        suite = Suite(self.suite_config)
        with self.assertRaises(OSError):
            suite.run()

    def test_run_parser_error(self):
        """A crashed parser raises an error"""
        self.suite_config["path"] = "true"
        self.suite_config["metrics"] = []
        suite = Suite(self.suite_config)
        suite.parse = Mock(side_effect=ValueError(""))
        with self.assertRaises(ValueError):
            suite.run()

    def test_run_timeout(self):
        """Binary running past timeout raises an error"""
        self.suite_config["timeout"] = 0.1
        self.suite_config["path"] = "/bin/sh"
        self.suite_config["args"] = ["-c", "yes"]
        suite = Suite(self.suite_config)
        with self.assertRaises(subprocess.TimeoutExpired):
            suite.run()

    def test_run_timeout_is_pass(self):
        """Binary running past timeout raises an error"""
        # NOTE(review): with timeout_is_pass=True a timeout is swallowed and
        # the parser is fed a sentinel line instead.
        self.suite_config["timeout"] = 0.1
        self.suite_config["timeout_is_pass"] = True
        self.suite_config["path"] = "/bin/sh"
        self.suite_config["args"] = [
            "-c",
            'echo "wow" && echo "err" > /dev/stderr && sleep 2',
        ]
        suite = Suite(self.suite_config)
        suite.run()
        suite.parse.assert_called_with(["timed out as expected"], [], 0)

    def test_tee_stdouterr(self):
        """tee_output option works correctly

        With tee_option=True, the suite should print the subprocess stdout
        lines starting with 'stdout:' and stderr starting with 'stderr:'"""
        mock_data = "line 1 from echo\nthis is the second line"
        self.suite_config["args"] = [mock_data]
        self.suite_config["metrics"] = ["key"]
        self.suite_config["tee_output"] = True
        self.suite_config["path"] = "echo"
        suite = Suite(self.suite_config)

        # capture stdout/err
        orig_stdout, orig_stderr = sys.stdout, sys.stderr
        sys.stdout = io.StringIO()
        sys.stderr = io.StringIO()

        suite.run()
        expected = "stdout: line 1 from echo\nstdout: this is the second line\n"
        self.assertEqual(sys.stdout.getvalue(), expected)

        # test with stderr and stdout
        # first reset stdout string
        sys.stdout.truncate(0)
        sys.stdout.seek(0)
        self.suite_config["path"] = "sh"
        self.suite_config["args"] = ["-c", 'echo "error" >&2 && echo "from stdout"']
        self.suite_config["tee_output"] = True
        suite = Suite(self.suite_config)
        suite.run()
        expected = "stdout: from stdout\nstderr: error\n"
        self.assertEqual(sys.stdout.getvalue(), expected)

        # restore the real streams for later tests
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr

    def test_tee_output_file(self):
        """tee_output can write to file."""
        mock_data = "line 1 from echo\nthis is the second line"
        self.suite_config["args"] = [mock_data]
        self.suite_config["metrics"] = ["key"]
        # create a temp file path; the suite itself opens/writes it
        fd, teefile = tempfile.mkstemp()
        os.close(fd)
        self.suite_config["path"] = "sh"
        self.suite_config["args"] = ["-c", 'echo "error" >&2 && echo "from stdout"']
        self.suite_config["tee_output"] = teefile
        suite = Suite(self.suite_config)
        suite.run()
        expected = "stdout: from stdout\nstderr: error\n"
        with open(teefile, "r") as tmp:
            self.assertEqual(tmp.read(), expected)
        os.remove(teefile)

    def test_hooks(self):
        """Suite runs hooks before/after in stack order"""
        self.suite_config["path"] = "true"
        self.suite_config["hooks"] = [
            {"hook": "first", "options": {"a": 1}},
            {"hook": "second", "options": {"b": 1}},
        ]

        # one parent mock so the relative order of all hook calls is recorded
        mock = MagicMock()
        first = mock.first
        second = mock.second

        def get_mock_hook(name):
            if name == "first":
                return first
            else:
                return second

        HookFactory.create.side_effect = get_mock_hook

        suite = Suite(self.suite_config)
        suite.run()

        self.assertListEqual(
            [
                call.first.before({"a": 1}, suite),
                call.second.before({"b": 1}, suite),
                # post hooks run in reverse order
                call.second.after({"b": 1}, suite),
                call.first.after({"a": 1}, suite),
            ],
            mock.method_calls,
        )


if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file
#  Module with some simple but useful utilities for GSL error handling
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date   2013-02-10
# =============================================================================
"""Module with some simple but useful utilities for GSL error handling
"""
# =============================================================================
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__   = "2013-02-10"
# =============================================================================
__all__    = (
    #
    'gslIgnore'    , ## context manager to ignore GSL errors
    'gslError'     , ## context manager to print GSL errors
    'gslException' , ## context manager to turn GSL errors into C++/Python exceptions
    'GslIgnore'    , ## context manager to ignore GSL errors
    'GslError'     , ## context manager to print GSL errors
    'GslException' , ## context manager to turn GSL errors into C++/Python exceptions
    'setHandler'   , ## use ``global'' GSL handler
    'useHandler'   , ## ditto
    )
# =============================================================================
## helper base class/context manager
#  Subclasses set ``self.handler`` (a C++ handler class from cppyy) before
#  calling this __init__; entering the context instantiates it, leaving
#  destroys it, restoring the previous GSL error handler.
class ErrHandler(object) :
    def __init__  ( self ) :
        self.err_handler = None
    def __enter__ ( self ) :
        self.err_handler = self.handler ()
        return self
    def __exit__  ( self , *_ ) :
        # explicit deletion triggers the C++ destructor that pops the handler
        if self.err_handler : del self.err_handler
        self.err_handler = None

# =============================================================================
## @class GslIgnore
#  Simple context manager to ignore all GSL errors
#  @code
#  with GslIgnore() :
#      ... do something ...
#  @endcode
class GslIgnore(ErrHandler) :
    """Simple context manager to ignore all GSL errors
    >>> with GslIgnore() :
    >>>    ... do something...
    """
    def __init__ ( self ) :
        import ROOT,cppyy
        self.handler = cppyy.gbl.Ostap.Utils.GslIgnore
        super(GslIgnore,self).__init__()

# =============================================================================
## @class GslError
#  Simple context manager to print GSL errors to stderr
#  @code
#  with GslError() :
#      ... do something ...
#  @endcode
class GslError(ErrHandler) :
    """Simple context manager to print GSL errors to stderr
    >>> with GslError() :
    >>>    ... do something...
    """
    def __init__ ( self ) :
        import ROOT,cppyy
        self.handler = cppyy.gbl.Ostap.Utils.GslError
        super(GslError,self).__init__()

# =============================================================================
## @class GslException
#  Simple context manager to turn GSL errors into C++/Python exceptions
#  @code
#  with GslException() :
#      ... do something ...
#  @endcode
class GslException (ErrHandler) :
    """Simple context manager to turn GSL Errors into C++/Python exceptions
    >>> with GslException() :
    >>>    ... do something...
    """
    def __init__ ( self ) :
        import ROOT,cppyy
        self.handler = cppyy.gbl.Ostap.Utils.GslException
        super(GslException,self).__init__()

# =============================================================================
## Simple context manager to ignore all GSL errors
#  @code
#  with gslIgnore() :
#      ... do something ...
#  @endcode
def gslIgnore () :
    """Simple context manager to ignore all GSL errors
    >>> with gslIgnore() :
    >>>    ... do something...
    """
    return GslIgnore()

# =============================================================================
## Simple context manager to print GSL errors to stderr
#  @code
#  with gslError() :
#      ... do something ...
#  @endcode
def gslError () :
    """Simple context manager to print GSL errors to stderr
    >>> with gslError() :
    >>>    ... do something...
    """
    return GslError()

# =============================================================================
## Simple context manager to turn GSL errors into C++/Python exceptions
#  @code
#  with gslException() :
#      ... do something ...
#  @endcode
def gslException () :
    """Simple context manager to turn GSL Errors into C++/Python exceptions
    >>> with gslException() :
    >>>    ... do something...
    """
    return GslException()

# =============================================================================
# The single currently-installed "global" handler is kept in this list
# (empty == no handler installed).
_global_gsl_handler = []
def _setHandler ( handler ) :
    # Drop any previously installed global handler, then install the new one.
    global _global_gsl_handler
    while _global_gsl_handler :
        _global_gsl_handler.pop()
    if handler:
        _global_gsl_handler.append ( handler )
    return _global_gsl_handler
# =============================================================================
## Make use ``global'' GSL handler
#  @code
#  setHandler ( None        ) ## clean up global handlers
#  setHandler ( 'Ignore'    ) ## ignore all GSL errors
#  setHandler ( 'Error'     ) ## print GSL errors to stderr and continue
#  setHandler ( 'Exception' ) ## convert GSL errors into C++/Python exceptions
#  setHandler ( 'Raise'     ) ## ditto
#  setHandler ( 'Throw'     ) ## ditto
#  @endcode
def setHandler ( handler ) :
    """Use ``global'' GSL handler
    >>> setHandler ( None        ) ## clean up global handlers
    >>> setHandler ( 'Ignore'    ) ## ignore all GSL errors
    >>> setHandler ( 'Error'     ) ## print GSL errors to stderr and continue
    >>> setHandler ( 'Exception' ) ## convert GSL errors into C++/Python exceptions
    >>> setHandler ( 'Raise'     ) ## ditto
    >>> setHandler ( 'Throw'     ) ## ditto
    """
    #
    from ostap.logger.logger import getLogger
    logger = getLogger( 'ostap.utils.gsl' )
    #
    import ROOT,cppyy
    Ostap = cppyy.gbl.Ostap
    #
    # global _global_gls_handler
    if   not handler : _setHandler ( handler )
    elif isinstance ( handler , str ) :
        # accept a case-insensitive handler name
        hl = handler.lower()
        if   'ignore' == hl :
            _setHandler ( Ostap.Utils.GslIgnore () )
            logger.debug('Global GSL error Handler: Ignore all GLS errors')
        elif hl in ( 'error' , 'print' ) :
            _setHandler ( Ostap.Utils.GslError () )
            logger.debug('Global GSL error Handler: print all GLS errors to stderr')
        elif hl in ( 'exception' , 'raise' , 'throw' ) :
            _setHandler ( Ostap.Utils.GslException () )
            logger.debug('Global GSL error Handler: convert GLS errors to C++/Python exceptions')
        else :
            raise TypeError('Unknown handler type %s' % handler )
    elif isinstance ( handler , Ostap.Utils.GslError ) :
        # an already-constructed handler instance
        _setHandler ( handler )
        logger.debug('Global Eror Handler: %s' % handler )
    elif issubclass ( handler , Ostap.Utils.GslError ) :
        # a handler class: instantiate it
        h = _setHandler ( handler () )
        logger.debug('Global Eror Handler: %s' % h )
    else :
        raise TypeError('Unknown handler type %s' % handler )

## ditto
useHandler = setHandler

# =============================================================================
if '__main__' == __name__ :

    from ostap.logger.logger import getLogger
    if '__main__' == __name__ : logger = getLogger( 'ostap.utils.gsl' )
    else                      : logger = getLogger( __name__ )

    from ostap.utils.docme import docme
    docme ( __name__ , logger = logger )

    # NOTE(review): smoke-test of nested handlers; exact nesting of the
    # following demo calls was ambiguous in the archived layout — confirm
    # against upstream ostap if this block matters.
    setHandler ( 'Error' )
    with gslIgnore() , gslException() :
        with gslError() :
            setHandler( 'Exception')
            setHandler( 'Exception')
            setHandler( 'Exception')
            setHandler( 'Exception')
            setHandler( 'Ignore')
            setHandler( 'Error')
            setHandler( 'Error')
            setHandler( 'Error')
            setHandler( 'Error')

    logger.info ( 'Active handlers %s' % _global_gsl_handler )
    del _global_gsl_handler[:]
    logger.info ( 'Active handlers %s' % _global_gsl_handler )

    logger.info ( 80*'*' )

# =============================================================================
# The END
# =============================================================================
#########################################################################################
#
# url_check_hdl_test -
#       This is the python script which executes the url_check usecase using python unittest
#
# Revision History
#       * 1.0 - 5.28.21 - Karthik Babu Harichandra Babu - Initial version
#
#########################################################################################

from url_check_main import URL_Check
from optparse import OptionParser
import json


def url_check(url):
    """Classify *url* with URL_Check, print the verdict as JSON, return it.

    The returned value is the JSON-encoded one-entry mapping
    ``{url: human-readable verdict}``.
    """
    checker = URL_Check(url)
    validity, url_failed = checker.url_verify()

    verdicts = {}
    if url_failed:
        verdicts[url] = f"The Given URL is Not in Valid URL Format - {url}"
    elif validity:
        verdicts[url] = f"The Given URL is Clean - {url}"
    else:
        verdicts[url] = f"The Given URL is Malware Affected - {url}"

    payload = json.dumps(verdicts)
    print(payload)
    return payload
from django.db import models, IntegrityError
from django.contrib.auth.models import User
# NOTE(review): ugettext_lazy was removed in Django 4.0 in favour of
# gettext_lazy — confirm the project's Django version before upgrading.
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
# NOTE(review): AccessToken is imported but unused in this chunk.
from rest_framework_simplejwt.tokens import RefreshToken, AccessToken
from uuid import uuid4

# Create your models here.


class GenerateTokenMixin:
    """Mixin for User subclasses exposing a fresh simplejwt token pair."""

    @property
    def token(self):
        # Convenience property: a new refresh/access pair on every read.
        return self._get_tokens()

    def _get_tokens(self):
        """Issue and return ``{"refresh": ..., "access": ...}`` for self."""
        tokens = RefreshToken.for_user(self)
        refresh = str(tokens)
        access = str(tokens.access_token)
        data = {
            "refresh": refresh,
            "access": access
        }
        return data


class UserAccount(GenerateTokenMixin, User):
    """Regular end-user account (proxy-style subclass of auth.User)."""

    class Meta:
        verbose_name = _("useraccount")
        verbose_name_plural = _("useraccounts")

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        return reverse("useraccount_detail", kwargs={"pk": self.pk})


class Staff(GenerateTokenMixin, User):
    """Staff account (subclass of auth.User with token support)."""

    class Meta:
        verbose_name = _("staffaccount")
        verbose_name_plural = _("staffaccounts")

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        return reverse("staffaccount_detail", kwargs={"pk": self.pk})


class Team(models.Model):
    """A named team; names are unique."""

    # NOTE(review): ``self.model`` is a Manager attribute, not a Model one —
    # this method looks copied from a custom manager and would raise
    # AttributeError if called on a Team instance; verify callers.
    def get_object(self, name):
        return self.model.objects.get(name=name)

    name = models.CharField(max_length=80, unique=True)

    class Meta:
        verbose_name = _("team")
        verbose_name_plural = _("teams")

    def __str__(self):
        return self.name

    @classmethod
    def update_team_name(cls, old_name, new_name):
        """Rename a team; returns a detail dict for each outcome
        (renamed, old name missing, new name already taken)."""
        try:
            team = cls.objects.get(name=old_name)
            team.name = new_name
            team.save()
            return {"detail": f"{old_name} changed to {new_name}"}
        except cls.DoesNotExist:
            return {"detail": f"{old_name} does not exist"}
        except IntegrityError:
            # NOTE(review): key is "details" here but "detail" above —
            # confirm whether consumers rely on this inconsistency.
            return {"details": f"{new_name} already exist"}

    def get_absolute_url(self):
        return reverse("team_detail", kwargs={"pk": self.pk})


class Fixture(models.Model):
    """A scheduled match between two teams, ordered by kickoff time."""

    home_team = models.ForeignKey(
        Team, related_name='home_team', on_delete=models.CASCADE)
    away_team = models.ForeignKey(
        Team, related_name='away_team', on_delete=models.CASCADE)
    # scheduled kickoff
    date_time = models.DateTimeField(blank=False)
    # updated every save
    fixed_at = models.DateTimeField(auto_now=True)
    # opaque share link token (defaults to a fresh UUID4)
    link_address = models.CharField(max_length=50, unique=True, default=uuid4)

    class Meta:
        verbose_name = _("fixture")
        verbose_name_plural = _("fixtures")
        ordering = ['date_time']

    @classmethod
    def updateFixtureEvent(cls, instance, details):
        """Overwrite teams and kickoff of *instance* from *details* and save."""
        instance.home_team = details['home_team']
        instance.away_team = details['away_team']
        instance.date_time = details['date_time']
        instance.save()
        return instance

    def __str__(self):
        return "{} vs {}".format(self.home_team, self.away_team)

    def get_absolute_url(self):
        return reverse("fixture_detail", kwargs={"pk": self.pk})
import requests

from django.db import models


class ResultManager(models.Manager):
    """Manager able to create Result rows straight from an HTTP GET."""

    def from_url(self, url):
        """Fetch *url* and persist the response as a new Result row."""
        response = requests.get(url)
        return self.create(
            response_code=response.status_code,
            response_text=response.text,
            url=url,
        )


class Result(models.Model):
    """A stored HTTP response awaiting later parsing."""

    created_at = models.DateTimeField(auto_now_add=True)
    has_been_parsed = models.BooleanField(default=False)
    parsed_at = models.DateTimeField(blank=True, null=True)
    response_code = models.PositiveIntegerField()
    response_text = models.TextField()
    url = models.URLField()

    objects = ResultManager()

    class Meta:
        ordering = ('created_at', )

    def __str__(self):
        return 'Requested on {}'.format(self.created_at)
# -*- coding: utf-8 -*-
"""
Created on Wed Sep  2 11:24:10 2020

@author: Georg Maubach

Exercise: write a function istSchaltjahr() that receives a year and
returns whether that year is a leap year.

Leap-year definition (Gregorian):
- A year divisible by 4 is a leap year.
- A year divisible by 100 is not a leap year, unless it is also
  divisible by 400 — then it is one after all.
"""


def istSchaltjahr(jahr):
    """Return True if *jahr* is a leap year, else False.

    FIX: the original file defined ``istSchaltjahr`` twice; the second
    ("Musterlösung") definition shadowed the first, so only this logic was
    ever reachable.  The dead duplicate has been removed.
    """
    # Century years are leap years only when divisible by 400.
    if (jahr % 100 == 0 and jahr % 400 != 0) or jahr % 4 != 0:
        return False
    return True


if __name__ == "__main__":
    # FIX: the original printed the undefined name ``schaltjahr`` and
    # crashed with NameError; exercise the function on a sample year.
    print("Schaltjahr ", istSchaltjahr(2020))
import unittest
from unittest.mock import Mock
from src.dqn import DQN
from gym import spaces
import numpy as np
import tensorflow as tf


class TestDqn(unittest.TestCase):
    """Unit tests for the DQN agent in src.dqn.

    The environment is a Mock with a Sega-Genesis-like interface:
    12 binary buttons (MultiBinary action space) and 224x320 RGB frames.
    """

    def setUp(self):
        # Fake environment: observation space of uint8 RGB frames, 12-button
        # action space, and a reset() that returns a random frame.
        observation_space = spaces.Box(0, 255, shape=(224, 320, 3), dtype=np.uint8)
        environment = Mock(action_space=spaces.MultiBinary(12),
                           observation_space=observation_space,
                           reset=lambda: observation_space.sample())
        self.dqn = DQN(environment)

    def test_observation_space(self):
        # The agent should expose the environment's frame shape unchanged.
        self.assertEqual(self.dqn.observation_space().shape, (224, 320, 3))

    def test_action_space(self):
        # 12 buttons available on the controller.
        self.assertEqual(self.dqn.action_space().n, 12)

    def test_reward(self):
        # Reward passes through untouched (no shaping).
        self.assertEqual(self.dqn.reward(10, None, None), 10)

    def test_build_model(self):
        # The network's output layer must have one unit per action.
        model = self.dqn.build_model()
        self.assertEqual(model.layers[-1].output.shape.as_list(), [None, 12])

    def test_setup_models(self):
        # setup_models() is expected to call build_model twice: once with no
        # arguments (online model) and once with a zero initializer (target).
        self.dqn.build_model = Mock()
        self.dqn.setup_models()
        call_one, call_two = self.dqn.build_model.call_args_list
        self.assertEqual(call_one, (()))
        self.assertIsInstance(call_two[1]['initializer'], tf.keras.initializers.Zeros)

    def test_transfer_weights(self):
        # Online model: all-ones weights; target model: all-zeros weights.
        self.dqn.model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(10, input_shape=[10], kernel_initializer="ones"),
            tf.keras.layers.Dense(1, kernel_initializer="ones")
        ])
        self.dqn.model.compile(optimizer="sgd", loss="mse")
        self.dqn.target_model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(10, input_shape=[10], kernel_initializer="zeros"),
            tf.keras.layers.Dense(1, kernel_initializer="zeros")
        ])
        self.dqn.target_model.compile(optimizer="sgd", loss="mse")
        # Sanity check: target starts at zero everywhere.
        w1, b1, w2, b2 = self.dqn.target_model.get_weights()
        self.assertEqual(np.sum(w1), 0)
        self.assertEqual(np.sum(b1), 0)
        self.assertEqual(np.sum(w2), 0)
        self.assertEqual(np.sum(b2), 0)
        self.dqn.transfer_weights()
        # After the transfer the target must carry the online model's ones:
        # 10x10 kernel -> sum 100, 10x1 kernel -> sum 10, biases stay 0.
        w1, b1, w2, b2 = self.dqn.target_model.get_weights()
        self.assertEqual(np.sum(w1), 100)
        self.assertEqual(np.sum(b1), 0)
        self.assertEqual(np.sum(w2), 10)
        self.assertEqual(np.sum(b2), 0)

    def test_actions(self):
        # actions() enumerates the one-hot encoding of each of the 12 buttons.
        actions = self.dqn.actions()
        self.assertEqual(actions.tolist(), [
            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]
        ])

    def test_step(self):
        # TODO: not yet implemented.
        pass

    def test_select_action(self):
        # TODO: not yet implemented.
        pass

    def test__select_action_probs(self):
        # With freshly initialized models the greedy action gets probability
        # 1 - epsilon + epsilon/n and all others epsilon/n (epsilon-greedy).
        # The exact values assume the implementation's default epsilon -- the
        # expected vector below encodes that assumption; confirm against src.dqn.
        self.dqn.setup_models()
        state = self.dqn.observation_space().sample()
        probs = np.round(self.dqn._select_action_probs(self.dqn._reshape_state(state)), 2)
        np.testing.assert_allclose(probs, [0.91, 0.01, 0.01, 0.01,
                                           0.01, 0.01, 0.01, 0.01,
                                           0.01, 0.01, 0.01, 0.01])

    def test_td_target(self):
        # With a zero-initialized target network the bootstrapped term
        # vanishes, so the TD target equals the immediate reward (0.9)
        # for the taken action and 0 elsewhere.
        self.dqn.setup_models()
        w = self.dqn.target_model.get_weights()  # NOTE(review): unused -- left for debugging?
        target = self.dqn.td_target(0.9, self.dqn._reshape_state(self.dqn.observation_space().sample() + 1))
        target = np.round(target, 2)
        np.testing.assert_equal(target, [0.9, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
""" lsn14_examples_2ofX.py - example code from lesson 14 """ __author__ = "CS110Z" # Use a list to store the names of your favorite NFL Quarterbacks # User must enter each name individually num_qbs = int(input('How many names do you wish to enter: ')) qb_names = [] for i in range(num_qbs): qb_names.append(input('Enter qb %d: ' % i)) # Print out the entered names for i in range(num_qbs): print('%d of %d: %s' % (i, num_qbs, qb_names[i])) # Use a list to store the names of your favorite NFL Quarterbacks # User must enter all names on a line inputs = input('Enter all qb''s names seprated by a space: ') qb_names = inputs.split() for i in range(len(qb_names)): print('%d of %d: %s' % (i, len(qb_names), qb_names[i]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a sample script to demonstrate how to use LNP module in ncupy

@author: juniti-y
"""

######################################################################
###
### Import modules
###
######################################################################
import numpy as np                  # For numpy
from ncupy import lnp               # For lnp
import matplotlib.pyplot as plt     # For pyplot

######################################################################
###
### Parameters for data generation
###
######################################################################
# Sampling condition
MAX_T = 100.0       # Duration (unit: [sec.])
DELTA_T = 0.001     # Sampling interval (unit: [sec.])
# Signal property (periods of the two sinusoids, in seconds)
FR1 = 13.0
FR2 = 17.0
# Composition weights for mixing the two sinusoids
W1 = 1.0
W2 = -1.0
# Baseline (log-rate offset)
W0 = 2.0
# Window size for moving average (in bins)
MV_W = 10

######################################################################
###
### Generate the training data
###
######################################################################
# Generate the time bins.
# NOTE: np.int was removed in NumPy 1.24; the builtin int is equivalent here.
T_BIN = np.linspace(0.0, MAX_T, int(np.round(MAX_T / DELTA_T)) + 1)
# Generate the sin signal 1
X1 = np.sin(2 * np.pi * T_BIN / FR1)
# Generate the sin signal 2
X2 = np.sin(2 * np.pi * T_BIN / FR2)
# Generate the composition signal
Z = W1 * X1 + W2 * X2
# Compute the firing rate (Unit: [sec.])
R = np.exp(Z + W0)
# Compute the spike probability for each bin
S_P = DELTA_T * R
# Generate the random number that follows the uniform dist.
RND = np.random.rand(np.size(T_BIN))
# Generate the spike train: 1.0 where the uniform draw falls below the
# per-bin spike probability (Bernoulli thinning of the rate).
S = np.double(RND < S_P)
# Get the moving average of S
CEF_AVG = np.ones(MV_W) / MV_W
ES = np.convolve(S, CEF_AVG, 'same')
# Compute the empirical firing rate
EFR = ES / (DELTA_T * MV_W)
# Generate the data matrix for predictors (one column per sinusoid)
X = np.c_[np.reshape(X1, (X1.size, 1)), np.reshape(X2, (X2.size, 1))]
# Generate the data matrix for responses
Y = S

######################################################################
###
### Constructing LNP model that fits the training data
###
######################################################################
# Create the object of LNP class
MODEL = lnp.LNP()
# Change the options of the object
MODEL.set_options(method='MLE2', delta_t=DELTA_T, lmbd=1e-4)
# Initial parameter guess passed to the fitting routine.
# NOTE(review): presumably these are starting weights for (W1, W2) -- confirm
# against the ncupy.lnp.LNP.fit() signature.
dist = [0.03, 0.04]
# Run the fitting algorithm
MODEL.fit(X, Y, dist)
# Predict the responses to given predictors
PY = MODEL.predict(X)

######################################################################
###
### Compare the true process and its estimation (just for demonstration)
###
######################################################################
plt.plot(T_BIN, R, T_BIN, PY, T_BIN, EFR)
plt.title('Comparison among true, model, empirical')
plt.legend(['true', 'model', 'empirical'])
#!/usr/bin/env python
# coding: utf-8

# # Titanic - XGBoost
# Notebook-exported script: builds a model on the Kaggle Titanic dataset
# using XGBoost. Start by importing the basic libraries.

# In[ ]:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# `InteractiveShell.magic` was removed in modern IPython;
# `run_line_magic` is the supported replacement.
get_ipython().run_line_magic('matplotlib', 'inline')

# Next step: load the data from the CSVs provided on Kaggle, using pandas.

# In[ ]:

titanic_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")

# Look at the first rows.
titanic_df.head()

# Basic preprocessing. Important: everything done to the training set is
# also applied to the test set.

# ## Age - Imputation
# Missing ages are filled with the training-set median. (Grouping by sex,
# for example, would be a fancier alternative.)

# In[ ]:

age_median = titanic_df['Age'].median()
print(age_median)

# In[ ]:

titanic_df['Age'] = titanic_df['Age'].fillna(age_median)
test_df['Age'] = test_df['Age'].fillna(age_median)

# ## Sex - LabelEncoding

# In[ ]:

from sklearn.preprocessing import LabelEncoder

sex_encoder = LabelEncoder()
# Fit on the union of train+test values so both sets share one encoding.
sex_encoder.fit(list(titanic_df['Sex'].values) + list(test_df['Sex'].values))

# In[ ]:

sex_encoder.classes_

# In[ ]:

titanic_df['Sex'] = sex_encoder.transform(titanic_df['Sex'].values)
test_df['Sex'] = sex_encoder.transform(test_df['Sex'].values)

# ## Feature Engineering - Title
# Derive a new feature: the honorific title extracted from the name.

# In[ ]:

titanic_df.head()['Name']

# In[ ]:

import re


def extract_title(name):
    """Return the title between ', ' and the final '.' of a passenger
    name (e.g. 'Mr', 'Mrs', 'Master'), or '' when no match is found."""
    # Raw string avoids the invalid-escape warning of '\.' in a plain literal.
    x = re.search(r', (.+)\.', name)
    if x:
        return x.group(1)
    else:
        return ''

# In[ ]:

titanic_df['Title'] = titanic_df['Name'].apply(extract_title)
test_df['Title'] = test_df['Name'].apply(extract_title)

# ## OneHotEncoding
# Vectorize the multi-categorical features.

# In[ ]:

from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction import DictVectorizer

feature_names = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Title', 'Embarked']
dv = DictVectorizer()
# DataFrame.append was deprecated and removed in pandas 2.x;
# pd.concat of the two frames is the equivalent replacement.
dv.fit(pd.concat([titanic_df[feature_names],
                  test_df[feature_names]]).to_dict(orient='records'))
dv.feature_names_

# In[ ]:

from sklearn.model_selection import train_test_split

train_X, valid_X, train_y, valid_y = train_test_split(
    dv.transform(titanic_df[feature_names].to_dict(orient='records')),
    titanic_df['Survived'], test_size=0.2, random_state=42)

# In[ ]:

import xgboost as xgb

# In[ ]:

train_X.todense()

# In[ ]:

dtrain = xgb.DMatrix(data=train_X.todense(), feature_names=dv.feature_names_, label=train_y)
dvalid = xgb.DMatrix(data=valid_X.todense(), feature_names=dv.feature_names_, label=valid_y)

# In[ ]:

# Train with early stopping on the validation error.
xgb_clf = xgb.train({'max_depth': 20, 'eta': 0.1, 'objective': 'binary:logistic',
                     'eval_metric': 'error'},
                    num_boost_round=3000, dtrain=dtrain, verbose_eval=True,
                    early_stopping_rounds=30,
                    evals=[(dtrain, 'train'), (dvalid, 'valid')])

# In[ ]:

from xgboost import plot_tree

ax = plot_tree(xgb_clf, num_trees=xgb_clf.best_ntree_limit - 1)
ax.figure.set_size_inches((30, 40))

# ## Submission file

# In[ ]:

test_df['Fare'] = test_df['Fare'].fillna(0)

# Remember that sklearn works with numpy matrices.

# In[ ]:

test_X = dv.transform(test_df[feature_names].to_dict(orient='records'))
print(test_X.shape)

# In[ ]:

dtest = xgb.DMatrix(data=test_X.todense(), feature_names=dv.feature_names_)

# In[ ]:

y_pred = np.round(xgb_clf.predict(dtest)).astype(int)

# Now package the predictions into a CSV and submit to Kaggle.

# In[ ]:

submission_df = pd.DataFrame()

# In[ ]:

submission_df['PassengerId'] = test_df['PassengerId']
submission_df['Survived'] = y_pred
submission_df

# In[ ]:

submission_df.to_csv('xgboost_model.csv', index=False)

# For reference: note your training score, validation score, and the
# Kaggle submission score here.

# In[ ]:
# Hypotheses 3: pollutant levels (NO2, PM2.5) around COVID lockdown weeks
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv(r'C:\Users\manis\Desktop\unt sundar\5709\project2\final_aqi_df.csv')
dfgroup = df.groupby(['country'])


def plot_country(country_df, country, lockdown_week):
    """Plot weekly NO2 / PM2.5 levels for one country, marking its lockdown week.

    :param country_df: that country's rows with 'week', 'NO2' and 'PM2.5' columns
    :param country: country name used in the plot title
    :param lockdown_week: week number where the lockdown started (dashed marker)
    """
    country_df.plot(x='week', y=['NO2', 'PM2.5'], kind='line')
    plt.axvline(lockdown_week, linestyle='--', color='black', label='lockdown week')
    plt.title('Pollutant levels in {} from 1st - 14th week of 2020'.format(country))
    plt.ylabel('Levels of NO2 and PM2.5')
    plt.legend()
    plt.show()


# One plot per country; the original repeated this stanza four times verbatim.
for country, lockdown_week in (('Spain', 11), ('Italy', 11),
                               ('India', 12), ('Germany', 13)):
    plot_country(dfgroup.get_group(country), country, lockdown_week)
print("Digite 10 numeros. ") numeros = range(1, 11) soma = 0 for numero in numeros: print("Digite um numero: ") digitado = int(input()) soma += digitado media = soma / 10 print(media)
# -*- coding:utf-8 -*- import datetime from flask_sqlalchemy import SQLAlchemy from sqlalchemy_utils import JSONType __all__ = ["AliEvent", "db"] db = SQLAlchemy() class AliEvent(db.Model): __tablename__ = "ali_event" #: 事件ID,由 ActionTrail 服务为每个操作事件所产生的一个GUID。 id = db.Column(db.String(64), primary_key=True) #: API 操作名称,比如 Ecs 的 StopInstance 。 name = db.Column(db.String(64)) #: 处理 API 请求的服务端,比如 ram.aliyuncs.com 。 source = db.Column(db.String(255)) #: API 请求的发生时间 - UTC request_time = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now) #: 事件类型,如 ApiCall(控制台或 API 操作), ConsoleSignin(用户登录)。 type = db.Column(db.String(255)) #: ActionTrail 事件格式的版本。 version = db.Column(db.String(255)) #: optional | 如果云服务处理 API 请求时发生了错误,这里记录了相应的错误码,比如 NoPermission。· err_code = db.Column(db.String(255), default=-1) #: optional | 如果云服务处理API请求时发生了错误,这里记录了相应的错误消息,比如 You are not authorized. err_msg = db.Column(db.Text, default="") #: 云服务处理 API 请求时所产生的消息请求 ID 。 request_id = db.Column(db.String(64), default="") #: optional | 用户 API 请求的输入参数 request_param = db.Column(JSONType, default={}) #: 云服务名称,如 Ecs, Rds, Ram。 service_name = db.Column(db.String(64)) #: 发送API请求的源IP地址。如果API请求是由用户通过控制台操作触发, #: -> 那么这里记录的是用户浏览器端的IP地址,而不是控制台Web服务器的IP地址。 source_ip = db.Column(db.CHAR(64)) #: 发送 API 请求的客户端代理标识,比如控制台为 AliyunConsole ,SDK 为 aliyuncli/2.0.6 。 user_agent = db.Column(db.CHAR(255)) identity = db.Column(JSONType) created_by = db.Column(db.CHAR(128), nullable=False, default="unknow") @classmethod def get(cls): events = cls.query.all() return events @classmethod def get_id(cls): event_ids = cls.query.with_entities(cls.id).all() return event_ids @classmethod def add(cls, data): event = cls(**data) event.save() return event def save(self): """ save model """ db.session.add(self) db.session.commit() return self
from django import forms


class ContactForm(forms.Form):
    """Simple contact form: subject, optional reply address, and a message."""

    subject = forms.CharField(max_length=300)
    # Label typo fixed: "addres" -> "address" (user-facing text).
    email = forms.EmailField(required=False, label='Your e-mail address')
    message = forms.CharField(widget=forms.Textarea)

    def clean_message(self):
        """
        Validate that the message contains at least four words.

        The Django form system looks for methods whose name starts with
        ``clean_`` and ends with the name of a field; any such method is
        called during validation.
        """
        message = self.cleaned_data['message']
        num_words = len(message.split())
        if num_words < 4:
            raise forms.ValidationError("Not enough words!")
        return message
# --------------------------------------------------------------------------
# ------------ Metody Systemowe i Decyzyjne w Informatyce ----------------
# --------------------------------------------------------------------------
#  Zadanie 4: Zadanie zaliczeniowe  (Task 4: final assignment)
#  autorzy: A. Gonczarek, J. Kaczmar, S. Zareba
#  2017
#  implemented K. Bochynski
# --------------------------------------------------------------------------

import pickle as pkl
import numpy as np

TRAIN_DATA_FILE_PATH = 'train.pkl'
NUMBER_OF_LABELS = 36  # characters 0-9 and a-z


def load_training_data():
    """Load and return the pickled (x_train, y_train) tuple from TRAIN_DATA_FILE_PATH."""
    with open(TRAIN_DATA_FILE_PATH, 'rb') as f:
        return pkl.load(f)


def predict(x):
    """1-nearest-neighbour classifier over binarized images.

    Takes a matrix of examples X of shape NxD and returns a vector y of
    shape Nx1 whose elements are in {0, ..., 35}, each denoting the
    character recognized on the corresponding example.

    :param x: matrix of shape NxD
    :return: vector of shape Nx1 with the predicted labels
    """
    x_train, y_train = load_training_data()
    cut = 5000  # limit the reference set to keep the distance matrix tractable
    x_train = prepare_x(x_train[:cut])
    x = prepare_x(x)
    y_train = y_train[:cut]  # cut the y train set to match
    # Binarize (> 0) and pick, for every query row, the closest training row.
    distance = hamming_distance(x > 0, x_train > 0)
    index_min_distances = np.argmin(distance, axis=1)  # N x 1
    return y_train[index_min_distances]


def hamming_distance(x_array, x_train_array):
    """Pairwise Hamming distance between two binary (0/1) matrices.

    For binary vectors a, b: |a.(b-1) + (a-1).b| counts the positions where
    exactly one of them is 1, i.e. the Hamming distance. Returns an
    N x N_train matrix.
    """
    return np.absolute(x_array.dot(x_train_array.T - 1)
                       + (x_array - 1).dot(x_train_array.T))


def prepare_x(x_to_prepare):
    """Reshape flat 56x56 images and crop a 3-pixel border.

    :param x_to_prepare: matrix of shape Nx3136 (flattened 56x56 images)
    :return: matrix of shape Nx2500 (flattened 50x50 center crops)
    """
    N = x_to_prepare.shape[0]
    x = np.reshape(x_to_prepare, (N, 56, 56))
    x = np.reshape(x[:, 3:-3, 3:-3], (N, 2500))
    return x
"""Interactive Spotify + lyrics command-line player.

Logs in to the Spotify Web API via spotipy, then offers nested text menus to
browse artists, albums and playlists, play tracks on the user's active device
and show lyrics through the Lyrics_Player module.

NOTE: the OAuth login (util.prompt_for_user_token) and the current-user fetch
run at import time, before main() is called.
"""

import os  # only needed by the (commented-out) terminal-login cache cleanup below
import sys  # only needed when reading the username from the command line
import time
import spotipy
import Lyrics_Player
import spotipy.util as util
from json.decoder import JSONDecodeError  # convenient when inspecting Spotify's JSON returns

# Uncomment this block (and comment out the hard-coded login below) to run the
# program from the Mac terminal with the username as the first argument:
# username = sys.argv[1]
# scope = 'user-read-private user-read-playback-state user-modify-playback-state playlist-read-private ' \
#         'playlist-read-collaborative user-top-read user-read-recently-played'
# # Try to assign to "token" the permission token (basically the first login):
# try:
#     token = util.prompt_for_user_token(username,
#                                        scope,
#                                        client_id="Your spotify's client id",
#                                        client_secret="Your spotify's secret client id",
#                                        redirect_uri='https://www.google.com/')
# except:
#     # os.remove(f".cache-{username}")
#     token = util.prompt_for_user_token(username, scope)

# Hard-coded login (comment out if running the program from the Mac terminal):
username = 'your spotify user name'
# The specific OAuth permission scopes this app requests:
scope = 'user-read-private user-read-playback-state user-modify-playback-state playlist-read-private ' \
        'playlist-read-collaborative user-top-read user-read-recently-played'
token = util.prompt_for_user_token(username, scope,
                                   client_id="Your spotify's client id",
                                   client_secret="Your sporify's secret client id",
                                   redirect_uri='https://www.google.com/')

# The Spotify client object -- every Web API call below goes through it.
spotify_object = spotipy.Spotify(auth=token)

# Usage pattern: spotify_object.<endpoint_method>(<parameters>). Examples for
# all the endpoint methods: https://spotipy.readthedocs.io/en/2.12.0/#examples

# The logged-in user's profile (fetched at import time):
user = spotify_object.current_user()
# print(json.dumps(user, sort_keys=True, indent=4))  # pretty-print the profile
display_name = user['display_name']


# *********************************************************************************************************************


def play(song_uri=None, artist_name=None, song_name=None, list_of_songs_uri=None):
    """
    Start playback on the user's first active device.

    :param list_of_songs_uri: list of track URIs to play when no single song_uri is given
    :param song_uri: song's uri. This is how Spotify reads each track/album/artist...
    :param artist_name: artist name, used only for the console message
    :param song_name: song name, used only for the console message
    :return: starts playing the track (no return value)
    """
    # Current devices info (includes volume level, kind of device, id, etc.)
    devices = spotify_object.devices()
    # print(json.dumps(devices, sort_keys=True, indent=4))
    try:
        device_ID = devices['devices'][0]['id']
    except IndexError:
        # No active device -- the Spotify app is not running anywhere.
        print()
        print('Your Spotify app is not open. Open it and try again')
        print()
    else:
        if song_uri is not None:  # i.e. it is a specific song and not a list of songs
            play_track_URI_lst = list([])
            # start_playback() only accepts a list of URIs -- even a single
            # song must be wrapped in a list before being passed.
            play_track_URI_lst.append(song_uri)
            print()
            print(f'Playing {song_name} by {artist_name}, Enjoy :)')
            print()
            spotify_object.start_playback(device_ID, None, play_track_URI_lst)
        else:
            spotify_object.start_playback(device_ID, None, list_of_songs_uri)


def get_artist_Albums_and_tracks(artist_name):
    """List every album and track of *artist_name*, then run an interactive
    menu to show lyrics and/or play the numbered tracks."""
    # Search the artist on Spotify. Signature reminder:
    # search(q, limit=10, offset=0, type='track', market=None) --
    #   limit 1 returns only the most popular match.
    search_results = spotify_object.search(artist_name, 1, 0, 'artist')
    # print(json.dumps(search_results, sort_keys=True, indent=4))
    try:
        artist = search_results['artists']['items'][0]
    except IndexError:
        print()
        print("Artist name does not exist. Please make sure there are no typos and try again")
        print()
    else:
        # The program continues only if the artist name exists in Spotify.
        artistID = artist['id']
        # Get albums data:
        albums_data = spotify_object.artist_albums(artistID)
        # print(json.dumps(albums_data, sort_keys=True, indent=4))
        albums_data = albums_data['items']
        song_number_increment = 0  # the song index in the list
        list_of_songs_names = []
        list_of_tracks_URI = []  # each song's URI, for later play() usage
        # Walk through each of the artist's albums, printing numbered tracks.
        for item in albums_data:
            print("Album: " + item['name'])
            album_ID = item['id']
            print()
            # Get the tracks of each album:
            tracks_search_results = spotify_object.album_tracks(album_ID)
            tracks_search_results = tracks_search_results['items']
            for song_details in tracks_search_results:
                print(f'{song_number_increment}: {song_details["name"]}')
                # print(json.dumps(song_details, sort_keys=True, indent=4))
                song_number_increment += 1
                list_of_songs_names.append(song_details['name'])
                list_of_tracks_URI.append(song_details['uri'])
            print()
        while True:
            # Artist sub-menu.
            print()
            print('*' * 80)
            print('What would you like to do now?')
            print()
            print('1 - Get song lyrics')
            print()
            print('2 - Play song')
            print()
            print('3 - Play song with lyrics')
            print()
            print('0 - exit')
            print()
            user_num_choice = input("Enter your choice: ")
            print()
            print('*' * 80)
            # Exit:
            if user_num_choice == '0':
                print('*' * 80)
                break
            # Get lyrics only:
            elif user_num_choice == '1':
                print(
                    "* IMPORTANT! * I suggest to give it a few tries as the lyrics database is sometimes inconsistent")
                print()
                user_song_pick = input("Enter a song name or number: ")
                try:
                    user_song_num = int(user_song_pick)
                except ValueError:  # it is not a number (it is a song name)
                    # Substring match, so a partial song name also works.
                    # NOTE(review): found_song is reset inside the loop; this
                    # relies on the break to preserve a True value.
                    for song_name in list_of_songs_names:
                        found_song = False
                        if user_song_pick in song_name:
                            Lyrics_Player.get_lyrics(artist_name, song_name)
                            found_song = True
                            break
                    if not found_song:
                        print()
                        print("invalid song name. Please make sure the song name is correct and try again")
                        print()
                        time.sleep(2)
                else:  # it is a number
                    try:  # check if the index is valid
                        lyrics_for_song_name = list_of_songs_names[
                            user_song_num]  # choose the song by its printed index
                    except IndexError:  # the number is out of range
                        print()
                        print("The song number is invalid. Please make sure that the song number exist")
                        print()
                    else:  # the number is in range
                        Lyrics_Player.get_lyrics(artist_name, lyrics_for_song_name)
            # Play song:
            elif user_num_choice == '2':
                user_song_pick = input("Enter a song name or number: ")
                try:
                    user_song_num = int(user_song_pick)  # number or song name?
                except ValueError:  # it is a song name
                    # Substring match, so a partial song name also works.
                    for song_name in list_of_songs_names:
                        found_song = False
                        if user_song_pick in song_name:
                            song_index = list_of_songs_names.index(song_name)
                            song_uri = list_of_tracks_URI[song_index]
                            play(song_uri, artist_name, song_name)
                            found_song = True
                            break
                    if not found_song:
                        print()
                        print("invalid song name. Please make sure the song name is correct and try again")
                        print()
                        time.sleep(2)
                else:  # it is actually a number
                    try:  # check whether the index is valid
                        song_name = list_of_songs_names[user_song_num]
                    except IndexError:
                        print()
                        print("The song number is invalid. Please make sure that the song number exist")
                        print()
                    else:
                        song_index = user_song_num
                        song_uri = list_of_tracks_URI[song_index]
                        play(song_uri, artist_name, song_name)
            # Play song with lyrics:
            elif user_num_choice == '3':
                user_song_pick = input("Enter a song name or number: ")
                try:
                    user_song_num = int(user_song_pick)
                except ValueError:  # it is not a number, i.e. a song name
                    # Substring match, so a partial song name also works.
                    for song_name in list_of_songs_names:
                        found_song = False
                        if user_song_pick in song_name:  # even a partial name
                            song_index = list_of_songs_names.index(song_name)
                            song_uri = list_of_tracks_URI[song_index]
                            print()
                            Lyrics_Player.get_lyrics(artist_name, song_name)
                            time.sleep(1)
                            play(song_uri, artist_name, song_name)
                            time.sleep(5)
                            found_song = True
                            break
                    if not found_song:
                        print()
                        print("invalid song name. Please make sure the song name is correct and try again")
                        print()
                        time.sleep(2)
                else:  # it is a number
                    try:  # check if the number is valid
                        song_name = list_of_songs_names[user_song_num]
                    except IndexError:
                        print()
                        print("The song number is invalid. Please make sure that the song number exist")
                        print()
                    else:
                        song_index = user_song_num
                        song_uri = list_of_tracks_URI[song_index]
                        print('Just a few seconds...')
                        print()
                        Lyrics_Player.get_lyrics(artist_name, song_name)
                        time.sleep(1)
                        play(song_uri, artist_name, song_name)
                        time.sleep(5)
            else:
                print()
                print("Number entered is not valid. Please enter a valid number")
                print()


def playlists():
    """List the user's playlists, then interactively show/play the tracks of
    a chosen playlist (single track with lyrics, or the whole playlist)."""
    user_playlists = spotify_object.current_user_playlists()
    user_playlists = user_playlists['items']
    playlist_numbering = 0
    playlists_id_list = []
    playlists_names_list = []
    for playlist in user_playlists:
        print(f'{playlist_numbering} - {playlist["name"]}')
        playlist_numbering += 1
        playlists_names_list.append(playlist['name'])
        playlists_id_list.append(playlist['id'])
    while True:
        print()
        print('*' * 80)
        print("What would you like to do now?")
        print()
        print("1 - Show and play all playlist's tracks")
        print()
        print('0 - exit')
        print()
        print('*' * 80)
        user_num_choice = input("Enter your choice: ")
        print()
        # Exit:
        if user_num_choice == "0":
            break
        # One playlist's tracks:
        elif user_num_choice == "1":
            user_playlist_num = input("Enter playlist number: ")
            try:  # check if the number is a valid index
                chosen_playlist_name = playlists_names_list[int(user_playlist_num)]
            except IndexError:
                print()
                print("Playlist number is not valid. Please chose a valid playlist number and try again.")
                print()
            else:
                chosen_playlist_id = playlists_id_list[int(user_playlist_num)]
                chosen_playlist_tracks = spotify_object.playlist_tracks(chosen_playlist_id)
                chosen_playlist_tracks = chosen_playlist_tracks['items']
                track_numerating = 0
                # print(json.dumps(chosen_playlist_tracks[0]['track'], sort_keys=True, indent=4))
                print(f"{chosen_playlist_name} tracks:")
                print()
                playlist_track_name_list = []
                playlist_track_uri_list = []
                playlist_artist_name_list = []
                for track_info in chosen_playlist_tracks:
                    print(f"{track_numerating} - {track_info['track']['name']}")
                    playlist_track_name_list.append(track_info['track']['name'])
                    playlist_track_uri_list.append(track_info['track']['uri'])
                    playlist_artist_name_list.append(track_info['track']['album']['artists'][0]['name'])
                    track_numerating += 1
                while True:
                    print()
                    print('*' * 80)
                    print("what would you like to do now?")
                    print()
                    print("1 - Play a specific song with lyrics")
                    print()
                    print("2 - Play all current playlist's tracks")
                    print()
                    print('0 - exit')
                    print()
                    print('*' * 80)
                    user_num_choice = input("Enter your choice: ")
                    print()
                    if user_num_choice == '0':
                        break
                    # Play a specific song with lyrics:
                    elif user_num_choice == '1':
                        user_song_num = input("Enter song number: ")
                        song_name = playlist_track_name_list[int(user_song_num)]
                        print(f'song name: {song_name}')
                        song_uri = playlist_track_uri_list[int(user_song_num)]
                        song_artist = playlist_artist_name_list[int(user_song_num)]
                        print(f'song artist: {song_artist}')
                        Lyrics_Player.get_lyrics(song_artist, song_name)
                        time.sleep(1)
                        play(song_uri, song_artist, song_name)
                    # Play the whole playlist:
                    elif user_num_choice == '2':
                        # Lyrics_Player.get_lyrics(song_artist, song_name)
                        # Passing a list of URIs plays them all one by one;
                        # next()/previous() etc. can then be used.
                        play(None, None, None, playlist_track_uri_list)
                        time.sleep(0.5)
                        current_song_lyrics()
                        music_player_interface()
                    else:
                        print()
                        print("invalid number entered. Please try again")
                        print()
        else:
            print()
            print("invalid number entered. Please try again")
            print()


def user_top():
    """Show and play the user's top tracks (recent or all-time).

    NOTE(review): `user_answer == '1' or 'recently'` is always truthy (the
    string literal is truthy on its own), so the '2'/else branches are
    unreachable -- should be `user_answer in ('1', 'recently')`. Left as-is;
    the feature is disabled in main() anyway.
    """
    while True:
        print("Your top tracks: ")
        print()
        print("1 - recently")
        print()
        print("2 - all-time")
        print()
        print('0 - exit')
        print()
        user_answer = input("Enter your choice: ")
        print()
        if user_answer == '0':
            break
        if user_answer == '1' or 'recently':
            user_top_tracks = spotify_object.current_user_top_tracks(limit=20, offset=0, time_range='short_term')
            user_top_tracks = user_top_tracks['items']
            user_top_tracks_names_list = []
            user_top_tracks_uri_list = []
            user_top_tracks_artist_name_list = []
            top_track_numerating = 0
            # print(json.dumps(user_top_tracks, sort_keys=True, indent=4))
            for track in user_top_tracks:
                print(f"{top_track_numerating} - {track['name']}")
                user_top_tracks_names_list.append(track['name'])
                user_top_tracks_uri_list.append(track['uri'])
                user_top_tracks_artist_name_list.append(track['album']['artists'][0]['name'])
                top_track_numerating += 1
            play(None, None, None, user_top_tracks_uri_list)
        elif user_answer == '2' or 'all-time' or 'all time':
            user_top_tracks_long = spotify_object.current_user_top_tracks(limit=20, offset=0, time_range='long_term')
            user_top_tracks_long = user_top_tracks_long['items']
            user_top_tracks_names_list_long = []
            user_top_tracks_uri_list_long = []
            user_top_tracks_artist_name_list_long = []
            top_track_numerating = 0
            # print(json.dumps(user_top_tracks, sort_keys=True, indent=4))
            for track in user_top_tracks_long:
                print(f"{top_track_numerating} - {track['name']}")
                user_top_tracks_names_list_long.append(track['name'])
                user_top_tracks_uri_list_long.append(track['uri'])
                user_top_tracks_artist_name_list_long.append(track['album']['artists'][0]['name'])
                top_track_numerating += 1
            play(None, None, None, user_top_tracks_uri_list_long)
        else:
            print()
            print("Number is invalid. Please enter a valid number and try again.")
            print()
    # for song in user_recently_played:
    #     print(song['track']['name'])


# Main function:
def main():
    """Top-level menu loop: artist browsing, lyrics-only lookup, playlists,
    and lyrics for the currently playing song."""
    while True:
        print()
        print(f"Welcome to Spotify Lyrics {display_name} !")
        print()
        print("What would you like to do?")
        print()
        print("1 - Enter the name of your favorite artist to see the best of their work and more!")
        print()
        print('2 - Get Only lyrics')
        print()
        print('3 - Play your favorite playlists with lyrics! ')
        print()
        print('4 - Current playing song lyrics')
        # print('4 - See and play your top listed tracks ')
        print()
        print('0 - exit')
        print()
        user_num_choice = input("Enter your choice: ")
        print()
        # Exit:
        if user_num_choice == '0':
            print("Bye Bye and thank you ")
            print()
            break
        # Favorite artist options:
        elif user_num_choice == '1':
            while True:
                print()
                print('What would you like to do?')
                print()
                print("1 - See all artist's albums and songs")
                print()
                print("2- see your artist's top songs")
                print()
                print('0 - exit')
                print()
                user_input = input("Enter your choice: ")
                print()
                # Break:
                if user_input == '0':
                    break
                # See all artist's albums and songs:
                elif user_input == '1':
                    artist_name = input("Enter your favorite artist name: ")
                    get_artist_Albums_and_tracks(artist_name)
                # See your artist's top songs:
                elif user_input == '2':
                    artist_name = input("Enter your favorite artist name: ")
                    search_results = spotify_object.search(artist_name, 1, 0, 'artist')
                    try:
                        artist = search_results['artists']['items'][0]
                    except IndexError:
                        print()
                        print("Artist name does not exist. Please make sure there are no typos and try again")
                        print()
                    else:
                        # The program continues only if the artist name exists in Spotify.
                        artistID = artist['id']
                        artist_top_songs = spotify_object.artist_top_tracks(artistID)['tracks']
                        # print(json.dumps(artist_top_songs, sort_keys=True, indent=4))
                        song_numerating = 0
                        artist_top_songs_name_list = []
                        artist_top_songs_uri_list = []
                        for song in artist_top_songs:
                            print(f"{song_numerating} - {song['name']}")
                            artist_top_songs_name_list.append(song['name'])
                            artist_top_songs_uri_list.append(song['uri'])
                            song_numerating += 1
                        while True:
                            print()
                            print("What would you like to do now?")
                            print()
                            print("1 - Play a specific song with lyrics")
                            print()
                            print("2 - Play all current top artist's tracks with lyrics")
                            print()
                            print('0 - exit')
                            print()
                            print('*' * 80)
                            user_num_choice = input("Enter your choice: ")
                            print()
                            if user_num_choice == '0':
                                break
                            # Play a specific song with lyrics:
                            elif user_num_choice == '1':
                                user_song_num = input("Enter song number: ")
                                song_name = artist_top_songs_name_list[int(user_song_num)]
                                song_uri = artist_top_songs_uri_list[int(user_song_num)]
                                Lyrics_Player.get_lyrics(artist_name, song_name)
                                time.sleep(1)
                                play(song_uri, artist_name, song_name)
                            # Play all the artist's top tracks:
                            elif user_num_choice == '2':
                                # Passing a list of URIs plays them all one by
                                # one; next()/previous() etc. can then be used.
                                play(None, None, None, artist_top_songs_uri_list)
                                time.sleep(0.5)
                                current_song_lyrics()
                                music_player_interface()
                            else:
                                print()
                                print("invalid number entered. Please try again")
                                print()
                else:
                    print()
                    print("invalid number entered. Please try again")
                    print()
        # Only lyrics:
        elif user_num_choice == '2':
            artist_name = input("Enter artist name: ")
            song_name = input("Enter song name: ")
            Lyrics_Player.get_lyrics(artist_name, song_name)
        # Playlists:
        elif user_num_choice == '3':
            playlists()
        # User's top tracks: disabled -- doesn't work well, the top-listened
        # tracks are far from accurate.
        # elif user_num_choice == '4':
        #     user_top()
        elif user_num_choice == '4':
            try:
                current_song_lyrics()
            except:
                print("Error fetching the current song lyrics. Pleas make sure Spotify is open and "
                      "try again. ")
            else:
                pass
        # Invalid number:
        else:
            print()
            print("invalid number entered. Please try again")
            print()


def current_song_lyrics():
    """Print and fetch lyrics for whatever track is currently playing."""
    song_name = spotify_object.current_user_playing_track()['item']['name']
    artist_name = spotify_object.current_user_playing_track()['item']['artists'][0]['name']
    print(f"Playing {song_name} by {artist_name}")
    # print(json.dumps(artist_name, sort_keys=True, indent=4))
    Lyrics_Player.get_lyrics(artist_name, song_name)


def music_player_interface():
    """Simple transport-control loop: next/previous/pause/resume/lyrics."""
    while True:
        print()
        print('1 - next track')
        print()
        print('2 - previous track')
        print()
        print('3 - pause track')
        print()
        print('4 - resume track')
        print()
        print('5 - Show current song lyrics')
        print()
        print('0 - exit')
        print()
        user_input = input("Enter your choice: ")
        print()
        if user_input == '0':
            spotify_object.pause_playback()
            break
        # Next track:
        elif user_input == '1':
            try:
                spotify_object.next_track()
                time.sleep(0.3)
                current_song_lyrics()
            except:
                print("Cant switch songs while in Pause")
        # Previous track:
        elif user_input == '2':
            try:
                spotify_object.previous_track()
                current_song_lyrics()
            except:
                print("Cant switch songs while in Pause")
        # Pause:
        elif user_input == '3':
            try:
                spotify_object.pause_playback()
            except:
                print("Cant pause while already in pause")
        # Resume:
        elif user_input == '4':
            try:
                spotify_object.start_playback()
                current_song_lyrics()
            except:
                print("Cant resume while already playing")
        # Current song lyrics:
        elif user_input == '5':
            current_song_lyrics()
        else:
            print()
            print('Invalid choice number. Please try again')
            print()


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('dancers', '0007_auto_20180119_1522'), ] operations = [ migrations.AlterField( model_name='club', name='name', field=models.CharField(max_length=200, verbose_name='\u041a\u043b\u0443\u0431'), ), migrations.AlterField( model_name='dancer', name='reg', field=models.IntegerField(default=75, verbose_name='\u0420\u0435\u0433\u0438\u043e\u043d'), ), migrations.AlterField( model_name='rank', name='name', field=models.CharField(max_length=50, verbose_name='\u0420\u0430\u0437\u0440\u044f\u0434'), ), ]
#Area of a shape - Algorithm
#1. Greet User
#2. Present options
#3. Evaluate options
#4. Collect values based on selected option
#5. Evaluate values
#6. Calculate
#7. Throw result

#1. Greet User
print ("Hello, Welcome to our page")

#2. Present options
positive_response = ["Y", "Yes", "yes", "y"]
response = input("Do you want to perform a calculation")
shapes = ["triangle", "circle", "rectangle"]
print(" select any of the", shapes)

#3. Evaluate options
while response in positive_response:
    selectedshape = input("please select any shape = ")
    while selectedshape.lower() not in shapes:
        # Fix: message typo "0nly" -> "Only"
        print("Only select from the list")
        selectedshape = input("Please select a shape = ")

    #4. Collect values based on selected option
    if selectedshape == "circle":
        radius = input("please enter radius = ")
        #5. Evaluate values: re-prompt until a whole number is entered.
        # NOTE: isdigit() rejects decimals like "2.5"; kept as original behavior.
        while not radius.isdigit():
            radius = input("please enter radius = ")
        #6 Calculate and throw result (3.142 kept to preserve original output)
        print("the area of the circle with radius", radius, "is", (float(radius) ** 2) * 3.142)

    #7 Repeat step #4, #5 & #6 for other options
    elif selectedshape == "triangle":
        base = input("please enter base = ")
        height = input("please enter height = ")
        # Fix: the original condition was `not base.isdigit() and height.isdigit`
        # — `height.isdigit` without parentheses is a bound method, which is
        # always truthy, so the loop only validated `base`. Re-prompt until
        # BOTH values are digits.
        while not (base.isdigit() and height.isdigit()):
            base = input("please enter base = ")
            height = input("please enter height = ")
        print("the area of the triangle with base & height", base, "&", height, "=", (float(base) * float(height)) / 2)

    elif selectedshape == "rectangle":
        width = input("please enter width = ")
        length = input("please enter length = ")
        # Fix: same missing-call/operator bug as the triangle branch.
        while not (width.isdigit() and length.isdigit()):
            width = input("please enter width = ")
            length = input("please enter length = ")
        # Fix: result message wrongly said "triangle with base & height".
        print("the area of the rectangle with width & length", width, "&", length, "=", float(width) * float(length))

    response = input("Do you want to perform a calculation")
else:
    # while/else: runs once the user answers anything non-positive.
    print("we are happy to miss you")
#8 Prompt user for another option
""" Defines the blueprint for the users """ from flask import Blueprint from flask_restful import Api from resources import FilmResource from resources import FilmsResource FILM_BLUEPRINT = Blueprint("film", __name__) Api(FILM_BLUEPRINT).add_resource( FilmResource, "/film/<string:title>/<string:author>" ) FILMS_BLUEPRINT = Blueprint("films", __name__) Api(FILMS_BLUEPRINT).add_resource( FilmsResource, "/films/<string:type>/<string:author>" )
""" Combine multiple saved pickled Pandas data frames into a single pickled Pandas data frame. """ import pickle import optparse import json import pandas as pd # Parse the input arguments. parser = optparse.OptionParser() parser.add_option('-I', '--input', type='str', dest='input_filename', help='Input file specifying the data to collect and analyze.') parser.add_option('-N', '--nfiles', type='int', dest='num_files', help='Number of files to combine.') (options, args) = parser.parse_args() # The input filename. input_filename = options.input_filename num_files = options.num_files # Read the input file. input_file = open(input_filename, 'r') input_args = json.load(input_file) input_file.close() # The combined output filename to save the collected data to. combined_output_filename = '{}.p'.format(input_args['output_filename']) list_of_dfs = [] for ind_file in range(num_files): output_filename = '{}_{}.p'.format(input_args['output_filename'], ind_file) df_file = pd.read_pickle(output_filename) list_of_dfs.append(df_file) # Create a combined pandas DataFrame. df = pd.concat(list_of_dfs) # Pickle the pandas DataFrame. df.to_pickle(combined_output_filename)
import glob
import re
import threading

import tensorflow as tf
from tensorflow.contrib import ffmpeg

from wavenet import WaveNet

BATCH_SIZE = 1
CHANNELS = 256
DATA_DIRECTORY = './VCTK-Corpus'


def create_vctk_inputs(directory):
    """Build input tensors from the VCTK corpus.

    Returns a (waveform, text, speaker_id) triple of tensors: the decoded
    audio, the raw transcript bytes, and the speaker index remapped into
    [0, num_speakers).
    """
    # TODO make sure that text is matched correctly to the samples
    # We retrieve each audio sample, the corresponding text, and the speaker id
    audio_filenames = glob.glob(directory + '/wav48/*/*.wav')
    # Mysteriously, speaker 315 doesn't have any text files associated with them
    audio_filenames = list(filter(lambda x: not '315' in x, audio_filenames))
    text_filenames = [f.replace('wav48', 'txt').replace('.wav', '.txt')
                      for f in audio_filenames]

    # Find the speaker ID and map it into a range [0, ..., num_speakers]
    dirs = glob.glob(directory + '/txt/p*')
    SPEAKER_RE = r'p([0-9]+)'
    ids = [re.findall(SPEAKER_RE, d)[0] for d in dirs]
    speaker_map = {speaker_id: idx for idx, speaker_id in enumerate(ids)}
    speaker = [speaker_map[re.findall(SPEAKER_RE, p)[0]] for p in text_filenames]

    audio_files = tf.train.string_input_producer(audio_filenames)
    txt_files = tf.train.string_input_producer(text_filenames)
    speaker_values = tf.train.input_producer(speaker)

    reader_a = tf.WholeFileReader()
    _, audio_values = reader_a.read(audio_files)
    reader_b = tf.WholeFileReader()
    # Fix: the text queue was read with reader_a while reader_b sat unused;
    # each reader should own exactly one filename queue.
    _, txt_values = reader_b.read(txt_files)

    waveform = ffmpeg.decode_audio(
        audio_values,
        file_format='wav',
        # Downsample to 8 kHz (1 << 13 = 8192 samples/sec; the original
        # comment claimed 16 kHz, which did not match the code)
        samples_per_second=1 << 13,
        # Corpus uses mono
        channel_count=1)
    return waveform, txt_values, speaker_values.dequeue()


def main():
    """Train a small WaveNet on VCTK audio with a background input queue."""
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))

    # Load raw waveform from VCTK corpus
    with tf.name_scope('create_inputs'):
        audio, txt, speaker = create_vctk_inputs(DATA_DIRECTORY)
        queue = tf.PaddingFIFOQueue(
            256,
            ["float32", "string", "int32"],
            shapes=[(None, 1), (), ()])
        enqueue = queue.enqueue([audio, txt, speaker])
        # Only the audio is consumed by the loss; text/speaker are dequeued
        # and discarded for now.
        audio_batch, _, _ = queue.dequeue_many(BATCH_SIZE)

    # Create network
    dilations = [1, 2, 4, 8, 16, 32]
    net = WaveNet(BATCH_SIZE, CHANNELS, dilations)
    loss = net.loss(audio_batch)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.10)
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)

    # Set up logging for TensorBoard
    writer = tf.train.SummaryWriter('./logdir')
    writer.add_graph(tf.get_default_graph())
    summaries = tf.merge_all_summaries()

    init = tf.initialize_all_variables()
    sess.run(init)

    # Enqueue examples in the background
    coord = tf.train.Coordinator()
    qr = tf.train.QueueRunner(queue, [enqueue])
    qr.create_threads(sess, coord=coord, start=True)
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(1000):
        summary, loss_value, _ = sess.run([summaries, loss, optim])
        writer.add_summary(summary, i)
        print('Loss: {}'.format(loss_value))

    coord.request_stop()
    coord.join(threads)


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*- """ Created on Wed Jun 17 20:58:41 2020 @author: sdavis """ import h5py import csv import numpy as np import os import gdal, osr import matplotlib.pyplot as plt import sys from math import floor import time import warnings import pandas as pd from multiprocessing import Pool import functools import seaborn as sns from functools import partial def array2raster(newRasterfn,rasterOrigin,pixelWidth,pixelHeight,array,epsg): cols = array.shape[1] rows = array.shape[0] originX = rasterOrigin[0] originY = rasterOrigin[1] driver = gdal.GetDriverByName('GTiff') outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32) outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight)) outband = outRaster.GetRasterBand(1) outband.WriteArray(array) outRasterSRS = osr.SpatialReference() outRasterSRS.ImportFromEPSG(epsg) outRaster.SetProjection(outRasterSRS.ExportToWkt()) outband.FlushCache() def stack_images(tif_files): #takes a list of tif files and stacks them all up #Get extents of all images min_x,min_y,max_x,max_y,bands = get_map_extents(tif_files) #Creates an empty image full_extent = np.zeros((int(np.ceil(max_y-min_y)),int(np.ceil(max_x-min_x)),int(bands)),dtype=np.float) file_counter=0 for tif_file in tif_files: data_layer, metadata = raster2array(tif_file) ul_x = metadata['ext_dict']['xMin'] lr_x = metadata['ext_dict']['xMax'] ul_y = metadata['ext_dict']['yMax'] lr_y = metadata['ext_dict']['yMin'] bands = metadata['bands'] rows = int(ul_y-lr_y); columns = int(lr_x-ul_x); start_index_x = int(np.ceil(abs(max_y - ul_y))); start_index_y = int(np.ceil(abs(min_x - ul_x))); full_extent[start_index_x:start_index_x+rows,start_index_y:start_index_y+columns,file_counter:file_counter+bands] = data_layer #np.reshape(data_layer, (data_layer_shape[0], data_layer_shape[1])) file_counter += bands return full_extent, min_x,min_y,max_x,max_y def raster2array(geotif_file): metadata = {} dataset = gdal.Open(geotif_file) metadata['array_rows'] 
= dataset.RasterYSize metadata['array_cols'] = dataset.RasterXSize metadata['bands'] = dataset.RasterCount metadata['driver'] = dataset.GetDriver().LongName metadata['projection'] = dataset.GetProjection() metadata['geotransform'] = dataset.GetGeoTransform() mapinfo = dataset.GetGeoTransform() metadata['pixelWidth'] = mapinfo[1] metadata['pixelHeight'] = mapinfo[5] metadata['ext_dict'] = {} metadata['ext_dict']['xMin'] = mapinfo[0] metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize/mapinfo[1] metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize/mapinfo[5] metadata['ext_dict']['yMax'] = mapinfo[3] metadata['extent'] = (metadata['ext_dict']['xMin'],metadata['ext_dict']['xMax'], metadata['ext_dict']['yMin'],metadata['ext_dict']['yMax']) raster = dataset.GetRasterBand(1) metadata['noDataValue'] = raster.GetNoDataValue() metadata['scaleFactor'] = raster.GetScale() # band statistics metadata['bandstats'] = {} #make a nested dictionary to store band stats in same stats = raster.GetStatistics(True,True) metadata['bandstats']['min'] = round(stats[0],2) metadata['bandstats']['max'] = round(stats[1],2) metadata['bandstats']['mean'] = round(stats[2],2) metadata['bandstats']['stdev'] = round(stats[3],2) array = np.empty((metadata['array_rows'],metadata['array_cols'],metadata['bands']), dtype=np.float) for band_counter in range(1,metadata['bands']+1): array[:,:,band_counter-1] = dataset.GetRasterBand(band_counter).ReadAsArray(0,0,metadata['array_cols'],metadata['array_rows']).astype(np.float) array[array==int(metadata['noDataValue'])]=np.nan array = array/metadata['scaleFactor'] return array, metadata def get_map_extents(files): max_x=0; min_x=1000000; max_y=0; min_y=9000000; band_counter = 0 filename, file_extension = os.path.splitext(files[0]) for file in files: if file_extension == '.h5': ul_x,ul_y,lr_x,lr_y = get_image_extent_h5(file) elif file_extension == '.tif': array, metadata = raster2array(file) ul_x = metadata['ext_dict']['xMin'] lr_x = 
metadata['ext_dict']['xMax'] ul_y = metadata['ext_dict']['yMax'] lr_y = metadata['ext_dict']['yMin'] num_bands = metadata['bands'] if ul_x < min_x: min_x = ul_x if ul_y > max_y: max_y = ul_y if lr_x > max_x: max_x = lr_x if lr_y < min_y: min_y = lr_y band_counter += num_bands return min_x,min_y,max_x,max_y,band_counter def matchingFiles(coverage_directory, data_product_directory, data_product_file): tif_files_list = [] CoverageFiles = os.listdir(coverage_directory) CoverageTifFiles = [i for i in CoverageFiles if i.endswith('.tif')] for CoverageTifFile in CoverageTifFiles: coverage_tif_file_split = CoverageTifFile.split('_') site_coverage = coverage_tif_file_split[1] for root, dirs, files in os.walk(data_product_directory): for fname in files: root = root.replace('\\', '/') root_split = root.split('/') site_data_product = root_split[4].split('_')[1] if site_coverage == site_data_product and fname == data_product_file: tif_files_list.append([os.path.join(coverage_directory,CoverageTifFile), os.path.join(root,fname)]) return tif_files_list def percent_covered(matchingFiles): firstFile= matchingFiles[0].split('/')[3] site_name = firstFile.split('_')[1] print("Site name:", site_name) full_extent, min_x,min_y,max_x,max_y = stack_images(matchingFiles) full_extent[np.isnan(full_extent)] = 0 reference_area = np.zeros_like(full_extent[:,:,0]) reference_area[full_extent[:,:,1]!=0]=1 np.sum(reference_area) reference_coverage_area = np.multiply(reference_area, full_extent[:,:,0]) AreaCoveredDataProduct_Site = np.sum(reference_coverage_area) / np.sum(full_extent[:,:,0]) return site_name, AreaCoveredDataProduct_Site def weather_percent_covered(matchingFiles): firstFile= matchingFiles[0].split('/')[3] siteName = firstFile.split('_')[1] full_extent, min_x,min_y,max_x,max_y = stack_images(matchingFiles) full_extent[np.isnan(full_extent)] = 0 referenceAreaRed = np.zeros_like(full_extent[:,:,0]) referenceAreaYellow = np.zeros_like(full_extent[:,:,0]) referenceAreaGreen = 
np.zeros_like(full_extent[:,:,0]) referenceAreaRed[full_extent[:,:,2]==23]=1 referenceAreaYellow[full_extent[:,:,3]==6]=1 referenceAreaGreen[full_extent[:,:,1]==23]=1 np.sum(referenceAreaRed) np.sum(referenceAreaYellow) np.sum(referenceAreaGreen) referenceAreaRed = np.multiply(referenceAreaRed, full_extent[:,:,0]) referenceAreaYellow = np.multiply(referenceAreaYellow, full_extent[:,:,0]) referenceAreaGreen = np.multiply(referenceAreaGreen, full_extent[:,:,0]) pixelsRed = np.sum(referenceAreaRed) pixelsYellow = np.sum(referenceAreaYellow) pixelsGreen = np.sum(referenceAreaGreen) pixelsSummed = np.sum([pixelsYellow,pixelsGreen,pixelsRed], dtype='float64') totalPixels = np.sum(full_extent[:,:,0]) pixelsMissed = (totalPixels - pixelsSummed) areaGreen = np.sum(referenceAreaGreen) / np.sum(full_extent[:,:,0]) areaRed = np.sum(referenceAreaRed) / np.sum(full_extent[:,:,0]) areaYellow = np.sum(referenceAreaYellow) / np.sum(full_extent[:,:,0]) return [siteName, areaGreen, areaRed, areaYellow, pixelsGreen, pixelsRed, pixelsYellow, totalPixels, pixelsSummed, pixelsMissed] def lidarUncertainty(matchingFiles, threshold): firstFile = matchingFiles[0] firstPath = firstFile.split('/')[3] siteName = firstPath.split('_')[1] full_extent, min_x,min_y,max_x,max_y = stack_images(matchingFiles) full_extent[np.isnan(full_extent)] = 0 uncertaintyReferenceArea = np.zeros_like(full_extent[:,:,0]) uncertaintyReferenceArea[full_extent[:,:,1]<threshold]=1 thresholdLayer = np.multiply(uncertaintyReferenceArea, full_extent[:,:,0]) sumThreshLayer = np.sum(thresholdLayer) sumFullExtent = np.sum(full_extent[:,:,0]) percentBelow = sumThreshLayer/sumFullExtent return [siteName, percentBelow, sumThreshLayer, sumFullExtent] if __name__ == '__main__': print('Starting program') print('Before the P1FB pool') weatherCoverageFilesP1FB = matchingFiles('D:/Coverage/P1FB/', 'D:/2019/FullSite/', 'Weather_Quality_Indicator.tif') pool = Pool(processes = 30) weatherOutputsP1FB = pool.map(weather_percent_covered, 
weatherCoverageFilesP1FB) print('Done') weatherCoverageFilesTOS = matchingFiles('D:/Coverage/TOS/', 'D:/2019/FullSite/', 'Weather_Quality_Indicator.tif') print("Before the TOS pool") pool = Pool(processes = 30) weatherOutputsTOS = pool.map(weather_percent_covered, weatherCoverageFilesTOS) print("Done") weatherCoverageFilesAirsheds = matchingFiles('D:/Coverage/Airsheds/', 'D:/2019/FullSite/', 'Weather_Quality_Indicator.tif') print("Before the Airsheds pool") pool = Pool(processes =30) weatherOutputsAirsheds = pool.map(weather_percent_covered, weatherCoverageFilesAirsheds) print("Done") #P1FB lists for df siteP1FB = [] areaGreenP1FB = [] areaRedP1FB = [] areaYellowP1FB = [] boundaryP1FB = [] yearP1FB = [] for lst in weatherOutputsP1FB: siteP1FB.append(lst[0]) areaGreenP1FB.append(lst[1]) areaRedP1FB.append(lst[2]) areaYellowP1FB.append(lst[3]) boundaryP1FB.append('P1FB') yearP1FB.append(2019) #TOS lists for df siteTOS = [] areaGreenTOS = [] areaRedTOS = [] areaYellowTOS = [] boundaryTOS = [] yearTOS = [] for lst in weatherOutputsTOS: siteTOS.append(lst[0]) areaGreenTOS.append(lst[1]) areaRedTOS.append(lst[2]) areaYellowTOS.append(lst[3]) boundaryTOS.append('TOS') yearTOS.append(2019) #Airsheds list for df siteAirsheds = [] areaGreenAirsheds = [] areaRedAirsheds = [] areaYellowAirsheds = [] boundaryAirsheds = [] yearAirsheds = [] for lst in weatherOutputsAirsheds: siteAirsheds.append(lst[0]) areaGreenAirsheds.append(lst[1]) areaRedAirsheds.append(lst[2]) areaYellowAirsheds.append(lst[3]) boundaryAirsheds.append('Airsheds') yearAirsheds.append(2019) dfP1FB = pd.DataFrame(columns = ['Sites', 'Boundary', 'Year', 'Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered']) dfP1FB['Sites'] = siteP1FB dfP1FB['Green Percent Covered'] = areaGreenP1FB dfP1FB['Red Percent Covered'] = areaRedP1FB dfP1FB['Yellow Percent Covered'] = areaYellowP1FB dfP1FB['Year'] = yearP1FB dfP1FB['Boundary'] = boundaryP1FB dfP1FB.set_index('Sites', inplace=True, drop=True) dfTOS = 
pd.DataFrame(columns = ['Sites', 'Boundary', 'Year', 'Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered']) dfTOS['Sites'] = siteTOS dfTOS['Green Percent Covered'] = areaGreenTOS dfTOS['Red Percent Covered'] = areaRedTOS dfTOS['Yellow Percent Covered'] = areaYellowTOS dfTOS['Year'] = yearTOS dfTOS['Boundary'] = boundaryTOS dfTOS.set_index('Sites', inplace=True, drop=True) dfAirsheds = pd.DataFrame(columns = ['Sites', 'Boundary', 'Year', 'Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered']) dfAirsheds['Sites'] = siteAirsheds dfAirsheds['Green Percent Covered'] = areaGreenAirsheds dfAirsheds['Red Percent Covered'] = areaRedAirsheds dfAirsheds['Yellow Percent Covered'] = areaYellowAirsheds dfAirsheds['Boundary'] = boundaryAirsheds dfAirsheds['Year'] = yearAirsheds dfAirsheds.set_index('Sites', inplace=True, drop=True) dfWeather = pd.concat([dfP1FB, dfTOS, dfAirsheds]) fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(20,10)) for i, (name, group) in enumerate(dfWeather.groupby('Boundary')): axes[i].set_title(name) group.plot(kind="bar", y=['Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered'], ax=axes[i], legend=True, stacked=True, color=['g', 'r', 'y']) axes[i].set_xlabel("") axes[i].set_ylabel("Percent Covered") axes[i].set_xlabel("Sites") plt.tight_layout() plotP1FB = dfP1FB.loc[:, ['Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered']].plot(kind='bar', stacked=True, color=['g', 'r', 'y']) plt.title('Weather Percent Covered for P1FB Boundary') plotAirsheds = dfAirsheds.loc[:, ['Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered']].plot(kind='bar', stacked=True, color=['g', 'r', 'y']) plt.title('Weather Percent Covered for Airsheds Boundary') plotTOS = dfTOS.loc[:, ['Green Percent Covered', 'Red Percent Covered', 'Yellow Percent Covered']].plot(kind='bar', stacked=True, color=['g', 'r', 'y']) plt.title('Weather Percent Covered for TOS Boundary') #P1FB lists for 
pixel counts p1fbPixelSites = [] p1fbPixelsGreen = [] p1fbPixelsRed = [] p1fbPixelsYellow = [] p1fbPixelsTotal = [] p1fbPixelsMissed = [] p1fbPixelYear = [] p1fbBoundaryName = [] for lst in weatherOutputsP1FB: p1fbPixelSites.append(lst[0]) p1fbPixelsGreen.append(lst[4]) p1fbPixelsRed.append(lst[5]) p1fbPixelsYellow.append(lst[6]) p1fbPixelsMissed.append(lst[9]) p1fbPixelsTotal.append(lst[7]) p1fbPixelYear.append(2019) p1fbBoundaryName.append('P1FB') dfPixelP1FB = pd.DataFrame(columns = ['Sites', 'Boundary', 'Year', 'Green Pixels', 'Red Pixels', 'Yellow Pixels', 'Total Pixels', 'Pixels Missed']) dfPixelP1FB['Sites'] = p1fbPixelSites dfPixelP1FB['Boundary'] = p1fbBoundaryName dfPixelP1FB['Year'] = p1fbPixelYear dfPixelP1FB['Green Pixels'] = p1fbPixelsGreen dfPixelP1FB['Red Pixels'] = p1fbPixelsRed dfPixelP1FB['Yellow Pixels'] = p1fbPixelsYellow dfPixelP1FB['Total Pixels'] = p1fbPixelsTotal dfPixelP1FB['Pixels Missed'] = p1fbPixelsMissed p1fbSummedPixels = dfPixelP1FB.sum(axis=0) p1fbTotalGreenPix = p1fbSummedPixels['Green Pixels'] p1fbTotalRedPix = p1fbSummedPixels['Red Pixels'] p1fbTotalYellowPix = p1fbSummedPixels['Yellow Pixels'] p1fbTotalMissedPix = p1fbSummedPixels['Pixels Missed'] p1fbTotalPixs = p1fbSummedPixels['Total Pixels'] p1fbPercentPixGreen = p1fbTotalGreenPix/p1fbTotalPixs p1fbPercentPixRed = p1fbTotalRedPix/p1fbTotalPixs p1fbPercentPixYellow = p1fbTotalYellowPix/p1fbTotalPixs p1fbPercentMissedPix = p1fbTotalMissedPix/p1fbTotalPixs p1fbTotalPixelData = {'Boundary': ['P1FB'], 'Year': [2019], 'Percent Green Pixels': p1fbPercentPixGreen, 'Percent Red Pixels': p1fbPercentPixRed, 'Percent Yellow Pixels': p1fbPercentPixYellow, 'Percent Missed Pixels': p1fbPercentMissedPix} dfTotalPixelP1FB = pd.DataFrame(p1fbTotalPixelData, columns = ['Boundary', 'Year', 'Percent Green Pixels', 'Percent Red Pixels', 'Percent Yellow Pixels', 'Percent Missed Pixels']) #TOS lists pixel counts tosPixelSites = [] tosPixelsGreen = [] tosPixelsRed = [] tosPixelsYellow = [] 
tosPixelsTotal = [] tosPixelsMissed = [] tosPixelYear = [] tosBoundaryName = [] for lst in weatherOutputsTOS: tosPixelSites.append(lst[0]) tosPixelsGreen.append(lst[4]) tosPixelsRed.append(lst[5]) tosPixelsYellow.append(lst[6]) tosPixelsMissed.append(lst[9]) tosPixelsTotal.append(lst[7]) tosPixelYear.append(2019) tosBoundaryName.append('TOS') dfPixelTOS = pd.DataFrame(columns = ['Sites', 'Boundary', 'Year', 'Green Pixels', 'Red Pixels', 'Yellow Pixels', 'Total Pixels', 'Pixels Missed']) dfPixelTOS['Sites'] = tosPixelSites dfPixelTOS['Boundary'] = tosBoundaryName dfPixelTOS['Year'] = tosPixelYear dfPixelTOS['Green Pixels'] = tosPixelsGreen dfPixelTOS['Red Pixels'] = tosPixelsRed dfPixelTOS['Yellow Pixels'] = tosPixelsYellow dfPixelTOS['Total Pixels'] = tosPixelsTotal dfPixelTOS['Pixels Missed'] = tosPixelsMissed tosSummedPixels = dfPixelTOS.sum(axis=0) tosTotalGreenPix = tosSummedPixels['Green Pixels'] tosTotalRedPix = tosSummedPixels['Red Pixels'] tosTotalYellowPix = tosSummedPixels['Yellow Pixels'] tosTotalMissedPix = tosSummedPixels['Pixels Missed'] tosTotalPixs = tosSummedPixels['Total Pixels'] tosPercentPixGreen = tosTotalGreenPix/tosTotalPixs tosPercentPixRed = tosTotalRedPix/tosTotalPixs tosPercentPixYellow = tosTotalYellowPix/tosTotalPixs tosPercentMissedPix = tosTotalMissedPix/tosTotalPixs tosTotalPixelData = {'Boundary': ['TOS'], 'Year': [2019], 'Percent Green Pixels': tosPercentPixGreen, 'Percent Red Pixels': tosPercentPixRed, 'Percent Yellow Pixels': tosPercentPixYellow, 'Percent Missed Pixels': tosPercentMissedPix} dfTotalPixelTos = pd.DataFrame(tosTotalPixelData, columns = ['Boundary', 'Year', 'Percent Green Pixels', 'Percent Red Pixels', 'Percent Yellow Pixels', 'Percent Missed Pixels']) #Airsheds list pixel counts airshedsPixelSites = [] airshedsPixelsGreen = [] airshedsPixelsRed = [] airshedsPixelsYellow = [] airshedsPixelsTotal = [] airshedsPixelsMissed = [] airshedsPixelYear = [] airshedsBoundaryName = [] for lst in weatherOutputsAirsheds: 
airshedsPixelSites.append(lst[0]) airshedsPixelsGreen.append(lst[4]) airshedsPixelsRed.append(lst[5]) airshedsPixelsYellow.append(lst[6]) airshedsPixelsMissed.append(lst[9]) airshedsPixelsTotal.append(lst[7]) airshedsPixelYear.append(2019) airshedsBoundaryName.append('Airsheds') dfPixelAirsheds = pd.DataFrame(columns = ['Sites', 'Boundary', 'Year', 'Green Pixels', 'Red Pixels', 'Yellow Pixels', 'Total Pixels', 'Pixels Missed']) dfPixelAirsheds['Sites'] = airshedsPixelSites dfPixelAirsheds['Boundary'] = airshedsBoundaryName dfPixelAirsheds['Year'] = airshedsPixelYear dfPixelAirsheds['Green Pixels'] = airshedsPixelsGreen dfPixelAirsheds['Red Pixels'] = airshedsPixelsRed dfPixelAirsheds['Yellow Pixels'] = airshedsPixelsYellow dfPixelAirsheds['Total Pixels'] = airshedsPixelsTotal dfPixelAirsheds['Pixels Missed'] = airshedsPixelsMissed airshedsSummedPixels = dfPixelAirsheds.sum(axis=0) airshedsTotalGreenPix = airshedsSummedPixels['Green Pixels'] airshedsTotalRedPix = airshedsSummedPixels['Red Pixels'] airshedsTotalYellowPix = airshedsSummedPixels['Yellow Pixels'] airshedsTotalMissedPix = airshedsSummedPixels['Pixels Missed'] airshedsTotalPixs = airshedsSummedPixels['Total Pixels'] airshedsPercentPixGreen = airshedsTotalGreenPix/airshedsTotalPixs airshedsPercentPixRed = airshedsTotalRedPix/airshedsTotalPixs airshedsPercentPixYellow = airshedsTotalYellowPix/airshedsTotalPixs airshedsPercentMissedPix = airshedsTotalMissedPix/airshedsTotalPixs airshedsTotalPixelData = {'Boundary': ['Airsheds'], 'Year': [2019], 'Percent Green Pixels': airshedsPercentPixGreen, 'Percent Red Pixels': airshedsPercentPixRed, 'Percent Yellow Pixels': airshedsPercentPixYellow, 'Percent Missed Pixels': airshedsPercentMissedPix} dfTotalPixelAirsheds = pd.DataFrame(airshedsTotalPixelData, columns = ['Boundary', 'Year', 'Percent Green Pixels', 'Percent Red Pixels', 'Percent Yellow Pixels', 'Percent Missed Pixels']) dfTotalPixelBoundaries = pd.concat([dfTotalPixelAirsheds, dfTotalPixelP1FB, 
dfTotalPixelTos]) dfTotalPixelBoundaries.set_index('Boundary', inplace=True, drop=True) plotTotalPixels = dfTotalPixelBoundaries.loc[:, ['Percent Green Pixels', 'Percent Red Pixels', 'Percent Yellow Pixels', 'Percent Missed Pixels']].plot(kind='bar', stacked=True, color=['g', 'r', 'y', 'b']) #Lidar products #Horizontal Uncertainty P1FB print("Matching P1FB with Horizontal Uncertainty") lidarFilesHorzP1FB = matchingFiles('D:/Coverage/P1FB/', 'D:/2019/FullSite/', 'HorzUncertainty.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 0.58) lidarOutputsHorzP1FB = pool.map(func, lidarFilesHorzP1FB) #Vertical Uncertainty P1FB print("Matching P1FB with Vertical Uncertainty") lidarFilesVertP1FB = matchingFiles('D:/Coverage/P1FB/', 'D:/2019/FullSite/', 'VertUncertainty.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 0.15) lidarOutputsVertP1FB = pool.map(func, lidarFilesVertP1FB) #Horizontal Uncertainty TOS print("Matching TOS with Horizontal Uncertainty") lidarFilesHorzTOS = matchingFiles('D:/Coverage/TOS/', 'D:/2019/FullSite/', 'HorzUncertainty.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 0.58) lidarOutputsHorzTOS = pool.map(func, lidarFilesHorzTOS) #Vertical Uncertainty TOS print("Matching TOS with Vertical Uncertainty") lidarFilesVertTOS = matchingFiles('D:/Coverage/TOS/', 'D:/2019/FullSite/', 'VertUncertainty.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 0.15) lidarOutputsVertTOS = pool.map(func, lidarFilesVertTOS) #Horizontal Uncertainty Airsheds print("Matching Airsheds with Horizontal Uncertainty") lidarFilesHorzAirsheds = matchingFiles('D:/Coverage/Airsheds/', 'D:/2019/FullSite/', 'HorzUncertainty.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 0.58) lidarOutputsHorzAirsheds = pool.map(func, 
lidarFilesHorzAirsheds) #Vertical Uncertainty Airsheds print("Matching Airsheds with Vertical Uncertainty") lidarFilesVertAirsheds = matchingFiles('D:/Coverage/Airsheds/', 'D:/2019/FullSite/', 'VertUncertainty.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 0.15) lidarOutputsVertAirsheds = pool.map(func, lidarFilesVertAirsheds) #P1FB Longest_Triangular_Edge_All_Points print("Running P1FB with Longest_Triangular_Edge_All_Points.tif") lidarFilesTriAllP1FB = matchingFiles('D:/Coverage/P1FB/', 'D:/2019/FullSite/', 'Longest_Triangular_Edge_All_Points.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 1.0) lidarOutputsTriAllP1FB = pool.map(func, lidarFilesTriAllP1FB) #P1FB Longest_Triangular_Edge_Ground_Points print("Running P1FB with Longest_Triangular_Edge_Ground_Points.tif") lidarFilesTriGroundP1FB = matchingFiles('D:/Coverage/P1FB/', 'D:/2019/FullSite/', 'Longest_Triangular_Edge_Ground_Points.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 1.0) lidarOutputsTriGroundP1FB = pool.map(func, lidarFilesTriGroundP1FB) #TOS Longest_Triangular_Edge_All_Points print("Running TOS with Longest_Triangular_Edge_All_Points.tif") lidarFilesTriAllTOS = matchingFiles('D:/Coverage/TOS/', 'D:/2019/FullSite/', 'Longest_Triangular_Edge_All_Points.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 1.0) lidarOutputsTriAllTOS = pool.map(func, lidarFilesTriAllTOS) #TOS Longest_Triangular_Edge_Ground_Points print("Running TOS with Longest_Triangular_Edge_Ground_Points.tif") lidarFilesTriGroundTOS = matchingFiles('D:/Coverage/TOS/', 'D:/2019/FullSite/', 'Longest_Triangular_Edge_Ground_Points.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 1.0) lidarOutputsTriGroundTOS = pool.map(func, lidarFilesTriGroundTOS) #Airsheds 
Longest_Triangular_Edge_All_Points print("Running Airsheds with Longest_Triangular_Edge_All_Points.tif") lidarFilesTriAllAirsheds = matchingFiles('D:/Coverage/Airsheds/', 'D:/2019/FullSite/', 'Longest_Triangular_Edge_All_Points.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 1.0) lidarOutputsTriAllAirsheds = pool.map(func, lidarFilesTriAllAirsheds) #Airsheds Longest_Triangular_Edge_Ground_Points print("Running Airsheds with Longest_Triangular_Edge_Ground_Points.tif") lidarFilesTriGroundAirsheds = matchingFiles('D:/Coverage/Airsheds/', 'D:/2019/FullSite/', 'Longest_Triangular_Edge_Ground_Points.tif') print("Before the pool") pool = Pool(processes = 30) func = partial(lidarUncertainty, threshold = 1.0) lidarOutputsTriGroundAirsheds = pool.map(func, lidarFilesTriGroundAirsheds) #P1FB Uncertainty Plot site = [] percentBelow = [] pixelsBelow = [] totalPixels = [] for lst in lidarOutputsHorzP1FB: site.append(lst[0]) percentBelow.append(lst[1]) pixelsBelow.append(lst[2]) totalPixels.append(lst[3]) sitesVert = [] percentBelowVert = [] pixelsBelowVert = [] totalPixelsVert = [] for lst in lidarOutputsVertP1FB: percentBelowVert.append(lst[1]) pixelsBelowVert.append(lst[2]) totalPixelsVert.append(lst[3]) dfUncertP1FB = pd.DataFrame(columns = ['Sites', 'Percent Below Horizontal Uncertainty Threshold', 'Percent Below Vertical Uncertainty Threshold', 'Pixels Below Horizontal Uncertainty Threshold', 'Pixels Below Vertical Uncertainty Threshold', 'Total Pixels Horizontal', 'Total Pixels Vertical']) dfUncertP1FB['Sites'] = site dfUncertP1FB['Percent Below Horizontal Uncertainty Threshold'] = percentBelow dfUncertP1FB['Percent Below Vertical Uncertainty Threshold'] = percentBelowVert dfUncertP1FB['Pixels Below Horizontal Uncertainty Threshold'] = pixelsBelow dfUncertP1FB['Pixels Below Vertical Uncertainty Threshold'] = pixelsBelowVert dfUncertP1FB['Total Pixels Horizontal'] = totalPixels dfUncertP1FB['Total Pixels Vertical'] = 
totalPixelsVert dfUncertP1FB.set_index('Sites', inplace=True, drop=True) plotUncertP1FB = dfUncertP1FB.loc[:, ['Percent Below Horizontal Uncertainty Threshold', 'Percent Below Vertical Uncertainty Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'r']) plt.title('Percentage of Site Uncertainties within P1FB Boundary') plt.ylabel('Percent') #TOS Uncertainty Plot siteTOS = [] percentBelowTOS = [] pixelsBelowTOS = [] totalPixelsTOS = [] for lst in lidarOutputsHorzTOS: siteTOS.append(lst[0]) percentBelowTOS.append(lst[1]) pixelsBelowTOS.append(lst[2]) totalPixelsTOS.append(lst[3]) percentBelowVertTOS = [] pixelsBelowVertTOS = [] totalPixelsVertTOS = [] for lst in lidarOutputsVertTOS: percentBelowVertTOS.append(lst[1]) pixelsBelowVertTOS.append(lst[2]) totalPixelsVertTOS.append(lst[3]) dfUncertTOS = pd.DataFrame(columns = ['Sites', 'Percent Below Horizontal Uncertainty Threshold', 'Percent Below Vertical Uncertainty Threshold', 'Pixels Below Horizontal Uncertainty Threshold', 'Pixels Below Vertical Uncertainty Threshold', 'Total Pixels Horizontal', 'Total Pixels Vertical']) dfUncertTOS['Sites'] = siteTOS dfUncertTOS['Percent Below Horizontal Uncertainty Threshold'] = percentBelowTOS dfUncertTOS['Percent Below Vertical Uncertainty Threshold'] = percentBelowVertTOS dfUncertTOS['Pixels Below Horizontal Uncertainty Threshold'] = pixelsBelowTOS dfUncertTOS['Pixels Below Vertical Uncertainty Threshold'] = pixelsBelowVertTOS dfUncertTOS['Total Pixels Horizontal'] = totalPixelsTOS dfUncertTOS['Total Pixels Vertical'] = totalPixelsVertTOS dfUncertTOS.set_index('Sites', inplace=True, drop=True) plotUncertTOS = dfUncertTOS.loc[:, ['Percent Below Horizontal Uncertainty Threshold', 'Percent Below Vertical Uncertainty Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'r']) plt.title('Percentage of Each Site in TOS Boundary Under Uncertainty Threshold') plt.ylabel('Percent') #Airsheds Uncertainty Plot siteAirsheds = [] percentBelowAirsheds = [] 
pixelsBelowAirsheds = [] totalPixelsAirsheds = [] for lst in lidarOutputsHorzAirsheds: siteAirsheds.append(lst[0]) percentBelowAirsheds.append(lst[1]) pixelsBelowAirsheds.append(lst[2]) totalPixelsAirsheds.append(lst[3]) percentBelowVertAirsheds = [] pixelsBelowVertAirsheds = [] totalPixelsVertAirsheds = [] for lst in lidarOutputsVertAirsheds: percentBelowVertAirsheds.append(lst[1]) pixelsBelowVertAirsheds.append(lst[2]) totalPixelsVertAirsheds.append(lst[3]) dfUncertAirsheds = pd.DataFrame(columns = ['Sites', 'Percent Below Horizontal Uncertainty Threshold', 'Percent Below Vertical Uncertainty Threshold', 'Pixels Below Horizontal Uncertainty Threshold', 'Pixels Below Vertical Uncertainty Threshold', 'Total Pixels Horizontal', 'Total Pixels Vertical']) dfUncertAirsheds['Sites'] = siteAirsheds dfUncertAirsheds['Percent Below Horizontal Uncertainty Threshold'] = percentBelowAirsheds dfUncertAirsheds['Percent Below Vertical Uncertainty Threshold'] = percentBelowVertAirsheds dfUncertAirsheds['Pixels Below Horizontal Uncertainty Threshold'] = pixelsBelowAirsheds dfUncertAirsheds['Pixels Below Vertical Uncertainty Threshold'] = pixelsBelowVertAirsheds dfUncertAirsheds['Total Pixels Horizontal'] = totalPixelsAirsheds dfUncertAirsheds['Total Pixels Vertical'] = totalPixelsVertAirsheds dfUncertAirsheds.set_index('Sites', inplace=True, drop=True) plotUncertAirsheds = dfUncertAirsheds.loc[:, ['Percent Below Horizontal Uncertainty Threshold', 'Percent Below Vertical Uncertainty Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'r']) plt.title('Percentage of Each Site in Airsheds Boundary Under Uncertainty Threshold') plt.ylabel('Percent') airshedsUncertSummedPixels = dfUncertAirsheds.sum(axis=0) tosUncertSummedPixels = dfUncertTOS.sum(axis=0) p1fbUncertSummedPixels = dfUncertP1FB.sum(axis=0) siteAirsheds = 'Airsheds' airshedsTotalUnderHorz = airshedsUncertSummedPixels['Pixels Below Horizontal Uncertainty Threshold'] airshedsTotalUnderVert = 
airshedsUncertSummedPixels['Pixels Below Vertical Uncertainty Threshold'] airshedsTotalHorz = airshedsUncertSummedPixels['Total Pixels Horizontal'] airshedsTotalVert = airshedsUncertSummedPixels['Total Pixels Vertical'] siteTOS = 'TOS' tosTotalUnderHorz = tosUncertSummedPixels['Pixels Below Horizontal Uncertainty Threshold'] tosTotalUnderVert = tosUncertSummedPixels['Pixels Below Vertical Uncertainty Threshold'] tosTotalHorz = tosUncertSummedPixels['Total Pixels Horizontal'] tosTotalVert = tosUncertSummedPixels['Total Pixels Vertical'] sitep1fb = 'P1FB' p1fbTotalUnderHorz = p1fbUncertSummedPixels['Pixels Below Horizontal Uncertainty Threshold'] p1fbTotalUnderVert = p1fbUncertSummedPixels['Pixels Below Vertical Uncertainty Threshold'] p1fbTotalHorz = p1fbUncertSummedPixels['Total Pixels Horizontal'] p1fbTotalVert = p1fbUncertSummedPixels['Total Pixels Vertical'] threeBoundaries = ['Airsheds', 'TOS', 'P1FB'] percentUnderHorz = [airshedsTotalUnderHorz/airshedsTotalHorz, tosTotalUnderHorz/tosTotalHorz, p1fbTotalUnderHorz/p1fbTotalHorz] percentUnderVert = [airshedsTotalUnderVert/airshedsTotalVert, tosTotalUnderVert/tosTotalVert, p1fbTotalUnderVert/p1fbTotalVert] dfTotalUncertPixsBoundaries = pd.DataFrame(columns = ['Boundaries', 'Percent Under Horizontal Threshold', 'Percent Under Vertical Threshold']) dfTotalUncertPixsBoundaries['Boundaries'] = threeBoundaries dfTotalUncertPixsBoundaries['Percent Under Horizontal Threshold'] = percentUnderHorz dfTotalUncertPixsBoundaries['Percent Under Vertical Threshold'] = percentUnderVert dfTotalUncertPixsBoundaries.set_index('Boundaries', inplace=True, drop=True) plotTotalUncertPixBoundaries = dfTotalUncertPixsBoundaries.loc[:, ['Percent Under Horizontal Threshold', 'Percent Under Vertical Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'b']) plt.title('Percent Pixels Under Horizontal and Vertical Threshold by Boundary in 2019') plt.ylabel('Percent') #Triangle Interpolation Plots #P1FB Triangle_All Plot siteP1FBTriAll 
= [] percentBelowTriAll = [] pixelsBelowTriAll = [] totalPixelsTriAll = [] for lst in lidarOutputsTriAllP1FB: siteP1FBTriAll.append(lst[0]) percentBelowTriAll.append(lst[1]) pixelsBelowTriAll.append(lst[2]) totalPixelsTriAll.append(lst[3]) percentBelowTriGround = [] pixelsBelowTriGround = [] totalPixelsTriGround = [] for lst in lidarOutputsTriGroundP1FB: percentBelowTriGround.append(lst[1]) pixelsBelowTriGround.append(lst[2]) totalPixelsTriGround.append(lst[3]) dfP1FBTri = pd.DataFrame(columns = ['Sites', 'Percent Below Triangle All Points Threshold', 'Percent Below Triangle Ground Points Threshold', 'Pixels Below Triangle All Points Threshold', 'Pixels Below Triangle Ground Points Threshold', 'Total Pixels Triangle All Points', 'Total Pixels Triangle Ground Points']) dfP1FBTri['Sites'] = siteP1FBTriAll dfP1FBTri['Percent Below Triangle All Points Threshold'] = percentBelowTriAll dfP1FBTri['Percent Below Triangle Ground Points Threshold'] = percentBelowTriGround dfP1FBTri['Pixels Below Triangle All Points Threshold'] = pixelsBelowTriAll dfP1FBTri['Pixels Below Triangle Ground Points Threshold'] = pixelsBelowTriGround dfP1FBTri['Total Pixels Triangle All Points'] = totalPixelsTriAll dfP1FBTri['Total Pixels Triangle Ground Points'] = totalPixelsTriGround dfP1FBTri.set_index('Sites', inplace=True, drop=True) plotTriP1FB = dfP1FBTri.loc[:, ['Percent Below Triangle All Points Threshold', 'Percent Below Triangle Ground Points Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'r']) plt.title('Percent of Triangular Points Under Threshold by Site in P1FB') plt.ylabel('Percent') # TOS Interpolation Plots siteTOSTriAll = [] percentBelowTOSTriAll = [] pixelsBelowTOSTriAll = [] totalPixelsTOSTriAll = [] for lst in lidarOutputsTriAllTOS: siteTOSTriAll.append(lst[0]) percentBelowTOSTriAll.append(lst[1]) pixelsBelowTOSTriAll.append(lst[2]) totalPixelsTOSTriAll.append(lst[3]) percentBelowTOSTriGround = [] pixelsBelowTOSTriGround = [] totalPixelsTOSTriGround = [] for lst 
in lidarOutputsTriGroundTOS: percentBelowTOSTriGround.append(lst[1]) pixelsBelowTOSTriGround.append(lst[2]) totalPixelsTOSTriGround.append(lst[3]) dfTOSTri = pd.DataFrame(columns = ['Sites', 'Percent Below Triangle All Points Threshold', 'Percent Below Triangle Ground Points Threshold', 'Pixels Below Triangle All Points Threshold', 'Pixels Below Triangle Ground Points Threshold', 'Total Pixels Triangle All Points', 'Total Pixels Triangle Ground Points']) dfTOSTri['Sites'] = siteTOSTriAll dfTOSTri['Percent Below Triangle All Points Threshold'] = percentBelowTOSTriAll dfTOSTri['Percent Below Triangle Ground Points Threshold'] = percentBelowTOSTriGround dfTOSTri['Pixels Below Triangle All Points Threshold'] = pixelsBelowTOSTriAll dfTOSTri['Pixels Below Triangle Ground Points Threshold'] = pixelsBelowTOSTriGround dfTOSTri['Total Pixels Triangle All Points'] = totalPixelsTOSTriAll dfTOSTri['Total Pixels Triangle Ground Points'] = totalPixelsTOSTriGround dfTOSTri.set_index('Sites', inplace=True, drop=True) plotTriTOS = dfTOSTri.loc[:, ['Percent Below Triangle All Points Threshold', 'Percent Below Triangle Ground Points Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'r']) plt.title('Percent of Triangular Points Under Threshold by Site in TOS') plt.ylabel('Percent') #Airsheds Interpolation Plots siteAirshedsTriAll = [] percentBelowAirshedsTriAll = [] pixelsBelowAirshedsTriAll = [] totalPixelsAirshedsTriAll = [] for lst in lidarOutputsTriAllAirsheds: siteAirshedsTriAll.append(lst[0]) percentBelowAirshedsTriAll.append(lst[1]) pixelsBelowAirshedsTriAll.append(lst[2]) totalPixelsAirshedsTriAll.append(lst[3]) percentBelowAirshedsTriGround = [] pixelsBelowAirshedsTriGround = [] totalPixelsAirshedsTriGround = [] for lst in lidarOutputsTriGroundAirsheds: percentBelowAirshedsTriGround.append(lst[1]) pixelsBelowAirshedsTriGround.append(lst[2]) totalPixelsAirshedsTriGround.append(lst[3]) dfAirshedsTri = pd.DataFrame(columns = ['Sites', 'Percent Below Triangle All 
Points Threshold', 'Percent Below Triangle Ground Points Threshold', 'Pixels Below Triangle All Points Threshold', 'Pixels Below Triangle Ground Points Threshold', 'Total Pixels Triangle All Points', 'Total Pixels Triangle Ground Points']) dfAirshedsTri['Sites'] = siteAirshedsTriAll dfAirshedsTri['Percent Below Triangle All Points Threshold'] = percentBelowAirshedsTriAll dfAirshedsTri['Percent Below Triangle Ground Points Threshold'] = percentBelowAirshedsTriGround dfAirshedsTri['Pixels Below Triangle All Points Threshold'] = pixelsBelowAirshedsTriAll dfAirshedsTri['Pixels Below Triangle Ground Points Threshold'] = pixelsBelowAirshedsTriGround dfAirshedsTri['Total Pixels Triangle All Points'] = totalPixelsAirshedsTriAll dfAirshedsTri['Total Pixels Triangle Ground Points'] = totalPixelsAirshedsTriGround dfAirshedsTri.set_index('Sites', inplace=True, drop=True) plotTriAirsheds = dfAirshedsTri.loc[:, ['Percent Below Triangle All Points Threshold', 'Percent Below Triangle Ground Points Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'r']) plt.title('Percent of Triangular Points Under Threshold by Site in Airsheds') plt.ylabel('Percent') airshedsTriSummed = dfAirshedsTri.sum(axis=0) tosTriSummed = dfTOSTri.sum(axis=0) p1fbTriSummed= dfP1FBTri.sum(axis=0) siteAirsheds = 'Airsheds' airshedsTotalUnderAll = airshedsTriSummed['Pixels Below Triangle All Points Threshold'] airshedsTotalUnderGround = airshedsTriSummed['Pixels Below Triangle Ground Points Threshold'] airshedsTotalAll = airshedsTriSummed['Total Pixels Triangle All Points'] airshedsTotalGround = airshedsTriSummed['Total Pixels Triangle Ground Points'] siteTOS = 'TOS' tosTotalUnderAll = tosTriSummed['Pixels Below Triangle All Points Threshold'] tosTotalUnderGround = tosTriSummed['Pixels Below Triangle Ground Points Threshold'] tosTotalAll = tosTriSummed['Total Pixels Triangle All Points'] tosTotalGround = tosTriSummed['Total Pixels Triangle Ground Points'] sitep1fb = 'P1FB' p1fbTotalUnderAll = 
p1fbTriSummed['Pixels Below Triangle All Points Threshold'] p1fbTotalUnderGround = p1fbTriSummed['Pixels Below Triangle Ground Points Threshold'] p1fbTotalAll = p1fbTriSummed['Total Pixels Triangle All Points'] p1fbTotalGround = p1fbTriSummed['Total Pixels Triangle Ground Points'] threeBoundariesTri = ['Airsheds', 'TOS', 'P1FB'] percentUnderAll = [airshedsTotalUnderAll/airshedsTotalAll, tosTotalUnderAll/tosTotalAll, p1fbTotalUnderAll/p1fbTotalAll] percentUnderGround= [airshedsTotalUnderGround/airshedsTotalGround, tosTotalUnderGround/tosTotalGround, p1fbTotalUnderGround/p1fbTotalGround] dfTotalTriBoundary = pd.DataFrame(columns = ['Boundaries', 'Percent Pixels Under Triangle All Points Threshold', 'Percent Pixels Under Triangle Ground Points Threshold']) dfTotalTriBoundary['Boundaries'] = threeBoundariesTri dfTotalTriBoundary['Percent Pixels Under Triangle All Points Threshold'] = percentUnderAll dfTotalTriBoundary['Percent Pixels Under Triangle Ground Points Threshold'] = percentUnderGround dfTotalTriBoundary.set_index('Boundaries', inplace=True, drop=True) plotTotalTriBoundary = dfTotalTriBoundary.loc[:, ['Percent Pixels Under Triangle All Points Threshold', 'Percent Pixels Under Triangle Ground Points Threshold']].plot(kind='bar', figsize = (20, 10), color=['g', 'b']) plt.title('Percent Pixels Under Triangle Point Threshold by Boundary in 2019') plt.ylabel('Percent Pixels')
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np


def _draft_player_ids(year, n_picks):
    """Return the sort keys ('csk') of the first n_picks players of one NBA draft.

    Scrapes basketball-reference's draft page for *year*.  The first two
    table rows are header rows, so picks start at row index 2.  Raises for
    HTTP errors instead of silently parsing an error page.
    """
    url = "http://www.basketball-reference.com/draft/NBA_{0}.html".format(year)
    req = requests.get(url)
    req.raise_for_status()
    soup = BeautifulSoup(req.text, 'html.parser')
    rows = soup.select('tr')
    return [rows[i].select('td')[2]['csk'] for i in range(2, n_picks + 2)]


# First-round pick IDs, newest draft first.
group = []
# 2017 back to 2005: 30 picks per draft (same rows 2..31 as the original loop).
for year in range(2017, 2004, -1):
    group.extend(_draft_player_ids(year, 30))
    print(year)
# 2004 back to 1995: 28 picks per draft (rows 2..29).
for year in range(2004, 1994, -1):
    group.extend(_draft_player_ids(year, 28))
    print(year)
# Demonstration of Python list basics: iteration, indexing, negative
# indices, and nested (sub)list access.  The print statements use the
# single-argument parenthesized form, which prints identically under
# Python 2 and Python 3 (the original Python-2-only syntax was the defect).
numList = [2000, 2003, 2005, 2006]
stringList = ["Essential", "Python", "Code"]
mixedList = [1, 2, "three", 4]
subList = ["Python", "Phrasebook", ["Copyright", 2006]]
listList = [numList, stringList, mixedList, subList]  # a list of lists

# All items: iterate and print each value plus one.
for x in numList:
    print(x + 1)

# Specific items by index.
print(stringList[0] + ' ' + stringList[1] + ' ' + stringList[2])

# Negative indices count from the end (-2 is the second-to-last item).
print(stringList[-2])

# Accessing items in sublists: index the outer list, then the inner one.
if isinstance(subList, list):
    print(subList[2][0])
# Append a user-supplied name to the names file.  "with" guarantees the
# handle is closed (and buffered data flushed) even on error, and avoids
# shadowing the builtin name "file".
name = input("Enter a name: ").strip()
with open("P106_Names.txt", "a") as names_file:
    # Trailing newline so successive appended names don't run together on
    # one line (the original wrote the bare name, concatenating entries).
    names_file.write(name + "\n")
import logging

from colorama import Fore

# ANSI foreground colors used by the formatter.
Colors = {
    "GREEN": Fore.LIGHTGREEN_EX,
    "YELLOW": Fore.LIGHTYELLOW_EX,
    "BLUE": Fore.LIGHTBLUE_EX,
    "CYAN": Fore.CYAN,
    "RED": Fore.LIGHTRED_EX,
    "GREY": Fore.LIGHTBLACK_EX,
    "DEFAULT": Fore.RESET
}

# Color chosen for each known logging level name.
Levels = {
    "WARNING": Colors["YELLOW"],
    "INFO": Colors["CYAN"],
    "DEBUG": Colors["GREY"],
    "ERROR": Colors["RED"],
}

# Short tag printed in place of each known level name.
Format = {
    "WARNING": "warn",
    "INFO": "info",
    "DEBUG": "debug",
    "ERROR": "error",
}


class VyFormatter(logging.Formatter):
    """Formatter that replaces the record's level name with a colored short tag."""

    def __init__(self, msg, use_color=True):
        logging.Formatter.__init__(self, msg)
        self.use_color = use_color  # disable to emit plain level tags

    def format(self, record):
        """Format *record*, colorizing its level name when color is enabled.

        Levels without an entry in ``Levels`` (e.g. CRITICAL) pass through
        unchanged; the original code's unused ``levelcolor_name`` local has
        been removed.
        NOTE(review): this mutates record.levelname in place, so the colored
        tag would leak to any other handler formatting the same record --
        confirm single-handler use.
        """
        levelname = record.levelname
        if self.use_color and levelname in Levels:
            record.levelname = (Levels[levelname] + "[" +
                                Format[levelname] + "] " + Colors["DEFAULT"])
        return logging.Formatter.format(self, record)


class VyLogger(logging.Logger):
    """Logger that emits everything (DEBUG and up) to the console with colors."""

    FORMAT = "%(levelname)s %(message)s"

    def __init__(self, name):
        logging.Logger.__init__(self, name, logging.DEBUG)
        console = logging.StreamHandler()
        console.setFormatter(VyFormatter(self.FORMAT))
        self.addHandler(console)


# Make logging.getLogger() hand out VyLogger instances from now on.
logging.setLoggerClass(VyLogger)
# -*- coding: utf-8 -*-
import os
import pickle

#%%
def read_data_from_1810_09466():
    """Read the data tables of arXiv:1810.09466 shipped next to this module.

    Each element of the returned list is:
      element[0] = R (kpc)
      element[1] = vc (km/s)
      element[2] = sigma- (km/s)
      element[3] = sigma+ (km/s)
      element[4] = syst (km/s)  # appended below from the relative systematics
    """
    here = os.path.dirname(os.path.realpath(__file__))

    def _read_table(path):
        """Parse whitespace-separated float rows, skipping '#' comments and blanks."""
        with open(path, 'r') as handle:
            return [[float(token) for token in line.split()]
                    for line in handle
                    if line.strip() and not line.startswith('#')]

    data_lists = _read_table(here + '/1810_09466.dat')
    sys_rows = _read_table(here + '/1810_09466-sys-data.dat')

    # The systematics file stores relative errors; scale each by the
    # measured vc value and append one absolute systematic per data row.
    for idx, row in enumerate(data_lists):
        row.append(sys_rows[idx][1] * row[1])
    return data_lists


def pickle_results(Analysis, file_name):
    """Serialize the MCMC results held by *Analysis* into *file_name*."""
    payload = {
        'lnlikelihoods': Analysis.sampler.lnprobability,
        'chains': Analysis.sampler.chain,
        'acceptance_fractions': Analysis.sampler.acceptance_fraction,
        'variable_names': Analysis.variables_key_list,
    }
    with open(file_name, "wb") as handle:
        pickle.dump(payload, file=handle)


def load_pickle_results(file_name):
    """Inverse of pickle_results: return the stored results dictionary."""
    with open(file_name, "rb") as handle:
        return pickle.load(handle)

#%%
if __name__ == '__main__':
    pass
#! /usr/bin/env python
from stdatamodels.jwst import datamodels

from ..stpipe import Step
from . import wfss_contam

__all__ = ["WfssContamStep"]


class WfssContamStep(Step):
    """
    This Step performs contamination correction of WFSS spectra.
    """

    class_alias = "wfss_contam"

    spec = """
        save_simulated_image = boolean(default=False)  # Save full-frame simulated image
        save_contam_images = boolean(default=False)  # Save source contam estimates
        maximum_cores = option('none', 'quarter', 'half', 'all', default='none')
        skip = boolean(default=True)
    """

    reference_file_types = ['photom', 'wavelengthrange']

    def process(self, input_model, *args, **kwargs):
        """Apply WFSS contamination correction and return the corrected model.

        Loads the WAVELENGTHRANGE and PHOTOM reference files for the input,
        delegates the correction to wfss_contam.contam_corr, and optionally
        saves the intermediate simulated / contamination images.
        """
        with datamodels.open(input_model) as dm:
            # Reference file: wavelength range per grism order.
            waverange_ref = self.get_reference_file(dm, 'wavelengthrange')
            self.log.info(f'Using WAVELENGTHRANGE reference file {waverange_ref}')
            waverange_model = datamodels.WavelengthrangeModel(waverange_ref)

            # Reference file: photometric calibration.
            photom_ref = self.get_reference_file(dm, 'photom')
            self.log.info(f'Using PHOTOM reference file {photom_ref}')
            photom_model = datamodels.open(photom_ref)

            result, simul, contam = wfss_contam.contam_corr(
                dm, waverange_model, photom_model, self.maximum_cores)

            # Persist intermediate products only when explicitly requested.
            if self.save_simulated_image:
                simul_path = self.save_model(simul, suffix="simul", force=True)
                self.log.info(f'Full-frame simulated grism image saved to "{simul_path}"')
            if self.save_contam_images:
                contam_path = self.save_model(contam, suffix="contam", force=True)
                self.log.info(f'Contamination estimates saved to "{contam_path}"')

            return result
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 21 14:00:06 2020

@author: whyang
"""
# -*- coding: utf-8 -*-
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing, metrics
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D

# Columns read from the booklist spreadsheets.
_BOOKLIST_COLUMNS = ['書目系統號', '書刊名', '出版項', '出版年', '簡繁體代碼', '標題',
                     '出版社', '作者', 'ISBN', '領域別', '摘要', '索書號', '分類號']

#####################
# declare functions #
#####################
##
# remove leading and trailing characters of each value across all cells in dataframe
def trim_all_cells(df):
    """Return a copy of *df* with whitespace stripped from every string cell.

    Pure function: the caller must use the return value (the original code
    discarded it, making the call a no-op).
    """
    trim_strings = lambda x: x.strip() if isinstance(x, str) else x
    return df.applymap(trim_strings)


def heatmap(x, y, size, corr):
    """Draw two feature-correlation views: a size-coded scatter and a heatmap.

    x, y  -- melted feature-name pairs; size -- |correlation| per pair;
    corr  -- the melted correlation frame with columns ['x', 'y', 'value'].
    """
    ###
    # heatmap 1: correlation of each feature pair, shown as square size
    ##
    fig, ax = plt.subplots(figsize=(16, 14))
    # Mapping from column names to integer coordinates.
    x_labels = [v for v in sorted(x.unique())]
    y_labels = [v for v in sorted(y.unique())]
    x_to_num = {p[1]: p[0] for p in enumerate(x_labels)}
    y_to_num = {p[1]: p[0] for p in enumerate(y_labels)}
    size_scale = 300
    ax.scatter(
        x=x.map(x_to_num),       # use mapping for x
        y=y.map(y_to_num),       # use mapping for y
        s=size * size_scale,     # square sizes proportional to |correlation|
        marker='s'               # square scatterplot marker
    )
    # Show column labels on the axes.
    ax.set_xticks([x_to_num[v] for v in x_labels])
    ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right', fontsize=16)
    ax.set_yticks([y_to_num[v] for v in y_labels])
    ax.set_yticklabels(y_labels, fontsize=16)
    ax.grid(True, 'minor')
    ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
    ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
    ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
    ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
    ax.set_title('圖書書目清單 (Feature Correlation)')
    ax.set_xlabel('特徵')
    ax.set_ylabel('特徵')
    plt.show()  # display the graph

    ###
    # heatmap 2: correlation of each feature pair as a numeric heatmap
    ##
    fig, ax1 = plt.subplots(figsize=(16, 8))
    # Keyword arguments: positional DataFrame.pivot args are deprecated.
    corr = corr.pivot(index='x', columns='y', values='value')
    ax1 = sns.heatmap(corr, vmax=1, vmin=-1, cmap='coolwarm', center=0, robust=True,
                      annot=True, annot_kws={'size': 14}, fmt='.1f',
                      linewidths=0.5, square=True)
    ax1.set_xticklabels(ax1.get_yticklabels(), rotation=45, fontsize=16)
    ax1.set_title('圖書書目清單 (Feature Correlation)')
    ax1.set_xlabel('特徵')
    ax1.set_ylabel('特徵')
    plt.show()


def preprocess(base_dir):
    """Clean the raw booklist and label-encode its text columns.

    Reads  <base_dir>/booklist.xlsx, writes <base_dir>/booklist_cleansing.xlsx.
    """
    ###
    # step 1: read the booklist's content
    ##
    booklist = os.path.join(base_dir, 'booklist.xlsx')
    df = pd.read_excel(booklist, usecols=_BOOKLIST_COLUMNS)
    # BUG FIX: the original discarded trim_all_cells' return value (no-op).
    df = trim_all_cells(df)
    # Count of empty cells per column.
    print(df.isnull().sum(axis=0))

    ###
    # step 2: replace all NULL values with the sentinel string 'na'
    ##
    df.fillna('na', inplace=True)

    ###
    # step 3: label-encode every text attribute ('出版年' stays numeric)
    ##
    class_le = LabelEncoder()
    for column in [c for c in _BOOKLIST_COLUMNS if c != '出版年']:
        print(column)
        df[column] = class_le.fit_transform(df[column].astype(str))

    # Count of empty cells per column after encoding.
    print('after drop nan', df.isnull().sum(axis=0))
    df.dropna(inplace=True)  # omit rows still containing any NAN value

    # Keep the booklist after data cleansing.
    booklist = os.path.join(base_dir, 'booklist_cleansing.xlsx')
    df.to_excel(booklist, sheet_name='cleansed', index=False)


################
# main program #
################
if __name__ == '__main__':
    # Register converters to avoid warnings.
    pd.plotting.register_matplotlib_converters()
    plt.rc("figure", figsize=(16, 14))
    plt.rc("font", size=16)
    plt.rcParams['axes.unicode_minus'] = False  # fix minus-sign rendering

    # Anchor the folder path of the booklist file.
    base_dir = os.path.dirname(__file__)

    ###
    # preprocess the booklist data
    # input: booklist.xlsx / output: booklist_cleansing.xlsx
    ##
    preprocess(base_dir)

    ###
    # read the cleansed booklist
    ##
    booklist = os.path.join(base_dir, 'booklist_cleansing.xlsx')
    df = pd.read_excel(booklist, usecols=_BOOKLIST_COLUMNS)
    # BUG FIX: the original discarded trim_all_cells' return value (no-op).
    df = trim_all_cells(df)
    print(df.isnull().sum(axis=0))

    # Normalize each attribute to [-1, 1].
    sc = MinMaxScaler(feature_range=(-1, 1))
    for column in _BOOKLIST_COLUMNS:
        # BUG FIX: the original normalized '摘要' from the '索書號' column
        # (copy-paste error); each column is now scaled from itself.
        df[column] = sc.fit_transform(df[[column]])

    ###
    # step 1: observe the correlation between any two features
    ##
    corr = df[_BOOKLIST_COLUMNS].corr()
    # Unpivot so we get pairs of arrays for x and y.
    corr = pd.melt(corr.reset_index(), id_vars='index')
    corr.columns = ['x', 'y', 'value']
    heatmap(x=corr['x'], y=corr['y'], size=corr['value'].abs(), corr=corr)

    # Clustering: pick the k with the best silhouette score.
    silhouette_avgs = []
    _select = 2
    _score = 0
    _labels = []
    _cluster_centers = []
    _maxCluster = 21  # the maximal number of clusters (exclusive)
    ks = range(2, _maxCluster)
    cluster_features = ['書目系統號', '書刊名', '出版項', '出版社', '作者', '索書號']
    for k in ks:
        kmeans = KMeans(n_clusters=k)
        kmeans.fit(df[cluster_features])
        cluster_labels = kmeans.labels_
        silhouette_avg = metrics.silhouette_score(df[cluster_features], cluster_labels)
        silhouette_avgs.append(silhouette_avg)
        if silhouette_avg > _score:
            _select = k
            _score = silhouette_avg
            _labels = cluster_labels
            _cluster_centers = kmeans.cluster_centers_

    # Plot the silhouette score for k = 2 .. _maxCluster-1.
    plt.bar(ks, silhouette_avgs)
    df['cluster'] = _labels
    plt.show()
    print(silhouette_avgs)
    print('---------------------')
    # The best-scoring k was selected above.
    print('_select = ', _select)
    print('_score = ', _score)
    print('_labels = ', _labels)
    print('_cluster_centers = ', _cluster_centers)
    df['label'] = _labels

    ###
    # keep the booklist after clustering
    ##
    booklist = os.path.join(base_dir, 'booklist_cluster.xlsx')
    df.to_excel(booklist, sheet_name='cluster', index=False)

    ###
    # display figures for observing the clustering distribution
    ##
    figure_dir = os.path.join(base_dir, 'figure')
    pd.plotting.register_matplotlib_converters()
    plt.rc("figure", figsize=(16, 14))
    plt.rc("font", size=20)
    plt.rcParams['axes.unicode_minus'] = False  # fix minus-sign rendering

    # One scatter plot per feature pair, colored by cluster label; the pair
    # order reproduces the original hand-written sequence of 15 plots.
    scatter_features = ['索書號', '作者', '出版社', '出版項', '書刊名', '書目系統號']
    pairs = [(x_col, y_col)
             for i, x_col in enumerate(scatter_features)
             for y_col in scatter_features[:i]]
    for x_col, y_col in pairs:
        plt.xlabel(x_col)
        plt.ylabel(y_col)
        plt.scatter(df[x_col], df[y_col], c=df['label'])  # color = cluster label
        _figure = os.path.join(figure_dir, '{}_{}.jpg'.format(x_col, y_col))
        plt.savefig(_figure, dpi=300)
        plt.show()

###
# end of file
##
import cv2
import numpy as np

# Load the image in color mode (flag 1 = cv2.IMREAD_COLOR).
img = cv2.imread('tuoer.jpg', 1)
print(img.shape)

# Take the region of interest as a *view* (reference) into the image --
# rows 200..398, columns 480..638 -- no pixel data is copied here.
ROI = img[200:399, 480:639]
# Snapshot the ROI *before* drawing: np.copy makes an independent array.
# Note the difference between a reference and a copy.
ROIR = np.copy(ROI)

# Draw a red rectangle into the referencing ROI; because ROI shares memory
# with img, the rectangle also appears on the full image.
cv2.rectangle(ROI, (0, 0), (158, 198), (0, 0, 255))

# Show the annotated full image and the untouched ROI copy side by side.
cv2.namedWindow('SRC')
cv2.namedWindow('ROI')
cv2.imshow('SRC', img)
cv2.imshow('ROI', ROIR)
cv2.imwrite('tuoer_ROI.jpg', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
import multiprocessing import time start = time.perf_counter() def do_something(seconds): print(f'Sleeping {seconds} second...') time.sleep(seconds) print('Done sleeping') # # p1 = multiprocessing.Process(target = do_something) # p2 = multiprocessing.Process(target = do_something) # # # p1.start() # p2.start() # # p1.join() # p1.join() processes = [] for _ in range(10): p = multiprocessing.Process(target = do_something, args=[1.5]) p.start() processes.append(p) for process in processes: process.join() #wait until finish all process and go to finish time finish = time.perf_counter() print(f'Finished in {round(finish-start,2)} second(s)')
""" Overview - Data Compression In general, a data compression algorithm reduces the amount of memory (bits) required to represent a message (data). The compressed data, in turn, helps to reduce the transmission time from a sender to receiver. The sender encodes the data, and the receiver decodes the encoded data. As part of this problem, you have to implement the logic for both encoding and decoding. A data compression algorithm could be either lossy or lossless, meaning that when compressing the data, there is a loss (lossy) or no loss (lossless) of information. The Huffman Coding is a lossless data compression algorithm. Let us understand the two phases - encoding and decoding with the help of an example. A. Huffman Encoding Assume that we have a string message AAAAAAABBBCCCCCCCDDEEEEEE comprising of 25 characters to be encoded. The string message can be an unsorted one as well. We will have two phases in encoding - building the Huffman tree (a binary tree), and generating the encoded data. The following steps illustrate the Huffman encoding: Phase I - Build the Huffman Tree A Huffman tree is built in a bottom-up approach. First, determine the frequency of each character in the message. In our example, the following table presents the frequency of each character. (Unique) Character Frequency A 7 B 3 C 7 D 2 E 6 Each row in the table above can be represented as a node having a character, frequency, left child, and right child. In the next step, we will repeatedly require to pop-out the node having the lowest frequency. Therefore, build and sort a list of nodes in the order lowest to highest frequencies. Remember that a list preserves the order of elements in which they are appended. We would need our list to work as a priority queue, where a node that has lower frequency should have a higher priority to be popped-out. The following snapshot will help you visualize the example considered above: Can you come up with other data structures to create a priority queue? 
How about using a min-heap instead of a list? You are free to choose from anyone. Pop-out two nodes with the minimum frequency from the priority queue created in the above step. Create a new node with a frequency equal to the sum of the two nodes picked in the above step. This new node would become an internal node in the Huffman tree, and the two nodes would become the children. The lower frequency node becomes a left child, and the higher frequency node becomes the right child. Reinsert the newly created node back into the priority queue. Do you think that this reinsertion requires the sorting of priority queue again? If yes, then a min-heap could be a better choice due to the lower complexity of sorting the elements, every time there is an insertion. Repeat steps #3 and #4 until there is a single element left in the priority queue. The snapshots below present the building of a Huffman tree. For each node, in the Huffman tree, assign a bit 0 for left child and a 1 for right child. See the final Huffman tree for our example: Phase II - Generate the Encoded Data Based on the Huffman tree, generate unique binary code for each character of our string message. For this purpose, you'd have to traverse the path from root to the leaf node. (Unique) Character Frequency Huffman Code D 2 000 B 3 001 E 6 01 A 7 10 C 7 11 Points to Notice Notice that the whole code for any character is not a prefix of any other code. Hence, the Huffman code is called a Prefix code. Notice that the binary code is shorter for the more frequent character, and vice-versa. The Huffman code is generated in such a way that the entire string message would now require a much lesser amount of memory in binary form. Notice that each node present in the original priority queue has become a leaf node in the final Huffman tree. This way, our encoded data would be 1010101010101000100100111111111111111000000010101010101 B. 
Huffman Decoding Once we have the encoded data, and the (pointer to the root of) Huffman tree, we can easily decode the encoded data using the following steps: Declare a blank decoded string Pick a bit from the encoded data, traversing from left to right. Start traversing the Huffman tree from the root. If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if the current bit is 1. If a leaf node is encountered, append the (alphabetical) character of the leaf node to the decoded string. Repeat steps #2 and #3 until the encoded data is completely traversed. You will have to implement the logic for both encoding and decoding in the following template. Also, you will need to create the sizing schemas to present a summary. Visualization Resource Check this website to visualize the Huffman encoding for any string message - Huffman Visualization! https://people.ok.ubc.ca/ylucet/DS/Huffman.html https://classroom.udacity.com/nanodegrees/nd256-ent/parts/3e1f628f-e44f-4278-9cce-77bfa29f7ea2/modules/19cbca3b-396e-42c0-bfe2-5f92899e35fd/lessons/e6a27355-3fa2-43f1-936e-112c0a097f62/concepts/b97f3d67-ed9e-4759-8841-d13096f5cdd7 """ from hashlib import new import sys class Node: def __init__(self, char, freq, left=None, right=None, binary_value=''): self.char = char self.left = left self.right = right self.binary_value = binary_value self.freq = freq class Tree(object): def __init__(self): self.root = None def get_root(self): return self.root def print_nodes(node, val=''): new_value = val + str(node.binary_value) if(node.left): print_nodes(node.left, new_value) if(node.right): print_nodes(node.right, new_value) if(not node.left and not node.right): print(f"{node.char} -> {new_value}") def pre_order(tree): visit_order = list() root = tree.get_root() def tranverse(node): if node: visit_order.append(node.get_value()) tranverse(node.get_left_child()) tranverse(node.get_right_child()) tranverse(root) return visit_order def 
get_encoded_data(node, data): new_value = [] def tranverse(node): if node: new_value.append(str(node.binary_value)) if(node.left and data in node.left.char): tranverse(node.left) if(node.right and data in node.right.char): tranverse(node.right) tranverse(node) result = '' return result.join(new_value) def huffman_encoding(data): quantities = {} tree = Tree() if not data: return "", None for character in data: if character not in quantities: quantities[character] = 1 else: quantities[character] += 1 nodes = [] for value in quantities: nodes.append(Node(value, quantities[value])) if len(nodes) == 1: nodes[0].binary_value = 0 while len(nodes) > 1: nodes = sorted(nodes, key=lambda x: x.freq) left = nodes[0] right = nodes[1] left.binary_value = 0 right.binary_value = 1 parent = Node(left.char + right.char, left.freq + right.freq, left, right) nodes.remove(left) nodes.remove(right) nodes.append(parent) print_nodes(nodes[0]) tree.root = nodes[0] encoded_data = '' for character in data: encoded_data += get_encoded_data(tree.root, character) return encoded_data, tree def huffman_decoding(data, tree): if not data: return "" decoded_data = '' node = tree.root bit = 0 for index in range(len(data) + 1): if index < len(data): bit = int(data[index]) if node.left is None and node.right is None and len(decoded_data) < len(data): decoded_data += node.char node = tree.root if node.left and bit == 0 and bit == node.left.binary_value: node = node.left elif node.right and bit == 1 and bit == node.right.binary_value: node = node.right return decoded_data if __name__ == "__main__": codes = {} a_great_sentence = "The bird is the word" print("The size of the data is: {}\n".format(sys.getsizeof(a_great_sentence))) print("The content of the data is: {}\n".format(a_great_sentence)) encoded_data, tree = huffman_encoding(a_great_sentence) print("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2)))) print("The content of the encoded data is: 
{}\n".format(encoded_data)) # 0110111011111100111000001010110000100011010011110111111010101011001010 decoded_data = huffman_decoding(encoded_data, tree) print("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data))) print("The content of the encoded data is: {}\n".format(decoded_data)) a_great_sentence = "AAAAAAAAAAAAAAA" print("The size of the data is: {}\n".format(sys.getsizeof(a_great_sentence))) print("The content of the data is: {}\n".format(a_great_sentence)) encoded_data, tree = huffman_encoding(a_great_sentence) print("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2)))) print("The content of the encoded data is: {}\n".format(encoded_data)) #000000000000000 decoded_data = huffman_decoding(encoded_data, tree) print("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data))) print("The content of the encoded data is: {}\n".format(decoded_data)) a_great_sentence = "" print("The size of the data is: {}\n".format(sys.getsizeof(a_great_sentence))) print("The content of the data is: {}\n".format(a_great_sentence)) encoded_data, tree = huffman_encoding(a_great_sentence) print("The size of the encoded data is: {}\n".format(0 if not encoded_data else sys.getsizeof(int(encoded_data, base=2)))) print("The content of the encoded data is: {}\n".format(encoded_data)) decoded_data = huffman_decoding(encoded_data, tree) print("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data))) print("The content of the encoded data is: {}\n".format(decoded_data))
import pygame, sys, random
from pygame.locals import *
from Tile import *
from Dungeon import *
from Actor import *
from Menu import *


class Menu(pygame.Surface):
    """Base menu surface: renders a player's inventory table and character
    sheet onto an internal pygame Surface returned by each method."""

    def __init__(self, Player, xloc, yloc):
        self.WHITE = (255, 255, 255)
        self.GREEN = (0, 255, 0)
        self.BLUE = (0, 0, 255)
        self.BLACK = (0, 0, 0)
        self.Player = Player
        # X,Y coordinates of the surface itself
        self.xcoord = 0
        self.ycoord = 0
        # X,Y location on the display
        self.xloc = xloc
        self.yloc = yloc
        # BUG FIX: the original had the bare expression `self.menusurface`
        # here, which raised AttributeError on every instantiation.  The
        # attribute is now initialised; Inventory()/Character() assign it.
        self.menusurface = None

    def Inventory(self):
        """Render the player's inventory as a table; return the surface."""
        self.menusurface = pygame.Surface((500, 300))
        self.menusurface.fill(self.BLACK)
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        # Column headers.
        iteminformationnumber = fontObj.render("No.", True, self.WHITE, self.BLACK)
        iteminformationname = fontObj.render("Name", True, self.WHITE, self.BLACK)
        iteminformationdamage = fontObj.render("Damage", True, self.WHITE, self.BLACK)
        iteminformationhealth = fontObj.render("Health", True, self.WHITE, self.BLACK)
        iteminformationtype = fontObj.render("Type", True, self.WHITE, self.BLACK)
        xcoord = 50
        ycoord = 30
        itemindex = 1
        self.menusurface.blit(iteminformationnumber, (xcoord, ycoord - 20))
        self.menusurface.blit(iteminformationname, (xcoord + 50, ycoord - 20))
        self.menusurface.blit(iteminformationdamage, (xcoord + 200, ycoord - 20))
        self.menusurface.blit(iteminformationhealth, (xcoord + 300, ycoord - 20))
        self.menusurface.blit(iteminformationtype, (xcoord + 400, ycoord - 20))
        # One table row per inventory item.
        for item in self.Player.inventory.items:
            itemnumber = fontObj.render(str(itemindex), True, self.WHITE, self.BLACK)
            itemname = fontObj.render(str(item.name), True, self.WHITE, self.BLACK)
            itemdamage = fontObj.render(str(item.damage), True, self.WHITE, self.BLACK)
            itemhealth = fontObj.render(str(item.health), True, self.WHITE, self.BLACK)
            itemtype = fontObj.render(str(item.type), True, self.WHITE, self.BLACK)
            # TODO: consider deriving the x offsets from screen size
            # instead of hard-coding them.
            self.menusurface.blit(itemnumber, (xcoord, ycoord))
            self.menusurface.blit(itemname, (xcoord + 50, ycoord))
            self.menusurface.blit(itemdamage, (xcoord + 200, ycoord))
            self.menusurface.blit(itemhealth, (xcoord + 300, ycoord))
            self.menusurface.blit(itemtype, (xcoord + 400, ycoord))
            itemindex += 1
            ycoord += 25
        return self.menusurface

    def Character(self):
        """Render the character sheet (level, HP, gold, damage, inventory
        usage); return the surface."""
        self.menusurface = pygame.Surface((480, 310))
        self.menusurface.fill(self.BLACK)
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        namestring = str(self.Player.name)
        namerender = fontObj.render(namestring, True, self.WHITE, self.BLACK)
        levelstring = "Level: %s" % str(self.Player.level)
        levelrender = fontObj.render(levelstring, True, self.WHITE, self.BLACK)
        levelxpstring = "%s/%s" % (str(self.Player.XP), str(self.Player.levelXP))
        levelxprender = fontObj.render(levelxpstring, True, self.WHITE, self.BLACK)
        HPstring = "HP: %s/%s (%s+%s)" % (str(self.Player.current_health),
                                          str(self.Player.total_health),
                                          str(self.Player.base_health),
                                          str(self.Player.bonus_health))
        hprender = fontObj.render(HPstring, True, self.WHITE, self.BLACK)
        goldstring = "Gold: %s" % (str(self.Player.gold))
        goldrender = fontObj.render(goldstring, True, self.WHITE, self.BLACK)
        damagestring = "Attack Damage: %s (%s+%s)" % (str(self.Player.total_damage),
                                                      str(self.Player.base_damage),
                                                      str(self.Player.bonus_damage))
        damagerender = fontObj.render(damagestring, True, self.WHITE, self.BLACK)
        inventorystring = "Inventory: %s/%s" % (len(self.Player.inventory.items),
                                                str(self.Player.inventory.maxNumberOfItems))
        inventoryrender = fontObj.render(inventorystring, True, self.WHITE, self.BLACK)
        self.menusurface.blit(namerender, (175, 20))
        self.menusurface.blit(levelrender, (170, 45))
        self.menusurface.blit(levelxprender, (170, 70))
        self.menusurface.blit(hprender, (15, 150))
        self.menusurface.blit(damagerender, (225, 150))
        self.menusurface.blit(goldrender, (15, 200))
        self.menusurface.blit(inventoryrender, (225, 200))
        return self.menusurface


class StatsMenu(Menu):
    """Fixed-size HUD panel showing the player's live stats."""

    def __init__(self, Player):
        pygame.init()
        self.WHITE = (255, 255, 255)
        self.GREEN = (0, 255, 0)
        self.BLUE = (0, 0, 255)
        self.BLACK = (0, 0, 0)
        self.width = 384
        self.height = 160
        self.Player = Player
        # X,Y coordinates of the surface itself
        self.xcoord = 640
        self.ycoord = 800
        pygame.font.init()
        self.menusurface = pygame.Surface((self.width, self.height))
        self.menusurface.fill(self.BLACK)
        self.font = pygame.font.SysFont("times new roman", 15)
        self.Update(self.Player)

    def Update(self, player):
        """Re-render all stat lines for `player` onto the panel."""
        self.Player = player
        self.playerName = self.font.render(self.Player.name, 1, (255, 255, 0))
        self.playerHealth = self.font.render("Health: " + str(self.Player.current_health) + " / " + str(self.Player.total_health), 1, (255, 255, 0))
        self.playerDamage = self.font.render("Damage: " + str(self.Player.total_damage), 1, (255, 255, 0))
        self.playerLevel = self.font.render("Level: " + str(self.Player.level), 1, (255, 255, 0))
        self.playerXP = self.font.render("Experience: " + str(self.Player.XP) + " / " + str(self.Player.levelXP), 1, (255, 255, 0))
        self.playerGold = self.font.render("Current Gold: " + str(self.Player.gold), 1, (255, 255, 0))
        # Clear and redraw the whole panel.
        self.menusurface.fill(self.BLACK)
        self.menusurface.blit(self.playerName, (150, 0))
        self.menusurface.blit(self.playerLevel, (150, 20))
        self.menusurface.blit(self.playerXP, (120, 40))
        self.menusurface.blit(self.playerHealth, (20, 60))
        self.menusurface.blit(self.playerDamage, (20, 80))
        self.menusurface.blit(self.playerGold, (20, 100))


class InventoryMenu(Menu):
    """Grid panel drawing each inventory item's image into a 3x2 grid."""

    def __init__(self, Player, xloc, yloc):
        pygame.init()
        self.WHITE = (255, 255, 255)
        self.GREEN = (0, 255, 0)
        self.BLUE = (0, 0, 255)
        self.BLACK = (0, 0, 0)
        self.width = 150
        self.height = 100
        self.Player = Player
        # X,Y coordinates of the surface itself
        self.xcoord = 0
        self.ycoord = 0
        # X,Y location on the display
        self.xloc = xloc
        self.yloc = yloc
        pygame.font.init()
        self.menusurface = pygame.Surface((self.width, self.height))
        self.menusurface.fill(self.WHITE)
        self.menusurface.blit(pygame.image.load('images/inventoryGrid.png'), (0, 0))
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        self.menusurface.blit(fontObj.render('Inventory', True, self.WHITE, self.BLACK), (self.xcoord + 45, self.ycoord))
        self.Update()

    def Update(self):
        """Redraw the grid background and each item image in grid order."""
        self.menusurface.blit(pygame.image.load('images/inventoryGrid.png'), (0, 0))
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        self.menusurface.blit(fontObj.render('Inventory', True, self.WHITE, self.BLACK), (self.xcoord + 45, self.ycoord))
        tempx = 10
        tempy = 20
        for item in self.Player.inventory.items:
            self.menusurface.blit(item.image, (tempx, tempy))
            # Wrap to the second row after the third column.
            if tempx == 110:
                tempx = 10
                tempy = 60
            else:
                tempx = tempx + 50


class EquipmentMenu(Menu):
    """Paper-doll panel drawing equipped items at per-slot positions."""

    def __init__(self, Player, xloc, yloc):
        pygame.init()
        self.WHITE = (255, 255, 255)
        self.GREEN = (0, 255, 0)
        self.BLUE = (0, 0, 255)
        self.BLACK = (0, 0, 0)
        self.width = 200
        self.height = 200
        self.Player = Player
        # X,Y coordinates of the surface itself
        self.xcoord = 0
        self.ycoord = 0
        # X,Y location on the display
        self.xloc = xloc
        self.yloc = yloc
        pygame.font.init()
        self.menusurface = pygame.Surface((self.width, self.height))
        self.menusurface.fill(self.WHITE)
        self.menusurface.blit(pygame.image.load('images/equipmentGrid.png'), (0, 0))
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        self.menusurface.blit(fontObj.render(self.Player.name, True, self.WHITE, self.BLACK), (self.xcoord + 75, self.ycoord))
        self.Update()

    def Update(self):
        """Redraw the grid and each equipped item at its slot position."""
        self.menusurface.blit(pygame.image.load('images/equipmentGrid.png'), (0, 0))
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        self.menusurface.blit(fontObj.render(self.Player.name, True, self.WHITE, self.BLACK), (self.xcoord + 75, self.ycoord))
        for item in self.Player.equipment.items:
            if item.type == 'head':
                self.menusurface.blit(item.image, (self.xcoord + 10, self.ycoord + 10))
            if item.type == 'shoulders':
                self.menusurface.blit(item.image, (self.xcoord + 150, self.ycoord + 10))
            if item.type == 'chest':
                self.menusurface.blit(item.image, (self.xcoord + 10, self.ycoord + 60))
            if item.type == 'hands':
                self.menusurface.blit(item.image, (self.xcoord + 150, self.ycoord + 60))
            if item.type == 'legs':
                self.menusurface.blit(item.image, (self.xcoord + 10, self.ycoord + 110))
            if item.type == 'feet':
                self.menusurface.blit(item.image, (self.xcoord + 150, self.ycoord + 110))
            if item.type == '1h' or item.type == '2h':
                self.menusurface.blit(item.image, (self.xcoord + 10, self.ycoord + 160))
            if item.type == 'shield':
                self.menusurface.blit(item.image, (self.xcoord + 150, self.ycoord + 160))

    def Equipment(self):
        """Render equipped items as a table (same layout as Menu.Inventory);
        return the surface."""
        self.menusurface = pygame.Surface((500, 300))
        self.menusurface.fill(self.BLACK)
        fontObj = pygame.font.Font('freesansbold.ttf', 16)
        iteminformationnumber = fontObj.render("No.", True, self.WHITE, self.BLACK)
        iteminformationname = fontObj.render("Name", True, self.WHITE, self.BLACK)
        iteminformationdamage = fontObj.render("Damage", True, self.WHITE, self.BLACK)
        iteminformationhealth = fontObj.render("Health", True, self.WHITE, self.BLACK)
        iteminformationtype = fontObj.render("Type", True, self.WHITE, self.BLACK)
        xcoord = 50
        ycoord = 30
        itemindex = 1
        self.menusurface.blit(iteminformationnumber, (xcoord, ycoord - 20))
        self.menusurface.blit(iteminformationname, (xcoord + 50, ycoord - 20))
        self.menusurface.blit(iteminformationdamage, (xcoord + 200, ycoord - 20))
        self.menusurface.blit(iteminformationhealth, (xcoord + 300, ycoord - 20))
        self.menusurface.blit(iteminformationtype, (xcoord + 400, ycoord - 20))
        for item in self.Player.equipment.items:
            itemnumber = fontObj.render(str(itemindex), True, self.WHITE, self.BLACK)
            itemname = fontObj.render(str(item.name), True, self.WHITE, self.BLACK)
            itemdamage = fontObj.render(str(item.damage), True, self.WHITE, self.BLACK)
            itemhealth = fontObj.render(str(item.health), True, self.WHITE, self.BLACK)
            itemtype = fontObj.render(str(item.type), True, self.WHITE, self.BLACK)
            self.menusurface.blit(itemnumber, (xcoord, ycoord))
            self.menusurface.blit(itemname, (xcoord + 50, ycoord))
            self.menusurface.blit(itemdamage, (xcoord + 200, ycoord))
            self.menusurface.blit(itemhealth, (xcoord + 300, ycoord))
            self.menusurface.blit(itemtype, (xcoord + 400, ycoord))
            itemindex += 1
            ycoord += 25
        return self.menusurface
from .device import Device from .dreamevacuum import DreameVacuum from .exceptions import DeviceError, DeviceException from .protocol import Message, Utils
mat1 = [[1, 2], [3, 4]] mat2 = [[1, 2], [3, 4]] mat3 = [[0, 0], [0, 0]] for i in range(0, 2): for j in range(0, 2): mat3[i][j] = mat1[i][j] + mat2[i][j] print("Addition of two matrices") for i in range(0, 2): for j in range(0, 2): print(mat3[i][j], end = "") print() mat1 = [[7, 4], [6, 4]] mat2 = [[8, 2], [4, 3]] mat3 = [[0, 0], [0, 0]] for i in range(0, 2): for j in range(0, 2): mat3[i][j] = mat1[i][j] - mat2[i][j] print("Subtraction of two matrices") for i in range(0, 2): for j in range(0, 2): print(mat3[i][j], end = "") print() mat1 = [[7, 4], [6, 4]] mat2 = [[8, 2], [4, 3]] mat3 = [[0, 0], [0, 0]] for i in range(0, 2): for j in range(0, 2): mat3[i][j] = mat1[i][j] * mat2[i][j] print("Multiplication of two matrices") for i in range(0, 2): for j in range(0, 2): print(mat3[i][j], end = "") print()
#!/usr/bin/python -d # getmailq-by-instance.py - written by Tyzhnenko Dmitry 2013. Steal and share. # Get postfix instances queue lengths and extend SNMP OID import sys import os import re from subprocess import call __version__ = '2.0' #Place in /usr/local/bin/ #pass .1.3.6.1.4.1.2021.54 postfix-instance-mailq /usr/local/bin/getmailq-by-instance.py .1.3.6.1.4.1.2021.55 DATA={} BASE=sys.argv[1] REQ_T=sys.argv[2] REQ=sys.argv[3] def sort_nicely( l ): """ Sort the given list in the way that humans expect. """ convert = lambda text: int(text) if text.isdigit() else text alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] l.sort( key=alphanum_key ) def count_queue(queue_dir): COUNT = os.popen("find %s/ -type f | wc -l" % (queue_dir) ).read() return COUNT def get_data(oid): global DATA data = DATA[str(oid)] if type(data) == dict : count = count_queue("%s/%s/" % ( data["instance"], data["queue"] )) new_data = "%s\ninteger\n%i" % (oid,int(count)) return new_data else: return data def main(): global DATA global BASE global REQ_T global REQ POSTCONF="/usr/sbin/postconf" QUEUES = { 1: "deferred", 2: "active", 3: "incoming", 4: "hold" } INSTANCES_CONFS=os.popen(POSTCONF + ' -h multi_instance_directories').read().split() inst_path = os.popen(POSTCONF + ' -h queue_directory').read().split() inst_num=10 INSTANCES_Q_DIRS = { inst_num : inst_path[0]} inst_num=20 for conf_d in INSTANCES_CONFS: INSTANCES_Q_DIRS[inst_num] = os.popen(POSTCONF + ' -c ' + conf_d + ' -h queue_directory').read().split()[0] inst_num = inst_num + 10 #print INSTANCES_Q_DIRS #sys.exit(1) #q_num=1 for key in INSTANCES_Q_DIRS: instance = INSTANCES_Q_DIRS[key] inst_name = os.path.basename(instance) DATA["%s.1.%i" % (BASE, key)] = "%s.1.%i\ninteger\n%i\n" % (BASE, key, key) DATA["%s.2.%i" % (BASE, key)] = "%s.2.%i\nstring\n%s\n" % (BASE, key, inst_name) #print DATA #sys.exit(1) for key in QUEUES: DATA["%s.3.%i" % (BASE, key)] = "%s.3.%i\ninteger\n%i\n" % (BASE, key, key) DATA["%s.4.%i" % 
(BASE, key)] = "%s.4.%i\nstring\n%s\n" % (BASE, key, QUEUES[key]) #print DATA #sys.exit(1) for inst_key in INSTANCES_Q_DIRS: for queues_key in QUEUES: DATA["%s.5.%i.%i" % (BASE, inst_key, queues_key)] = {"instance" : INSTANCES_Q_DIRS[inst_key], "queue": QUEUES[queues_key]} for queues_key in QUEUES: for inst_key in INSTANCES_Q_DIRS: DATA["%s.6.%i.%i" % (BASE, queues_key, inst_key)] = {"instance" : INSTANCES_Q_DIRS[inst_key], "queue": QUEUES[queues_key]} #for queue in QUEUES: # #print "%s: \n\t" % queue # DATA["%s.1.%i" % (BASE,q_num)] = "%s.1.%s\ninteger\n%s\n" % (BASE, q_num, q_num) # #print DATA["%s.1.1" % BASE] # DATA["%s.2.%i" % (BASE,q_num)] = "%s.2.%s\nstring\n%s\n" % (BASE, q_num, queue) # for instance in INSTANCES_Q_DIRS: # inst_name = os.path.basename(instance) # DATA["%s.3.%i.1.%i" % (BASE,q_num,inst_num)] = "%s.3.%i.1.%i\ninteger\n%i" % (BASE,q_num,inst_num,inst_num) # DATA["%s.3.%i.2.%i" % (BASE,q_num,inst_num)] = "%s.3.%i.2.%i\nstring\n%s\n" % (BASE,q_num,inst_num,inst_name) # #COUNT = os.popen("find %s/%s/ -type f | wc -l" % (instance,queue) ).read() # #print "%s:%s \n\t" % (instance,COUNT) # #DATA["%s.3.%i.3.%i" % (BASE,q_num,inst_num)] = "%s.3.%i.3.%i\ninteger\n%i\n" % (BASE,q_num,inst_num,int(COUNT)) # DATA["%s.3.%i.3.%i" % (BASE,q_num,inst_num)] = {"instance" : instance, "queue": queue} # inst_num = inst_num+1 # #DATA["%s.3.%i.3.%i" % (BASE,q_num,inst_num-1)]["next"] = "%s.1.%i" % (BASE,q_num+1) # #print DATA["%s.1.1" % BASE] # inst_num = 1 # q_num = q_num + 1 # #print "\n" #print DATA["%s.1.1" % BASE] #sorted_keys=sorted(DATA, key=DATA.get) sorted_keys=DATA.keys() sort_nicely(sorted_keys) #sorted_keys.sort() #for k in sorted_keys: # print "%i:%s" % (sorted_keys.index(k), k) #sys.exit(0) #print DATA["%s.1.1" % BASE] if REQ_T == '-g': #print DATA[REQ] #print DATA["%s.1.1" % BASE] print get_data(str(REQ)) elif REQ_T == '-n': if REQ == BASE: #print DATA["%s.1.1" % BASE] #print get_data("%s.1.1" % BASE) print get_data(sorted_keys[0]) elif 
DATA.has_key(REQ) is False: pos = [i for i,x in enumerate(sorted_keys) if x.find(REQ) >= 0][0] #print "@%i" % pos #print "#%s" % sorted_keys[pos] #print DATA[sorted_keys[pos]] #print "$ %i > %i" % (len(sorted_keys[pos]), len(REQ)) if len(sorted_keys[pos]) > len(REQ): next = sorted_keys[pos] else: next = sorted_keys[pos+1] #next = sorted_keys[sorted_keys.index(REQ)+1] print get_data(next) #if DATA.has_key(REQ+".1") is True: # #print DATA["%s.1" % REQ] # print get_data("%s.1" % REQ) #elif DATA.has_key(REQ+".1.1") is True: # #print DATA["%s.1.1" % REQ] # print get_data("%s.1.1" % REQ) #elif DATA.has_key(REQ+".1.1.1") is True: # #print DATA["%s.1.1.1" % REQ] # print get_data("%s.1.1.1" % REQ) elif DATA.has_key(REQ) is True: next = sorted_keys[sorted_keys.index(REQ)+1] #print DATA[next] print get_data(next) # if REQ[-3] == next[-3]: # #print DATA[sorted_keys[sorted_keys.index(REQ)+1]] # print DATA[next] # elif len(REQ) else: #print DATA["%s.1.1" % BASE] print get_data("%s.1.1" % BASE) else: print """Read help please""" if __name__ == '__main__': main()
# https://leetcode.com/problems/degree-of-an-array/ """ Given a non-empty array of non-negative integers nums, the degree of this array is defined as the maximum frequency of any one of its elements. Your task is to find the smallest possible length of a (contiguous) subarray of nums, that has the same degree as nums. Example 1: Input: [1, 2, 2, 3, 1] Output: 2 Explanation: The input array has a degree of 2 because both elements 1 and 2 appear twice. Of the subarrays that have the same degree: [1, 2, 2, 3, 1], [1, 2, 2, 3], [2, 2, 3, 1], [1, 2, 2], [2, 2, 3], [2, 2] The shortest length is 2. So return 2. Example 2: Input: [1,2,2,3,1,4,2] Output: 6 Note: nums.length will be between 1 and 50,000. nums[i] will be an integer between 0 and 49,999. """ from collections import Counter from typing import List def get_len(nums: List[int], item: int) -> int: return len(nums) - nums[::-1].index(item) - nums.index(item) def find_shortest_sub_array(nums: List[int]) -> int: ct = Counter(nums) max_f = -1 min_l = float("inf") for item, f in ct.most_common(): if f >= max_f: max_f = f l = get_len(nums, item) if l < min_l: min_l = l return min_l def find_shortest_sub_array(nums: List[int]) -> int: first, count, res, degree = {}, {}, 0, 0 for i, num in enumerate(nums): first.setdefault(num, i) count[num] = count.get(num, 0) + 1 if count[num] > degree: degree = count[num] res = i - first[num] + 1 elif count[num] == degree: res = min(res, i - first[num] + 1) return res
import requests from bs4 import BeautifulSoup #brickyard = requests.get('https://www.wunderground.com/weather/us/ca/santa-barbara/93105') #brickyard_data = (brickyard.text) #brickyard_soup = BeautifulSoup(brickyard_data, 'html.parser') #brickyard_high = brickyard_soup.find("span", "_ngcontent-app-root-c5", class_="hi").text #brickyard_low = brickyard_soup.find("span", "_ngcontent-app-root-c5", class_="lo").text #brickyard_weather = brickyard_soup.find("div", "_ngcontent-app-root-c5", class_="condition-icon small-6 medium-12 columns").p.text #print(brickyard_high) #print(brickyard_low) #print(brickyard_weather) #schedule.every().day.at("8:00").do(post()) #https://forecast.weather.gov/MapClick.php?textField1=34.4694&textField2=-119.6825 #https://forecast.weather.gov/MapClick.php?lat=34.498&lon=-119.861 def get_location_url(spots): return ('https://forecast.weather.gov/MapClick.php?lat=' + spots[0] + '&lon=' + spots[1]) #return ('https://www.wunderground.com/weather/us/' + spots[0] + '/' + spots[1] + '/' + spots[2]) def get_weather(location): website = requests.get(location) data = (website.text) soup = BeautifulSoup(data, 'html.parser') day = soup.find("li", class_="forecast-tombstone") high = day.find("p", class_="temp temp-high").text day = day.next_sibling low = day.find("p", class_="temp temp-low").text weather = soup.find("p", class_="short-desc").text #high = soup.find("span", "_ngcontent-app-root-c5", class_="hi").text #low = soup.find("span", "_ngcontent-app-root-c5", class_="lo").text #weather = soup.find("div", "_ngcontent-app-root-c5", class_="condition-icon small-6 medium-12 columns").p.text weather_dict = [high,low,weather] return weather_dict #high = soup.find("p", class_="temp temp-high").text #soup.find("span", "_ngcontent-app-root-c5", class_="hi").text #low = soup.find("p", class_="temp temp-low").text
# -*- encoding: utf-8 -*- __author__ = 'fredy' import random def aleatorio_punto_inicio(): return random.randint(1,3) def personaje_aleatorio(): return random.randint(1,6) def BuscaRepetido(lista, elemento): if len(lista) !=0: for x in lista: if x[1]==elemento: return True return False #Funcion para leer tabla de costos y agregarlo a un diccionario def LeerTablaCostos(): dic={} archi=open('../Soluciones/costos.txt','r') linea="a" while linea!="": try: linea=archi.readline() lin= linea.split(',') print lin #print sin #raw_input("Espera") a= lin[0] c=lin[1] b=c[:-1] dic[a]=b except IndexError,e: pass #Este error se produce por caracter de salto de linea en el ultimo archivo archi.close() return dic #retorna en forma de diccionario todos los costos 'P3_M_K_P':'costo' def ComprobacionCosto(punto,personaje,d,costos,lista): puntos ={1:'P1',2:'P2',3:'P3'} personajes ={1:'H',2:'M',3:'C',4:'O',5:'S',6:'W'} destinos ={1:'T',2:'K',3:'S',4:'F'} cadena=""+puntos[punto]+"_"+personajes[personaje]+"_"+destinos[d]+"_P" if costos.has_key(cadena): if costos[cadena] == "1000000": return True else: if len(lista) !=0: for x in lista: if x[1]==personaje: return True #else: # return False return False else: return True def CalculoFX(poblacion,costos): puntos ={'1':'P1','2':'P2','3':'P3'} personajes ={'1':'H','2':'M','3':'C','4':'O','5':'S','6':'W'} destinos ={'1':'T','2':'K','3':'S','4':'F'} for ind in poblacion: #para cada cromosoma del numero de la poblacion calcular su fx ob=1 # posicion en cromosoma tenemos 4 porque son 4 objetivos sum = 0 for obj in ind: cadena=""+puntos[str(obj[0])]+"_"+personajes[str(obj[1])]+"_"+destinos[str(ob)]+"_P" valor = costos[cadena] print valor sum = sum + int(valor) ob = ob + 1 ind.append(sum) return poblacion def Penalizar(poblacion,costos): #las penalizaciones estarán dadas primero por repetirse y segunda por tener costos negativos puntos ={'1':'P1','2':'P2','3':'P3'} personajes ={'1':'H','2':'M','3':'C','4':'O','5':'S','6':'W'} destinos 
={'1':'T','2':'K','3':'S','4':'F'} for ind in poblacion: listaPersonajes=[] count=1 pena=0 for ob in ind: try: cadena=""+puntos[str(ob[0])]+"_"+personajes[str(ob[1])]+"_"+destinos[str(count)]+"_P" cost=costos[cadena] if cost=="1000000": print "Esta penalizando por costo infinito" print cadena pena=pena+10000 except TypeError,e: pass #lo que pasa es que metimos f(X) y no corresponde el tamaño try: listaPersonajes.append(ob[1]) count=count+1 except TypeError,e: pena=0 pass #print listaPersonajes num_rep=0 for a in listaPersonajes: num_rep=num_rep+listaPersonajes.count(a) penaliz=0 if num_rep !=4: print "Esta penalizando por repeticion" #comienza la penalizacion segun su numero de repeticiones. penaliz=num_rep * 100000 #else: # penaliz=0 #print penaliz #Una vez terminada la penalizacion por repeticion de personaje en la mision global. #Se penalizara por tener un costo negativo :D #print ind totalpena= pena + penaliz + ind[4] #print totalpena ind.append(totalpena) return poblacion def SumatoriaFX(lista): suma=0 for x in lista: suma=suma+abs(int(x[4])) return suma def Probabilidad(sum,individuos): # for x in individuos: # resta=float(sum)-float(x[4]) # p=(resta*100)/float(sum) # x.append(int(p)) for x in individuos: #resta=float(sum)-float(x[4]) p=(float(x[5])/(float(sum)))*100 x.append(int(p)) return individuos def Ruleta(lista): rul=[] for x in lista: x.reverse() rul.append(x) rul.sort() new=[] for y in rul: y.reverse() new.append(y) return new def SeleccionParejas(opciones,p): count =0 tabla_hijos=[] while count!=p: n=random.randrange(0,100) for x in opciones: if int(x[6]) <= n: tabla_hijos.append(x[:4]) count=count+1 break else: pass return opciones,tabla_hijos def BEST(hijos,prueba): for h in hijos: if h[4]<prueba[4]: prueba[0]=h[0] prueba[1]=h[1] prueba[2]=h[2] prueba[3]=h[3] prueba[4]=h[4] return prueba
import requests
from requests import ConnectTimeout, ReadTimeout
from requests.exceptions import ConnectionError
from Responses.BaseResponse import BaseResponse
from RemitaBillingService.EncryptionConfig import EncryptionConfig
from RemitaBillingService.EnvironmentConfig import EnvironmentConfig
from Responses.SdkResponseCode import SdkResponseCode


class BillPaymentNotificatiion(object):
    """Posts bill-payment notifications to the Remita billing service.

    NOTE(review): the class name carries a typo ("Notificatiion"); it is kept
    unchanged because external callers import it by this exact name.
    """

    def send_payment_notification(self, bill_notification_payload, credentials):
        """POST a payment notification and return the parsed response.

        Returns a BaseResponse built from the HTTP body on success, or an
        SDK error-response object on any failure; never raises to callers.
        """
        try:
            responder = EncryptionConfig()
            # Guard clause: reject the call early when credentials are missing.
            if not responder.credential_available(credentials):
                return responder.throw_exception(
                    code=responder.empty_credential_code,
                    message=responder.empty_credential_msg)

            env = EnvironmentConfig.set_billing_environment(credentials)
            request_headers = self.set_header(
                bill_notification_payload, env['SECRET_KEY'], env['PUBLIC_KEY'])
            timeouts = EnvironmentConfig.set_time_out(credentials)
            notification_url = env['NOTIFICATION_URL']
            # Request body field names are fixed by the remote API contract.
            body = {'rrr': bill_notification_payload.rrr,
                    'incomeAccount': bill_notification_payload.incomeAccount,
                    'debittedAccount': bill_notification_payload.debittedAccount,
                    'paymentAuthCode': bill_notification_payload.paymentAuthCode,
                    'paymentChannel': bill_notification_payload.paymentChannel,
                    'tellerName': bill_notification_payload.tellerName,
                    'branchCode': bill_notification_payload.branchCode,
                    'amountDebitted': bill_notification_payload.amountDebitted,
                    'fundingSource': bill_notification_payload.fundingSource}
            try:
                http_response = requests.post(
                    notification_url, headers=request_headers, json=body,
                    timeout=timeouts["CONNECTION_TIMEOUT"])
                return BaseResponse(http_response.content)
            except ConnectTimeout:
                return responder.throw_exception(
                    code=SdkResponseCode.CONNECTION_TIMEOUT_CODE,
                    message=SdkResponseCode.CONNECTION_TIMEOUT)
            except ValueError:
                return responder.throw_exception(
                    code=SdkResponseCode.ERROR_IN_VALUE_CODE,
                    message=SdkResponseCode.ERROR_IN_VALUE)
            except ReadTimeout:
                # Read timeouts are reported with the same code as connect
                # timeouts, matching the SDK's single-timeout contract.
                return responder.throw_exception(
                    code=SdkResponseCode.CONNECTION_TIMEOUT_CODE,
                    message=SdkResponseCode.CONNECTION_TIMEOUT)
            except ConnectionError:
                return responder.throw_exception(
                    code=SdkResponseCode.ERROR_WHILE_CONNECTING_CODE,
                    message=SdkResponseCode.ERROR_WHILE_CONNECTING)
        except Exception:
            # Catch-all boundary: any unexpected failure is mapped to the
            # generic processing-error response rather than propagating.
            return responder.throw_exception(
                code=SdkResponseCode.ERROR_PROCESSING_REQUEST_CODE,
                message=SdkResponseCode.ERROR_PROCESSING_REQUEST)

    def set_header(self, bill_payload, secret_key, public_key):
        """Build the request headers, including the SHA-512 transaction hash.

        The hash input field order (rrr, amountDebitted, fundingSource,
        debittedAccount, paymentAuthCode, secret key) is mandated by the API.
        """
        hash_source = (bill_payload.rrr + bill_payload.amountDebitted
                       + bill_payload.fundingSource + bill_payload.debittedAccount
                       + bill_payload.paymentAuthCode + secret_key)
        return {'Content-type': 'application/json',
                'publicKey': public_key,
                'transactionId': bill_payload.transactionId,
                'TXN_HASH': EncryptionConfig.sha512(hash_source)}
import re
import os


def analyze_text(text):
    """Return (word_count, sentence_count, letter_count) for *text*.

    Words are space-separated tokens; sentences are split after '.', '!'
    or '?' followed by spaces; letters are alphabetic characters only.
    """
    letter_count = sum(c.isalpha() for c in text)
    sentences = re.split(r"(?<=[.!?]) +", text)
    words = text.split(' ')
    return len(words), len(sentences), letter_count


def main():
    """Prompt for a paragraph file, then print word/sentence statistics."""
    selection = input('Please choose which file to summarize: [1] or [2]')
    # Bug fix: the original fell through on bad input and crashed with a
    # NameError (filename never assigned); keep asking until valid.
    while selection not in ('1', '2'):
        selection = input(f'try again, select [1] or [2]')
    filename = f'raw_data/paragraph_{selection}.txt'
    print(f"user selected: {selection}")

    # Bug fix: the original opened the file without ever closing it.
    with open(filename, 'r') as file:
        text = file.read()

    word_count, sentence_count, letter_count = analyze_text(text)

    print('Paragraph Analysis')
    print('-' * 60)
    print(f"Approximate Word Count: {word_count}")
    print(f"Approximate Sentence Count : {sentence_count}")
    print("Average letter count: " + str(round(letter_count / word_count, 2)))
    print(f"Average Sentence Length: {word_count / sentence_count}")


if __name__ == '__main__':
    main()