#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import json

from django.utils.translation import ugettext as _

from liboozie.oozie_api import get_oozie

from jobbrowser.apis.base_api import Api, MockDjangoRequest
from jobbrowser.apis.workflow_api import _manage_oozie_job, _filter_oozie_jobs
from jobbrowser.apis.schedule_api import MockGet
from oozie.views.dashboard import list_oozie_bundle


LOG = logging.getLogger(__name__)

try:
  from oozie.conf import OOZIE_JOBS_COUNT
  from oozie.views.dashboard import get_oozie_job_log, massaged_oozie_jobs_for_json
except Exception, e:
  LOG.exception('Some application are not enabled: %s' % e)


class BundleApi(Api):

  def apps(self, filters):
    oozie_api = get_oozie(self.user)

    kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}

    _filter_oozie_jobs(self.user, filters, kwargs)

    jobs = oozie_api.get_bundles(**kwargs)

    return {
      'apps': [{
        'id': app['id'],
        'name': app['appName'],
        'status': app['status'],
        'apiStatus': self._api_status(app['status']),
        'type': 'bundle',
        'user': app['user'],
        'progress': app['progress'],
        'queue': app['group'],
        'duration': app['durationInMillis'],
        'submitted': app['kickoffTimeInMillis'] * 1000
      } for app in massaged_oozie_jobs_for_json(jobs.jobs, self.user)['jobs']],
      'total': jobs.total
    }

  def app(self, appid):
    request = MockDjangoRequest(self.user, get=MockGet())
    response = list_oozie_bundle(request, job_id=appid)
    bundle = json.loads(response.content)

    common = {
      'id': bundle['id'],
      'name': bundle['name'],
      'status': bundle['status'],
      'apiStatus': self._api_status(bundle['status']),
      'progress': bundle['progress'],
      'type': 'bundle',
      'user': bundle['user'],
      'submitted': bundle['submitted'],
      'properties': {}
    }
    common['properties']['actions'] = bundle['actions']
    common['properties']['xml'] = ''
    common['properties']['properties'] = ''
    common['doc_url'] = bundle.get('doc_url')

    return common

  def action(self, app_ids, action):
    return _manage_oozie_job(self.user, action, app_ids)

  def logs(self, appid, app_type, log_name=None):
    request = MockDjangoRequest(self.user)
    data = get_oozie_job_log(request, job_id=appid)

    return {'logs': json.loads(data.content)['log']}

  def profile(self, appid, app_type, app_property, app_filters):
    if app_property == 'xml':
      oozie_api = get_oozie(self.user)
      workflow = oozie_api.get_bundle(jobid=appid)
      return {
        'xml': workflow.definition,
      }
    elif app_property == 'properties':
      oozie_api = get_oozie(self.user)
      workflow = oozie_api.get_bundle(jobid=appid)
      return {
        'properties': workflow.conf_dict,
      }

  def _api_status(self, status):
    if status in ['PREP', 'RUNNING', 'RUNNINGWITHERROR']:
      return 'RUNNING'
    elif status in ['PREPSUSPENDED', 'SUSPENDED', 'SUSPENDEDWITHERROR', 'PREPPAUSED', 'PAUSED', 'PAUSEDWITHERROR']:
      return 'PAUSED'
    elif status == 'SUCCEEDED':
      return 'SUCCEEDED'
    else:
      return 'FAILED'  # DONEWITHERROR, KILLED, FAILED
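For reference, a minimal standalone sketch (not part of the Hue module above) of the status normalization that _api_status performs, collapsing the many Oozie bundle states into four API-level statuses:

# Standalone sketch of the _api_status mapping above (not part of Hue).
def api_status(status):
    if status in ('PREP', 'RUNNING', 'RUNNINGWITHERROR'):
        return 'RUNNING'
    if status in ('PREPSUSPENDED', 'SUSPENDED', 'SUSPENDEDWITHERROR',
                  'PREPPAUSED', 'PAUSED', 'PAUSEDWITHERROR'):
        return 'PAUSED'
    if status == 'SUCCEEDED':
        return 'SUCCEEDED'
    return 'FAILED'  # DONEWITHERROR, KILLED, FAILED

assert api_status('RUNNINGWITHERROR') == 'RUNNING'
assert api_status('KILLED') == 'FAILED'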
from raybot import config
from raybot.model import db, POI, Location
from raybot.bot import bot
from raybot.util import h, get_user, get_map, pack_ids, uncap, tr
import csv
import re
import os
import random
import logging
from typing import List, Tuple
from datetime import datetime
from aiogram import types
from aiogram.utils.callback_data import CallbackData
from aiogram.dispatcher.filters.state import State, StatesGroup


HTML = types.ParseMode.HTML
POI_LIST_CB = CallbackData('poi', 'id')
POI_LOCATION_CB = CallbackData('poiloc', 'id')
POI_SIMILAR_CB = CallbackData('similar', 'id')
POI_EDIT_CB = CallbackData('poiedit', 'id', 'd')
POI_FULL_CB = CallbackData('plst', 'query', 'ids')
POI_HOUSE_CB = CallbackData('poih', 'house', 'floor')
POI_STAR_CB = CallbackData('poistar', 'id', 'action')
REVIEW_HOUSE_CB = CallbackData('hreview', 'house')


class PoiState(StatesGroup):
    poi = State()
    poi_list = State()


def star_sort(star: Tuple[int, bool]):
    """Sort first by whether the user starred it, then by star count."""
    if not star:
        return 0, 0
    if star[0] < 2:
        grade = 0
    elif star[0] < 5:
        grade = 1
    elif star[0] < 10:
        grade = 2
    elif star[0] < 20:
        grade = 3
    elif star[0] < 50:
        grade = 4
    else:
        grade = 5
    return 1 if star[1] else 0, grade


async def print_poi_list(user: types.User, query: str, pois: List[POI],
                         full: bool = False, shuffle: bool = True,
                         relative_to: Location = None, comment: str = None):
    max_buttons = 9 if not full else 20
    location = (await get_user(user)).location or relative_to
    if shuffle:
        if location:
            pois.sort(key=lambda p: location.distance(p.location))
        else:
            random.shuffle(pois)
    stars = await db.stars_for_poi_list(user.id, [p.id for p in pois])
    if stars:
        pois.sort(key=lambda p: star_sort(stars.get(p.id)), reverse=True)
    pois.sort(key=lambda p: bool(p.hours) and not p.hours.is_open())
    total_count = len(pois)
    all_ids = pack_ids([p.id for p in pois])
    if total_count > max_buttons:
        pois = pois[:max_buttons if full else max_buttons - 1]

    # Build the message
    content = tr('poi_list', query) + '\n'
    for i, poi in enumerate(pois, 1):
        if poi.description:
            content += h(f'\n{i}. {poi.name} — {uncap(poi.description)}')
        else:
            content += h(f'\n{i}. {poi.name}')
        if poi.hours and not poi.hours.is_open():
            content += ' 🌒'
    if total_count > max_buttons:
        if not full:
            content += '\n\n' + tr('poi_not_full', total_count=total_count)
        else:
            content += '\n\n' + tr('poi_too_many', total_count=total_count)
    if comment:
        content += '\n\n' + comment

    # Prepare the inline keyboard
    if len(pois) == 4:
        kbd_width = 2
    else:
        kbd_width = 4 if len(pois) > 9 else 3
    kbd = types.InlineKeyboardMarkup(row_width=kbd_width)
    for i, poi in enumerate(pois, 1):
        b_title = f'{i} {poi.name}'
        kbd.insert(types.InlineKeyboardButton(
            b_title, callback_data=POI_LIST_CB.new(id=poi.id)))
    if total_count > max_buttons and not full:
        try:
            callback_data = POI_FULL_CB.new(query=query[:55], ids=all_ids)
        except ValueError:
            # Too long
            callback_data = POI_FULL_CB.new(query=query[:55], ids='-')
        kbd.insert(types.InlineKeyboardButton(
            f'🔽 {config.MSG["all"]} {total_count}',
            callback_data=callback_data))

    # Make a map and send the message
    map_file = get_map([poi.location for poi in pois], ref=location)
    if not map_file:
        await bot.send_message(user.id, content, parse_mode=HTML, reply_markup=kbd)
    else:
        await bot.send_photo(
            user.id, types.InputFile(map_file.name), caption=content,
            parse_mode=HTML, reply_markup=kbd)
        map_file.close()


def relative_day(next_day):
    days = (next_day.date() - datetime.now().date()).days
    if days < 1:
        opens_day = ''
    elif days == 1:
        opens_day = tr('tomorrow')
    else:
        opens_day = tr('relative_days')[next_day.weekday()]
    return opens_day


def describe_poi(poi: POI):
    deleted = '' if not poi.delete_reason else ' 🗑️'
    result = [f'<b>{h(poi.name)}</b>{deleted}']
    if poi.description:
        result.append(h(poi.description))
    part2 = []
    if poi.hours:
        if poi.hours.is_24_7:
            part2.append('🌞 ' + tr('open_247'))
        elif poi.hours.is_open():
            closes = poi.hours.next_change()
            open_now = '☀️ ' + tr('now_open', closes.strftime("%H:%M"))
            if (closes - datetime.now()).seconds <= 3600 * 2:
                opens = poi.hours.next_change(closes)
                open_now += ' ' + tr('next_open', day=relative_day(opens).capitalize(),
                                     hour=opens.strftime("%H:%M").lstrip("0"))
            part2.append(open_now)
        else:
            opens = poi.hours.next_change()
            part2.append('🌒 ' + tr('now_closed', day=relative_day(opens),
                                    hour=opens.strftime("%H:%M").lstrip("0")))
    if poi.links and len(poi.links) > 1:
        part2.append('🌐 ' + tr('poi_links') + ': {}.'.format(', '.join(
            ['<a href="{}">{}</a>'.format(h(link[1]), h(link[0])) for link in poi.links]
        )))
    if poi.house_name or poi.address_part:
        address = ', '.join(
            [s for s in (poi.house_name, uncap(poi.floor), uncap(poi.address_part)) if s])
        part2.append(f'🏠 {address}.')
    if poi.has_wifi is True:
        part2.append('📶 ' + tr('has_wifi'))
    if poi.accepts_cards is True:
        part2.append('💳 ' + tr('accepts_cards'))
    elif poi.accepts_cards is False:
        part2.append('💰 ' + tr('no_cards'))
    if poi.phones:
        part2.append('📞 {}.'.format(', '.join(
            [re.sub(r'[^0-9+]', '', phone) for phone in poi.phones]
        )))
    if part2:
        result.append('')
        result.extend(part2)
    if poi.comment:
        result.append('')
        result.append(poi.comment)
    return '\n'.join(result)


async def make_poi_keyboard(user: types.User, poi: POI):
    buttons = []
    stars, given_star = await db.count_stars(user.id, poi.id)
    if not given_star:
        star_button = '☆ ' + tr('star')
    else:
        star_button = '⭐ ' + tr('starred')
    buttons.append(types.InlineKeyboardButton(
        star_button, callback_data=POI_STAR_CB.new(
            id=poi.id, action='del' if given_star else 'set')
    ))
    buttons.append(types.InlineKeyboardButton(
        '📍 ' + tr('loc_btn'), callback_data=POI_LOCATION_CB.new(id=poi.id)))
    buttons.append(types.InlineKeyboardButton(
        '📝 ' + tr('edit_poi'), callback_data=POI_EDIT_CB.new(id=poi.id, d='0')))
    if poi.links:
        link_dict = dict(poi.links)
        if tr('default_link') in link_dict:
            link_title = tr('open_link')
            link = link_dict[tr('default_link')]
        else:
            link_title = poi.links[0][0]
            link = poi.links[0][1]
        buttons.append(types.InlineKeyboardButton('🌐 ' + link_title, url=link))
    if poi.tag and poi.tag not in ('building', 'entrance'):
        emoji = config.TAGS['emoji'].get(poi.tag, config.TAGS['emoji']['default'])
        buttons.append(types.InlineKeyboardButton(
            emoji + ' ' + tr('similar'),
            callback_data=POI_SIMILAR_CB.new(id=poi.id)
        ))
    kbd = types.InlineKeyboardMarkup(row_width=2 if len(buttons) < 5 else 3)
    kbd.add(*buttons)
    return kbd


async def make_house_keyboard(user: types.User, poi: POI):
    if not poi.key:
        return None
    pois = await db.get_poi_by_house(poi.key)
    if not pois:
        return None

    kbd = types.InlineKeyboardMarkup().add(
        types.InlineKeyboardButton(
            tr('poi_in_house'),
            callback_data=POI_HOUSE_CB.new(house=poi.key, floor='-'))
    )
    info = await get_user(user)
    if info.is_moderator():
        # Suggest reviewing
        kbd.insert(
            types.InlineKeyboardButton(
                tr(('review', 'start')),
                callback_data=REVIEW_HOUSE_CB.new(house=poi.key))
        )
    return kbd


def log_poi(poi: POI):
    row = [datetime.now().strftime('%Y-%m-%d'), poi.id, poi.name]
    try:
        with open(os.path.join(config.LOGS, 'poi.log'), 'a') as f:
            w = csv.writer(f, delimiter='\t')
            w.writerow(row)
    except IOError:
        logging.warning('Failed to write log line: %s', row)


async def print_poi(user: types.User, poi: POI, comment: str = None, buttons: bool = True):
    log_poi(poi)
    chat_id = user.id
    content = describe_poi(poi)
    if comment:
        content += '\n\n' + h(comment)

    # Prepare photos
    photos = []
    photo_names = []
    for photo in [poi.photo_in, poi.photo_out]:
        if photo:
            path = os.path.join(config.PHOTOS, photo + '.jpg')
            if os.path.exists(path):
                file_ids = await db.find_file_ids({photo: os.path.getsize(path)})
                if photo in file_ids:
                    photos.append(file_ids[photo])
                    photo_names.append(None)
                else:
                    photos.append(types.InputFile(path))
                    photo_names.append([photo, os.path.getsize(path)])

    # Generate a map
    location = (await get_user(user)).location
    map_file = get_map([poi.location], location)
    if map_file:
        photos.append(types.InputFile(map_file.name))
        photo_names.append(None)

    # Prepare the inline keyboard
    if poi.tag == 'building':
        kbd = await make_house_keyboard(user, poi)
    else:
        kbd = None if not buttons else await make_poi_keyboard(user, poi)

    # Send the message
    if not photos:
        msg = await bot.send_message(chat_id, content, parse_mode=HTML,
                                     reply_markup=kbd, disable_web_page_preview=True)
    elif len(photos) == 1:
        msg = await bot.send_photo(chat_id, photos[0], caption=content,
                                   parse_mode=HTML, reply_markup=kbd)
    else:
        media = types.MediaGroup()
        for i, photo in enumerate(photos):
            if not kbd and i == 0:
                photo = types.input_media.InputMediaPhoto(
                    photo, caption=content, parse_mode=HTML)
            media.attach_photo(photo)
        if kbd:
            msg = await bot.send_media_group(chat_id, media=media)
            await bot.send_message(chat_id, content, parse_mode=HTML,
                                   reply_markup=kbd, disable_web_page_preview=True)
        else:
            msg = await bot.send_media_group(chat_id, media=media)
    if map_file:
        map_file.close()

    # Store file_ids for new photos
    if isinstance(msg, list):
        file_ids = [m.photo[-1].file_id for m in msg if m.photo]
    else:
        file_ids = [msg.photo[-1]] if msg.photo else []
    for i, file_id in enumerate(file_ids):
        if photo_names[i]:
            await db.store_file_id(photo_names[i][0], photo_names[i][1], file_id)


async def print_poi_by_key(user: types.User, poi_id: str, comment: str = None,
                           buttons: bool = True):
    poi = await db.get_poi_by_key(poi_id)
    if not poi:
        await bot.send_message(user.id, f'Cannot find POI with id {poi_id}')
    else:
        await print_poi(user, poi, comment=comment, buttons=buttons)
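A short sketch (not part of raybot) of how the aiogram 2.x CallbackData factories used above pack and parse callback payloads; this is also why print_poi_list() catches ValueError and falls back to ids='-':

# Sketch: packing/parsing with aiogram 2.x CallbackData (not from raybot).
from aiogram.utils.callback_data import CallbackData

poi_cb = CallbackData('poi', 'id')       # same shape as POI_LIST_CB above

packed = poi_cb.new(id='42')             # -> 'poi:42'
parsed = poi_cb.parse(packed)            # -> {'@': 'poi', 'id': '42'}

# Telegram limits callback_data to 64 bytes; aiogram raises ValueError when
# the packed string would exceed that, hence the try/except around
# POI_FULL_CB.new(...) in print_poi_list().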
import numpy as np
import math
from keras.initializers import RandomUniform
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense, Flatten, Input, Lambda, Activation
from keras.layers.merge import concatenate
from keras.models import Sequential, Model
from keras.optimizers import Adam
import keras.backend as K
import tensorflow as tf
import os

HIDDEN1_UNITS = 100
HIDDEN2_UNITS = 100


class CriticNetwork(object):
    def __init__(self, sess, state_size, action_size, gamma, tau, learning_rate):
        self.sess = sess
        self.tau = tau
        self.gamma = gamma
        self.s_dim = state_size
        self.a_dim = action_size
        self.learning_rate = learning_rate
        self.action_size = action_size
        self.stat_ops = []
        self.stat_names = []

        K.set_session(sess)

        # Now create the model
        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)
        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)
        self.out = self.model.output
        self.action_grads = tf.gradients(self.out, self.action)  # GRADIENTS for policy update

        # Setting up stats
        self.stat_ops += [tf.reduce_mean(self.out)]
        self.stat_names += ['Mean Q values']
        self.stat_ops += [tf.reduce_mean(self.action_grads)]
        self.stat_names += ['reference_action_grads']

        # TODO: fix by using a local initializer
        self.sess.run(tf.global_variables_initializer())

    def gradients(self, states, actions):
        return self.sess.run(self.action_grads, feed_dict={
            self.state: states,
            self.action: actions
        })[0]

    def predict_target(self, states, actions):
        # TODO: clipping target critic values to [-10, 100] (max possible values)
        return self.target_model.predict_on_batch([states, actions])

    def train(self, states, actions, targets):
        return self.model.train_on_batch([states, actions], targets)

    def target_train(self):
        critic_weights = self.model.get_weights()
        critic_target_weights = self.target_model.get_weights()
        for i in range(len(critic_weights)):
            critic_target_weights[i] = self.tau * critic_weights[i] + (1 - self.tau) * critic_target_weights[i]
        self.target_model.set_weights(critic_target_weights)

    def create_critic_network(self, state_size, action_dim):
        S = Input(shape=[state_size])
        A = Input(shape=[action_dim], name='action2')
        w = Dense(400, activation="relu", kernel_initializer="he_uniform")(S)
        h = concatenate([w, A])
        h3 = Dense(300, activation="relu", kernel_initializer="he_uniform")(h)
        V = Dense(1, activation='linear',
                  kernel_initializer=RandomUniform(minval=-3e-3, maxval=3e-3, seed=None))(h3)
        model = Model(inputs=[S, A], outputs=V)
        adam = Adam(lr=self.learning_rate)  # TODO add clipping gradients/huber loss possibility
        model.compile(loss='mse', optimizer=adam)
        return model, A, S

    def get_stats(self, stats_sample):
        critic_values = self.sess.run(self.stat_ops, feed_dict={
            self.state: stats_sample['state0'],
            self.action: stats_sample['action'],
        })

        names = self.stat_names[:]
        assert len(names) == len(critic_values)
        stats = dict(zip(names, critic_values))

        # critic_with_actor_values = self.sess.run(self.stats_ops, feed_dict={
        #     self.inputs: stats_sample[0],
        #     self.action: stats_sample['action'],
        # })
        #
        # for name, val in zip(names, critic_with_actor_values):
        #     stats[name + '_actor'] = val
        return stats

    def save_weights(self, filepath, overwrite=False):
        print("Saving weights")
        self.model.save_weights(filepath, overwrite=overwrite)

    def load_weights(self, filepath):
        self.model.load_weights(filepath)

    def save_target_weights(self, filepath, overwrite=False):
        print("Saving weights")
        self.target_model.save_weights(filepath, overwrite=overwrite)

    def load_target_weights(self, filepath):
        self.target_model.load_weights(filepath)

    def hard_target_update(self):
        self.target_model.set_weights(self.model.get_weights())
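A minimal numpy sketch (not part of the class above) of the soft "Polyak" update that target_train() applies to every weight tensor, θ' ← τθ + (1 − τ)θ'; each call nudges the target network a fraction tau toward the online network, which keeps DDPG's TD targets slowly moving and stable:

# Sketch of the Polyak update in target_train(), with plain numpy stand-ins.
import numpy as np

tau = 0.001
online = [np.ones((2, 2)), np.zeros(2)]   # stand-in for model.get_weights()
target = [np.zeros((2, 2)), np.ones(2)]   # stand-in for target_model.get_weights()

target = [tau * w + (1 - tau) * wt for w, wt in zip(online, target)]
# After one step each target tensor has moved 0.1% of the way to the online one.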
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************

from fabric.api import *
from fabric.contrib.files import exists
import logging
import os
import random
import sys
import string
import json, uuid, time, datetime, csv
from dlab.meta_lib import *
from dlab.actions_lib import *
import dlab.actions_lib
import re
import traceback


def ensure_pip(requisites):
    try:
        if not exists('/home/{}/.ensure_dir/pip_path_added'.format(os.environ['conf_os_user'])):
            sudo('echo PATH=$PATH:/usr/local/bin/:/opt/spark/bin/ >> /etc/profile')
            sudo('echo export PATH >> /etc/profile')
            sudo('pip install -UI pip=={} --no-cache-dir'.format(os.environ['conf_pip_version']))
            sudo('pip install -U {} --no-cache-dir'.format(requisites))
            sudo('touch /home/{}/.ensure_dir/pip_path_added'.format(os.environ['conf_os_user']))
    except:
        sys.exit(1)


def dataengine_dir_prepare(cluster_dir):
    local('mkdir -p ' + cluster_dir)


def install_pip_pkg(requisites, pip_version, lib_group):
    status = list()
    error_parser = "Could not|No matching|ImportError:|failed|EnvironmentError:"
    try:
        if pip_version == 'pip3' and not exists('/bin/pip3'):
            sudo('ln -s /bin/pip3.5 /bin/pip3')
        sudo('{} install -U pip=={} setuptools'.format(pip_version, os.environ['conf_pip_version']))
        sudo('{} install -U pip=={} --no-cache-dir'.format(pip_version, os.environ['conf_pip_version']))
        sudo('{} install --upgrade pip=={}'.format(pip_version, os.environ['conf_pip_version']))
        for pip_pkg in requisites:
            sudo('{0} install {1} --no-cache-dir 2>&1 | if ! grep -w -i -E "({2})" > /tmp/{0}install_{1}.log; then echo "" > /tmp/{0}install_{1}.log;fi'.format(pip_version, pip_pkg, error_parser))
            err = sudo('cat /tmp/{0}install_{1}.log'.format(pip_version, pip_pkg)).replace('"', "'")
            sudo('{0} freeze | if ! grep -w -i {1} > /tmp/{0}install_{1}.list; then echo "" > /tmp/{0}install_{1}.list;fi'.format(pip_version, pip_pkg))
            res = sudo('cat /tmp/{0}install_{1}.list'.format(pip_version, pip_pkg))
            changed_pip_pkg = False
            if res == '':
                changed_pip_pkg = pip_pkg.replace("_", "-").split('-')
                changed_pip_pkg = changed_pip_pkg[0]
                sudo('{0} freeze | if ! grep -w -i {1} > /tmp/{0}install_{1}.list; then echo "" > /tmp/{0}install_{1}.list;fi'.format(
                    pip_version, changed_pip_pkg))
                res = sudo('cat /tmp/{0}install_{1}.list'.format(pip_version, changed_pip_pkg))
            if res:
                res = res.lower()
                ansi_escape = re.compile(r'\x1b[^m]*m')
                ver = ansi_escape.sub('', res).split("\r\n")
                if changed_pip_pkg:
                    version = [i for i in ver if changed_pip_pkg.lower() in i][0].split('==')[1]
                else:
                    version = [i for i in ver if pip_pkg.lower() in i][0].split('==')[1]
                status.append({"group": "{}".format(lib_group), "name": pip_pkg, "version": version, "status": "installed"})
            else:
                status.append({"group": "{}".format(lib_group), "name": pip_pkg, "status": "failed", "error_message": err})
        return status
    except Exception as err:
        append_result("Failed to install {} packages".format(pip_version), str(err))
        print("Failed to install {} packages".format(pip_version))
        sys.exit(1)


def id_generator(size=10, chars=string.digits + string.ascii_letters):
    return ''.join(random.choice(chars) for _ in range(size))


def ensure_dataengine_tensorflow_jars(jars_dir):
    local('wget https://dl.bintray.com/spark-packages/maven/tapanalyticstoolkit/spark-tensorflow-connector/1.0.0-s_2.11/spark-tensorflow-connector-1.0.0-s_2.11.jar \
         -O {}spark-tensorflow-connector-1.0.0-s_2.11.jar'.format(jars_dir))


def prepare(dataengine_service_dir, yarn_dir):
    local('mkdir -p ' + dataengine_service_dir)
    local('mkdir -p ' + yarn_dir)
    local('sudo mkdir -p /opt/python/')
    result = os.path.exists(dataengine_service_dir + 'usr/')
    return result


def configuring_notebook(dataengine_service_version):
    jars_path = '/opt/' + dataengine_service_version + '/jars/'
    local("""sudo bash -c "find """ + jars_path + """ -name '*netty*' | xargs rm -f" """)


def append_result(error, exception=''):
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    with open('/root/result.json', 'a+') as f:
        text = f.read()
    if len(text) == 0:
        res = '{"error": ""}'
        with open('/root/result.json', 'w') as f:
            f.write(res)
    with open("/root/result.json") as f:
        data = json.load(f)
    if exception:
        data['error'] = data['error'] + " [Error-" + st + "]:" + error + " Exception: " + str(exception)
    else:
        data['error'] = data['error'] + " [Error-" + st + "]:" + error
    with open("/root/result.json", 'w') as f:
        json.dump(data, f)
    print(data)


def put_resource_status(resource, status, dlab_path, os_user, hostname):
    env['connection_attempts'] = 100
    keyfile = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem"
    env.key_filename = [keyfile]
    env.host_string = os_user + '@' + hostname
    sudo('python ' + dlab_path + 'tmp/resource_status.py --resource {} --status {}'.format(resource, status))


def configure_jupyter(os_user, jupyter_conf_file, templates_dir, jupyter_version, exploratory_name):
    if not exists('/home/' + os_user + '/.ensure_dir/jupyter_ensured'):
        try:
            sudo('pip2 install notebook=={} --no-cache-dir'.format(jupyter_version))
            sudo('pip2 install jupyter --no-cache-dir')
            sudo('pip3.5 install notebook=={} --no-cache-dir'.format(jupyter_version))
            sudo('pip3.5 install jupyter --no-cache-dir')
            sudo('rm -rf {}'.format(jupyter_conf_file))
            run('jupyter notebook --generate-config --config {}'.format(jupyter_conf_file))
            with cd('/home/{}'.format(os_user)):
                run('mkdir -p ~/.jupyter/custom/')
                run('echo "#notebook-container { width: auto; }" > ~/.jupyter/custom/custom.css')
            sudo('echo "c.NotebookApp.ip = \'0.0.0.0\'" >> {}'.format(jupyter_conf_file))
            sudo('echo "c.NotebookApp.base_url = \'/{0}/\'" >> {1}'.format(exploratory_name, jupyter_conf_file))
            sudo('echo c.NotebookApp.open_browser = False >> {}'.format(jupyter_conf_file))
            sudo('echo \'c.NotebookApp.cookie_secret = b"{0}"\' >> {1}'.format(id_generator(), jupyter_conf_file))
            sudo('''echo "c.NotebookApp.token = u''" >> {}'''.format(jupyter_conf_file))
            sudo('echo \'c.KernelSpecManager.ensure_native_kernel = False\' >> {}'.format(jupyter_conf_file))
            put(templates_dir + 'jupyter-notebook.service', '/tmp/jupyter-notebook.service')
            sudo("chmod 644 /tmp/jupyter-notebook.service")
            if os.environ['application'] == 'tensor':
                sudo("sed -i '/ExecStart/s|-c \"|-c \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; |g' /tmp/jupyter-notebook.service")
            elif os.environ['application'] == 'deeplearning':
                sudo("sed -i '/ExecStart/s|-c \"|-c \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:"
                     "/usr/local/cuda/lib64:/usr/lib64/openmpi/lib: ; export PYTHONPATH=/home/" + os_user +
                     "/caffe/python:/home/" + os_user + "/pytorch/build:$PYTHONPATH ; |g' /tmp/jupyter-notebook.service")
            sudo("sed -i 's|CONF_PATH|{}|' /tmp/jupyter-notebook.service".format(jupyter_conf_file))
            sudo("sed -i 's|OS_USR|{}|' /tmp/jupyter-notebook.service".format(os_user))
            sudo('\cp /tmp/jupyter-notebook.service /etc/systemd/system/jupyter-notebook.service')
            sudo('chown -R {0}:{0} /home/{0}/.local'.format(os_user))
            sudo('mkdir -p /mnt/var')
            sudo('chown {0}:{0} /mnt/var'.format(os_user))
            if os.environ['application'] == 'jupyter':
                sudo('jupyter-kernelspec remove -f python2 || echo "Such kernel doesnt exists"')
                sudo('jupyter-kernelspec remove -f python3 || echo "Such kernel doesnt exists"')
            sudo("systemctl daemon-reload")
            sudo("systemctl enable jupyter-notebook")
            sudo("systemctl start jupyter-notebook")
            sudo('touch /home/{}/.ensure_dir/jupyter_ensured'.format(os_user))
        except:
            sys.exit(1)
    else:
        try:
            sudo('sed -i "s/c.NotebookApp.base_url =.*/c.NotebookApp.base_url = \'\/{0}\/\'/" {1}'.format(exploratory_name, jupyter_conf_file))
            sudo("systemctl restart jupyter-notebook")
        except Exception as err:
            print('Error:', str(err))
            sys.exit(1)


def ensure_pyspark_local_kernel(os_user, pyspark_local_path_dir, templates_dir, spark_version):
    if not exists('/home/' + os_user + '/.ensure_dir/pyspark_local_kernel_ensured'):
        try:
            sudo('mkdir -p ' + pyspark_local_path_dir)
            sudo('touch ' + pyspark_local_path_dir + 'kernel.json')
            put(templates_dir + 'pyspark_local_template.json', '/tmp/pyspark_local_template.json')
            sudo("PYJ=`find /opt/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; sed -i 's|PY4J|'$PYJ'|g' /tmp/pyspark_local_template.json")
            sudo('sed -i "s|SP_VER|' + spark_version + '|g" /tmp/pyspark_local_template.json')
            sudo('sed -i \'/PYTHONPATH\"\:/s|\(.*\)"|\\1/home/{0}/caffe/python:/home/{0}/pytorch/build:"|\' /tmp/pyspark_local_template.json'.format(os_user))
            sudo('\cp /tmp/pyspark_local_template.json ' + pyspark_local_path_dir + 'kernel.json')
            sudo('touch /home/' + os_user + '/.ensure_dir/pyspark_local_kernel_ensured')
        except:
            sys.exit(1)


def ensure_py3spark_local_kernel(os_user, py3spark_local_path_dir, templates_dir, spark_version):
    if not exists('/home/' + os_user + '/.ensure_dir/py3spark_local_kernel_ensured'):
        try:
            sudo('mkdir -p ' + py3spark_local_path_dir)
            sudo('touch ' + py3spark_local_path_dir + 'kernel.json')
            put(templates_dir + 'py3spark_local_template.json', '/tmp/py3spark_local_template.json')
            sudo("PYJ=`find /opt/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; sed -i 's|PY4J|'$PYJ'|g' /tmp/py3spark_local_template.json")
            sudo('sed -i "s|SP_VER|' + spark_version + '|g" /tmp/py3spark_local_template.json')
            sudo('sed -i \'/PYTHONPATH\"\:/s|\(.*\)"|\\1/home/{0}/caffe/python:/home/{0}/pytorch/build:"|\' /tmp/py3spark_local_template.json'.format(os_user))
            sudo('\cp /tmp/py3spark_local_template.json ' + py3spark_local_path_dir + 'kernel.json')
            sudo('touch /home/' + os_user + '/.ensure_dir/py3spark_local_kernel_ensured')
        except:
            sys.exit(1)


def pyspark_kernel(kernels_dir, dataengine_service_version, cluster_name, spark_version, bucket, user_name, region,
                   os_user='', application='', pip_mirror='', numpy_version='1.14.3'):
    spark_path = '/opt/{0}/{1}/spark/'.format(dataengine_service_version, cluster_name)
    local('mkdir -p {0}pyspark_{1}/'.format(kernels_dir, cluster_name))
    kernel_path = '{0}pyspark_{1}/kernel.json'.format(kernels_dir, cluster_name)
    template_file = "/tmp/pyspark_dataengine-service_template.json"
    with open(template_file, 'r') as f:
        text = f.read()
    text = text.replace('CLUSTER_NAME', cluster_name)
    text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
    text = text.replace('SPARK_PATH', spark_path)
    text = text.replace('PYTHON_SHORT_VERSION', '2.7')
    text = text.replace('PYTHON_FULL_VERSION', '2.7')
    text = text.replace('PYTHON_PATH', '/usr/bin/python2.7')
    text = text.replace('DATAENGINE-SERVICE_VERSION', dataengine_service_version)
    with open(kernel_path, 'w') as f:
        f.write(text)
    local('touch /tmp/kernel_var.json')
    local("PYJ=`find /opt/{0}/{1}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {2} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{3}/caffe/python:/home/{3}/pytorch/build:\"|\' > /tmp/kernel_var.json".
          format(dataengine_service_version, cluster_name, kernel_path, os_user))
    local('sudo mv /tmp/kernel_var.json ' + kernel_path)
    get_cluster_python_version(region, bucket, user_name, cluster_name)
    with file('/tmp/python_version') as f:
        python_version = f.read()
    if python_version != '\n':
        installing_python(region, bucket, user_name, cluster_name, application, pip_mirror, numpy_version)
        local('mkdir -p {0}py3spark_{1}/'.format(kernels_dir, cluster_name))
        kernel_path = '{0}py3spark_{1}/kernel.json'.format(kernels_dir, cluster_name)
        template_file = "/tmp/pyspark_dataengine-service_template.json"
        with open(template_file, 'r') as f:
            text = f.read()
        text = text.replace('CLUSTER_NAME', cluster_name)
        text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
        text = text.replace('SPARK_PATH', spark_path)
        text = text.replace('PYTHON_SHORT_VERSION', python_version[0:3])
        text = text.replace('PYTHON_FULL_VERSION', python_version[0:3])
        text = text.replace('PYTHON_PATH', '/opt/python/python' + python_version[:5] + '/bin/python' + python_version[:3])
        text = text.replace('DATAENGINE-SERVICE_VERSION', dataengine_service_version)
        with open(kernel_path, 'w') as f:
            f.write(text)
        local('touch /tmp/kernel_var.json')
        local("PYJ=`find /opt/{0}/{1}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {2} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{3}/caffe/python:/home/{3}/pytorch/build:\"|\' > /tmp/kernel_var.json"
              .format(dataengine_service_version, cluster_name, kernel_path, os_user))
        local('sudo mv /tmp/kernel_var.json {}'.format(kernel_path))


def ensure_ciphers():
    try:
        sudo('echo -e "\nKexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256" >> /etc/ssh/sshd_config')
        sudo('echo -e "Ciphers aes256-gcm@openssh.com,aes128-gcm@openssh.com,chacha20-poly1305@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr" >> /etc/ssh/sshd_config')
        sudo('echo -e "\tKexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256" >> /etc/ssh/ssh_config')
        sudo('echo -e "\tCiphers aes256-gcm@openssh.com,aes128-gcm@openssh.com,chacha20-poly1305@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr" >> /etc/ssh/ssh_config')
        try:
            sudo('service ssh reload')
        except:
            sudo('service sshd reload')
    except Exception as err:
        traceback.print_exc()
        print('Failed to ensure ciphers: ', str(err))
        sys.exit(1)


def install_r_pkg(requisites):
    status = list()
    error_parser = "ERROR:|error:|Cannot|failed|Please run|requires"
    try:
        for r_pkg in requisites:
            if r_pkg == 'sparklyr':
                run('sudo -i R -e \'install.packages("{0}", repos="http://cran.us.r-project.org", dep=TRUE)\' 2>&1 | tee /tmp/tee.tmp; if ! grep -w -E "({1})" /tmp/tee.tmp > /tmp/install_{0}.log; then echo "" > /tmp/install_{0}.log;fi'.format(r_pkg, error_parser))
            sudo('R -e \'install.packages("{0}", repos="http://cran.us.r-project.org", dep=TRUE)\' 2>&1 | tee /tmp/tee.tmp; if ! grep -w -E "({1})" /tmp/tee.tmp > /tmp/install_{0}.log; then echo "" > /tmp/install_{0}.log;fi'.format(r_pkg, error_parser))
            err = sudo('cat /tmp/install_{0}.log'.format(r_pkg)).replace('"', "'")
            sudo('R -e \'installed.packages()[,c(3:4)]\' | if ! grep -w {0} > /tmp/install_{0}.list; then echo "" > /tmp/install_{0}.list;fi'.format(r_pkg))
            res = sudo('cat /tmp/install_{0}.list'.format(r_pkg))
            if res:
                ansi_escape = re.compile(r'\x1b[^m]*m')
                version = ansi_escape.sub('', res).split("\r\n")[0].split('"')[1]
                status.append({"group": "r_pkg", "name": r_pkg, "version": version, "status": "installed"})
            else:
                status.append({"group": "r_pkg", "name": r_pkg, "status": "failed", "error_message": err})
        return status
    except:
        return "Fail to install R packages"


def update_spark_jars(jars_dir='/opt/jars'):
    try:
        configs = sudo('find /opt/ /etc/ /usr/lib/ -name spark-defaults.conf -type f').split('\r\n')
        if exists(jars_dir):
            for conf in filter(None, configs):
                des_path = ''
                all_jars = sudo('find {0} -name "*.jar"'.format(jars_dir)).split('\r\n')
                if ('-des-' in conf):
                    des_path = '/'.join(conf.split('/')[:3])
                    all_jars = find_des_jars(all_jars, des_path)
                sudo('''sed -i '/^# Generated\|^spark.jars/d' {0}'''.format(conf))
                sudo('echo "# Generated spark.jars by DLab from {0}\nspark.jars {1}" >> {2}'
                     .format(','.join(filter(None, [jars_dir, des_path])), ','.join(all_jars), conf))
                # sudo("sed -i 's/^[[:space:]]*//' {0}".format(conf))
        else:
            print("Can't find directory {0} with jar files".format(jars_dir))
    except Exception as err:
        append_result("Failed to update spark.jars parameter", str(err))
        print("Failed to update spark.jars parameter")
        sys.exit(1)


def install_java_pkg(requisites):
    status = list()
    error_parser = "ERROR|error|No such|no such|Please run|requires|module not found"
    templates_dir = '/root/templates/'
    ivy_dir = '/opt/ivy'
    ivy_cache_dir = '{0}/cache/'.format(ivy_dir)
    ivy_settings = 'ivysettings.xml'
    dest_dir = '/opt/jars/java'
    try:
        ivy_jar = sudo('find /opt /usr -name "*ivy-{0}.jar" | head -n 1'.format(os.environ['notebook_ivy_version']))
        sudo('mkdir -p {0} {1}'.format(ivy_dir, dest_dir))
        put('{0}{1}'.format(templates_dir, ivy_settings), '{0}/{1}'.format(ivy_dir, ivy_settings), use_sudo=True)
        proxy_string = sudo('cat /etc/profile | grep http_proxy | cut -f2 -d"="')
        proxy_re = '(?P<proto>http.*)://(?P<host>[^:/ ]+):(?P<port>[0-9]*)'
        proxy_find = re.search(proxy_re, proxy_string)
        java_proxy = "export _JAVA_OPTIONS='-Dhttp.proxyHost={0} -Dhttp.proxyPort={1} \
            -Dhttps.proxyHost={0} -Dhttps.proxyPort={1}'".format(proxy_find.group('host'), proxy_find.group('port'))
        for java_pkg in requisites:
            sudo('rm -rf {0}'.format(ivy_cache_dir))
            sudo('mkdir -p {0}'.format(ivy_cache_dir))
            group, artifact, version, override = java_pkg
            print("Installing package (override: {3}): {0}:{1}:{2}".format(group, artifact, version, override))
            sudo('{8}; java -jar {0} -settings {1}/{2} -cache {3} -dependency {4} {5} {6} 2>&1 | tee /tmp/tee.tmp; \
                if ! grep -w -E "({7})" /tmp/tee.tmp > /tmp/install_{5}.log; then echo "" > /tmp/install_{5}.log;fi'
                 .format(ivy_jar, ivy_dir, ivy_settings, ivy_cache_dir, group, artifact, version, error_parser, java_proxy))
            err = sudo('cat /tmp/install_{0}.log'.format(artifact)).replace('"', "'").strip()
            sudo('find {0} -name "{1}*.jar" | head -n 1 | rev | cut -f1 -d "/" | rev | \
                if ! grep -w -i {1} > /tmp/install_{1}.list; then echo "" > /tmp/install_{1}.list;fi'.format(ivy_cache_dir, artifact))
            res = sudo('cat /tmp/install_{0}.list'.format(artifact))
            if res:
                sudo('cp -f $(find {0} -name "*.jar" | xargs) {1}'.format(ivy_cache_dir, dest_dir))
                status.append({"group": "java", "name": "{0}:{1}".format(group, artifact), "version": version, "status": "installed"})
            else:
                status.append({"group": "java", "name": "{0}:{1}".format(group, artifact), "status": "failed", "error_message": err})
        update_spark_jars()
        return status
    except Exception as err:
        append_result("Failed to install {} packages".format(requisites), str(err))
        print("Failed to install {} packages".format(requisites))
        sys.exit(1)


def get_available_r_pkgs():
    try:
        r_pkgs = dict()
        sudo('R -e \'write.table(available.packages(contriburl="http://cran.us.r-project.org/src/contrib"), file="/tmp/r.csv", row.names=F, col.names=F, sep=",")\'')
        get("/tmp/r.csv", "r.csv")
        with open('r.csv', 'rb') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                r_pkgs[row[0]] = row[1]
        return r_pkgs
    except:
        sys.exit(1)


def ensure_toree_local_kernel(os_user, toree_link, scala_kernel_path, files_dir, scala_version, spark_version):
    if not exists('/home/' + os_user + '/.ensure_dir/toree_local_kernel_ensured'):
        try:
            sudo('pip install ' + toree_link + ' --no-cache-dir')
            sudo('ln -s /opt/spark/ /usr/local/spark')
            sudo('jupyter toree install')
            sudo('mv ' + scala_kernel_path + 'lib/* /tmp/')
            put(files_dir + 'toree-assembly-0.2.0.jar', '/tmp/toree-assembly-0.2.0.jar')
            sudo('mv /tmp/toree-assembly-0.2.0.jar ' + scala_kernel_path + 'lib/')
            sudo('sed -i "s|Apache Toree - Scala|Local Apache Toree - Scala (Scala-' + scala_version +
                 ', Spark-' + spark_version + ')|g" ' + scala_kernel_path + 'kernel.json')
            sudo('touch /home/' + os_user + '/.ensure_dir/toree_local_kernel_ensured')
        except:
            sys.exit(1)


def install_ungit(os_user, notebook_name):
    if not exists('/home/{}/.ensure_dir/ungit_ensured'.format(os_user)):
        try:
            sudo('npm -g install ungit@{}'.format(os.environ['notebook_ungit_version']))
            put('/root/templates/ungit.service', '/tmp/ungit.service')
            sudo("sed -i 's|OS_USR|{}|' /tmp/ungit.service".format(os_user))
            http_proxy = run('echo $http_proxy')
            sudo("sed -i 's|PROXY_HOST|{}|g' /tmp/ungit.service".format(http_proxy))
            sudo("sed -i 's|NOTEBOOK_NAME|{}|' /tmp/ungit.service".format(notebook_name))
            sudo("mv -f /tmp/ungit.service /etc/systemd/system/ungit.service")
            run('git config --global user.name "Example User"')
            run('git config --global user.email "example@example.com"')
            run('mkdir -p ~/.git/templates/hooks')
            put('/root/scripts/git_pre_commit.py', '~/.git/templates/hooks/pre-commit', mode=0755)
            run('git config --global init.templatedir ~/.git/templates')
            run('touch ~/.gitignore')
            run('git config --global core.excludesfile ~/.gitignore')
            run('echo ".ipynb_checkpoints/" >> ~/.gitignore')
            run('echo "spark-warehouse/" >> ~/.gitignore')
            run('echo "metastore_db/" >> ~/.gitignore')
            run('echo "derby.log" >> ~/.gitignore')
            sudo('systemctl daemon-reload')
            sudo('systemctl enable ungit.service')
            sudo('systemctl start ungit.service')
            sudo('touch /home/{}/.ensure_dir/ungit_ensured'.format(os_user))
        except:
            sys.exit(1)
    else:
        try:
            sudo("sed -i 's|--rootPath=/.*-ungit|--rootPath=/{}-ungit|' /etc/systemd/system/ungit.service".format(
                notebook_name))
            http_proxy = run('echo $http_proxy')
            sudo("sed -i 's|HTTPS_PROXY=.*3128|HTTPS_PROXY={}|g' /etc/systemd/system/ungit.service".format(http_proxy))
            sudo("sed -i 's|HTTP_PROXY=.*3128|HTTP_PROXY={}|g' /etc/systemd/system/ungit.service".format(http_proxy))
            sudo('systemctl daemon-reload')
            sudo('systemctl restart ungit.service')
        except:
            sys.exit(1)
    run('git config --global http.proxy $http_proxy')
    run('git config --global https.proxy $https_proxy')


def set_git_proxy(os_user, hostname, keyfile, proxy_host):
    env['connection_attempts'] = 100
    env.key_filename = [keyfile]
    env.host_string = os_user + '@' + hostname
    run('git config --global http.proxy {}'.format(proxy_host))
    run('git config --global https.proxy {}'.format(proxy_host))


def set_mongo_parameters(client, mongo_parameters):
    for i in mongo_parameters:
        client.dlabdb.settings.insert_one({"_id": i, "value": mongo_parameters[i]})


def install_r_packages(os_user):
    if not exists('/home/' + os_user + '/.ensure_dir/r_packages_ensured'):
        sudo('R -e "install.packages(\'devtools\', repos = \'http://cran.us.r-project.org\')"')
        sudo('R -e "install.packages(\'knitr\', repos = \'http://cran.us.r-project.org\')"')
        sudo('R -e "install.packages(\'ggplot2\', repos = \'http://cran.us.r-project.org\')"')
        sudo('R -e "install.packages(c(\'devtools\',\'mplot\', \'googleVis\'), '
             'repos = \'http://cran.us.r-project.org\'); require(devtools); install_github(\'ramnathv/rCharts\')"')
        sudo('touch /home/' + os_user + '/.ensure_dir/r_packages_ensured')


def add_breeze_library_local(os_user):
    if not exists('/home/' + os_user + '/.ensure_dir/breeze_local_ensured'):
        try:
            breeze_tmp_dir = '/tmp/breeze_tmp_local/'
            jars_dir = '/opt/jars/'
            sudo('mkdir -p {}'.format(breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze_{0}/{1}/breeze_{0}-{1}.jar -O \
                {2}breeze_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-natives_{0}/{1}/breeze-natives_{0}-{1}.jar -O \
                {2}breeze-natives_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-viz_{0}/{1}/breeze-viz_{0}-{1}.jar -O \
                {2}breeze-viz_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-macros_{0}/{1}/breeze-macros_{0}-{1}.jar -O \
                {2}breeze-macros_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/scalanlp/breeze-parent_{0}/{1}/breeze-parent_{0}-{1}.jar -O \
                {2}breeze-parent_{0}-{1}.jar'.format('2.11', '0.12', breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/jfree/jfreechart/{0}/jfreechart-{0}.jar -O \
                {1}jfreechart-{0}.jar'.format('1.0.19', breeze_tmp_dir))
            sudo('wget http://central.maven.org/maven2/org/jfree/jcommon/{0}/jcommon-{0}.jar -O \
                {1}jcommon-{0}.jar'.format('1.0.24', breeze_tmp_dir))
            sudo('wget --no-check-certificate https://brunelvis.org/jar/spark-kernel-brunel-all-{0}.jar -O \
                {1}spark-kernel-brunel-all-{0}.jar'.format('2.3', breeze_tmp_dir))
            sudo('mv {0}* {1}'.format(breeze_tmp_dir, jars_dir))
            sudo('touch /home/' + os_user + '/.ensure_dir/breeze_local_ensured')
        except:
            sys.exit(1)


def configure_data_engine_service_pip(hostname, os_user, keyfile):
    env['connection_attempts'] = 100
    env.key_filename = [keyfile]
    env.host_string = os_user + '@' + hostname
    if not exists('/usr/bin/pip2'):
        sudo('ln -s /usr/bin/pip-2.7 /usr/bin/pip2')
    if not exists('/usr/bin/pip3') and sudo("python3.4 -V 2>/dev/null | awk '{print $2}'"):
        sudo('ln -s /usr/bin/pip-3.4 /usr/bin/pip3')
    elif not exists('/usr/bin/pip3') and sudo("python3.5 -V 2>/dev/null | awk '{print $2}'"):
        sudo('ln -s /usr/bin/pip-3.5 /usr/bin/pip3')
    sudo('echo "export PATH=$PATH:/usr/local/bin" >> /etc/profile')
    sudo('source /etc/profile')
    run('source /etc/profile')


def remove_rstudio_dataengines_kernel(cluster_name, os_user):
    try:
        cluster_re = ['-{}"'.format(cluster_name),
                      '-{}-'.format(cluster_name),
                      '-{}/'.format(cluster_name)]
        get('/home/{}/.Rprofile'.format(os_user), 'Rprofile')
        data = open('Rprofile').read()
        conf = filter(None, data.split('\n'))
        # Filter config from any match of cluster_name in line,
        # separated by defined symbols to avoid partial matches
        conf = [i for i in conf if not any(x in i for x in cluster_re)]
        comment_all = lambda x: x if x.startswith('#master') else '#{}'.format(x)
        uncomment = lambda x: x[1:] if not x.startswith('#master') else x
        conf = [comment_all(i) for i in conf]
        conf = [uncomment(i) for i in conf]
        last_spark = max([conf.index(i) for i in conf if 'master=' in i] or [0])
        active_cluster = conf[last_spark].split('"')[-2] if last_spark != 0 else None
        conf = conf[:last_spark] + [conf[l][1:] for l in range(last_spark, len(conf)) if conf[l].startswith("#")] \
               + [conf[l] for l in range(last_spark, len(conf)) if not conf[l].startswith('#')]
        with open('.Rprofile', 'w') as f:
            for line in conf:
                f.write('{}\n'.format(line))
        put('.Rprofile', '/home/{}/.Rprofile'.format(os_user))
        get('/home/{}/.Renviron'.format(os_user), 'Renviron')
        data = open('Renviron').read()
        conf = filter(None, data.split('\n'))
        comment_all = lambda x: x if x.startswith('#') else '#{}'.format(x)
        conf = [comment_all(i) for i in conf]
        # Filter config from any match of cluster_name in line,
        # separated by defined symbols to avoid partial matches
        conf = [i for i in conf if not any(x in i for x in cluster_re)]
        if active_cluster:
            activate_cluster = lambda x: x[1:] if active_cluster in x else x
            conf = [activate_cluster(i) for i in conf]
        else:
            last_spark = max([conf.index(i) for i in conf if 'SPARK_HOME' in i])
            conf = conf[:last_spark] + [conf[l][1:] for l in range(last_spark, len(conf)) if conf[l].startswith("#")]
        with open('.Renviron', 'w') as f:
            for line in conf:
                f.write('{}\n'.format(line))
        put('.Renviron', '/home/{}/.Renviron'.format(os_user))
        if len(conf) == 1:
            sudo('rm -f /home/{}/.ensure_dir/rstudio_dataengine_ensured'.format(os_user))
            sudo('rm -f /home/{}/.ensure_dir/rstudio_dataengine-service_ensured'.format(os_user))
        sudo('''R -e "source('/home/{}/.Rprofile')"'''.format(os_user))
    except:
        sys.exit(1)


def restart_zeppelin(creds=False, os_user='', hostname='', keyfile=''):
    if creds:
        env['connection_attempts'] = 100
        env.key_filename = [keyfile]
        env.host_string = os_user + '@' + hostname
    sudo("systemctl daemon-reload")
    sudo("systemctl restart zeppelin-notebook")


def get_spark_memory(creds=False, os_user='', hostname='', keyfile=''):
    if creds:
        with settings(host_string='{}@{}'.format(os_user, hostname)):
            mem = sudo('free -m | grep Mem | tr -s " " ":" | cut -f 2 -d ":"')
            instance_memory = int(mem)
    else:
        mem = sudo('free -m | grep Mem | tr -s " " ":" | cut -f 2 -d ":"')
        instance_memory = int(mem)
    try:
        if instance_memory > int(os.environ['dataengine_expl_instance_memory']):
            spark_memory = instance_memory - int(os.environ['dataengine_os_expl_memory'])
        else:
            spark_memory = instance_memory * int(os.environ['dataengine_os_memory']) / 100
        return spark_memory
    except Exception as err:
        print('Error:', str(err))
        return err


def replace_multi_symbols(string, symbol, symbol_cut=False):
    try:
        symbol_amount = 0
        for i in range(len(string)):
            if string[i] == symbol:
                symbol_amount = symbol_amount + 1
        while symbol_amount > 1:
            string = string.replace(symbol + symbol, symbol)
            symbol_amount = symbol_amount - 1
        if symbol_cut and string[-1] == symbol:
            string = string[:-1]
        return string
    except Exception as err:
        logging.info("Error with replacing multi symbols: " + str(err) + "\n Traceback: " +
                     traceback.print_exc(file=sys.stdout))
        append_result(str({"error": "Error with replacing multi symbols",
                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
        traceback.print_exc(file=sys.stdout)


def update_pyopenssl_lib(os_user):
    if not exists('/home/{}/.ensure_dir/pyopenssl_updated'.format(os_user)):
        try:
            if exists('/usr/bin/pip3'):
                sudo('pip3 install -U pyopenssl')
            sudo('pip2 install -U pyopenssl')
            sudo('touch /home/{}/.ensure_dir/pyopenssl_updated'.format(os_user))
        except:
            sys.exit(1)


def find_cluster_kernels():
    try:
        with settings(sudo_user='root'):
            de = [i for i in sudo('find /opt/ -maxdepth 1 -name "*-de-*" -type d | rev | '
                                  'cut -f 1 -d "/" | rev | xargs -r').split(' ') if i != '']
            des = [i for i in sudo('find /opt/ -maxdepth 2 -name "*-des-*" -type d | rev | '
                                   'cut -f 1,2 -d "/" | rev | xargs -r').split(' ') if i != '']
        return (de, des)
    except:
        sys.exit(1)


def update_zeppelin_interpreters(multiple_clusters, r_enabled, interpreter_mode='remote'):
    try:
        interpreters_config = '/opt/zeppelin/conf/interpreter.json'
        local_interpreters_config = '/tmp/interpreter.json'
        if interpreter_mode != 'remote':
            get(local_interpreters_config, local_interpreters_config)
        if multiple_clusters == 'true':
            groups = [{"class": "org.apache.zeppelin.livy.LivySparkInterpreter", "name": "spark"},
                      {"class": "org.apache.zeppelin.livy.LivyPySparkInterpreter", "name": "pyspark"},
                      {"class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter", "name": "pyspark3"},
                      {"class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter", "name": "sql"}]
            if r_enabled:
                groups.append({"class": "org.apache.zeppelin.livy.LivySparkRInterpreter", "name": "sparkr"})
        else:
            groups = [{"class": "org.apache.zeppelin.spark.SparkInterpreter", "name": "spark"},
                      {"class": "org.apache.zeppelin.spark.PySparkInterpreter", "name": "pyspark"},
                      {"class": "org.apache.zeppelin.spark.SparkSqlInterpreter", "name": "sql"}]
            if r_enabled:
                groups.append({"class": "org.apache.zeppelin.spark.SparkRInterpreter", "name": "r"})
        r_conf = {"zeppelin.R.knitr": "true", "zeppelin.R.image.width": "100%", "zeppelin.R.cmd": "R",
                  "zeppelin.R.render.options": "out.format = 'html', comment = NA, echo = FALSE, results = 'asis', message = F, warning = F"}
        if interpreter_mode != 'remote':
            data = json.loads(open(local_interpreters_config).read())
        else:
            data = json.loads(open(interpreters_config).read())
        for i in data['interpreterSettings'].keys():
            if data['interpreterSettings'][i]['group'] == 'md':
                continue
            elif data['interpreterSettings'][i]['group'] == 'sh':
                continue
            if r_enabled == 'true':
                data['interpreterSettings'][i]['properties'].update(r_conf)
            data['interpreterSettings'][i]['interpreterGroup'] = groups
        if interpreter_mode != 'remote':
            with open(local_interpreters_config, 'w') as f:
                f.write(json.dumps(data, indent=2))
            put(local_interpreters_config, local_interpreters_config)
            sudo('cp -f {0} {1}'.format(local_interpreters_config, interpreters_config))
            sudo('systemctl restart zeppelin-notebook')
        else:
            with open(interpreters_config, 'w') as f:
                f.write(json.dumps(data, indent=2))
            local('sudo systemctl restart zeppelin-notebook')
    except Exception as err:
        print('Failed to update Zeppelin interpreters', str(err))
        sys.exit(1)


def update_hosts_file(os_user):
    try:
        if not exists('/home/{}/.ensure_dir/hosts_file_updated'.format(os_user)):
            sudo('sed -i "s/^127.0.0.1 localhost/127.0.0.1 localhost localhost.localdomain/g" /etc/hosts')
            sudo('touch /home/{}/.ensure_dir/hosts_file_updated'.format(os_user))
    except Exception as err:
        print('Failed to update hosts file', str(err))
        sys.exit(1)
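A quick illustration (not part of the DLab library) of the pure-Python helper replace_multi_symbols() defined above, which collapses runs of a repeated symbol and can optionally trim a trailing one, e.g. to normalize slashes in built paths:

# Usage sketch for replace_multi_symbols() (hypothetical inputs).
print(replace_multi_symbols('/opt//jars///java', '/'))            # '/opt/jars/java'
print(replace_multi_symbols('name---x-', '-', symbol_cut=True))   # 'name-x'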
# Given an array of positive numbers and a positive number 'k', find the
# maximum sum of any contiguous subarray of size 'k'.

# Example 1:
# Input: [2, 1, 5, 1, 3, 2], k=3
# Output: 9
# Explanation: Subarray with maximum sum is [5, 1, 3].

# Example 2:
# Input: [2, 3, 4, 1, 5], k=2
# Output: 7
# Explanation: Subarray with maximum sum is [3, 4].


def maxSubarrayOfSizeK(array, k):
    '''
    Time complexity : O(N)
    Space Complexity : O(1)
    '''
    start = 0
    currentSum = 0
    maxSum = 0
    for end in range(len(array)):
        if end < k:
            currentSum += array[end]
        if end >= k:
            # Slide the window: add the new element, drop the oldest one
            currentSum += array[end]
            currentSum -= array[start]
            start += 1
        maxSum = max(maxSum, currentSum)
    return maxSum


if __name__ == '__main__':
    array = [2, 1, 5, 1, 3, 2]
    array2 = [2, 3, 4, 1, 5]
    print(f"{maxSubarrayOfSizeK(array, 3)}")
    print(f"{maxSubarrayOfSizeK(array2, 2)}")
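An optional sanity check (not in the original) that cross-validates the O(N) sliding window above against an O(N·k) brute force on the problem's own examples; it assumes maxSubarrayOfSizeK from the file above is in scope:

# Brute-force cross-check for the sliding-window solution above.
def maxSubarrayOfSizeKBrute(array, k):
    # Try every window of size k explicitly.
    return max(sum(array[i:i + k]) for i in range(len(array) - k + 1))

assert maxSubarrayOfSizeK([2, 1, 5, 1, 3, 2], 3) == maxSubarrayOfSizeKBrute([2, 1, 5, 1, 3, 2], 3) == 9
assert maxSubarrayOfSizeK([2, 3, 4, 1, 5], 2) == maxSubarrayOfSizeKBrute([2, 3, 4, 1, 5], 2) == 7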
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from itertools import filterfalse, groupby, tee
import json
import subprocess
from tempfile import NamedTemporaryFile

from .core import Benchmark
from ..utils.command import Command
from ..utils.maven import Maven


def partition(pred, iterable):
    # adapted from python's examples
    t1, t2 = tee(iterable)
    return list(filter(pred, t1)), list(filterfalse(pred, t2))


class JavaMicrobenchmarkHarnessCommand(Command):
    """ Run a Java Micro Benchmark Harness

    This assumes the binary supports the standard command line options,
    notably `-Dbenchmark_filter`
    """

    def __init__(self, build, benchmark_filter=None):
        self.benchmark_filter = benchmark_filter
        self.build = build
        self.maven = Maven()

    """ Extract benchmark names from output between "Benchmarks:" and "[INFO]".

    Assume the following output:
    ...
    Benchmarks:
            org.apache.arrow.vector.IntBenchmarks.setIntDirectly
    ...
            org.apache.arrow.vector.IntBenchmarks.setWithValueHolder
            org.apache.arrow.vector.IntBenchmarks.setWithWriter
    ...
    [INFO]
    """
    def list_benchmarks(self):
        argv = []
        if self.benchmark_filter:
            argv.append("-Dbenchmark.filter={}".format(self.benchmark_filter))
        result = self.build.list(
            *argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        lists = []
        benchmarks = False
        for line in str.splitlines(result.stdout.decode("utf-8")):
            if not benchmarks:
                if line.startswith("Benchmarks:"):
                    benchmarks = True
            else:
                if line.startswith("org.apache.arrow"):
                    lists.append(line)
                if line.startswith("[INFO]"):
                    break
        return lists

    def results(self, repetitions):
        with NamedTemporaryFile(suffix=".json") as out:
            argv = ["-Dbenchmark.runs={}".format(repetitions),
                    "-Dbenchmark.resultfile={}".format(out.name),
                    "-Dbenchmark.resultformat=json"]
            if self.benchmark_filter:
                argv.append(
                    "-Dbenchmark.filter={}".format(self.benchmark_filter)
                )

            self.build.benchmark(*argv, check=True)
            return json.load(out)


class JavaMicrobenchmarkHarnessObservation:
    """ Represents one run of a single Java Microbenchmark Harness """

    def __init__(self, benchmark, primaryMetric,
                 forks, warmupIterations, measurementIterations, **counters):
        self.name = benchmark
        self.primaryMetric = primaryMetric
        self.score = primaryMetric["score"]
        self.score_unit = primaryMetric["scoreUnit"]
        self.forks = forks
        self.warmups = warmupIterations
        self.runs = measurementIterations
        self.counters = {
            "mode": counters["mode"],
            "threads": counters["threads"],
            "warmups": warmupIterations,
            "warmupTime": counters["warmupTime"],
            "measurements": measurementIterations,
            "measurementTime": counters["measurementTime"],
            "jvmArgs": counters["jvmArgs"]
        }
        self.reciprocal_value = True if self.score_unit.endswith("/op") else False
        if self.score_unit.startswith("ops/"):
            idx = self.score_unit.find("/")
            self.normalizePerSec(self.score_unit[idx+1:])
        elif self.score_unit.endswith("/op"):
            idx = self.score_unit.find("/")
            self.normalizePerSec(self.score_unit[:idx])
        else:
            self.normalizeFactor = 1

    @property
    def value(self):
        """ Return the benchmark value."""
        val = 1 / self.score if self.reciprocal_value else self.score
        return val * self.normalizeFactor

    def normalizePerSec(self, unit):
        if unit == "ns":
            self.normalizeFactor = 1000 * 1000 * 1000
        elif unit == "us":
            self.normalizeFactor = 1000 * 1000
        elif unit == "ms":
            self.normalizeFactor = 1000
        elif unit == "min":
            self.normalizeFactor = 1 / 60
        elif unit == "hr":
            self.normalizeFactor = 1 / (60 * 60)
        elif unit == "day":
            self.normalizeFactor = 1 / (60 * 60 * 24)
        else:
            self.normalizeFactor = 1

    @property
    def unit(self):
        if self.score_unit.startswith("ops/"):
            return "items_per_second"
        elif self.score_unit.endswith("/op"):
            return "items_per_second"
        else:
            return "?"

    def __repr__(self):
        return str(self.value)


class JavaMicrobenchmarkHarness(Benchmark):
    """ A set of JavaMicrobenchmarkHarnessObservations. """

    def __init__(self, name, runs):
        """ Initialize a JavaMicrobenchmarkHarness.

        Parameters
        ----------
        name: str
              Name of the benchmark
        forks: int
        warmups: int
        runs: int
        runs: list(JavaMicrobenchmarkHarnessObservation)
              Repetitions of JavaMicrobenchmarkHarnessObservation run.

        """
        self.name = name
        self.runs = sorted(runs, key=lambda b: b.value)
        unit = self.runs[0].unit
        time_unit = "N/A"
        less_is_better = not unit.endswith("per_second")
        values = [b.value for b in self.runs]
        times = []
        # Slight kludge to extract the UserCounters for each benchmark
        counters = self.runs[0].counters
        super().__init__(name, unit, less_is_better, values,
                         time_unit, times, counters)

    def __repr__(self):
        return "JavaMicrobenchmark[name={},runs={}]".format(
            self.name, self.runs)

    @classmethod
    def from_json(cls, payload):
        def group_key(x):
            return x.name

        benchmarks = map(
            lambda x: JavaMicrobenchmarkHarnessObservation(**x), payload)
        groups = groupby(sorted(benchmarks, key=group_key), group_key)
        return [cls(k, list(bs)) for k, bs in groups]
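A worked example (not from the Arrow sources) of the unit normalization the observation class performs: a JMH score in time-per-op is inverted and scaled to items per second, while an ops-per-time score is only scaled.

# 250 ns/op is reciprocal (time per operation): (1 / 250) * 1e9 ops/sec.
score_ns_per_op = 250
print((1 / score_ns_per_op) * 1000 * 1000 * 1000)  # 4000000.0

# 4 ops/us is already a rate: 4 * 1e6 ops/sec.
score_ops_per_us = 4
print(score_ops_per_us * 1000 * 1000)              # 4000000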
import tkinter as tk from tkinter import * from tkinter import filedialog from tkinter import messagebox from Bio import Entrez import os doubleBackSlash = r'/ '[0] class Script1: def __init__(self): self.a_InputCSVFileName = "" self.b_OutputPATH = "" self.c_OutputFileName = "your_original_file" self.d_Row_Where_header_starts = 0 self.e_rowWhereDataStarts = 0 self.f_SEQUENCE_COLUMN = 0 #self.g_AVG_READS = 0 #self.i_Nbases = 0 def Add_terminal_path_slash(self, path): path_with_slash = path + "\\" # assert isinstance(path_with_slash, object) return path_with_slash def main(self): # make variables index = 1 index_2 = 1 Output_path_and_name = self.b_OutputPATH + self.c_OutputFileName + ".fasta" Output_extra_file = self.b_OutputPATH + self.c_OutputFileName + "_InputFile_with_unique_ID.txt" inputFile = open(self.a_InputCSVFileName, errors='ignore') OutputFile = open(Output_path_and_name, 'w') Output_EXTRA = open(Output_extra_file, 'w') # MAKING FASTA FILE # Read through and skip header row for c in range(0, self.d_Row_Where_header_starts): HeaderTemp = inputFile.readline() # Reading through the rows and breaking at the end of the data tempstring = "temp" while tempstring: tempstring = inputFile.readline() if tempstring == "": break templine = tempstring.splitlines() x = templine[0] rowlist = x.split(",") #SeqID = rowlist[self.g_AVG_READS] TrimmedSequence = rowlist[self.f_SEQUENCE_COLUMN] #NBases = rowlist[self.i_Nbases] OutputRows = ">" + str(index) + '\n' + TrimmedSequence + '\n' #OutputRows = ">" + str(index) + "_" + SeqID + "_" + NBases + '\n' + TrimmedSequence + '\n' index += 1 OutputFile.write(OutputRows) inputFile.close() OutputFile.close() # MAKING INPUT FILE FOR FILTERING AFTER BLAST # READ AND WRITE AGAIN THE INPUT FILE FOR THIS SCRIPT # Original File # Reading and writing headers inputFile_2 = open(self.a_InputCSVFileName, errors='ignore') AllHeadersJoined_inputFile_2 = "" for c in range(0, self.d_Row_Where_header_starts): headerTemp = inputFile_2.readline() headerLine = headerTemp.splitlines() y = headerLine[0] headerList = y.split(",") header_tab_delimited = "" for j in range(0, (len(headerList) - 1)): header_tab_delimited += headerList[j] + '\t' header_tab_delimited += headerList[(len(headerList) - 1)] AllHeadersJoined_inputFile_2 += ("FastaFileID" + '\t' + header_tab_delimited + '\n') # headerList[f_SEQUENCE_COLUMN] Output_EXTRA.write(AllHeadersJoined_inputFile_2) # Original File # Reading through the rows and breaking at the end of the data. Writing it into a # new document and adding an extra column as Fasta File ID. 
tempstring = "temp" while tempstring: tempstring = inputFile_2.readline() if tempstring == "": break templine = tempstring.splitlines() x = templine[0] rowlist = x.split(",") data_tab_delimited = "" for i in range(0, (len(rowlist) - 1)): data_tab_delimited += rowlist[i] + '\t' data_tab_delimited += rowlist[(len(rowlist) - 1)] # SequenceID = rowlist[f_SEQUENCE_COLUMN] FastaFileID = (str(index_2)) index_2 += 1 data = (FastaFileID + '\t' + data_tab_delimited + '\n') Output_EXTRA.write(data) inputFile_2.close() Output_EXTRA.close() class Script2: def __init__(self): self.email_DG = "" self.genomeAccessions_DG = "" self.OutputFilePath_DG = "" self.fileName_DG = "" def main(self): # make variables Entrez.email = self.email_DG def get_sequences_from_ID_list_line_by_line(ids): print(ids) DirectoryPath = self.OutputFilePath_DG + self.fileName_DG if not os.path.exists(DirectoryPath): os.makedirs(DirectoryPath) NameOfMyFile = DirectoryPath + '/' + self.fileName_DG + ".fasta" file_DG = open(NameOfMyFile, 'w') counter = 1 for seq_id in ids: handle = Entrez.efetch(db="nucleotide", id=seq_id, rettype="fasta", retmode="text") # Read Data AllLines = handle.readlines() # PRINT AND WRITE LANE 0 NameOfGenome_Line0 = AllLines[0].splitlines() print(NameOfGenome_Line0) str0 = ''.join(NameOfGenome_Line0) file_DG.write(str0) file_DG.write('\n') # Create a loop to read all rows in a file genome_without_header = AllLines[1:] listLength = len(genome_without_header) # print(listLength) complete_genome_string = "" for x in range(0, listLength): tempList = genome_without_header[x].splitlines() tempString = tempList[0] complete_genome_string += tempString file_DG.write(complete_genome_string) file_DG.write('\n') print(counter) counter += 1 file_DG.close() list_of_accessions = self.genomeAccessions_DG.split(',') get_sequences_from_ID_list_line_by_line(list_of_accessions) class Script3: def __init__(self): self.Path_To_NCBI_BLAST_Bin_Directory = "" self.Path_To_Database_Fasta_File = "" self.Data_Base_Type = "" def main(self): # CREATE DATABASE FOR RUNNING BLAST IN WINDOWS CreateDataBase = self.Path_To_NCBI_BLAST_Bin_Directory + "makeblastdb -in " + self.Path_To_Database_Fasta_File + " -dbtype " + self.Data_Base_Type print(CreateDataBase) os.system(CreateDataBase) class Script4: def __init__(self): self.x_Path_to_NCBI_Directory_BF = "" self.y_DC_MegaBlast_BF = 0 self.a_Data_Base_fasta = "" self.b_Query_fasta_file = "" self.c_Output_Path_ = "" # BLAST PARAMETERS self.d_Output_file_name = "_BLAST" self.e_word_size = "20" self.f_Percentage_identity = "70" self.g_number_of_threads = "4" self.i_OutputFormat = "6" # FIlTERING PARAMETERS self.j_Percentage_overlap = "0.8" self.k_bitscore = "50" self.l_InputFile_with_unique_ID = "" def main(self): # make variables Task_megaBlast = "" if self.y_DC_MegaBlast_BF == 1: Task_megaBlast = " -task dc-megablast " print(Task_megaBlast) CommandLine_BF = (self.x_Path_to_NCBI_Directory_BF + "blastn " + Task_megaBlast + " -db " + self.a_Data_Base_fasta + " -query " + self.b_Query_fasta_file + " -out " + self.c_Output_Path_ + self.d_Output_file_name + "BLAST.txt" + " -word_size " + self.e_word_size + " -perc_identity " + self.f_Percentage_identity + " -num_threads " + self.g_number_of_threads + " -outfmt " + '"' + self.i_OutputFormat + ' qseqid sacc stitle qseq sseq nident mismatch pident length evalue bitscore qstart qend sstart send gapopen gaps qlen slen"') print(CommandLine_BF) os.system(CommandLine_BF) 
        ###############################################################################
        # PART 2
        # FILTERING BLAST OUTPUT FILE

        # BLAST FILTERING PARAMETERS: column indices matching the -outfmt list
        qseqid = 0
        sacc = 1
        stitle = 2
        qseq = 3
        sseq = 4
        nident = 5
        mismatch = 6
        pident = 7
        length = 8
        evalue = 9
        bitscore = 10
        qstart = 11
        qend = 12
        sstart = 13
        send = 14
        gapopen = 15
        gaps = 16
        qlen = 17
        slen = 18
        PercentageOverlapINT = 19

        BLAST_OUTPUT_FILE_BF = self.c_Output_Path_ + self.d_Output_file_name + "BLAST.txt"
        file_BF = open(BLAST_OUTPUT_FILE_BF, 'r')
        filtered_file_BF = self.c_Output_Path_ + self.d_Output_file_name + "_filtered.txt"
        filtered_files_BF = open(filtered_file_BF, "w+")

        # headers
        AllHeadersFromFilteredFile_BF = ("qseqid" + '\t' + "sacc" + '\t' + "stitle" + '\t' +
                                         "qseq" + '\t' + "sseq" + '\t' + "nident" + '\t' +
                                         "mismatch" + '\t' + "pident" + '\t' + "length" + '\t' +
                                         "evalue" + '\t' + "bitscore" + '\t' + "qstart" + '\t' +
                                         "qend" + '\t' + "sstart" + '\t' + "send" + '\t' +
                                         "gapopen" + '\t' + "gaps" + '\t' + "qlen" + '\t' +
                                         "slen" + '\t' + "PercentageOverlap" + '\n')
        filtered_files_BF.write(AllHeadersFromFilteredFile_BF)

        # Reading files
        tempstring = "temp"
        while tempstring:
            tempstring = file_BF.readline()
            if tempstring == "":
                break
            templine = tempstring.splitlines()
            x = templine[0]
            rowlist = x.split('\t')
            Querylength_BF = int(rowlist[qlen])
            Length_BF = int(rowlist[length])
            SubjectLength_BF = int(rowlist[slen])
            min_length_BF = min(Querylength_BF, SubjectLength_BF)
            PercentageOverlap = (Length_BF / min_length_BF)
            rowlist.append(str(PercentageOverlap))
            columns = (rowlist[qseqid] + '\t' + rowlist[sacc] + '\t' + rowlist[stitle] + '\t' +
                       rowlist[qseq] + '\t' + rowlist[sseq] + '\t' + rowlist[nident] + '\t' +
                       rowlist[mismatch] + '\t' + rowlist[pident] + '\t' + rowlist[length] + '\t' +
                       rowlist[evalue] + '\t' + rowlist[bitscore] + '\t' + rowlist[qstart] + '\t' +
                       rowlist[qend] + '\t' + rowlist[sstart] + '\t' + rowlist[send] + '\t' +
                       rowlist[gapopen] + '\t' + rowlist[gaps] + '\t' + rowlist[qlen] + '\t' +
                       rowlist[slen] + '\t' + rowlist[PercentageOverlapINT] + '\n')
            # FILTERING STEP 1 <<<<< DEFAULT "Percentage overlap >= 0.8" >>>>> AND <<<<< DEFAULT "BitScore >= 50" >>>>>
            # HANDLES
            if float(rowlist[PercentageOverlapINT]) >= float(self.j_Percentage_overlap):
                if float(rowlist[bitscore]) >= int(self.k_bitscore):
                    filtered_files_BF.write(columns)
        file_BF.close()
        filtered_files_BF.close()

        # TO BE CHECKED
        filtered_files_2_BF = open(filtered_file_BF, 'r')
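
        # Worked example (added): for a hit with length = 90, qlen = 100 and
        # slen = 5000, PercentageOverlap = 90 / min(100, 5000) = 0.9, so with
        # the defaults (0.8, 50) the row is kept as long as its bitscore is
        # at least 50.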
"sacc" + '\t' + "stitle" + '\t' + "qseq" + '\t' + "sseq" + '\t' + "nident" + '\t' + "mismatch" + '\t' + "pident" + '\t' + "length" + '\t' + "evalue" + '\t' + "bitscore" + '\t' + "qstart" + '\t' + "qend" + '\t' + "sstart" + '\t' + "send" + '\t' + "gapopen" + '\t' + "gaps" + '\t' + "qlen" + '\t' + "slen" + '\t' + "PercentageOverlap" + '\n') filtered_files_part2_BF.write(AllHeadersFromFilteredFile_BF) # Reading files lst_lst = [] counter = 0 tempstring = "temp" while tempstring: tempstring = filtered_files_2_BF.readline() if tempstring == "": break if counter != 0: templine = tempstring.splitlines() x = templine[0] rowlist_2 = x.split('\t') lst_lst.append(rowlist_2) columns = (rowlist_2[qseqid] + '\t' + rowlist_2[sacc] + '\t' + rowlist_2[stitle] + '\t' + rowlist_2[qseq] + '\t' + rowlist_2[sseq] + '\t' + rowlist_2[nident] + '\t' + rowlist_2[ mismatch] + '\t' + rowlist_2[pident] + '\t' + rowlist_2[length] + '\t' + rowlist_2[evalue] + '\t' + rowlist_2[bitscore] + '\t' + rowlist_2[qstart] + '\t' + rowlist_2[qend] + '\t' + rowlist_2[ sstart] + '\t' + rowlist_2[send] + '\t' + rowlist_2[gapopen] + '\t' + rowlist_2[gaps] + '\t' + rowlist_2[qlen] + '\t' + rowlist_2[slen] + '\t' + rowlist_2[PercentageOverlapINT] + '\n') counter += 1 # READ THE NEW FILE AND ENTER THE LOOP # SORTING list.sort(lst_lst, key=lambda DataRow_0: float(DataRow_0[pident]), reverse=True) list.sort(lst_lst, key=lambda DataRow_2: float(DataRow_2[PercentageOverlapINT]), reverse=True) list.sort(lst_lst, key=lambda DataRow_1: float(DataRow_1[bitscore]), reverse=True) list.sort(lst_lst, key=lambda DataRow_3: DataRow_3[qseqid]) Dictionary_lst_lst = {} # Reading list_list length = len(lst_lst) for i in range(length): temp_rowlist = lst_lst[i] temp_rowlist_length = len(temp_rowlist) if temp_rowlist_length != 20: print("length of tem_row_list_is:") print(temp_rowlist_length) continue row_string_for_output = "" Variable_QSeqID = temp_rowlist[qseqid] try: for j in range(temp_rowlist_length - 1): temp_string = temp_rowlist[j] row_string_for_output += (temp_string + "\t") row_string_for_output += temp_rowlist[temp_rowlist_length - 1] row_string_for_output += "\n" except IndexError: print("Exception thrown") print(row_string_for_output) # Tuple TheTuple_rowlist = (Variable_QSeqID, row_string_for_output) if Variable_QSeqID in Dictionary_lst_lst: print("key already in dictionary") else: Dictionary_lst_lst[Variable_QSeqID] = row_string_for_output filtered_files_part2_BF.write(row_string_for_output) filtered_files_part2_BF.close() filtered_files_2_BF.close() ################################################################################################################################### #####################################################PART 4 ###Writting_BLAST_results_back_into_original_file Output_extra_file_BF = self.l_InputFile_with_unique_ID Output_file_only_sequences_with_hits_BF = self.c_Output_Path_ + self.d_Output_file_name + "_only_sequences_with_hits.txt" Output_final_BLAST_File_BF = self.c_Output_Path_ + self.d_Output_file_name + "_all_sequences_with_and_without_hits.txt" OutputFile_BF = open(Output_final_BLAST_File_BF, 'w') inputFilteredBLAST_File_BF = open(filter_part2_path_and_name_BF, 'r') # INDEX qseqid = 0 sacc = 1 stitle = 2 qseq = 3 sseq = 4 nident = 5 mismatch = 6 pident = 7 length = 8 evalue = 9 bitscore = 10 qstart = 11 qend = 12 sstart = 13 send = 14 gapopen = 15 gaps = 16 qlen = 17 slen = 18 PercentageOverlapINT = 19 # Reading files LISTS lst_lst = [] counter = 0 header_temp = "" Complete_output = "" tempstring = "temp" 
        Dictionary_lst_lst = {}
        # Reading list_list
        length = len(lst_lst)
        for i in range(length):
            temp_rowlist = lst_lst[i]
            temp_rowlist_length = len(temp_rowlist)
            if temp_rowlist_length != 20:
                print("length of temp_rowlist is:")
                print(temp_rowlist_length)
                continue
            row_string_for_output = ""
            Variable_QSeqID = temp_rowlist[qseqid]
            try:
                for j in range(temp_rowlist_length - 1):
                    temp_string = temp_rowlist[j]
                    row_string_for_output += (temp_string + "\t")
                row_string_for_output += temp_rowlist[temp_rowlist_length - 1]
                row_string_for_output += "\n"
            except IndexError:
                print("Exception thrown")
            print(row_string_for_output)
            # Tuple
            TheTuple_rowlist = (Variable_QSeqID, row_string_for_output)
            if Variable_QSeqID in Dictionary_lst_lst:
                print("key already in dictionary")
            else:
                Dictionary_lst_lst[Variable_QSeqID] = row_string_for_output
                filtered_files_part2_BF.write(row_string_for_output)
        filtered_files_part2_BF.close()
        filtered_files_2_BF.close()

        ###############################################################################
        # PART 4
        # Writing BLAST results back into the original file
        Output_extra_file_BF = self.l_InputFile_with_unique_ID
        Output_file_only_sequences_with_hits_BF = (self.c_Output_Path_ + self.d_Output_file_name +
                                                   "_only_sequences_with_hits.txt")
        Output_final_BLAST_File_BF = (self.c_Output_Path_ + self.d_Output_file_name +
                                      "_all_sequences_with_and_without_hits.txt")
        OutputFile_BF = open(Output_final_BLAST_File_BF, 'w')
        inputFilteredBLAST_File_BF = open(filter_part2_path_and_name_BF, 'r')

        # INDEX
        qseqid = 0
        sacc = 1
        stitle = 2
        qseq = 3
        sseq = 4
        nident = 5
        mismatch = 6
        pident = 7
        length = 8
        evalue = 9
        bitscore = 10
        qstart = 11
        qend = 12
        sstart = 13
        send = 14
        gapopen = 15
        gaps = 16
        qlen = 17
        slen = 18
        PercentageOverlapINT = 19

        # Reading files LISTS
        lst_lst = []
        counter = 0
        header_temp = ""
        Complete_output = ""
        tempstring = "temp"
        while tempstring:
            tempstring = inputFilteredBLAST_File_BF.readline()
            if counter == 0:
                Split_list = tempstring.splitlines()
                header_temp = Split_list[0]
            if tempstring == "":
                break
            if counter != 0:
                templine = tempstring.splitlines()
                x = templine[0]
                rowlist_2 = x.split('\t')
                lst_lst.append(rowlist_2)
            counter += 1

        Dictionary_lst_lst = {}
        # Reading list_list
        length = len(lst_lst)
        for i in range(length):
            temp_rowlist = lst_lst[i]
            temp_rowlist_length = len(temp_rowlist)
            if temp_rowlist_length != 20:
                continue
            row_string_for_output = ""
            Variable_QSeqID = temp_rowlist[qseqid]
            try:
                for j in range(temp_rowlist_length):
                    temp_string = temp_rowlist[j]
                    row_string_for_output += (temp_string + "\t")
                row_string_for_output += "\n"
            except IndexError:
                print("Exception thrown")
            # Tuple
            TheTuple_rowlist = (Variable_QSeqID, row_string_for_output)
            if Variable_QSeqID in Dictionary_lst_lst:
                print("key already in dictionary")
            else:
                Dictionary_lst_lst[Variable_QSeqID] = row_string_for_output
            print(row_string_for_output)

        # OPEN THE ORIGINAL MODIFIED FILE
        Original_Modified_file_BF = open(Output_extra_file_BF, 'r')
        Only_sequences_with_hits_file_BF = open(Output_file_only_sequences_with_hits_BF, 'w')
        counter2 = 0
        Header_Temp_2 = ""
        tempstring = "temp"
        while tempstring:
            tempstring = Original_Modified_file_BF.readline()
            if counter2 == 0:
                Split_list_2 = tempstring.splitlines()
                Header_Temp_2 = Split_list_2[0]
                OutputFile_BF.write(Header_Temp_2 + "\t" + header_temp + "\n")
                Only_sequences_with_hits_file_BF.write(Header_Temp_2 + "\t" + header_temp + "\n")
            if tempstring == "":
                break
            if counter2 != 0:
                templine = tempstring.splitlines()
                x = templine[0]
                rowlist = x.split('\t')
                Temp_QSeqID = rowlist[0]
                if Temp_QSeqID in Dictionary_lst_lst:
                    Corresponding_row = Dictionary_lst_lst.get(Temp_QSeqID)
                    OutputFile_BF.write(x + "\t")
                    OutputFile_BF.write(Corresponding_row)
                    Only_sequences_with_hits_file_BF.write(x + "\t")
                    Only_sequences_with_hits_file_BF.write(Corresponding_row)
                else:
                    # pad with 20 empty columns so every row keeps the same width
                    OutputFile_BF.write(x + "\t" * 20 + "\n")
            counter2 += 1
        OutputFile_BF.write(Complete_output)
        Original_Modified_file_BF.close()
        OutputFile_BF.close()
        inputFilteredBLAST_File_BF.close()
        Only_sequences_with_hits_file_BF.close()
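
# --- illustrative only (added): driving the BLAST + filtering step without
# the GUI. All paths below are placeholders, not from the original script.
# s4 = Script4()
# s4.x_Path_to_NCBI_Directory_BF = "C:/NCBI/blast-2.8.0+/bin/"
# s4.a_Data_Base_fasta = "C:/data/Genome.fasta"
# s4.b_Query_fasta_file = "C:/data/queries.fasta"
# s4.c_Output_Path_ = "C:/data/out/"
# s4.l_InputFile_with_unique_ID = "C:/data/out/your_original_file_InputFile_with_unique_ID.txt"
# s4.main()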

class Script55:

    def __init__(self):
        self.a_BLAST_input_path_and_file_ = ""
        self.b_Output_Path_ = ""
        self.c_Output_file_name = "_BLAST"
        # FILTERING PARAMETERS
        self.d_Percentage_overlap = "0.8"
        self.e_bitscore = "50"

    def main(self):
        # BLAST FILTERING PARAMETERS: column indices matching the -outfmt list
        qseqid = 0
        sacc = 1
        stitle = 2
        qseq = 3
        sseq = 4
        nident = 5
        mismatch = 6
        pident = 7
        length = 8
        evalue = 9
        bitscore = 10
        qstart = 11
        qend = 12
        sstart = 13
        send = 14
        gapopen = 15
        gaps = 16
        qlen = 17
        slen = 18
        PercentageOverlapINT = 19

        BLAST_OUTPUT_FILE_F = self.a_BLAST_input_path_and_file_
        file_F = open(BLAST_OUTPUT_FILE_F, 'r')
        filtered_file_F = self.b_Output_Path_ + self.c_Output_file_name + "_filtered.txt"
        filtered_files_F = open(filtered_file_F, "w+")

        # headers
        AllHeadersFromFilteredFile_F = ("qseqid" + '\t' + "sacc" + '\t' + "stitle" + '\t' +
                                        "qseq" + '\t' + "sseq" + '\t' + "nident" + '\t' +
                                        "mismatch" + '\t' + "pident" + '\t' + "length" + '\t' +
                                        "evalue" + '\t' + "bitscore" + '\t' + "qstart" + '\t' +
                                        "qend" + '\t' + "sstart" + '\t' + "send" + '\t' +
                                        "gapopen" + '\t' + "gaps" + '\t' + "qlen" + '\t' +
                                        "slen" + '\t' + "PercentageOverlap" + '\n')
        filtered_files_F.write(AllHeadersFromFilteredFile_F)

        # Reading files
        tempstring = "temp"
        while tempstring:
            tempstring = file_F.readline()
            if tempstring == "":
                break
            templine = tempstring.splitlines()
            x = templine[0]
            rowlist = x.split('\t')
            Querylength_F = int(rowlist[qlen])
            Length_F = int(rowlist[length])
            SubjectLength_F = int(rowlist[slen])
            min_length_F = min(Querylength_F, SubjectLength_F)
            PercentageOverlap_F = (Length_F / min_length_F)
            rowlist.append(str(PercentageOverlap_F))
            columns = (rowlist[qseqid] + '\t' + rowlist[sacc] + '\t' + rowlist[stitle] + '\t' +
                       rowlist[qseq] + '\t' + rowlist[sseq] + '\t' + rowlist[nident] + '\t' +
                       rowlist[mismatch] + '\t' + rowlist[pident] + '\t' + rowlist[length] + '\t' +
                       rowlist[evalue] + '\t' + rowlist[bitscore] + '\t' + rowlist[qstart] + '\t' +
                       rowlist[qend] + '\t' + rowlist[sstart] + '\t' + rowlist[send] + '\t' +
                       rowlist[gapopen] + '\t' + rowlist[gaps] + '\t' + rowlist[qlen] + '\t' +
                       rowlist[slen] + '\t' + rowlist[PercentageOverlapINT] + '\n')
            # FILTERING STEP 1 <<<<< DEFAULT "Percentage overlap >= 0.8" >>>>> AND <<<<< DEFAULT "BitScore >= 50" >>>
            # HANDLES
            if float(rowlist[PercentageOverlapINT]) >= float(self.d_Percentage_overlap):
                if float(rowlist[bitscore]) >= int(self.e_bitscore):
                    filtered_files_F.write(columns)
        file_F.close()
        filtered_files_F.close()

        # TO BE CHECKED
        filtered_files_2_F = open(filtered_file_F, 'r')

        ###############################################################################
        # PART 3
        # FILTERING STEP 2
        filter_part2_path_and_name_F = self.b_Output_Path_ + self.c_Output_file_name + "_sorted.txt"
        filtered_files_part2_F = open(filter_part2_path_and_name_F, "w")

        # headers (same header line as above)
        filtered_files_part2_F.write(AllHeadersFromFilteredFile_F)

        # Reading files
        lst_lst = []
        counter = 0
        tempstring = "temp"
        while tempstring:
            tempstring = filtered_files_2_F.readline()
            if tempstring == "":
                break
            if counter != 0:  # skip the header row
                templine = tempstring.splitlines()
                x = templine[0]
                rowlist_2 = x.split('\t')
                lst_lst.append(rowlist_2)
            counter += 1
        # READ THE NEW FILE AND ENTER THE LOOP
        # SORTING
        lst_lst.sort(key=lambda DataRow_0: float(DataRow_0[pident]), reverse=True)
        lst_lst.sort(key=lambda DataRow_2: float(DataRow_2[PercentageOverlapINT]), reverse=True)
        lst_lst.sort(key=lambda DataRow_1: float(DataRow_1[bitscore]), reverse=True)
        lst_lst.sort(key=lambda DataRow_3: DataRow_3[qseqid])

        Dictionary_lst_lst = {}
        # Reading list_list
        length = len(lst_lst)
        for i in range(length):
            temp_rowlist = lst_lst[i]
            temp_rowlist_length = len(temp_rowlist)
            if temp_rowlist_length != 20:
                print("length of temp_rowlist is:")
                print(temp_rowlist_length)
                continue
            row_string_for_output = ""
            Variable_QSeqID = temp_rowlist[qseqid]
            try:
                for j in range(temp_rowlist_length - 1):
                    temp_string = temp_rowlist[j]
                    row_string_for_output += (temp_string + "\t")
                row_string_for_output += temp_rowlist[temp_rowlist_length - 1]
                row_string_for_output += "\n"
            except IndexError:
                print("Exception thrown")
            print(row_string_for_output)
            # Tuple
            TheTuple_rowlist = (Variable_QSeqID, row_string_for_output)
            if Variable_QSeqID in Dictionary_lst_lst:
                print("key already in dictionary")
            else:
                Dictionary_lst_lst[Variable_QSeqID] = row_string_for_output
                filtered_files_part2_F.write(row_string_for_output)
        filtered_files_part2_F.close()
        filtered_files_2_F.close()


class Win1(Script1, Script2, Script3, Script4, Script55):

    def __init__(self, window):
        # Initializations
        self.wind = window
        self.wind.title("omicR")
        self.wind.wm_iconbitmap('Currito.ico')
        self.wind.resizable(False, False)

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="Select what would you like to do:")
        frame.grid(row=0, column=0, columnspan=3, padx=40, pady=40)

        # Buttons
        tk.Button(frame, text="Create FASTA files and input files for BLAST / filtering",
                  command=self.new_window2).grid(row=3, columnspan=2, padx=5, pady=5, sticky=W + E)
        tk.Button(frame, text="Download Genomes",
                  command=self.new_window3).grid(row=4, columnspan=2, padx=5, pady=5, sticky=W + E)
        tk.Button(frame, text="Create Genome Database",
                  command=self.new_window4).grid(row=5, columnspan=2, padx=5, pady=5, sticky=W + E)
        tk.Button(frame, text="BLAST / filtering",
                  command=self.new_window5).grid(row=6, columnspan=2, padx=5, pady=5, sticky=W + E)
        tk.Button(frame, text="Filtering",
                  command=self.new_window55).grid(row=7, columnspan=2, padx=5, pady=5, sticky=W + E)
        # Instructions
        tk.Button(frame, text="Instructions",
                  command=self.new_window6).grid(row=8, columnspan=2, padx=5, pady=5, sticky=W + E)
        # Close button
        tk.Button(frame, text="Close",
                  command=self.close_window).grid(row=10, column=1, columnspan=2, padx=5, pady=5, sticky=E)

    # FASTA FILES
    def new_window2(self):
        self.new_window = tk.Toplevel(self.wind)
        self.app = Win2(self.new_window)

    # Download Genomes
    def new_window3(self):
        self.new_window = tk.Toplevel(self.wind)
        self.app = Win3(self.new_window)

    # Create Genome Database
    def new_window4(self):
        self.new_window = tk.Toplevel(self.wind)
        self.app = Win4(self.new_window)

    # BLAST and Filtering
    def new_window5(self):
        self.new_window = tk.Toplevel(self.wind)
        self.app = Win5(self.new_window)

    def new_window55(self):
        self.new_window = tk.Toplevel(self.wind)
        self.app = Win55(self.new_window)

    # HELP
    def new_window6(self):
        self.new_window = tk.Toplevel(self.wind)
        self.app = Win6(self.new_window)

    def close_window(self):
        self.wind.destroy()


# FASTA FILES
class Win2(Win1):

    def __init__(self, window):
        # Initializations
        self.window = window
        self.wind = window
        self.wind.title("Create FASTA files and input files for BLAST filtering")
        self.wind.wm_iconbitmap('Currito.ico')

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="Complete the following parameters ")
        frame.grid(row=0, column=0, columnspan=3, padx=20, pady=20)

        # CSV input file path # Row 1
        Label(frame, text="Input file path (CSV file required): ").grid(row=1, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/Bassiana.csv ").grid(row=1, column=6, sticky=W)
        self.CSVInput = tk.Entry(frame)
        self.CSVInput.focus()
        self.CSVInput.grid(row=1, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.select_directory_CSV_input_path).grid(row=1, column=5, sticky=W + E, padx=2, pady=2)

        # Output file path # Row 2
        Label(frame, text="Output file path: ").grid(row=2, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/ ").grid(row=2, column=6, sticky=W)
        self.OutputFilePath = tk.Entry(frame)
        self.OutputFilePath.grid(row=2, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.select_directory_output_path).grid(row=2, column=5, padx=2, pady=2)

        # Output file name # Row 3
        Label(frame, text="Output file name: ").grid(row=3, column=0, sticky=W)
        Label(frame, text="Example: Bassiana_BLAST_Results ").grid(row=3, column=6, sticky=W)
        self.CSVOutputFileName = tk.Entry(frame)
        self.CSVOutputFileName.grid(row=3, column=1, columnspan=2, ipadx=200, padx=5, pady=5)

        # Row where header starts # Row 4
        Label(frame, text="Row where header starts: ").grid(row=4, column=0, sticky=W)
        Label(frame, text="Example: 1 (Start counting rows from one *Number*)").grid(row=4, column=2, sticky=W)
        self.RowWhereHeaderStarts = tk.Entry(frame)
        self.RowWhereHeaderStarts.grid(row=4, column=1, columnspan=1, ipadx=50, padx=5, pady=5, sticky=W)

        # Row where data starts # Row 5
        Label(frame, text="Row where data starts: ").grid(row=5, column=0, sticky=W)
        Label(frame, text="Example: 2 (Start counting rows from one *Number*)").grid(row=5, column=2, sticky=W)
        self.RowWhereDataStarts = tk.Entry(frame)
        self.RowWhereDataStarts.grid(row=5, column=1, columnspan=1, ipadx=50, padx=5, pady=5, sticky=W)

        # Column of sequences # Row 6
        Label(frame, text="Column of Sequences: ").grid(row=6, column=0, sticky=W)
        Label(frame, text="Example: 0 (Start counting columns from zero *Index*)").grid(row=6, column=2, sticky=W)
        self.ColumnOfSequences = tk.Entry(frame)
        self.ColumnOfSequences.grid(row=6, column=1, columnspan=1, ipadx=50, padx=5, pady=5, sticky=W)

        # Column of AVGreads # Row 7
        # Label(frame, text="Column of comments [Average Reads]: ").grid(row=7, column=0, sticky=W)
        # Label(frame, text="Example: 1 (Start counting columns from zero *Index*)").grid(row=7, column=2, sticky=W)
        # self.ColumnOfAVGreads = tk.Entry(frame)
        # self.ColumnOfAVGreads.grid(row=7, column=1, columnspan=1, ipadx=50, padx=5, pady=5, sticky=W)

        # Column of N Bases # Row 8
text="Column of comments [NBases]: ").grid(row=8, column=0, sticky=W) #Label(frame, text="Example: 2 (Start counting columns from zero *Index*)").grid(row=8, column=2, sticky=W) #Label(frame, text=" ").grid(row=9, column=6, sticky=W) #self.ColumnOfNbases = tk.Entry(frame) #self.ColumnOfNbases.grid(row=8, column=1, columnspan=1, ipadx=50, padx=5, pady=5, sticky=W) # WINDOW FASTA FILES # Button clear tk.Button(frame, text="Clear all", command=lambda: [self.ClearAll_Fasta_files()]).grid(row=17, column=6, columnspan=1, padx=5, pady=5, sticky=W + E) # Button Run tk.Button(frame, text="Run", command=lambda: [self.Run_Button_FASTA_FILES()]).grid(row=18, column=6, columnspan=1, padx=5, pady=5, sticky=W + E) # BUTTON Close tk.Button(frame, text="Close", command=self.close_window).grid(row=19, column=6, columnspan=1, padx=5, pady=5, sticky=W + E) def Run_Button_FASTA_FILES(self): TextFromCSVInput = self.CSVInput.get() TextFromOutputPath = self.OutputFilePath.get() TextFromOutputFileName = self.CSVOutputFileName.get() TextFromRowWhereHeaderStarts = self.RowWhereHeaderStarts.get() TextFromRowWhereDataStarts = self.RowWhereDataStarts.get() TextFromSequenceColumn = self.ColumnOfSequences.get() #TextFromAVG_reads = self.ColumnOfAVGreads.get() #TextFrom_Nbases = self.ColumnOfNbases.get() if (len(TextFromCSVInput) != 0 and len(TextFromOutputPath) != 0 and len(TextFromRowWhereHeaderStarts) != 0 and len(TextFromRowWhereDataStarts) != 0 and len(TextFromSequenceColumn)): Script1.a_InputCSVFileName = TextFromCSVInput TextFromOutputMod = TextFromOutputPath + doubleBackSlash Script1.b_OutputPATH = TextFromOutputMod if len(TextFromOutputFileName) != 0: Script1.c_OutputFileName = TextFromOutputFileName Script1.c_OutputFileName = TextFromOutputFileName Script1.d_Row_Where_header_starts = int(TextFromRowWhereHeaderStarts) Script1.e_rowWhereDataStarts = int(TextFromRowWhereDataStarts) Script1.f_SEQUENCE_COLUMN = int(TextFromSequenceColumn) #Script1.g_AVG_READS = int(TextFromAVG_reads) #Script1.i_Nbases = int(TextFrom_Nbases) print(Script1.a_InputCSVFileName) print(Script1.b_OutputPATH) print(Script1.c_OutputFileName) print(Script1.d_Row_Where_header_starts) print(Script1.e_rowWhereDataStarts) print(Script1.f_SEQUENCE_COLUMN) #print(Script1.g_AVG_READS) #print(Script1.i_Nbases) # Output Messages self.message = Label(text="Running", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5) Script1.main(self) self.message1 = Label(text="Completed!", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5) messagebox.showinfo('Information', "Completed!") else: messagebox.showerror("Error", "All required parameters must be filled") self.wind.lift() def select_directory_CSV_input_path(self): folder_selected = filedialog.askopenfilename(initialdir='/', title="Select file", filetypes=(("CSV files", "*.csv"), ("all files", "*.*"))) print(folder_selected) self.CSVInput.delete(0, END) self.CSVInput.insert(0, folder_selected) self.wind.lift() return def select_directory_output_path(self): Output_file_path_fasta_files = filedialog.askdirectory(initialdir='.') print(Output_file_path_fasta_files) self.OutputFilePath.delete(0, END) self.OutputFilePath.insert(0, Output_file_path_fasta_files) self.wind.lift() return def ClearAll_Fasta_files(self): self.CSVInput.delete(0, END) self.OutputFilePath.delete(0, END) self.CSVOutputFileName.delete(0, END) self.RowWhereHeaderStarts.delete(0, END) self.RowWhereDataStarts.delete(0, END) self.ColumnOfSequences.delete(0, END) #self.ColumnOfAVGreads.delete(0, 
        # self.ColumnOfNbases.delete(0, END)

    def close_window(self):
        self.wind.destroy()


# Download Genomes
class Win3(Win1):

    def __init__(self, window):
        # Initializations
        self.window = window
        self.wind = window
        self.wind.title("Download genome entries from NCBI")
        self.wind.wm_iconbitmap('Currito.ico')

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="Complete the following parameters ")
        frame.grid(row=0, column=0, columnspan=3, padx=20, pady=20)

        # E-mail # Row 1
        Label(frame, text="E-mail (required to access NCBI): ").grid(row=1, column=0, sticky=W)
        Label(frame, text="Example: DungogCitizen@nothing.com ").grid(row=1, column=6, sticky=W)
        self.WriteYourEmail = tk.Entry(frame)
        self.WriteYourEmail.focus()
        self.WriteYourEmail.grid(row=1, column=1, columnspan=2, ipadx=200, padx=5, pady=5)

        # Accession numbers # Row 2
        Label(frame, text="RefSeq Number: ").grid(row=2, column=0, sticky=W)
        Label(frame, text="Example: NC_009328.1, NC_009329.1 ").grid(row=2, column=6, sticky=W)
        self.WriteAccessionNumbers = tk.Entry(frame)
        self.WriteAccessionNumbers.grid(row=2, column=1, columnspan=2, ipadx=200, padx=5, pady=5)

        # Output path for downloading genomes # Row 3
        Label(frame, text="Output path directory: ").grid(row=3, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/ ").grid(row=3, column=6, sticky=W)
        self.OutputPathForDownloadingGenomes = tk.Entry(frame)
        self.OutputPathForDownloadingGenomes.grid(row=3, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.Download_genomes_select_directory_output_path).grid(row=3, column=5, padx=2, pady=2)

        # Output file name # Row 4
        Label(frame, text="Output file name: ").grid(row=4, column=0, sticky=W)
        Label(frame, text="Example: Geobacillus_sp_Genome").grid(row=4, column=6, sticky=W)
        Label(frame, text="").grid(row=5, column=6, sticky=W)
        self.DownloadingGenomes_OutputFileName = tk.Entry(frame)
        self.DownloadingGenomes_OutputFileName.grid(row=4, column=1, columnspan=2, ipadx=200, padx=5, pady=5)

        # Button Clear
        tk.Button(frame, text="Clear all",
                  command=lambda: [self.ClearAll_DownloadGENOMES()]).grid(row=17, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Run
        tk.Button(frame, text="Run",
                  command=self.Run_Button_Downloading_genomes).grid(row=18, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Close
        tk.Button(frame, text="Close",
                  command=self.close_window).grid(row=19, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)

    def Run_Button_Downloading_genomes(self):
        TextFromWriteYourEmail_DownloadingGenomes = self.WriteYourEmail.get()
        TextFromWriteAccessionNumbers_DownloadingGenomes = self.WriteAccessionNumbers.get()
        TextFromOutputPathFor_DownloadingGenomes = self.OutputPathForDownloadingGenomes.get()
        TextFromOutputFileName_DownloadingGenomes = self.DownloadingGenomes_OutputFileName.get()
        if (len(TextFromWriteYourEmail_DownloadingGenomes) != 0 and
                len(TextFromWriteAccessionNumbers_DownloadingGenomes) != 0 and
                len(TextFromOutputPathFor_DownloadingGenomes) != 0 and
                len(TextFromOutputFileName_DownloadingGenomes) != 0):
            Script2.email_DG = TextFromWriteYourEmail_DownloadingGenomes
            Script2.genomeAccessions_DG = TextFromWriteAccessionNumbers_DownloadingGenomes
            TextFromOutputMod_DownloadingGenomes = TextFromOutputPathFor_DownloadingGenomes + doubleBackSlash
            Script2.OutputFilePath_DG = TextFromOutputMod_DownloadingGenomes
            Script2.fileName_DG = TextFromOutputFileName_DownloadingGenomes
            print(Script2.email_DG)
            print(Script2.genomeAccessions_DG)
            print(Script2.OutputFilePath_DG)
            print(Script2.fileName_DG)
            # Output messages
            self.message = Label(text="Running", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            Script2.main(self)
            self.message1 = Label(text="Completed!", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            messagebox.showinfo('Information', "Completed!")
        else:
            messagebox.showerror("Error", "All required parameters must be filled")
            self.wind.lift()

    def Download_genomes_select_directory_output_path(self):
        Output_file_path_Downloading_genomes = filedialog.askdirectory(initialdir='.')
        print(Output_file_path_Downloading_genomes)
        self.OutputPathForDownloadingGenomes.delete(0, END)
        self.OutputPathForDownloadingGenomes.insert(0, Output_file_path_Downloading_genomes)
        self.wind.lift()
        return

    def ClearAll_DownloadGENOMES(self):
        self.WriteYourEmail.delete(0, END)
        self.WriteAccessionNumbers.delete(0, END)
        self.OutputPathForDownloadingGenomes.delete(0, END)
        self.DownloadingGenomes_OutputFileName.delete(0, END)

    def close_window(self):
        self.wind.destroy()


# Create Genome Database
class Win4(Win1):

    def __init__(self, window):
        # Initializations
        self.window = window
        self.wind = window
        self.wind.title("Create NCBI Database for BLASTn")
        self.wind.wm_iconbitmap('Currito.ico')

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="Complete the following parameters")
        frame.grid(row=0, column=0, columnspan=3, padx=20, pady=20)

        # Path to NCBI/bin # Row 1
        Label(frame, text="Select path to NCBI/bin directory : ").grid(row=1, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash + "NCBI" +
              doubleBackSlash + "blast-2.8.0+" + doubleBackSlash + "bin ").grid(row=1, column=6, sticky=W)
        self.NCBIPath_to_BIN = tk.Entry(frame)
        self.NCBIPath_to_BIN.focus()
        self.NCBIPath_to_BIN.grid(row=1, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.Select_button_for_Create_Database_for_NCBI_BLAST).grid(row=1, column=5, padx=2, pady=2)

        # Path to the FASTA file used to build the DB # Row 2
        Label(frame, text="Select path to genome FASTA file: ").grid(row=2, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/Genome/Genome.fasta ").grid(row=2, column=6, sticky=W)
        self.Path_to_FASTA_file = tk.Entry(frame)
        self.Path_to_FASTA_file.grid(row=2, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.select_path_for_FASTA_file).grid(row=2, column=5, padx=2, pady=2)

        # Drop-down menu: database type # Row 3
        Label(frame, text="Click to select type of database: ").grid(row=3, column=0, sticky=W)
        Label(frame, text="Example: 'nucl' for nucleotide or 'prot' for protein").grid(row=3, column=2, sticky=W)
        Label(frame, text="").grid(row=4, column=6, sticky=W)
        Options = ["", "nucl", "prot"]
        self.clicked = StringVar()
        self.clicked.set(Options[0])
        self.DropMenu_DB_Type = OptionMenu(frame, self.clicked, *Options)
        self.DropMenu_DB_Type.grid(row=3, column=1, columnspan=1, padx=5, pady=5, sticky=W)

        # Button Clear
        tk.Button(frame, text="Clear all",
                  command=lambda: [self.ClearAll_Create_GENOME_DB()]).grid(row=17, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Run
        tk.Button(frame, text="Run",
                  command=self.Run_Button_Create_Database).grid(row=18, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Close
        tk.Button(frame, text="Close",
                  command=self.close_window).grid(row=19, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)

    def Run_Button_Create_Database(self):
        TextFromNCBI_path_to_bin_directory = self.NCBIPath_to_BIN.get()
        TextFrom_Path_to_Genome_fasta_file = self.Path_to_FASTA_file.get()
        TextFromDropDownMenu_DataBaseType = self.clicked.get()
        if (len(TextFromNCBI_path_to_bin_directory) != 0 and
                len(TextFrom_Path_to_Genome_fasta_file) != 0 and
                len(TextFromDropDownMenu_DataBaseType) != 0):
            TextFromNCBI_path_to_bin_directory_CreateBD_MOD = TextFromNCBI_path_to_bin_directory + doubleBackSlash
            Script3.Path_To_NCBI_BLAST_Bin_Directory = TextFromNCBI_path_to_bin_directory_CreateBD_MOD
            Script3.Path_To_Database_Fasta_File = TextFrom_Path_to_Genome_fasta_file
            Script3.Data_Base_Type = TextFromDropDownMenu_DataBaseType
            print(Script3.Path_To_NCBI_BLAST_Bin_Directory)
            print(Script3.Path_To_Database_Fasta_File)
            print(Script3.Data_Base_Type)
            # Output messages
            self.message = Label(text="Running", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            Script3.main(self)
            self.message1 = Label(text="Completed!", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            messagebox.showinfo('Information', "Completed!")
        else:
            messagebox.showerror("Error", "All required parameters must be filled")
            self.wind.lift()

    def Select_button_for_Create_Database_for_NCBI_BLAST(self):
        Input_file_path_NCBI_Bin_Directory = filedialog.askdirectory(initialdir='.')
        print(Input_file_path_NCBI_Bin_Directory)
        self.NCBIPath_to_BIN.delete(0, END)
        self.NCBIPath_to_BIN.insert(0, Input_file_path_NCBI_Bin_Directory)
        self.wind.lift()
        return

    def select_path_for_FASTA_file(self):
        folder_selected_for_FASTA_File = filedialog.askopenfilename(initialdir='/', title="Select file",
                                                                    filetypes=(("FASTA files", "*.fasta"), ("all files", "*.*")))
        print(folder_selected_for_FASTA_File)
        self.Path_to_FASTA_file.delete(0, END)
        self.Path_to_FASTA_file.insert(0, folder_selected_for_FASTA_File)
        self.wind.lift()
        return

    def Drop_down_definition_selected(self):
        return

    def ClearAll_Create_GENOME_DB(self):
        self.NCBIPath_to_BIN.delete(0, END)
        self.Path_to_FASTA_file.delete(0, END)

    def close_window(self):
        self.wind.destroy()


# BLAST and Filtering
class Win5(Win1):

    def __init__(self, window):
        # Initializations
        self.window = window
        self.wind = window
        self.wind.title("BLAST and filtering")
        self.wind.wm_iconbitmap('Currito.ico')

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="Complete the following parameters")
        frame.grid(row=0, column=0, columnspan=3, padx=20, pady=20)

        # Path to NCBI/bin # Row 1
        Label(frame, text="Select path to NCBI/bin directory : ").grid(row=1, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash + "NCBI" +
              doubleBackSlash + "blast-2.8.0+" + doubleBackSlash + "bin ").grid(row=1, column=6, sticky=W)
        self.NCBIPath_to_BIN_in_BF = tk.Entry(frame)
        self.NCBIPath_to_BIN_in_BF.focus()
        self.NCBIPath_to_BIN_in_BF.grid(row=1, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.Select_button_for_Select_NCBI_path_in_BF).grid(row=1, column=5, padx=2, pady=2)

        # Path to the NCBI database # Rows 2-4
        Label(frame, text="Select path to your NCBI database: ").grid(row=2, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/Genome/Genome.fasta ").grid(row=2, column=6, sticky=W)
        Label(frame, text="Note: There should be other files created when the "
database was made.").grid(row=3, column=6, sticky=W) Label(frame, text="Example: Genome.fasta.nhr / Genome.fasta.nin / Genome.fasta.nsq ").grid(row=4, column=6, sticky=W) self.SelectDataBase_BF = tk.Entry(frame) self.SelectDataBase_BF.focus() self.SelectDataBase_BF.grid(row=2, column=1, columnspan=2, ipadx=200, padx=5, pady=5, ) # Button Select tk.Button(frame, text="Select", command=self.Select_button_for_Select_NCBI_Database_in_BF).grid(row=2, column=5, padx=2, pady=2) # Select path to the query # Row 5 Label(frame, text="Select path to your query: ").grid(row=5, column=0, sticky=W) Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash + "Users/MyDocuments/Genome/MyFile.fasta ").grid(row=5, column=6, sticky=W) self.SelectPathToQuery_in_BF = tk.Entry(frame) self.SelectPathToQuery_in_BF.grid(row=5, column=1, columnspan=2, ipadx=200, padx=5, pady=5, ) # Button Select tk.Button(frame, text="Select", command=self.Select_button_for_Select_Query_in_BF).grid(row=5, column=5, padx=2, pady=2) # Select output path in BF # Row 6 Label(frame, text="Output path directory: ").grid(row=6, column=0, sticky=W) Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash + "Users/MyDocuments/ ").grid(row=6, column=6, sticky=W) self.OutputPathFor_BF = tk.Entry(frame) self.OutputPathFor_BF.grid(row=6, column=1, columnspan=2, ipadx=200, padx=5, pady=5, ) # Button Select tk.Button(frame, text="Select", command=self.Button_select_directory_output_path_BF).grid(row=6, column=5, padx=2, pady=2) # Select output name in BF # Row 7 Label(frame, text="Output file name: ").grid(row=7, column=0, sticky=W) Label(frame, text=" Example: My_BLAST_results").grid(row=7, column=6, sticky=W) Label(frame, text="").grid(row=7, column=6, sticky=W) self.OutputFileName_BF = tk.Entry(frame) self.OutputFileName_BF.grid(row=7, column=1, columnspan=2, ipadx=200, padx=5, pady=5, ) # Row 8 # SELECT THE PATH TO THE FILE InputFile_with_unique_ID.txt # Row 8 Label(frame, text="Path to file with Unique ID (Optional): ").grid(row=8, column=0, sticky=W) Label(frame, text="Example: My_BLAST_results_InputFile_with_unique_ID.txt ").grid(row=8, column=6, sticky=W) Label(frame, text=" ").grid(row=13, column=6, sticky=W) self.InputFile_UniqueID_BF = tk.Entry(frame) self.InputFile_UniqueID_BF.grid(row=8, column=1, columnspan=2, ipadx=200, padx=5, pady=5, sticky=W) # Button Select tk.Button(frame, text="Select", command=self.Button_select_file_Unique_ID).grid(row=8, column=5, padx=2, pady=2, sticky=W) # Word Size # Row 9 Label(frame, text="Word size: ").grid(row=9, column=0, sticky=W) Label(frame, text="Recommended value: 11 ").grid(row=9, column=2, sticky=W) self.WordSize_BF = tk.Entry(frame) self.WordSize_BF.grid(row=9, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W) # SELECT YOUR PERCENTAGE IDENTITY = DEFAULT IS 70 %, you can change it. - f 70 # Row 10 Label(frame, text="Percentage Identity: ").grid(row=10, column=0, sticky=W) Label(frame, text="Recommended value: 70 ").grid(row=10, column=2, sticky=W) # Label(frame, text=" ").grid(row=9, column=6, sticky=W) self.PercentageIdentity_BF = tk.Entry(frame) self.PercentageIdentity_BF.grid(row=10, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W) # SELECT YOUR THREADS. 
        # If your computer has more than 4 CPUs you can change it. -g 4 # Row 11
        Label(frame, text="Number of threads: ").grid(row=11, column=0, sticky=W)
        Label(frame, text="Example: 4 ").grid(row=11, column=2, sticky=W)
        self.NumberOfThreads_BF = tk.Entry(frame)
        self.NumberOfThreads_BF.grid(row=11, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W)

        # Output format; the default is 6 (a table). You can select other formats. -i 6 # Row 12
        Label(frame, text="Output format: ").grid(row=12, column=0, sticky=W)
        Label(frame, text="Recommended format: 6 ").grid(row=12, column=2, sticky=W)
        self.OutputBLAST_Format_BF = tk.Entry(frame)
        self.OutputBLAST_Format_BF.grid(row=12, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W)

        # Percentage overlap; the default is 80%. -j 0.8 # Row 13
        Label(frame, text="Percentage Overlap: ").grid(row=13, column=0, sticky=W)
        Label(frame, text="Recommended value: 0.8 ").grid(row=13, column=2, sticky=W)
        self.PercentageOverlap_Format_BF = tk.Entry(frame)
        self.PercentageOverlap_Format_BF.grid(row=13, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W)

        # Bitscore; the default is 50. If you are unsure, don't use this parameter. -k 50 # Row 14
        Label(frame, text="Bitscore: ").grid(row=14, column=0, sticky=W)
        Label(frame, text="Recommended value: 50 ").grid(row=14, column=2, sticky=W)
        self.Bitscore_BF = tk.Entry(frame)
        self.Bitscore_BF.grid(row=14, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W)

        # Tick box: discontiguous megablast # Row 15
        Label(frame, text="Discontiguous Mega BLAST: ").grid(row=15, column=0, sticky=W)
        self.checked = tk.IntVar()
        self.CheckBox_BF = tk.Checkbutton(frame, text="dc-megablast", variable=self.checked, onvalue=1, offvalue=0)
        self.CheckBox_BF.grid(row=15, column=1, columnspan=1, padx=5, pady=5, sticky=W)

        # Button Clear
        tk.Button(frame, text="Clear all",
                  command=lambda: [self.Button_clear_all_Blast_and_Filtering()]).grid(row=17, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Run
        tk.Button(frame, text="Run",
                  command=self.Button_run_BF).grid(row=18, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Close
        tk.Button(frame, text="Close",
                  command=self.close_window).grid(row=19, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)

    def Button_run_BF(self):
        TextFrom_Path_To_NCBI_Bin_BF = self.NCBIPath_to_BIN_in_BF.get()
        TextFrom_DC_MEGABLAST_BF = self.checked.get()
        TextFrom_Path_To_Database_BF = self.SelectDataBase_BF.get()
        Text_from_Path_to_Query_BF = self.SelectPathToQuery_in_BF.get()
        TextFrom_Path_Output_BF = self.OutputPathFor_BF.get()
        TextFrom_OutputFile_name_BF = self.OutputFileName_BF.get()
        Text_From_InputFile_UniqueID_BF = self.InputFile_UniqueID_BF.get()
        TextFromWordSize_BF = self.WordSize_BF.get()
        TextFromPercentageID_BF = self.PercentageIdentity_BF.get()
        TextFromNumberOfThreads_BF = self.NumberOfThreads_BF.get()
        TextFromOutputBLAST_BF = self.OutputBLAST_Format_BF.get()
        TextFromPercentageOverlap_BF = self.PercentageOverlap_Format_BF.get()
        TextFromBitscore_BF = self.Bitscore_BF.get()
        if (len(TextFrom_Path_To_NCBI_Bin_BF) != 0 and len(TextFrom_Path_To_Database_BF) != 0 and
                len(Text_from_Path_to_Query_BF) != 0 and len(TextFrom_Path_Output_BF) != 0 and
                len(TextFrom_OutputFile_name_BF) != 0 and len(TextFromWordSize_BF) != 0 and
                len(TextFromPercentageID_BF) != 0 and len(TextFromNumberOfThreads_BF) != 0 and
                len(TextFromOutputBLAST_BF) != 0 and len(TextFromPercentageOverlap_BF) != 0 and
                len(TextFromBitscore_BF) != 0):
            # Here
            TextFrom_Path_To_NCBI_Bin_BF_MOD = TextFrom_Path_To_NCBI_Bin_BF + doubleBackSlash
            Script4.x_Path_to_NCBI_Directory_BF = TextFrom_Path_To_NCBI_Bin_BF_MOD
            Script4.y_DC_MegaBlast_BF = TextFrom_DC_MEGABLAST_BF
            TextFrom_Path_To_Database_BF_MOD = TextFrom_Path_To_Database_BF  # + doubleBackSlash
            Script4.a_Data_Base_fasta = TextFrom_Path_To_Database_BF_MOD
            Text_from_Path_to_Query_BF_MOD = Text_from_Path_to_Query_BF  # + doubleBackSlash
            Script4.b_Query_fasta_file = Text_from_Path_to_Query_BF_MOD
            TextFrom_Path_Output_BF_MOD = TextFrom_Path_Output_BF + doubleBackSlash
            Script4.c_Output_Path_ = TextFrom_Path_Output_BF_MOD
            Script4.d_Output_file_name = TextFrom_OutputFile_name_BF
            Script4.e_word_size = TextFromWordSize_BF
            Script4.f_Percentage_identity = TextFromPercentageID_BF
            Script4.g_number_of_threads = TextFromNumberOfThreads_BF
            Script4.i_OutputFormat = TextFromOutputBLAST_BF
            Script4.j_Percentage_overlap = TextFromPercentageOverlap_BF
            Script4.k_bitscore = TextFromBitscore_BF
            Script4.l_InputFile_with_unique_ID = Text_From_InputFile_UniqueID_BF
            print(Script4.x_Path_to_NCBI_Directory_BF)
            print("")
            print(Script4.a_Data_Base_fasta)
            print(Script4.b_Query_fasta_file)
            print(Script4.c_Output_Path_)
            print(Script4.d_Output_file_name)
            print(Script4.e_word_size)
            print(Script4.f_Percentage_identity)
            print(Script4.g_number_of_threads)
            print(Script4.i_OutputFormat)
            print(Script4.j_Percentage_overlap)
            print(Script4.k_bitscore)
            print(Script4.l_InputFile_with_unique_ID)
            print(Script4.y_DC_MegaBlast_BF)
            # Output messages
            self.message = Label(text="Running", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            Script4.main(self)
            self.message1 = Label(text="Completed!", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            messagebox.showinfo('Information', "Completed!")
        else:
            messagebox.showerror("Error", "All required parameters must be filled")
            self.wind.lift()

    def Select_button_for_Select_NCBI_path_in_BF(self):
        Input_file_path_NCBI_Bin_Directory_BF = filedialog.askdirectory(initialdir='.')
        print(Input_file_path_NCBI_Bin_Directory_BF)
        self.NCBIPath_to_BIN_in_BF.delete(0, END)
        self.NCBIPath_to_BIN_in_BF.insert(0, Input_file_path_NCBI_Bin_Directory_BF)
        self.wind.lift()
        return

    def Select_button_for_Select_NCBI_Database_in_BF(self):
        Input_file_Database_BF = filedialog.askopenfilename(initialdir='/', title="Select file",
                                                            filetypes=(("Fasta files", "*.fasta"), ("all files", "*.*")))
        print(Input_file_Database_BF)
        self.SelectDataBase_BF.delete(0, END)
        self.SelectDataBase_BF.insert(0, Input_file_Database_BF)
        self.wind.lift()
        return

    def Select_button_for_Select_Query_in_BF(self):
        Input_file_query_BF = filedialog.askopenfilename(initialdir='/', title="Select file",
                                                         filetypes=(("Fasta files", "*.fasta"), ("all files", "*.*")))
        print(Input_file_query_BF)
        self.SelectPathToQuery_in_BF.delete(0, END)
        self.SelectPathToQuery_in_BF.insert(0, Input_file_query_BF)
        self.wind.lift()
        return

    def Button_select_directory_output_path_BF(self):
        Output_path_in_BF = filedialog.askdirectory(initialdir='.')
        print(Output_path_in_BF)
        self.OutputPathFor_BF.delete(0, END)
        self.OutputPathFor_BF.insert(0, Output_path_in_BF)
        self.wind.lift()
        return

    def Button_select_file_Unique_ID(self):
        Path_to_file_with_Unique_ID = filedialog.askopenfilename(initialdir='/', title="Select file",
                                                                 filetypes=(("Text files", "*.txt"), ("all files", "*.*")))
        print(Path_to_file_with_Unique_ID)
        self.InputFile_UniqueID_BF.delete(0, END)
        self.InputFile_UniqueID_BF.insert(0, Path_to_file_with_Unique_ID)
        self.wind.lift()
        return

    def Button_clear_all_Blast_and_Filtering(self):
        self.NCBIPath_to_BIN_in_BF.delete(0, END)
        self.SelectDataBase_BF.delete(0, END)
        self.SelectPathToQuery_in_BF.delete(0, END)
        self.OutputPathFor_BF.delete(0, END)
        self.OutputFileName_BF.delete(0, END)
        self.InputFile_UniqueID_BF.delete(0, END)
        self.PercentageIdentity_BF.delete(0, END)
        self.NumberOfThreads_BF.delete(0, END)
        self.OutputBLAST_Format_BF.delete(0, END)
        self.PercentageOverlap_Format_BF.delete(0, END)
        self.Bitscore_BF.delete(0, END)
        self.WordSize_BF.delete(0, END)

    def close_window(self):
        self.wind.destroy()


# Filtering
class Win55(Win1):

    def __init__(self, window):
        # Initializations
        self.window = window
        self.wind = window
        self.wind.title("Filtering")
        self.wind.wm_iconbitmap('Currito.ico')

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="Complete the following parameters")
        frame.grid(row=0, column=0, columnspan=3, padx=20, pady=20)

        # Path to the BLAST file # Row 2
        Label(frame, text="Select path to your BLAST file(*): ").grid(row=2, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/MyBLAST_results.txt ").grid(row=2, column=6, sticky=W)
        self.SelectPathToQuery_in_F = tk.Entry(frame)
        self.SelectPathToQuery_in_F.grid(row=2, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.Select_button_for_Select_Query_in_F).grid(row=2, column=5, padx=2, pady=2)

        # Output path # Row 3
        Label(frame, text="Output path directory: ").grid(row=3, column=0, sticky=W)
        Label(frame, text="Example: " + "C:" + doubleBackSlash + doubleBackSlash +
              "Users/MyDocuments/ ").grid(row=3, column=6, sticky=W)
        self.OutputPathFor_F = tk.Entry(frame)
        self.OutputPathFor_F.grid(row=3, column=1, columnspan=2, ipadx=200, padx=5, pady=5)
        # Button Select
        tk.Button(frame, text="Select",
                  command=self.Button_select_directory_output_path_F).grid(row=3, column=5, padx=2, pady=2)

        # Output file name # Row 4
        Label(frame, text="Output file name: ").grid(row=4, column=0, sticky=W)
        Label(frame, text=" Example: My_filtered_BLAST_results").grid(row=4, column=6, sticky=W)
        Label(frame, text="").grid(row=4, column=6, sticky=W)
        self.OutputFileName_F = tk.Entry(frame)
        self.OutputFileName_F.grid(row=4, column=1, columnspan=2, ipadx=200, padx=5, pady=5)

        # Percentage overlap # Row 6
        Label(frame, text="Percentage Overlap: ").grid(row=6, column=0, sticky=W)
        Label(frame, text="Recommended value: 0.8 ").grid(row=6, column=2, sticky=W)
        self.PercentageOverlap_Format_F = tk.Entry(frame)
        self.PercentageOverlap_Format_F.grid(row=6, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W)

        # Bitscore; the default is 50.
        # If you are unsure, don't use this parameter. -k 50 # Row 7
        Label(frame, text="Bitscore: ").grid(row=7, column=0, sticky=W)
        Label(frame, text="Recommended value: 50 ").grid(row=7, column=2, sticky=W)
        self.Bitscore_F = tk.Entry(frame)
        self.Bitscore_F.grid(row=7, column=1, columnspan=1, ipadx=100, padx=5, pady=5, sticky=W)

        # Row 8: notes
        Label(frame, text="").grid(row=8, column=0, sticky=W)
        Label(frame, text="* The BLASTn output format: ").grid(row=9, column=0, sticky=W)
        Label(frame, text=" TABULAR OUTPUT FORMAT: 6").grid(row=10, column=0, sticky=W)
        Label(frame, text=" COLUMN HEADERS:").grid(row=11, column=0, sticky=W)
        Label(frame, text="qseqid sacc stitle qseq sseq ").grid(row=12, column=0, sticky=W)
        Label(frame, text="nident mismatch pident length ").grid(row=13, column=0, sticky=W)
        Label(frame, text=" evalue bitscore qstart qend sstart send").grid(row=14, column=0, sticky=W)
        Label(frame, text=" gapopen gaps qlen slen").grid(row=15, column=0, sticky=W)

        # Button Clear
        tk.Button(frame, text="Clear all",
                  command=lambda: [self.Button_clear_all_Filtering()]).grid(row=17, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Run
        tk.Button(frame, text="Run",
                  command=self.Button_run_F).grid(row=18, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)
        # Button Close
        tk.Button(frame, text="Close",
                  command=self.close_window).grid(row=19, column=6, columnspan=1, padx=5, pady=5, sticky=W + E)

    def Button_run_F(self):
        Text_from_Path_to_Query_F = self.SelectPathToQuery_in_F.get()
        TextFrom_Path_Output_F = self.OutputPathFor_F.get()
        TextFrom_OutputFile_name_F = self.OutputFileName_F.get()
        # TextFromPercentageID_F = self.PercentageIdentity_F.get()
        TextFromPercentageOverlap_F = self.PercentageOverlap_Format_F.get()
        TextFromBitscore_F = self.Bitscore_F.get()
        if (len(Text_from_Path_to_Query_F) != 0 and len(TextFrom_Path_Output_F) != 0 and
                len(TextFrom_OutputFile_name_F) != 0 and
                len(TextFromPercentageOverlap_F) != 0 and len(TextFromBitscore_F) != 0):
            # Here
            Text_from_Path_to_Query_F_MOD = Text_from_Path_to_Query_F  # + doubleBackSlash
            Script55.a_BLAST_input_path_and_file_ = Text_from_Path_to_Query_F_MOD
            TextFrom_Path_Output_F_MOD = TextFrom_Path_Output_F + doubleBackSlash
            Script55.b_Output_Path_ = TextFrom_Path_Output_F_MOD
            Script55.c_Output_file_name = TextFrom_OutputFile_name_F
            # Script55.f_Percentage_identity = TextFromPercentageID_F
            Script55.d_Percentage_overlap = TextFromPercentageOverlap_F
            Script55.e_bitscore = TextFromBitscore_F
            print(Script55.a_BLAST_input_path_and_file_)
            print(Script55.b_Output_Path_)
            print(Script55.c_Output_file_name)
            # print(Script55.f_Percentage_identity)
            print(Script55.d_Percentage_overlap)
            print(Script55.e_bitscore)
            # Output messages
            self.message = Label(text="Running", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            Script55.main(self)
            self.message1 = Label(text="Completed!", fg='red').grid(row=19, column=2, columnspan=2, sticky=W + E, padx=5, pady=5)
            messagebox.showinfo('Information', "Completed!")
        else:
            messagebox.showerror("Error", "All required parameters must be filled")
            self.wind.lift()

    def Select_button_for_Select_Query_in_F(self):
        Input_file_query_F = filedialog.askopenfilename(initialdir='/', title="Select file",
                                                        filetypes=(("Text file", "*.txt"), ("all files", "*.*")))
        print(Input_file_query_F)
        self.SelectPathToQuery_in_F.delete(0, END)
        self.SelectPathToQuery_in_F.insert(0, Input_file_query_F)
        self.wind.lift()
        return

    def Button_select_directory_output_path_F(self):
        Output_path_in_F = filedialog.askdirectory(initialdir='.')
        print(Output_path_in_F)
        self.OutputPathFor_F.delete(0, END)
        self.OutputPathFor_F.insert(0, Output_path_in_F)
        self.wind.lift()
        return

    def Button_clear_all_Filtering(self):
        self.SelectPathToQuery_in_F.delete(0, END)
        self.OutputPathFor_F.delete(0, END)
        self.OutputFileName_F.delete(0, END)
        self.PercentageOverlap_Format_F.delete(0, END)
        self.Bitscore_F.delete(0, END)

    def close_window(self):
        self.wind.destroy()


# Help or instructions
class Win6(Win1):

    def __init__(self, window):
        # Initializations
        self.window = window
        self.wind = window
        self.wind.title("Instructions")
        self.wind.wm_iconbitmap('Currito.ico')

        # Creating a Frame Container
        frame = LabelFrame(self.wind, text="User guide")
        frame.grid(row=0, column=0, columnspan=3, padx=20, pady=20)

        Label(frame, text="").grid(row=1, column=0)
        Label(frame, text="Installation requirements:").grid(row=2, column=0, sticky=W)
        # Label(frame, text=" *Python 3 or above (https://www.python.org/downloads/)").grid(row=3, column=0, sticky=W)
        # Label(frame, text=" *Python module BioPython (https://biopython.org/wiki/Download) ").grid(row=4, column=0, sticky=W)
        Label(frame, text=" *BLAST+ greater than v2.6 or the latest version "
              "(ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/)").grid(row=5, column=0, sticky=W)
        Label(frame, text="").grid(row=6, column=0, sticky=W)
        Label(frame, text="Recommendations:").grid(row=8, column=0, sticky=W)
        # Label(frame, text=" *Add Python to the environment path to be able to run this script.").grid(row=9, column=0, sticky=W)
        # Label(frame, text=" -To add python to the path in Windows, you can do it by modifying it in: ").grid(row=10, column=0, sticky=W)
        # Label(frame, text=" Control Panel > System and Security > System > Advanced System Settings > Environment Variables > System Variables > Path").grid(row=11, column=0, sticky=W)
        Label(frame, text=" *Do not install BLAST in the 'Program Files' directory. The space between words will make this script crash.").grid(row=12, column=0, sticky=W)
        Label(frame, text=" *Do not save any document using names with spaces between words, use underscores. Example: My_file.").grid(row=13, column=0, sticky=W)
        Label(frame, text=" *The NCBI BLAST also takes .fna files, in addition to .fasta files.").grid(row=14, column=0, sticky=W)
        Label(frame, text=" ").grid(row=15, column=0, sticky=W)
        Label(frame, text="").grid(row=16, column=0, sticky=W)
        Label(frame, text="Questions or comments: berenicetalamantes@yahoo.fr").grid(row=20, column=0, sticky=W)
        Label(frame, text="Developed by : Berenice Talamantes-Becerra, Jason Carling, Arthur Georges").grid(row=21, column=0, sticky=W)

        # Button Close
        tk.Button(frame, text="Close",
                  command=self.close_window).grid(row=23, column=2, columnspan=1, padx=5, pady=5, sticky=W + E)

    def close_window(self):
        self.wind.destroy()


Script_1_Instance = Script1()

if __name__ == "__main__":
    window = Tk()
    application = Win1(window)
    window.mainloop()
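
# --- illustrative only (added): the shapes of the shell commands this GUI
# assembles in Script3 and Script4 (all paths are placeholders, not taken
# from the original script).
#   makeblastdb -in Genome.fasta -dbtype nucl
#   blastn -db Genome.fasta -query queries.fasta -out out_BLAST.txt
#       -word_size 20 -perc_identity 70 -num_threads 4
#       -outfmt "6 qseqid sacc stitle qseq sseq nident mismatch pident length
#                evalue bitscore qstart qend sstart send gapopen gaps qlen slen"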
import torch import torch.nn as nn import torch.optim as optim import numpy as np class DeepModel(nn.Module): def __init__( self, num_states, num_actions, ): super(DeepModel, self).__init__() self.conv1 = nn.Conv2d(1,20,(1,1)) self.conv2 = nn.Conv2d(1,20,(1,7)) self.conv3 = nn.Conv2d(1,20,(6,1)) self.relu = nn.ReLU(inplace=True) self.fc = nn.Linear(20*55,128) self.output_layer = nn.Linear(128, num_actions) def forward(self, x): x = x.view(-1,1,6,7) self.input_x = x x1 = self.relu(self.conv1(x)) x2 = self.relu(self.conv2(x)) x3 = self.relu(self.conv3(x)) x1 = x1.view(-1,20,42) x2 = x2.view(-1,20,6) x3 = x3.view(-1,20,7) x_cat = torch.cat((x1,x2,x3),2) x = x_cat.view(-1,20*55) x = self.relu(self.fc(x)) x = self.output_layer(x) return x class DQN: def __init__( self, num_states=0, num_actions=7, gamma=0, max_experiences=0, min_experiences=0, batch_size=0, lr=0, ): self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu' )) self.num_actions = num_actions self.batch_size = batch_size self.gamma = gamma self.model = DeepModel(num_states, num_actions).to(self.device) print(self.model) # self.model.conv1.register_backward_hook(self.backward_hook) self.optimizer = optim.Adam(self.model.parameters(), lr=lr) self.criterion = nn.MSELoss().to(self.device) self.experience = { 's': [], 'a': [], 'r': [], 's2': [], 'done': [], } self.max_experiences = max_experiences self.min_experiences = min_experiences def predict(self, inputs): return self.model(torch.from_numpy(inputs).float().to(self.device)) def train(self, TargetNet): if len(self.experience['s']) < self.min_experiences: # only start training process if enough experiences in buffer return 0 # randomly select n experiences in buffer to form batch ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size) states = np.asarray([self.preprocess(self.experience['s'][i]) for i in ids]) actions = np.asarray([self.experience['a'][i] for i in ids]) rewards = np.asarray([self.experience['r'][i] for i in ids]) # prepare labels states_next = np.asarray([self.preprocess(self.experience['s2' ][i]) for i in ids]) dones = np.asarray([self.experience['done'][i] for i in ids]) value_next = \ np.max(TargetNet.predict(states_next).detach().cpu().numpy(), axis=1) actual_values = np.where(dones, rewards, rewards + self.gamma * value_next) actions = np.expand_dims(actions, axis=1) actions_one_hot = torch.FloatTensor(self.batch_size, self.num_actions).zero_() actions_one_hot = actions_one_hot.scatter_(1, torch.LongTensor(actions), 1).to(self.device) selected_action_values = torch.sum(self.predict(states) * actions_one_hot, dim=1).to(self.device) actual_values = torch.FloatTensor(actual_values).to(self.device) self.optimizer.zero_grad() loss = self.criterion(selected_action_values, actual_values) loss.backward() self.optimizer.step() def get_action(self, state, epsilon): # to get an action by using epsilon-greedy if np.random.random() < epsilon: return int(np.random.choice([c for c in range(self.num_actions) if state['board'][c] == 0])) else: prediction = \ self.predict(np.atleast_2d(self.preprocess(state)))[0].detach().cpu().numpy() for i in range(self.num_actions): if state['board'][i] != 0: prediction[i] = -1e7 return int(np.argmax(prediction)) def add_experience(self, exp): if len(self.experience['s']) >= self.max_experiences: for key in self.experience.keys(): self.experience[key].pop(0) for (key, value) in exp.items(): self.experience[key].append(value) def copy_weights(self, TrainNet): 
        self.model.load_state_dict(TrainNet.model.state_dict())

    def save_weights(self, path):
        torch.save(self.model.state_dict(), path)

    def load_weights(self, path):
        self.model.load_state_dict(torch.load(path, map_location=self.device))

    def preprocess(self, state):
        # each state consists of an overview of the board and the mark in the observations
        # results = (state['board'])[:]
        # results.append(state.mark)
        # return results
        # NOTE: state['board'] is a plain list, so convert it to a NumPy array;
        # on a list, the boolean-mask assignments below would silently index
        # element 0 instead of remapping the opponent's pieces.
        board = np.array(state['board'])
        if state.mark == 1:
            board[board == 2] = -1
        else:
            board[board == 1] = -1
            board[board == 2] = 1
        return board

    def backward_hook(self, module, grad_in, grad_out):
        print(grad_out[0].shape)


model = DQN(num_actions=7)
model.load_weights('weights-deepqconv.pth')


def my_agent(observation, configuration):
    return model.get_action(observation, 0.0)
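# A minimal sketch of how the DQN/DeepModel classes above are typically driven
# during training. The environment interface (kaggle_environments' ConnectX)
# and all hyperparameters below are assumptions for illustration, not taken
# from the code above.

def train_sketch(episodes=1000, copy_every=25, epsilon=0.5, decay=0.999):
    TrainNet = DQN(num_actions=7, gamma=0.99, max_experiences=10000,
                   min_experiences=100, batch_size=32, lr=1e-4)
    TargetNet = DQN(num_actions=7, gamma=0.99, max_experiences=10000,
                    min_experiences=100, batch_size=32, lr=1e-4)
    from kaggle_environments import make  # assumed dependency
    env = make('connectx', debug=False)
    trainer = env.train([None, 'random'])  # learn against the built-in random agent
    for episode in range(episodes):
        state = trainer.reset()
        done = False
        while not done:
            action = TrainNet.get_action(state, epsilon)
            next_state, reward, done, _ = trainer.step(action)
            TrainNet.add_experience({'s': state, 'a': action,
                                     'r': reward if reward is not None else -1,
                                     's2': next_state, 'done': done})
            TrainNet.train(TargetNet)  # one SGD step on a sampled batch
            state = next_state
        epsilon = max(0.01, epsilon * decay)  # anneal exploration
        if episode % copy_every == 0:
            TargetNet.copy_weights(TrainNet)  # sync the frozen target network
    TrainNet.save_weights('weights-deepqconv.pth')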
import logging
import traceback

lgr = logging.getLogger('datalad.revolution.create')

_tb = [t[2] for t in traceback.extract_stack()]
if '_generate_extension_api' not in _tb:  # pragma: no cover
    # Logger.warn is a deprecated alias; use warning()
    lgr.warning(
        "The module 'datalad_revolution.revcreate' is deprecated. "
        'The `RevCreate` class can be imported with: '
        '`from datalad.core.local.create import Create as RevCreate`')

from datalad.interface.base import build_doc
from datalad.interface.utils import eval_results
from .dataset import rev_datasetmethod
from datalad.core.local.create import Create


@build_doc
class RevCreate(Create):

    @staticmethod
    @rev_datasetmethod(name='rev_create')
    @eval_results
    def __call__(path=None,
                 initopts=None,
                 force=False,
                 description=None,
                 dataset=None,
                 no_annex=False,
                 fake_dates=False,
                 cfg_proc=None):
        for r in Create.__call__(
                path=path,
                initopts=initopts,
                force=force,
                description=description,
                dataset=dataset,
                no_annex=no_annex,
                fake_dates=fake_dates,
                cfg_proc=cfg_proc,
                result_renderer=None,
                result_xfm=None,
                on_failure="ignore",
                return_type='generator'):
            yield r
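# A short usage note on the shim above: both of the imports below yield the
# create command, but only the first avoids the deprecation warning (a sketch,
# assuming a datalad version that ships datalad.core.local.create):
#
#   from datalad.core.local.create import Create as RevCreate   # preferred
#   from datalad_revolution.revcreate import RevCreate          # deprecated path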
# coding: utf-8 # Written by Lucas W. for Python 3.7.0 """Initialisation""" from time import time from threading import Timer from copy import deepcopy from random import choice name = "Terminal Chess (by Lucas W. 2019)" board_template = [ #standard setup ['wR','wN','wB','wQ','wK','wB','wN','wR'], ['wP','wP','wP','wP','wP','wP','wP','wP'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['bP','bP','bP','bP','bP','bP','bP','bP'], ['bR','bN','bB','bQ','bK','bB','bN','bR'], ] board_temp = [ #for temporary setups ['xX','xX','xX','xX','xX','xX','xX','bK'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','wK','wQ','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ['xX','xX','xX','xX','xX','xX','xX','xX'], ] board = deepcopy(board_template) languages = { #UI language 'english':{ 'welcome':" Welcome <user>!", 'main':"\n What do you want to do? play/settings/quit > ", 'settings':""" Type one of the following options to modify: - 'time': change time limit (+increment) per player - 'size': choose size of pieces and board - 'color': choose an appropriate color palette - 'invert': invert the palettes under 'color' - 'language': change interface language - 'fischerandom': play with a randomised home rank!""", 'play':""" How to play: - Type '<start> <end>' to make a move (e.g. 'e2 e4') - Type 'resign' to resign - Type 'draw' to propose a draw - The game can be paused using 'pause' Enjoy playing!""", 'w':'White', 'b':'Black', 'turn':"{}'s turn", 'check':"{} is in check.", 'checkmate':"{}'s king is checkmated. {} wins the game!", 'stalemate':"{} is in stalemate.", 'draw_query':' {} proposes a draw. Does {} agree? (yes/no) > ', 'resign':"{} resigns. {} wins the game.", 'pause':"The game has been paused.", 'draw':"The game is drawn.", 'draw_material':"Neither player has sufficient material to mate the other.", 'draw_threefold':"Threefold repetition has occured.", 'draw_50moverule_piece':"No piece has been taken for 50 moves.", 'draw_50moverule_pawn':"No pawn has been moved for 50 moves.", 'color_query':" The LEFTMOST icon should always appear as a BLACK KING on a DARK SQUARE\n (Should it instead appear as a white king on a light square, invert the colors using -->'invert')\n Insert name of desired color palette > ", 'color_success':" Color palette was changed.", 'color_fail':" Such a palette doesn't exist.", 'sizes':" The following sizes are available (square rasterized fonts are recommended for optimal display):", 'size_success':" Successfully changed size.\n", 'size_fail':" That size wasn't found!", 'flip_on':" The board will now flip after each player's turn.", 'flip_off':" The board stops flipping.", 'inverted':" The colors have been inverted, see changes under -->'color'", 'language':" Choose one of the following languages:", 'language_fail':" That language doesn't exist.", 'language_success':" Language successfully changed.", 'time_query':" How much time (s) should one player have? (increment as a second variable)\n (0 for infinite, current time/increment: {}s/{}s) > ", 'time_success':" Time per player was set to {}s (increment: {}s).", 'time_fail':" Times were not updated.", 'time_up':"{} ran out of time. 
{} wins the game.", 'time_left':"{} has {:.1f} seconds left on his clock.", 'make_move':" Make a move ¦ ", 'invalid_move':"Invalid Move!", 'fischerandom':" Home rank has been randomised.", 'conversion':" To what piece do you want to promote your pawn? (Queen/Rook/Bishop/Knight) >" }, 'deutsch':{ 'welcome':" Willkommen!", 'main':"\n Was wollen Sie machen? Spielen/Einstellungen/Schliessen > ", 'settings':""" Geben Sie eines der folgenden ein um es zu bearbeiten: - 'Zeit': Zeitlimit pro Spieler einstellen - 'Grösse': Wählen Sie, wie gross die Figuren und das Brett sein sollen - 'Farbe': Wählen Sie die passenden Farben für das Schachbrett - 'umkehren': Kehren Sie die Farben um, sollten die Paletten unter 'Farbe' nicht stimmen - 'Sprache': Sprache ändern - '960': Spielen Sie mit einer von 960 zufälligen Anfangsreihen!""", 'play':""" Wie man spielt: - Schreiben Sie '<start> <end>' um zu ziehen (z.B. 'e2 e4') - 'aufgeben' um aufzugeben - 'remis' um ihrem Gegner ein Remis anzubieten - Mittels 'pause' kann das spiel pausiert werden Viel Spass beim Spielen!""", 'w':'Weiss', 'b':'Schwarz', 'turn':"{} ist am Zug.", 'check':"{} steht im Schach.", 'checkmate':"Der König von {} steht schachmatt. {} gewinnt die Partie!", 'stalemate':"{} steht im Patt.", 'draw_query':' {} schlägt ein Remis vor. Akzeptiert {}? (ja/nein) > ', 'resign':"{} gibt auf. {} gewinnt die Partie.", 'pause':"Die Partie wurde pausiert.", 'draw':"Die Partie endet in einem Remis.", 'draw_material':"Keiner der beiden Spieler hat genug Material, um zu gewinnen.", 'draw_threefold':"Dieselbe Position hat sich dreimal wiederholt.", 'draw_50moverule_piece':"Es wurde keine Figur während 50 Zügen geschlagen.", 'draw_50moverule_pawn':"Es wurde kein Bauer während 50 Zügen bewegt.", 'color_query':" Die äusserste linke Ikone sollte einen SCHWARZEN KÖNIG auf einem DUNKELN FELD darstellen.\n (Sollte stattdessen ein weisser König auf hellem Feld erscheinen, kehren sie die Farben um mittels -->'umkehren') \n Geben Sie den Namen der gewünschten Palette ein > ", 'color_success':" Farbeinstellungen wurden angepasst.", 'color_fail':" Eine solche Farbpalette existiert nicht.", 'sizes':" Wählen Sie eine der folgenden Grössen (quadratische Rasterschriftarten sind für optimale Darstellung empfohlen):", 'size_success':" Die Grösse wurde erfolgreich aktualisiert\n", 'size_fail':" Die eingegebene Grösse existiert nicht!", 'flip_on':" Das Brett dreht sich nach jedem Zug dem entsprechenden Spieler.", 'flip_off':" Das Brett dreht sich nicht mehr.", 'inverted':" Die Farben wurden umgekehrt, siehe -->'Farbe'", 'language':" Die folgenden Sprachen stehen zur Verfügung:", 'language_fail':" Die gewünschte Sprache wurde nicht gefunden.", 'language_success':" Sprache erfolgreich geändert.", 'time_query':" Wieviel Zeit (s) sollte jeder Spieler haben? (Inkrement als zweite Variable)\n (0 für Unendlich, bisherige Zeit/Inkrement: {}s/{}s) > ", 'time_success':" Zeitlimit wurde auf {}s pro Spieler gesetzt. (Inkrement: {}s)", 'time_fail':" Zeitlimit wurde nicht geändert.", 'time_up':"{} hat das Zeitlimit erreicht. {} gewinnt die Partie.", 'time_left':"{} hat {:.1f} Sekunden übrig.", 'make_move':" Machen Sie einen Zug ¦ ", 'invalid_move':"Ungültiger Zug!", 'fischerandom':" Anfangsreihen wurden gemischt.", 'conversion':" In welche Figur wollen Sie Ihren Bauern umwandeln? 
(Dame/Turm/Läufer/Springer) >" }, } lang = languages['english'] #language used styles = { #board styles/sizes '2x2':{ 'K':( '----', '-XX-', '-XX-', '----' ), 'Q':( '----', '-X--', '-XX-', '----' ), 'R':( '----', '-X--', '-X--', '----' ), 'B':( '----', '-X--', '--X-', '----' ), 'N':( '----', '-XX-', '--X-', '----' ), 'P':( '----', '----', '-X--', '----' ), 'X':( '----', '----', '----', '----' ), }, '3x3':{ 'K':( '-----', '-XXX-', '-XXX-', '-XXX-', '-----' ), 'Q':( '-----', '--X--', '-XXX-', '-XXX-', '-----' ), 'R':( '-----', '-X-X-', '-XXX-', '-XXX-', '-----' ), 'B':( '-----', '-X-X-', '--X--', '-X-X-', '-----' ), 'N':( '-----', '-XX--', '-XXX-', '--XX-', '-----' ), 'P':( '-----', '-----', '-XX--', '-XX--', '-----' ), 'X':( '-----', '-----', '-----', '-----', '-----' ) }, '6x3':{ 'K':( '----------', '--XX--XX--', '--X-XX-X--', '---XXXX---', '----------' ), 'Q':( '----------', '--X-XX-X--', '--X-XX-X--', '---XXXX---', '----------' ), 'R':( '----------', '--X-XX-X--', '---XXXX---', '--XXXXXX--', '----------' ), 'B':( '----------', '---XXXX---', '----XX----', '-XXX--XXX-', '----------' ), 'N':( '----------', '--XXXX-X--', '-----XX---', '--XXXXXX--', '----------' ), 'P':( '----------', '----XX----', '----XX----', '---XXXX---', '----------' ), 'X':( '----------', '----------', '----------', '----------', '----------' ) }, '5x5':{ 'K':( '-------', '---X---', '-XX-XX-', '-X-X-X-', '-X-X-X-', '--XXX--', '-------', ), 'Q':( '-------', '---X---', '-X-X-X-', '-X-X-X-', '-XXXXX-', '--XXX--', '-------', ), 'R':( '-------', '--X-X--', '--XXX--', '--XXX--', '--XXX--', '--XXX--', '-------', ), 'B':( '-------', '---X---', '--X-X--', '--XXX--', '---X---', '-XX-XX-', '-------', ), 'N':( '-------', '--XX-X-', '-XXXX--', '---XX--', '--XX---', '-XXXX--', '-------', ), 'P':( '-------', '-------', '---X---', '--XXX--', '---X---', '--XXX--', '-------', ), 'X':( '-------', '-------', '-------', '-------', '-------', '-------', '-------', ) }, '10x5':{ 'K':( '--------------', '------XX------', '---XXX--XXX---', '--XX--XX--XX--', '---XX-XX-XX---', '----XXXXXX----', '--------------', ), 'Q':( '--------------', '-----X--X-----', '--X--X--X--X--', '--XX-XXXX-XX--', '---XX-XX-XX---', '----XXXXXX----', '--------------', ), 'R':( '--------------', '---X--XX--X---', '---XXXXXXXX---', '----XXXXXX----', '----XXXXXX----', '---XXXXXXXX---', '--------------', ), 'B':( '--------------', '------XX------', '-----XX-X-----', '----XX-XXX----', '------XX------', '--XXXX--XXXX--', '--------------', ), 'N':( '--------------', '-----XXXXX-X--', '---XXXXXXXX---', '-------XXX----', '-----XXXX-----', '---XXXXXXXX---', '--------------', ), 'P':( '--------------', '--------------', '------XX------', '-----XXXX-----', '------XX------', '----XXXXXX----', '--------------', ), 'X':( '--------------', '--------------', '--------------', '--------------', '--------------', '--------------', '--------------', ) }, '7x7':{ 'K':( '---------', '----X----', '--X-X-X--', '-X-XXX-X-', '-X--X--X-', '--XXXXX--', '---XXX---', '--XXXXX--', '---------', ), 'Q':( '---------', '---X-X---', '-X-X-X-X-', '-X-XXX-X-', '--XX-XX--', '--XXXXX--', '---XXX---', '--XXXXX--', '---------', ), 'R':( '---------', '--X-X-X--', '--XXXXX--', '---XXX---', '---XXX---', '---XXX---', '--XXXXX--', '--XXXXX--', '---------', ), 'B':( '---------', '---XXX---', '---X-X---', '--X---X--', '--XX-XX--', '----X----', '--XX-XX--', '-XX---XX-', '---------', ), 'N':( '---------', '---XX-X--', '--XXXX---', '-XXXXXX--', '--X--XX--', '----XXX--', '---XXX---', '--XXXXX--', '---------', ), 'P':( 
'---------', '---------', '----X----', '---XXX---', '---XXX---', '----X----', '---XXX---', '--XXXXX--', '---------', ), 'X':( '---------', '---------', '---------', '---------', '---------', '---------', '---------', '---------', '---------', ) }, '14x7':{ 'K':( '------------------', '--------XX--------', '---XXX--XX--XXX---', '--XX--XXXXXX--XX--', '--XX----XX----XX--', '---XXXXXXXXXXXX---', '-----XXXXXXXX-----', '---XXXXXXXXXXXX---', '------------------', ), 'Q':( '------------------', '------X----X------', '-XX---XX--XX---XX-', '--XX--XXXXXX--XX--', '---XXXXX--XXXXX---', '----XXXXXXXXXX----', '-----XXXXXXXX-----', '---XXXXXXXXXXXX---', '------------------', ), 'R':( '------------------', '----XX--XX--XX----', '----XXXXXXXXXX----', '-----XXXXXXXX-----', '------XXXXXX------', '-----XXXXXXXX-----', '----XXXXXXXXXX----', '----XXXXXXXXXX----', '------------------', ), 'B':( '------------------', '-------XXXX-------', '-----XXX--XXX-----', '----XX------XX----', '-----XXX--XXX-----', '-------XXXX-------', '-----XXX--XXX-----', '--XXXX------XXXX--', '------------------', ), 'N':( '------------------', '-------XXXX-XX----', '----XXXXX-XXX-----', '--XXXXXXXXXXXX----', '---XXX---XXXXX----', '-------XXXXXX-----', '-----XXXXXXX------', '----XXXXXXXXXX----', '------------------', ), 'P':( '------------------', '------------------', '--------XX--------', '-------XXXX-------', '------XXXXXX------', '-------XXXX-------', '------XXXXXX------', '----XXXXXXXXXX----', '------------------', ), 'X':( '------------------', '------------------', '------------------', '------------------', '------------------', '------------------', '------------------', '------------------', '------------------', ) }, '9x9':{ 'K':( '-----------', '-----X-----', '---X-X-X---', '--X-XXX-X--', '--X--X--X--', '--X--X--X--', '---XXXXX---', '----XXX----', '---XXXXX---', '--XXXXXXX--', '-----------', ), 'Q':( '-----------', '----X-X----', '--X-X-X-X--', '--X-X-X-X--', '--X-XXX-X--', '--XXX-XXX--', '--X-XXX-X--', '---XXXXX---', '---XXXXX---', '--XXXXXXX--', '-----------', ), 'R':( '-----------', '-----------', '---X-X-X---', '---XXXXX---', '---XXXXX---', '----XXX----', '----XXX----', '---XXXXX---', '---XXXXX---', '--XXXXXXX--', '-----------', ), 'B':( '-----------', '-----X-----', '----XXX----', '---XX-XX---', '---X---X---', '---XX-XX---', '----XXX----', '----XXX----', '--XX-X-XX--', '-XX-----XX-', '-----------', ), 'N':( '-----------', '-----XX-X--', '----X-XX---', '---XXXXXX--', '--XXX-XXX--', '---X--XXX--', '-----XXX---', '----XXX----', '---XXXXX---', '---XXXXX---', '-----------', ), 'P':( '-----------', '-----------', '-----------', '-----X-----', '----XXX----', '----XXX----', '-----X-----', '----XXX----', '----XXX----', '---XXXXX---', '-----------', ), 'X':( '-----------', '-----------', '-----------', '-----------', '-----------', '-----------', '-----------', '-----------', '-----------', '-----------', '-----------', ), }, } style = styles['5x5'] #style used width = 8*len(style['K'][0])+2 #board width col_palettes = { 0:{ #for dark terminals with light glyphs 'darker':{'w':'▓', 'b':' ', 'd':'░', 'l':'▒', 'x':' '}, 'dark':{'w':'█', 'b':' ', 'd':'░', 'l':'▒', 'x':' '}, 'contrast':{'w':'█', 'b':' ', 'd':'░', 'l':'▓', 'x':' '}, 'light':{'w':'█', 'b':' ', 'd':'▒', 'l':'▓', 'x':' '}, 'lighter':{'w':'█', 'b':'░', 'd':'▒', 'l':'▓', 'x':' '}, }, 1:{ #for light terminals with dark glyphs 'lighter':{'w':' ', 'b':'▓', 'd':'▒', 'l':'░', 'x':' '}, 'light':{'w':' ', 'b':'█', 'd':'▒', 'l':'░', 'x':' '}, 'contrast':{'w':' ', 'b':'█', 'd':'▓', 
'l':'░', 'x':' '}, 'dark':{'w':' ', 'b':'█', 'd':'▓', 'l':'▒', 'x':' '}, 'darker':{'w':'░', 'b':'█', 'd':'▓', 'l':'▒', 'x':' '}, } } palette_type = 0 col = col_palettes[palette_type]['lighter'] # colors used flip = {0:1,1:-1} #flip board after each turn history = [] #records moves board_history = [] #records positions turn = 0 #turn counter time_s = 0 #time given to each player times = {'w':time_s, 'b':time_s} #individual times increment = 0 #increment time time_up = False piece_taken = 0 #how many moves since piece taken pawn_moved = 0 #how many moves since pawn taken s_rank, s_file, e_rank, e_file = 0, 0, 0, 0 #start rank, start file; end rank, end file a_to_n = dict(zip('abcdefgh', range(8))) #convert file letters to numbers n_to_a = dict(zip(range(8), 'abcdefgh')) #convert numbers to file letters """Functions""" def sign(x): #returns -1 or 1 for negative or positive numbers and zero try: return int(x/abs(x)) except: return 1 def display_board_single(board): #smallest display of chess board in terminal using provided Unicode characters/letters Unicode = {'w':{'K':'♔', 'Q':'♕', 'R':'♖', 'B':'♗', 'N':'♘', 'P':'♙'}, 'b':{'K':'♚', 'Q':'♛', 'R':'♜', 'B':'♝', 'N':'♞', 'P':'♟'}, 'x':{'X':'-'}} Ascii = {'w':{'K':'k', 'Q':'q', 'R':'r', 'B':'b', 'N':'n', 'P':'p'}, 'b':{'K':'K', 'Q':'Q', 'R':'R', 'B':'B', 'N':'N', 'P':'P'}, 'x':{'X':'-'}} style_used = Ascii for rank_num, rank in enumerate(board[::-flip[turn%2]]): print(str((8-rank_num if flip[turn%2]==1 else rank_num+1)), end=' ') for file_num, square in enumerate(rank[::flip[turn%2]]): print(style_used[square[0]][square[1]].replace('-', {0:col['l'], 1:col['d']}[(rank_num+file_num)%2] ), end='') print() print('\n '+"abcdefgh"[::flip[turn%2]]) def display_board(board): # displays board in terminal print() for rank_num, rank in enumerate(board[::-flip[turn%2]]): #for each rank for row_num, row in enumerate(style['K']): #for each row in a tile (e.g. 5 rows for size 5x5) print(' '+str((8-rank_num if flip[turn%2]==1 else rank_num+1)) if row_num==int(len(style['K'])/2) else ' ', end='') #rank numbers print(''.join([style[square[1]][row_num].replace('X', col[square[0]]).replace('-', {0:col['l'], 1:col['d']}[(rank_num+file_num)%2]) for file_num, square in enumerate(rank[::flip[turn%2]])])) #VERY unpythonic but faster(?) 
code: essentially prints the whole line at once (correct tiles and colors) #for file_num, square in enumerate(rank[::flip[turn%2]]): #old/more readable version of the previous unpythonic line # print(style[square[1]][row_num].replace('X', col[square[0]]).replace('-', {0:col['l'], 1:col['d']}[(rank_num+file_num)%2]), end='') #print() print(' '+"{s2}A{s}B{s}C{s}D{s}E{s}F{s}G{s}H{s2}".format(s=(len(style['K'][0])-1)*' ', s2=int((len(style['K'][0])-1)/2)*' ')[::flip[turn%2]]) def display_any(board, style, col): #display any board/tiles (rectangles) for rank_num, rank in enumerate(board[::-1]): for row_num, row in enumerate(style['K']): print(''.join([style[square[1]][row_num].replace('X', col[square[0]]).replace('-', {1:col['l'], 0:col['d']}[(rank_num+file_num)%2]) for file_num, square in enumerate(rank)]).center(width)) def reset(): #reset game (board, time, other statistics) global board, board_history, history, time_up, times, piece_taken, pawn_moved, turn, time_limit board = deepcopy(board_template) board_history = [] history = [] time_up = False times = {'w':time_s, 'b':time_s} piece_taken = 0 pawn_moved = 0 turn = 0 if time_s: time_limit.cancel() def time_up_toggle(): #change global time_up print("Time is up.") global time_up time_up = True def find_piece(color, board, piece_type, depth=1): #finds a specific piece of a player on the board for rank_num, rank in enumerate(board): for file_num, square in enumerate(rank): if square==color+piece_type: depth -= 1 if not depth: return rank_num, file_num return (-1,-1) def not_attacked(playercol, rank, file, board): #checks, whether a certain square is attacked by a certain player enemy_color = {'w':'b','b':'w'}[playercol] forward = {'w':-1, 'b':1}[enemy_color] for x,y in ((1,0), (-1,0), (0,1), (0,-1)): #queen/rook steps = 1 while -1<rank+steps*x<8 and -1<file+steps*y<8: #on board if board[rank+steps*x][file+steps*y] in [enemy_color+'Q', enemy_color+'R']: #attacked return False if board[rank+steps*x][file+steps*y] != 'xX': #path blocked otherwise break steps += 1 for x,y in ((1,1), (-1,1), (1,-1), (-1,-1)): #queen/bishop steps = 1 while -1<rank+steps*x<8 and -1<file+steps*y<8: if board[rank+steps*x][file+steps*y] in [enemy_color+'Q', enemy_color+'B']: #attacked return False if board[rank+steps*x][file+steps*y] != 'xX': #path blocked otherwise break steps += 1 for x,y in ((1,2), (2,1), (2,-1), (1,-2), (-1,-2), (-2,-1), (-2,1), (-1,2)): #knight if -1<rank+x<8 and -1<file+y<8: if board[rank+x][file+y]==enemy_color+'N': #attacked return False for sidestep in (1,-1): #pawn if -1<rank+forward<8 and -1<file+sidestep<8: if board[rank+forward][file+sidestep]==enemy_color+'P': #attacked return False for x,y in [(x,y) for x in (0,1,-1) for y in (0,1,-1)]: #king if not -1<rank+x<8 or not -1<file+y<8: continue if board[rank+x][file+y]==enemy_color+'K': #attacked return False return True def validate_move(s_rank, s_file, e_rank, e_file, playercol, history=history): #checks, whether a given move is legal if not all([-1<i<8 for i in [s_file, s_rank, e_file, e_rank]]): return 'invalid' #on the board s_piece = board[s_rank][s_file] #piece on the starting square e_piece = board[e_rank][e_file] #piece on the end square rank_diff = e_rank - s_rank file_diff = e_file - s_file forward = {'w':1, 'b':-1}[playercol] own_figure = s_piece[0]==playercol #own figure being moved not_occupied_own = s_piece[0] != e_piece[0] #piece on th end square has a different color (also keeps a piece from staying on same square) move_in_domain = True #assumption path_available = True 
#assumption special_move = '' if s_piece[1]=='R': #Rook /Turm move_in_domain = bool(rank_diff) ^ bool(file_diff) #either move vertically xor horizontally if rank_diff!=0: #moved along a file for steps in range(1, abs(rank_diff)): path_available = board[s_rank+sign(rank_diff)*steps][s_file]=="xX" #False if blocked if not path_available: break elif file_diff!=0: #moved along a rank for steps in range(1, abs(file_diff)): path_available = board[s_rank][s_file+sign(file_diff)*steps]=="xX" #False if blocked if not path_available: break elif s_piece[1]=='N': #Knight /Springer move_in_domain = (abs(rank_diff), abs(file_diff))==(1,2) or (abs(rank_diff), abs(file_diff))==(2,1) #L-shape elif s_piece[1]=='B': #Bishop /Läufer move_in_domain = abs(rank_diff)==abs(file_diff) #on a diagonal for steps in range(1, abs(rank_diff)): path_available = board[s_rank+sign(rank_diff)*steps][s_file+sign(file_diff)*steps]=="xX" #False if blocked if not path_available: break elif s_piece[1]=='Q': #Queen /Dame move_in_domain = bool(rank_diff)^bool(file_diff) or abs(rank_diff)==abs(file_diff) #along rank, file or diagonal (combination of rook and bishop) if bool(rank_diff)^bool(file_diff) and rank_diff!=0: #along a file for steps in range(1, abs(rank_diff)): path_available = board[s_rank+sign(rank_diff)*steps][s_file]=="xX" if not path_available: break elif bool(rank_diff)^bool(file_diff) and file_diff!=0: #along a rank for steps in range(1, abs(file_diff)): path_available = board[s_rank][s_file+sign(file_diff)*steps]=="xX" if not path_available: break elif abs(rank_diff)==abs(file_diff): #on a diagonal for steps in range(1, abs(rank_diff)): path_available = board[s_rank+sign(rank_diff)*steps][s_file+sign(file_diff)*steps]=="xX" if not path_available: break elif s_piece[1]=='K': #King /König move_in_domain_castling = (rank_diff, abs(file_diff))==(0,2) #detects castling if K is moved 2 squares to the side squares_free = board[e_rank][e_file]=='xX' and board[e_rank][s_file+int(file_diff/2)]=='xX' squares_not_attacked = not_attacked(playercol, e_rank, e_file, board) and not_attacked(playercol, e_rank, s_file+int(file_diff/2), board) king_unmoved = False if (s_rank, s_file)=={'w':(0,4), 'b':(7,4)}[playercol]: #K on starting square king_unmoved = True for move in history: if move[0]=={'w':'e1', 'b':'e8'}[playercol]: #king not moved during game king_unmoved = False break rook_unmoved = False if file_diff>0 and board[s_rank][7]==playercol+'R': #kingside castling rook_unmoved = True for move in history: if move[0]=={'w':'h1', 'b':'h8'}[playercol]: #rook never moved rook_unmoved = False break if rook_unmoved: special_move = 'castling_kingside' if file_diff<0 and board[s_rank][0]==playercol+'R': #queenside castling rook_unmoved = True for move in history: if move[0]=={'w':'a1', 'b':'a8'}[playercol]: #rook never moved rook_unmoved = False break if rook_unmoved: special_move = 'castling_queenside' move_in_domain_king1 = abs(file_diff)<=1 and abs(rank_diff)<=1 #normal king move: max. 
one square in every direction if move_in_domain_king1: special_move = '' move_in_domain_king2 = all([move_in_domain_castling, squares_free, squares_not_attacked, king_unmoved, rook_unmoved]) #castling move_in_domain = any([move_in_domain_king1, move_in_domain_king2]) #if any normal move or castling elif s_piece[1]=='P': #Pawn /Bauer move_in_domain_pawn1 = (rank_diff, file_diff) == (forward, 0) #one square forward move_in_domain_pawn2 = (rank_diff, file_diff, s_rank) == (2*forward, 0, {'w':1, 'b':6}[playercol]) #two squares forward move_in_domain_pawn3 = (rank_diff, abs(file_diff)) == (forward, 1) and board[e_rank][e_file]!='xX' #diagonal capture if move_in_domain_pawn1 and e_rank == {'w':7,'b':0}[playercol]: #promotion special_move = 'conversion' elif (rank_diff, abs(file_diff)) == (forward, 1) and history[-1]==(n_to_a[e_file]+str(e_rank+1+forward), n_to_a[e_file]+str(e_rank+1-forward)) and board[e_rank-forward][e_file]=={'w':'bP', 'b':'wP'}[playercol]: #unpythonic en-passant special_move = 'en_passant' move_in_domain = any([move_in_domain_pawn1, move_in_domain_pawn2, move_in_domain_pawn3, special_move]) #if any legal move available if any([move_in_domain_pawn1, move_in_domain_pawn2]): #if forward move path_available = board[e_rank][e_file]=='xX' #path must be free new_board = deepcopy(board) #check for any king checks if move was performed on a separate board new_board[e_rank][e_file] = s_piece new_board[s_rank][s_file] = 'xX' if special_move=='en_passant': new_board[e_rank-forward][e_file] = 'xX' king_rank, king_file = find_piece(playercol, new_board, 'K') #find king on new board not_in_check = not_attacked(playercol, king_rank, king_file, new_board) #check whether king still in check if move was performed if all([own_figure, not_occupied_own, move_in_domain, path_available, not_in_check]): return special_move if special_move else 'valid' else: return 'invalid' def can_make_move(playercol, board): #whether a player can make a move at all forward = {'w':1, 'b':-1}[playercol] for rank_num, rank in enumerate(board): for file_num, square in enumerate(rank): if square[0]!=playercol: continue #checks every player piece if square[1] in 'RQ': #checks if any rook/orthogonal queen moves are possible steps = 1 for x,y in ((0,1), (0,-1), (1,0), (-1,0)): while -1<rank_num+x*steps<8 and -1<file_num+y*steps<8: if validate_move(rank_num, file_num, rank_num+x*steps, file_num+y*steps, playercol)!='invalid': return True steps += 1 if square[1] in 'BQ': #checks if any bishop/diagonal queen moves are possible steps = 1 for x,y in ((1,1), (1,-1), (-1,1), (-1,-1)): while -1<rank_num+x*steps<8 and -1<file_num+y*steps<8: if validate_move(rank_num, file_num, rank_num+x*steps, file_num+y*steps, playercol)!='invalid': return True steps += 1 elif square[1]=='K': #checks if any king moves are possible for x,y in ((0,0), (0,1), (0,-1), (1,0), (1,1), (1,-1), (-1,0), (-1,1), (-1,-1)): if validate_move(rank_num, file_num, rank_num+x, file_num+y, playercol)!='invalid': return True elif square[1]=='N': #checks if any knight moves are possible for x,y in ((1,2), (1,-2), (2,1), (2,-1), (-1,2), (-1,-2), (-2,1), (-2,-1)): if validate_move(rank_num, file_num, rank_num+x, file_num+y, playercol)!='invalid': return True elif square[1]=='P': #checks if any pawn moves are possible for x,y in ((1,0), (forward,1), (forward,-1), (2*forward,0)): if validate_move(rank_num, file_num, rank_num+x, file_num+y, playercol)!='invalid': return True return False """Main Loop""" ui = 'none' #user input print("\n", name.center(width), "\n", 
(len(name)*"-").center(width), "\n") while True: if ui in ['settings', 'e', 'einstellungen']: #settings print(lang['settings']) #any text is accessed through the 'lang' dictionary, return the appropriate text for a given language elif ui in ['color','c','colour','farbe']: for palette in col_palettes[palette_type]: #showcase color palettes print("\n", palette.center(width)) display_any([['bK','bB','wP','wQ']], style, col_palettes[palette_type][palette]) try: col = col_palettes[palette_type][input(lang['color_query']).lower()].copy() print(lang['color_success']) except: print(lang['color_fail']) elif ui in ['invert', 'i', 'umkehren']: palette_type ^= 1 print(lang['inverted']) elif ui in ['size', 's', 'grösse']: #size options try: style = styles[input(f" {lang['sizes']}\n {' / '.join([i for i in styles])} > ").lower()] width = 8*len(style['K'][0])+2 print(lang['size_success']) except: print(lang['size_fail']) elif ui in ['flip', 'f', 'drehen']: #flip toggle if flip[1]==1: flip = {0:1,1:-1} print(lang['flip_on']) else: flip = {0:1,1:1} print(lang['flip_off']) elif ui in ['language', 'l', 'sprache']: #language preferences try: lang = languages[input(f"{lang['language']}\n {' / '.join([i for i in languages])} > ").lower()] print(lang['language_success']) except: print(lang['language_fail']) elif ui in ['time', 't', 'zeit']: #time configuration try: time_s_temp, increment_temp, *rest = input(lang['time_query'].format(time_s, increment)).split()+[0,0] time_s_temp, increment_temp = abs(int(time_s_temp)), abs(int(increment_temp)) #positive integer times time_s, increment = time_s_temp, increment_temp print(lang['time_success'].format(time_s, increment)) times = {'w':time_s, 'b':time_s} except: print(lang['time_fail']) elif ui in ['fischerandom', 'r', '960']: reset() slots = [0,1,2,3,4,5,6,7] bishop1, bishop2 = choice(slots[::2]), choice(slots[::-2]) #choose bishops for one white/black square respectively slots.remove(bishop1) slots.remove(bishop2) queen = choice(slots) #queen position slots.remove(queen) knight1 = choice(slots) #first knight slots.remove(knight1) knight2 = choice(slots) #second knight slots.remove(knight2) rook1, king, rook2 = tuple(slots) #the two rooks and king are assigned the remaining 3 positions (king in the middle) board[0][rook1], board[7][rook1] = 'wR', 'bR' #... change all the pieces accordingly board[0][rook2], board[7][rook2] = 'wR', 'bR' board[0][king], board[7][king] = 'wK', 'bK' board[0][bishop1], board[7][bishop1] = 'wB', 'bB' board[0][bishop2], board[7][bishop2] = 'wB', 'bB' board[0][knight1], board[7][knight1] = 'wN', 'bN' board[0][knight2], board[7][knight2] = 'wN', 'bN' board[0][queen], board[7][queen] = 'wQ', 'bQ' print(lang['fischerandom']) elif ui in ['', 'play', 'p', 'spielen']: #the actual game print(lang['play']) exit_game = '' while True: # loops for each playerturn playercol = {0:'w',1:'b'}[turn%2] #whose turn it is display_board(board) print("\n"+lang['turn'].format(lang[playercol]).center(width)) #print whose turn it is if time_s: #how much time is left, starting the timer print(lang['time_left'].format(lang[playercol], times[playercol]).center(width)) #print how much time is left time_start = time() time_limit = Timer(times[playercol], time_up_toggle) #timer time_limit.start() king_rank, king_file = find_piece(playercol, board, 'K') king_not_in_check = not_attacked(playercol, *find_piece(playercol, board, 'K'), board) #whether the player's king is (not) in check if not can_make_move(playercol, board): #no legal move available... 
if king_not_in_check: #...king not in check -> stalemate print(lang['stalemate'].format(lang[playercol]).center(width)) print(lang['draw'].center(width)) else: #...king in check -> checkmate print(lang['checkmate'].format(lang[playercol],lang[{'w':'b','b':'w'}[playercol]]).center(width)) reset() break if not king_not_in_check: #player is in check print("\n"+lang['check'].format(lang[playercol]).center(width)) #print player is in check while True: #loops until a valid move is entered by the user try: move_start, move_end, move_force, *rest = input(lang['make_move']).lower().split()+[0,0,0] if time_up: #time runs out break if move_start in ['resign', 'r','aufgeben']: #resigning exit_game ='resign' break elif move_start in ['draw', 'd', 'remis'] and input(lang['draw_query'].format(lang[playercol], lang[{'w':'b','b':'w'}[playercol]])).lower() in ['yes', 'ja']: #agreed draw (python cries when seeing such long if clauses) exit_game = 'draw' break elif move_start in ['pause', 'p']: #pausing game exit_game = 'pause' break s_file, s_rank = a_to_n[move_start[0]], int(move_start[1])-1 #if numbers are entered e_file, e_rank = a_to_n[move_end[0]], int(move_end[1])-1 move_type = validate_move(s_rank, s_file, e_rank, e_file, playercol) #valid move (if so, what type) assert move_type!='invalid' or move_force=='force' #move not invalid break except: #print("That isn't a valid move.") wasn't necessary here continue if time_s: #if timed game time_limit.cancel() times[playercol] -= time() - time_start #reduce player time if time_up: #win on time print("\n\n",lang['time_up'].format(lang[playercol], lang[{'w':'b','b':'w'}[playercol]]).center(width)) reset() break if exit_game=='pause': #game paused print(lang['pause'].center(width)) break elif exit_game=='resign': #resignation print(lang['resign'].format(lang[playercol], lang[{'w':'b','b':'w'}[playercol]]).center(width)) reset() break elif exit_game=='draw': #agreed draw print(lang['draw'].center(width)) reset() break if board[e_rank][e_file] != 'xX': #piece taken this round? piece_taken = 0 else: piece_taken += 1 if board[s_rank][s_file][1] == 'P': #pawn moved this round? 
                    pawn_moved = 0
                else:
                    pawn_moved += 1

                if move_type=='valid' or move_force=='force': #normal move found
                    board[s_rank][s_file], board[e_rank][e_file] = 'xX', board[s_rank][s_file]
                elif move_type=='conversion': #pawn promotion
                    conversion_dict = {'queen':'Q','rook':'R','bishop':'B','knight':'N',
                                       'dame':'Q','turm':'R','läufer':'B','springer':'N',}
                    while True: #loops until a valid promotion piece is entered
                        ui = input(lang['conversion']).lower()
                        if ui not in conversion_dict:
                            continue
                        break
                    board[s_rank][s_file] = 'xX'
                    board[e_rank][e_file] = playercol+conversion_dict[ui]
                elif move_type=='en_passant': #en passant
                    board[e_rank-{'w':1, 'b':-1}[playercol]][e_file] = 'xX'
                    board[e_rank][e_file] = board[s_rank][s_file]
                    board[s_rank][s_file] = 'xX'
                elif move_type=='castling_kingside': #kingside castling
                    board[s_rank][s_file] = 'xX'
                    board[{'w':0, 'b':7}[playercol]][6] = playercol+'K'
                    board[e_rank][7] = 'xX'
                    board[{'w':0, 'b':7}[playercol]][5] = playercol+'R'
                elif move_type=='castling_queenside': #queenside castling
                    board[s_rank][s_file] = 'xX'
                    board[{'w':0, 'b':7}[playercol]][2] = playercol+'K'
                    board[e_rank][0] = 'xX'
                    board[{'w':0, 'b':7}[playercol]][3] = playercol+'R'

                history.append((move_start, move_end)) #add move to history
                board_history.append(tuple(tuple(rank) for rank in board)) #add position to board history
                if board_history.count(board_history[-1])>2: #draw by threefold repetition - DISCLAIMER: does NOT account for changed move possibilities, i.e. 'en passant' (more than one turn passed) or 'castling' (king/rook moved)!
                    print(lang['draw_threefold'].center(width))
                    print(lang['draw'].center(width))
                    reset()
                    break
                elif pawn_moved>=50 or piece_taken>=50: #draw by 50-move rule
                    print(lang['draw_50moverule'+('_pawn' if pawn_moved>=50 else '_piece')].center(width))
                    print(lang['draw'].center(width))
                    reset()
                    break

                material = []
                for rank in board:
                    for square in rank:
                        if square[1] not in ['K', 'X']: #counts material (excluding kings)
                            material.append(square)
                if tuple(material) in [(), ('bB',), ('wB',), ('bN',), ('wN',)]: #draw by insufficient material (note the one-element tuples: a bare ('bB') is just the string 'bB' and would never match)
                    print(lang['draw_material'].center(width))
                    print(lang['draw'].center(width))
                    reset()
                    break
                if tuple(material) in [('bB','wB'), ('wB','bB')]: #draw by insufficient material (both players have one bishop)
                    B1_rank, B1_file = find_piece(material[0][0], board, 'B')
                    B2_rank, B2_file = find_piece(material[1][0], board, 'B')
                    if ((B1_rank+B1_file)%2)==((B2_rank+B2_file)%2): #...(those two bishops have to be on squares of the same color)
                        print(lang['draw_material'].center(width))
                        print(lang['draw'].center(width))
                        reset()
                        break

                if time_s: #if timed game
                    times[playercol] += increment #add the increment
                turn += 1

    elif ui in ['quit', 'q', 'exit', 'schliessen']: #close script
        break
    ui = input(lang['main']).lower() #main menu user input

#!! Personal to-do list
#Cosmetic
#    piece/points display
#Practical
#    castling for Chess960 (!)
#Possible future plans?
#    'Undo'
#    a simple chess engine
#    loading notation / positions
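# A quick illustration of the move validator above (a sketch with the global
# board at the standard starting position; coordinates follow the
# board[rank][file] convention used throughout, indices 0-7):
#
#   board = deepcopy(board_template)
#   validate_move(1, 4, 3, 4, 'w')   # e2 -> e4, returns 'valid' (two-square pawn push)
#   validate_move(0, 3, 4, 7, 'w')   # d1 -> h5, returns 'invalid' (queen blocked by the e2 pawn)
#   validate_move(0, 1, 2, 2, 'w')   # b1 -> c3, returns 'valid' (knight jumps over pieces)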
from hubcheck.pageobjects.widgets.item_list_item import ItemListItem
from hubcheck.pageobjects.basepageelement import TextReadOnly, Link


class TagsBrowseResultsRow1(ItemListItem):

    def __init__(self, owner, locatordict={}, row_number=0):
        super(TagsBrowseResultsRow1,self).__init__(owner,locatordict,row_number)

        # load hub's classes
        TagsBrowseResultsRow_Locators = self.load_class('TagsBrowseResultsRow_Locators')

        # update this object's locator
        self.locators.update(TagsBrowseResultsRow_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.name = Link(self,{'base':'name'})
        self.count = TextReadOnly(self,{'base':'count'})

        # update the component's locators with this object's overrides
        self._updateLocators()

    def value(self):
        """return a dictionary with the name and count properties of the tag"""
        return({'name':self.name.text(), 'count':int(self.count.value)})

    def goto_tag(self):
        """click the tag"""
        self.name.click()


class TagsBrowseResultsRow1_Locators_Base_1(object):
    """locators for TagsBrowseResultsRow1 object"""

    locators = {
        'base'  : "css=#taglist tbody tr:nth-of-type({row_num})",
        'name'  : "css=#taglist tbody tr:nth-of-type({row_num}) td:nth-of-type(1) a",
        'count' : "css=#taglist tbody tr:nth-of-type({row_num}) td:nth-of-type(2)",
    }


class TagsBrowseResultsRow2(ItemListItem):
    """In HUBzero version 1.2, the row changed to provide the name and alias of the tag"""

    def __init__(self, owner, locatordict={}, row_number=0):
        super(TagsBrowseResultsRow2,self).__init__(owner,locatordict,row_number)

        # load hub's classes
        TagsBrowseResultsRow_Locators = self.load_class('TagsBrowseResultsRow_Locators')

        # update this object's locator
        self.locators.update(TagsBrowseResultsRow_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.name = Link(self,{'base':'name'})
        self.alias = TextReadOnly(self,{'base':'alias'})

        # update the component's locators with this object's overrides
        self._updateLocators()

    def value(self):
        """return a dictionary with the name and alias properties of the tag"""
        return({'name':self.name.text(), 'alias':self.alias.value})

    def goto_tag(self):
        """click the tag"""
        self.name.click()


class TagsBrowseResultsRow2_Locators_Base_1(object):
    """locators for TagsBrowseResultsRow2 object"""

    locators = {
        'base'  : "css=#taglist tbody tr:nth-of-type({row_num})",
        'name'  : "css=#taglist tbody tr:nth-of-type({row_num}) td:nth-of-type(1) a",
        'alias' : "css=#taglist tbody tr:nth-of-type({row_num}) td:nth-of-type(2)",
    }
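# The `{row_num}` placeholders in the locator dictionaries above are filled in
# per row by the page-object framework; a minimal, framework-independent sketch
# of the substitution, for illustration only:

locator_template = "css=#taglist tbody tr:nth-of-type({row_num}) td:nth-of-type(1) a"
third_row_name_locator = locator_template.format(row_num=3)
# -> "css=#taglist tbody tr:nth-of-type(3) td:nth-of-type(1) a"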
import argparse
import csv
import sqlite3
import sys
import random
import time
import math

#c.execute(UPDATE {} SET member=? WHERE callsign LIKE ?
find_parent_sql = "SELECT * FROM orgs WHERE parentcallsign LIKE ?"
org_insert_sql = "INSERT INTO orgs VALUES(?,?,?)"

def update_org_id(table):
    return "UPDATE {} SET org=?, member=? WHERE id=?".format(table)

def update_org_callsign(table):
    return "UPDATE {} SET org=? WHERE callsign=? or callsign=?".format(table)

def update_org(db, c, parent_callsign, ptable, ctable, id_to_update, status):
    c.execute(find_parent_sql, (parent_callsign+'%',))
    orgs = c.fetchall()
    if id_to_update == 1695:  # leftover debug marker for a specific record
        print 'YES'
    if len(orgs) == 1:
        c.execute(update_org_id(ctable), (orgs[0][0], status, id_to_update,))
        c.execute(update_org_callsign(ptable), (orgs[0][0], parent_callsign.split('-')[0], parent_callsign))
    elif len(orgs) == 0:
        # make new org entry
        c.execute(org_insert_sql, (None, parent_callsign, None))
        # get id
        c.execute(find_parent_sql, (parent_callsign+'%',))
        org = c.fetchone()
        c.execute(update_org_id(ctable), (org[0], status, id_to_update,))
        # also have to set the parent's org
        c.execute(update_org_callsign(ptable), (org[0], parent_callsign.split('-')[0], parent_callsign))
    else:
        # shouldn't happen
        print "ORG TABLE MESSED UP"
    #db.commit()

def set_orgs():
    db = sqlite3.connect('fcc.db')
    c = db.cursor()
    with open('data/orgs.csv') as csvfile:
        list_reader = csv.DictReader(csvfile)
        for line in list_reader:
            splitup = line['associate calletter'].split('-')
            ctable = splitup[1].lower().strip()
            if splitup[0] == 'WBAA':
                print splitup
            ptable = line['parent calletter'].split('-')[1].lower().strip()
            c.execute("SELECT * FROM {} WHERE callsign=?".format(ctable), (splitup[0],))
            output = c.fetchall()
            if len(output) == 1:
                # exactly one match - good
                update_org(db, c, line['parent calletter'], ptable, ctable,
                           output[0][0], line['stationstatus'])
            elif len(output) > 1:
                c.execute("SELECT * FROM {} WHERE callsign=? and service=? and status=?".format(ctable),
                          (splitup[0], ctable.upper(), 'LIC'))
                new_output = c.fetchall()
                # gonna need to update both
                if len(new_output) > 1:
                    '''
                    s = "SELECT * FROM {} WHERE callsign=? and service=? and status=?".format(splitup[1].lower().strip())
                    c.execute(s, (splitup[0],splitup[1].strip(),'LIC'))
                    o = c.fetchall()
                    if len(o) == 0:
                        c.execute('SELECT * FROM {} WHERE callsign=? and service='.format(splitup[1].lower().strip()),
                                  (line['associate calletter'],splitup[1].strip()))
                        if len(c.fetchall()) == 0:
                            print 'bad3333'
                    elif len(o) > 1:
                        print len(o)
                        print splitup
                        print 'uh oh'
                    '''
                    # UPDATING ALL OF THEM
                    for n in xrange(0, len(new_output)):
                        update_org(db, c, line['parent calletter'], ptable, ctable,
                                   new_output[n][0], line['stationstatus'])
                elif len(new_output) == 0:
                    c.execute("SELECT * FROM {} WHERE callsign=? and service=?".format(ctable),
                              (splitup[0], ctable.upper()))
                    print c.fetchall()
                    print splitup
                    print output
                    print 'bad1'
                else:
                    # exactly one match - good
                    update_org(db, c, line['parent calletter'], ptable, ctable,
                               output[0][0], line['stationstatus'])
            else:
                c.execute("SELECT * FROM {} WHERE callsign LIKE ?".format(ctable), (splitup[0]+'%',))
                output = c.fetchall()
                if len(output) == 1:
                    # exactly one match - good
                    update_org(db, c, line['parent calletter'], ptable, ctable,
                               output[0][0], line['stationstatus'])
                elif len(output) > 1:
                    c.execute("SELECT * FROM {} WHERE callsign=? and service=? and status=?".format(ctable),
                              (splitup[0], ctable.upper(), 'LIC'))
                    new_output = c.fetchall()
                    # gonna need to update both
                    if len(new_output) > 1:
                        # UPDATING THE FIRST OF THEM
                        update_org(db, c, line['parent calletter'], ptable, ctable,
                                   new_output[0][0], line['stationstatus'])
                else:
                    print "Not in database:{}".format(splitup[0])
                    print "What do we do here?"
                    print

    # Explicitly check "member-less" parents
    print "FM:"
    c.execute("SELECT * FROM fm WHERE member ISNULL and org NOT NULL and status='LIC'")
    print c.fetchall()
    print "AM:"
    c.execute("SELECT * FROM am WHERE member ISNULL and org NOT NULL and status='LIC'")
    print c.fetchall()
    db.commit()
    db.close()

if __name__ == '__main__':
    set_orgs()
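# A sketch of the minimal `orgs` table this script assumes, inferred from
# find_parent_sql and the three-placeholder INSERT above; the real fcc.db
# schema may differ, so treat the column names here as guesses:

ORGS_DDL = """
CREATE TABLE IF NOT EXISTS orgs (
    id INTEGER PRIMARY KEY,   -- orgs[0][0], used as the org foreign key
    parentcallsign TEXT,      -- matched with LIKE 'CALL%'
    name TEXT                 -- inserted as NULL by org_insert_sql
);
"""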
import logging import time from celery import shared_task from django.db import transaction from pontoon.checks.utils import ( bulk_run_checks, get_translations, ) log = logging.getLogger(__name__) @shared_task(bind=True) def check_translations(self, translations_pks): """ Run checks on translations :arg list[int] translations_pks: list of primary keys for translations that should be processed """ start_time = time.time() with transaction.atomic(): translations = get_translations(pk__in=translations_pks) warnings, errors = bulk_run_checks(translations) log.info("Task[{}]: Processed items: {}, Warnings({}) Errors({}) in {}".format( self.request.id, len(translations), len(warnings), len(errors), time.time() - start_time ))
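# A minimal usage sketch (assuming a configured Celery broker; the primary
# keys below are placeholders). The task is normally enqueued asynchronously
# rather than called directly:
#
#   check_translations.delay([101, 102, 103])   # enqueue for a worker
#   check_translations.apply(args=([101],))     # run eagerly, e.g. in tests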
from datetime import datetime import pytest from sqlalchemy import create_engine from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session from optuna.storages.rdb.models import BaseModel from optuna.storages.rdb.models import StudyModel from optuna.storages.rdb.models import StudySystemAttributeModel from optuna.storages.rdb.models import TrialModel from optuna.storages.rdb.models import TrialSystemAttributeModel from optuna.storages.rdb.models import TrialUserAttributeModel from optuna.storages.rdb.models import VersionInfoModel from optuna.structs import StudyDirection from optuna.structs import TrialState @pytest.fixture def session(): # type: () -> Session engine = create_engine('sqlite:///:memory:') BaseModel.metadata.create_all(engine) return Session(bind=engine) class TestStudySystemAttributeModel(object): @staticmethod def test_find_by_study_and_key(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study') session.add( StudySystemAttributeModel(study_id=study.study_id, key='sample-key', value_json='1')) session.commit() attr = StudySystemAttributeModel.find_by_study_and_key(study, 'sample-key', session) assert attr is not None and '1' == attr.value_json assert StudySystemAttributeModel.find_by_study_and_key(study, 'not-found', session) is None @staticmethod def test_where_study_id(session): # type: (Session) -> None sample_study = StudyModel(study_id=1, study_name='test-study') empty_study = StudyModel(study_id=2, study_name='test-study') session.add( StudySystemAttributeModel( study_id=sample_study.study_id, key='sample-key', value_json='1')) assert 1 == len(StudySystemAttributeModel.where_study_id(sample_study.study_id, session)) assert 0 == len(StudySystemAttributeModel.where_study_id(empty_study.study_id, session)) # Check the case of unknown study_id. 
assert 0 == len(StudySystemAttributeModel.where_study_id(-1, session)) @staticmethod def test_cascade_delete_on_study(session): # type: (Session) -> None study_id = 1 study = StudyModel(study_id=study_id, study_name='test-study', direction=StudyDirection.MINIMIZE) study.system_attributes.append(StudySystemAttributeModel( study_id=study_id, key='sample-key1', value_json='1')) study.system_attributes.append(StudySystemAttributeModel( study_id=study_id, key='sample-key2', value_json='2')) session.add(study) session.commit() assert 2 == len(StudySystemAttributeModel.where_study_id(study_id, session)) session.delete(study) session.commit() assert 0 == len(StudySystemAttributeModel.where_study_id(study_id, session)) class TestTrialModel(object): @staticmethod def test_default_datetime(session): # type: (Session) -> None datetime_1 = datetime.now() session.add(TrialModel(state=TrialState.RUNNING)) session.commit() datetime_2 = datetime.now() trial_model = session.query(TrialModel).first() assert datetime_1 < trial_model.datetime_start < datetime_2 assert trial_model.datetime_complete is None @staticmethod def test_count(session): # type: (Session) -> None study_1 = StudyModel(study_id=1, study_name='test-study-1') study_2 = StudyModel(study_id=2, study_name='test-study-2') session.add(TrialModel(study_id=study_1.study_id, state=TrialState.COMPLETE)) session.add(TrialModel(study_id=study_1.study_id, state=TrialState.RUNNING)) session.add(TrialModel(study_id=study_2.study_id, state=TrialState.RUNNING)) session.commit() assert 3 == TrialModel.count(session) assert 2 == TrialModel.count(session, study=study_1) assert 1 == TrialModel.count(session, state=TrialState.COMPLETE) @staticmethod def test_count_past_trials(session): # type: (Session) -> None study_1 = StudyModel(study_id=1, study_name='test-study-1') study_2 = StudyModel(study_id=2, study_name='test-study-2') trial_1_1 = TrialModel(study_id=study_1.study_id, state=TrialState.COMPLETE) session.add(trial_1_1) session.commit() assert 0 == trial_1_1.count_past_trials(session) trial_1_2 = TrialModel(study_id=study_1.study_id, state=TrialState.RUNNING) session.add(trial_1_2) session.commit() assert 1 == trial_1_2.count_past_trials(session) trial_2_1 = TrialModel(study_id=study_2.study_id, state=TrialState.RUNNING) session.add(trial_2_1) session.commit() assert 0 == trial_2_1.count_past_trials(session) @staticmethod def test_cascade_delete_on_study(session): # type: (Session) -> None study_id = 1 study = StudyModel(study_id=study_id, study_name='test-study', direction=StudyDirection.MINIMIZE) study.trials.append(TrialModel(study_id=study.study_id, state=TrialState.COMPLETE)) study.trials.append(TrialModel(study_id=study.study_id, state=TrialState.RUNNING)) session.add(study) session.commit() assert 2 == len(TrialModel.where_study(study, session)) session.delete(study) session.commit() assert 0 == len(TrialModel.where_study(study, session)) class TestTrialUserAttributeModel(object): @staticmethod def test_find_by_trial_and_key(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study') trial = TrialModel(study_id=study.study_id) session.add( TrialUserAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() attr = TrialUserAttributeModel.find_by_trial_and_key(trial, 'sample-key', session) assert attr is not None assert '1' == attr.value_json assert TrialUserAttributeModel.find_by_trial_and_key(trial, 'not-found', session) is None @staticmethod def test_where_study(session): # type: 
(Session) -> None study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE) session.add(study) session.add(trial) session.add( TrialUserAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() user_attributes = TrialUserAttributeModel.where_study(study, session) assert 1 == len(user_attributes) assert 'sample-key' == user_attributes[0].key assert '1' == user_attributes[0].value_json @staticmethod def test_where_trial(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE) session.add( TrialUserAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() user_attributes = TrialUserAttributeModel.where_trial(trial, session) assert 1 == len(user_attributes) assert 'sample-key' == user_attributes[0].key assert '1' == user_attributes[0].value_json @staticmethod def test_all(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE) session.add( TrialUserAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() user_attributes = TrialUserAttributeModel.all(session) assert 1 == len(user_attributes) assert 'sample-key' == user_attributes[0].key assert '1' == user_attributes[0].value_json @staticmethod def test_cascade_delete_on_trial(session): # type: (Session) -> None trial_id = 1 study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=trial_id, study_id=study.study_id, state=TrialState.COMPLETE) trial.user_attributes.append(TrialUserAttributeModel( trial_id=trial_id, key='sample-key1', value_json='1')) trial.user_attributes.append(TrialUserAttributeModel( trial_id=trial_id, key='sample-key2', value_json='2')) study.trials.append(trial) session.add(study) session.commit() assert 2 == len(TrialUserAttributeModel.where_trial_id(trial_id, session)) session.delete(trial) session.commit() assert 0 == len(TrialUserAttributeModel.where_trial_id(trial_id, session)) class TestTrialSystemAttributeModel(object): @staticmethod def test_find_by_trial_and_key(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study') trial = TrialModel(study_id=study.study_id) session.add( TrialSystemAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() attr = TrialSystemAttributeModel.find_by_trial_and_key(trial, 'sample-key', session) assert attr is not None assert '1' == attr.value_json assert TrialSystemAttributeModel.find_by_trial_and_key(trial, 'not-found', session) is None @staticmethod def test_where_study(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE) session.add(study) session.add(trial) session.add( TrialSystemAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() system_attributes = TrialSystemAttributeModel.where_study(study, session) assert 1 == len(system_attributes) assert 'sample-key' == system_attributes[0].key assert '1' == system_attributes[0].value_json @staticmethod 
def test_where_trial(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE) session.add( TrialSystemAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() system_attributes = TrialSystemAttributeModel.where_trial(trial, session) assert 1 == len(system_attributes) assert 'sample-key' == system_attributes[0].key assert '1' == system_attributes[0].value_json @staticmethod def test_all(session): # type: (Session) -> None study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE) session.add( TrialSystemAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1')) session.commit() system_attributes = TrialSystemAttributeModel.all(session) assert 1 == len(system_attributes) assert 'sample-key' == system_attributes[0].key assert '1' == system_attributes[0].value_json @staticmethod def test_cascade_delete_on_trial(session): # type: (Session) -> None trial_id = 1 study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE) trial = TrialModel(trial_id=trial_id, study_id=study.study_id, state=TrialState.COMPLETE) trial.system_attributes.append(TrialSystemAttributeModel( trial_id=trial_id, key='sample-key1', value_json='1')) trial.system_attributes.append(TrialSystemAttributeModel( trial_id=trial_id, key='sample-key2', value_json='2')) study.trials.append(trial) session.add(study) session.commit() assert 2 == len(TrialSystemAttributeModel.where_trial_id(trial_id, session)) session.delete(trial) session.commit() assert 0 == len(TrialSystemAttributeModel.where_trial_id(trial_id, session)) class TestVersionInfoModel(object): @staticmethod def test_version_info_id_constraint(session): # type: (Session) -> None session.add(VersionInfoModel(schema_version=1, library_version='0.0.1')) session.commit() # Test check constraint of version_info_id. session.add(VersionInfoModel(version_info_id=2, schema_version=2, library_version='0.0.2')) pytest.raises(IntegrityError, lambda: session.commit())
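# Note on the assertion style above: the lambda form of pytest.raises works,
# but the context-manager form is the more idiomatic equivalent:
#
#   with pytest.raises(IntegrityError):
#       session.commit()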
nilq/baby-python
python
from typing import Any, Dict, Iterator, List, Optional from loguru import logger from pydantic import Field from ..metadata_source import ColumnMetadata from .external_metadata_source import ( ExternalMetadataSource, ExternalMetadataSourceException, ) try: import boto3 import botocore from mypy_boto3_athena.client import AthenaClient from mypy_boto3_glue.client import GlueClient AWS_INSTALLED = True except ImportError: logger.debug("AWS optional dependency is not installed.") AWS_INSTALLED = False if AWS_INSTALLED: class AthenaSource(ExternalMetadataSource): """Athena Source instance.""" s3_staging_dir: str catalog_name: str = "AWSDataCatalog" region_name: Optional[str] = None aws_access_key_id: Optional[str] = None aws_secret_access_key: Optional[str] = None extra_connection_args: Dict[str, Any] = Field(default_factory=dict) def create_connection(self) -> None: """ Create Athena connection. :return: """ self._connection = boto3.client( "athena", region_name=self.region_name, aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, **self.extra_connection_args, ) def close_connection(self) -> None: pass def get_column_names( self, database_name: str, table_name: str, include_comment: bool = False ) -> Iterator[ColumnMetadata]: """ Get the column names from the table. :param database_name: the database name :param table_name: the table name :param include_comment: include the comment :return: the list of the column names """ try: if not self._connection: self.create_connection() response = self._connection.get_table_metadata( CatalogName=self.catalog_name, DatabaseName=database_name, TableName=table_name, ) for row in response["TableMetadata"]["Columns"]: column_name = row["Name"] column_comment = None if include_comment: if "Comment" in row: column_comment = row["Comment"] yield ColumnMetadata( column_name=column_name, column_comment=column_comment ) except botocore.exceptions.ClientError as error: logger.exception( f"Error in getting columns name from AWS Athena {database_name}.{table_name} for catalog {self.catalog_name}" ) raise ExternalMetadataSourceException(error) def get_table_names_list(self, database_name: str) -> Iterator[str]: """ Get the table names list from the database in AWS Athena. :param database_name: the database name :return: the list of the table names of the database """ try: if not self._connection: self.create_connection() response = self._connection.list_table_metadata( CatalogName=self.catalog_name, DatabaseName=database_name, ) for table in response["TableMetadataList"]: yield table["Name"] while "NextToken" in response: response = self._connection.list_table_metadata( CatalogName=self.catalog_name, DatabaseName=database_name, NextToken=response["NextToken"], ) for table in response["TableMetadataList"]: yield table["Name"] except botocore.exceptions.ClientError as exception: logger.exception( f"Error in getting table names list from AWS Athena from the database {database_name} for catalog {self.catalog_name}" ) raise ExternalMetadataSourceException(exception) @classmethod def type(cls) -> str: """ The type of the source. :return: the name o of the source. """ return "AWS Athena" class GlueSource(ExternalMetadataSource): """Glue Source instance.""" region_name: Optional[str] = None aws_access_key_id: Optional[str] = None aws_secret_access_key: Optional[str] = None extra_connection_args: Dict[str, Any] = Field(default_factory=dict) def create_connection(self) -> None: """ Create the Glue connection. 
:return: """ self._connection = boto3.client( "glue", region_name=self.region_name, aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, **self.extra_connection_args, ) def close_connection(self) -> None: pass def get_column_names( self, database_name: str, table_name: str, include_comment: bool = False ) -> Iterator[ColumnMetadata]: """ Get the column names from AWS Glue table. :param database_name: the name of the database :param table_name: the name of the table :param include_comment: include the comments :return: the list of the column names """ try: if not self._connection: self.create_connection() response = self._connection.get_table( DatabaseName=database_name, Name=table_name ) for row in response["Table"]["StorageDescriptor"]["Columns"]: column_name = row["Name"] column_comment = None if include_comment: if "Comment" in row: column_comment = row["Comment"] yield ColumnMetadata( column_name=column_name, column_comment=column_comment ) except botocore.exceptions.ClientError as exception: logger.exception( f"Error in getting columns name from AWS Glue from the table {database_name}.{table_name}" ) raise ExternalMetadataSourceException(exception) def get_table_names_list(self, database_name: str) -> Iterator[str]: """ Get the table names list from the database in AWS Glue. :param database_name: the database name :return: the list of the table names of the database """ try: if not self._connection: self.create_connection() response = self._connection.get_tables( DatabaseName=database_name, ) for table in response["TableList"]: yield table["Name"] while "NextToken" in response: response = self._connection.get_tables( DatabaseName=database_name, NextToken=response["NextToken"] ) for table in response["TableList"]: yield table["Name"] except botocore.exceptions.ClientError as error: logger.exception( f"Error in getting table names list from AWS Glue from the database {database_name}" ) raise error @classmethod def type(cls) -> str: """ The type of the source. :return: the name of the source. """ return "AWS Glue"
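# A minimal usage sketch for the sources above. The database and table names
# are placeholders, and credentials are resolved through boto3's default chain
# when the explicit key fields are left as None.
if __name__ == "__main__" and AWS_INSTALLED:
    glue = GlueSource(region_name="us-east-1")
    for table_name in glue.get_table_names_list("sample_database"):
        for column in glue.get_column_names(
            "sample_database", table_name, include_comment=True
        ):
            print(table_name, column.column_name, column.column_comment)
    glue.close_connection()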
nilq/baby-python
python
from django.conf.urls import patterns, include, url #from polls import views from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # Examples: # url(r'^$', 'django_angularjs_rest.views.home', name='home'), # url(r'^django_angularjs_rest/', include('django_angularjs_rest.foo.urls')), url(r'^polls/', include('polls.urls', namespace="polls")), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)), #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), #url(r'^', include('quickstart.urls')), url(r'^api/', include('snippets.urls')), ) """ if is_installed('api'): from api import api api.autodiscover() urlpatterns += patterns('', url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token'), url(r'^api/', include(api.urls)),) """
nilq/baby-python
python
from vk import VKAPI class Photos(VKAPI): method_class = 'photos' def __init__(self, access_token=''): super(Photos, self).__init__(access_token=access_token) def confirm_tag(self, **params): self.set_method('confirmTag') return self.send(params) def copy(self, **params): self.set_method('copy') return self.send(params) def create_album(self, **params): self.set_method('createAlbum') return self.send(params) def create_comment(self, **params): self.set_method('createComment') return self.send(params) def delete(self, **params): self.set_method('delete') return self.send(params) def delete_album(self, **params): self.set_method('deleteAlbum') return self.send(params) def delete_comment(self, **params): self.set_method('deleteComment') return self.send(params) def edit(self, **params): self.set_method('edit') return self.send(params) def edit_album(self, **params): self.set_method('editAlbum') return self.send(params) def edit_comment(self, **params): self.set_method('editComment') return self.send(params) def get(self, **params): self.set_method('get') return self.send(params) def get_albums(self, **params): self.set_method('getAlbums') return self.send(params) def get_albums_count(self, **params): self.set_method('getAlbumsCount') return self.send(params) def get_all(self, **params): self.set_method('getAll') return self.send(params) def get_all_comments(self, **params): self.set_method('getAllComments') return self.send(params) def get_by_id(self, **params): self.set_method('getById') return self.send(params) def get_chat_upload_server(self, **params): self.set_method('getChatUploadServer') return self.send(params) def get_comments(self, **params): self.set_method('getComments') return self.send(params) def get_market_album_upload_server(self, **params): self.set_method('getMarketAlbumUploadServer') return self.send(params) def get_market_upload_server(self, **params): self.set_method('getMarketUploadServer') return self.send(params) def get_messages_upload_server(self, **params): self.set_method('getMessagesUploadServer') return self.send(params) def get_new_tags(self, **params): self.set_method('getNewTags') return self.send(params) def get_owner_photo_upload_server(self, **params): self.set_method('getOwnerPhotoUploadServer') return self.send(params) def get_tags(self, **params): self.set_method('getTags') return self.send(params) def get_upload_server(self, **params): self.set_method('getUploadServer') return self.send(params) def get_user_photos(self, **params): self.set_method('getUserPhotos') return self.send(params) def get_wall_upload_server(self, **params): self.set_method('getWallUploadServer') return self.send(params) def make_cover(self, **params): self.set_method('makeCover') return self.send(params) def move(self, **params): self.set_method('move') return self.send(params) def put_tag(self, **params): self.set_method('putTag') return self.send(params) def remove_tag(self, **params): self.set_method('removeTag') return self.send(params) def reorder_albums(self, **params): self.set_method('reorderAlbums') return self.send(params) def reorder_photos(self, **params): self.set_method('reorderPhotos') return self.send(params) def report(self, **params): self.set_method('report') return self.send(params) def report_comment(self, **params): self.set_method('reportComment') return self.send(params) def restore(self, **params): self.set_method('restore') return self.send(params) def restore_comment(self, **params): self.set_method('restoreComment') return self.send(params) def 
save(self, **params): self.set_method('save') return self.send(params) def save_market_album_photo(self, **params): self.set_method('saveMarketAlbumPhoto') return self.send(params) def save_market_photo(self, **params): self.set_method('saveMarketPhoto') return self.send(params) def save_messages_photo(self, **params): self.set_method('saveMessagesPhoto') return self.send(params) def save_owner_photo(self, **params): self.set_method('saveOwnerPhoto') return self.send(params) def save_wall_photo(self, **params): self.set_method('saveWallPhoto') return self.send(params) def search(self, **params): self.set_method('search') return self.send(params)
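# Minimal usage sketch: every wrapper above only sets the corresponding
# `photos.*` endpoint and forwards its keyword arguments, so valid parameters
# are exactly those documented by the VK API. The token and ids below are
# placeholders.
if __name__ == '__main__':
    photos = Photos(access_token='YOUR_ACCESS_TOKEN')
    albums = photos.get_albums(owner_id=1)
    recent = photos.get(owner_id=1, album_id='profile', rev=1, count=10)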
nilq/baby-python
python
from setuptools import setup, find_packages from distutils.util import convert_path long_description =""" # Virtual Pi The easiest way to use this package is to install using pip3 for python 3 ```bash $ sudo pip3 install VPi ``` To use the mock or virtual pi just type the following at the beginning of your script. ```python try: from RPi.GPIO import GPIO import board import busio except: from VPi.GPIO import GPIO import VPi.board as board import VPi.busio as busio ``` ## Works with - [python 3.6.8](https://www.python.org/downloads/release/3.6.8) """ pkg_ns = {} ver_path = convert_path('VPi/__init__.py') with open(ver_path) as ver_file: exec(ver_file.read(), pkg_ns) setup( name='VPi', version=pkg_ns['__version__'], description='Virtual Pi Library for Raspberry Pi', url='https://github.com/codenio/', author='Aananth K', author_email='aananthraj1995@gmail.com', license='GPL-3.0', packages=find_packages(exclude=[]), install_requires=["numpy==1.19.5"], zip_safe=False, long_description_content_type="text/markdown", long_description=long_description, )
nilq/baby-python
python
# Concatenate strings in a (nested) list
# 1. concatenate strings in a non-nested list
# 2. elements may themselves be (nested) lists


def concat_str(string_list):
    """
    Concatenate all the strings in a possibly-nested list of strings,
    separated by single spaces.

    @param str|list(str|list(...)) string_list: a string, or a (possibly
        nested) list of strings
    @rtype: str

    >>> list_ = ['the', 'cow', 'goes', 'moo', '!']
    >>> concat_str(list_)
    'the cow goes moo !'
    >>> list_ = ['this', 'string', 'is', 'actually', [['made'], 'up'], 'of', 'several', 'strings']
    >>> concat_str(list_)
    'this string is actually made up of several strings'
    """
    if isinstance(string_list, str):
        return string_list
    else:
        # Join with spaces so both doctests above hold.
        return ' '.join([concat_str(elem) for elem in string_list])


def distribute_papers(pile):
    """
    Recursive function to distribute papers in 148

    @param list[int] pile: our remaining pile of papers
    @rtype: None
    """
    if len(pile) == 0:
        return
    else:
        # Hand out the top of the pile, then recurse on what remains.
        print(pile[0])
        distribute_papers(pile[1:])
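if __name__ == '__main__':
    # Added for illustration: run the doctests above, then a small demo of
    # both helpers.
    import doctest
    doctest.testmod()
    print(concat_str(['a', [['nested'], 'list'], 'demo']))
    distribute_papers([3, 2, 1])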
nilq/baby-python
python
from __future__ import absolute_import, unicode_literals
import os
from setuptools import find_packages, setup

version = __import__('logtailer').__version__


def read(fname):
    # read the contents of a text file
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name="django-logtailer",
    version=version,
    url='https://github.com/thaeny-dev/django-logtailer',
    license='BSD',
    platforms=['OS Independent'],
    description="Allows reading log files from disk with a tail-like web "
                "console on the Django admin interface.",
    long_description=read('README.rst'),
    author='Thomas Haeny',
    author_email='dev@haeny.de',
    packages=find_packages(),
    install_requires=(
        'Django>=2.2',
    ),
    include_package_data=True,
    zip_safe=False,
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Framework :: Django :: 2.2',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
nilq/baby-python
python
##TODO: move this to a common location
from db_password import DB_PASSWORD

DB_ENGINE = "postgresql_psycopg2"
DB_NAME = "testdb"  # "ConceptNet"
DB_HOST = "localhost"  # or whatever server it's on
DB_PORT = "5432"  # or whatever port it's on
DB_USER = "pat"  # change this to your PostgreSQL username
DB_SCHEMAS = "public"
nilq/baby-python
python
from flask import Blueprint
from flask import request
from flask import jsonify

from dock.common.exceptions import AppBaseException

blueprint = Blueprint('transaction', __name__, url_prefix='/transaction')


class Provision(object):
    def __init__(self):
        pass

    @classmethod
    def create(cls, p):
        return cls()

    def to_dict(self):
        return {'id': 2}


class Transaction(object):
    @classmethod
    def from_provisions(cls, *provisions):
        return []

    @classmethod
    def create(cls):
        return cls()

    def to_dict(self):
        return {'id': 1}


class ProvisionsBelongsToDifferentTransactionsException(Exception):
    pass


error_provisions_from_different_transactions = AppBaseException(
    1000, 'Provisions do not belong to the same transaction')


@blueprint.route('/create', methods=['GET', 'POST'])
def create():
    data = request.get_json(force=True, silent=True)
    provisions = data['provisions']
    provisions = [Provision.create(p) for p in provisions]
    try:
        # Unpack the list so the varargs signature receives one provision each.
        transaction = Transaction.from_provisions(*provisions)
    except ProvisionsBelongsToDifferentTransactionsException:
        raise error_provisions_from_different_transactions
    if not transaction:
        transaction = Transaction.create()
    return jsonify(meta=dict(code=200), data=transaction.to_dict())
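# Illustrative client for the endpoint above. The payload shape (a JSON object
# with a 'provisions' list) follows the handler; the app wiring is an
# assumption of this sketch.
def _demo_request():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(blueprint)
    client = app.test_client()
    response = client.post('/transaction/create',
                           json={'provisions': [{'id': 2}]})
    return response.get_json()  # {'data': {'id': 1}, 'meta': {'code': 200}}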
nilq/baby-python
python
#!/usr/bin/env python3 import argparse import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader import learn2learn as l2l from learn2learn.data.transforms import NWays, KShots, LoadData, RemapLabels def pairwise_distances_logits(a, b): n = a.shape[0] m = b.shape[0] logits = -((a.unsqueeze(1).expand(n, m, -1) - b.unsqueeze(0).expand(n, m, -1))**2).sum(dim=2) return logits def accuracy(predictions, targets): predictions = predictions.argmax(dim=1).view(targets.shape) return (predictions == targets).sum().float() / targets.size(0) class Convnet(nn.Module): def __init__(self, x_dim=3, hid_dim=64, z_dim=64): super().__init__() self.encoder = l2l.vision.models.ConvBase(output_size=z_dim, hidden=hid_dim, channels=x_dim, max_pool=True) self.out_channels = 1600 def forward(self, x): x = self.encoder(x) return x.view(x.size(0), -1) def fast_adapt(model, batch, ways, shot, query_num, metric=None, device=None): if metric is None: metric = pairwise_distances_logits if device is None: device = model.device() data, labels = batch data = data.to(device) labels = labels.to(device) n_items = shot * ways # Sort data samples by labels # TODO: Can this be replaced by ConsecutiveLabels ? sort = torch.sort(labels) data = data.squeeze(0)[sort.indices].squeeze(0) labels = labels.squeeze(0)[sort.indices].squeeze(0) # Compute support and query embeddings embeddings = model(data) support_indices = np.zeros(data.size(0), dtype=bool) selection = np.arange(ways) * (shot + query_num) for offset in range(shot): support_indices[selection + offset] = True query_indices = torch.from_numpy(~support_indices) support_indices = torch.from_numpy(support_indices) support = embeddings[support_indices] support = support.reshape(ways, shot, -1).mean(dim=1) query = embeddings[query_indices] labels = labels[query_indices].long() logits = pairwise_distances_logits(query, support) loss = F.cross_entropy(logits, labels) acc = accuracy(logits, labels) return loss, acc if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--max-epoch', type=int, default=250) parser.add_argument('--shot', type=int, default=1) parser.add_argument('--test-way', type=int, default=5) parser.add_argument('--test-shot', type=int, default=1) parser.add_argument('--test-query', type=int, default=30) parser.add_argument('--train-query', type=int, default=15) parser.add_argument('--train-way', type=int, default=30) parser.add_argument('--gpu', default=0) args = parser.parse_args() print(args) device = torch.device('cpu') if args.gpu and torch.cuda.device_count(): print("Using gpu") torch.cuda.manual_seed(43) device = torch.device('cuda') model = Convnet() model.to(device) path_data = '~/data' train_dataset = l2l.vision.datasets.MiniImagenet( root=path_data, mode='train') valid_dataset = l2l.vision.datasets.MiniImagenet( root=path_data, mode='validation') test_dataset = l2l.vision.datasets.MiniImagenet( root=path_data, mode='test') train_dataset = l2l.data.MetaDataset(train_dataset) train_transforms = [ NWays(train_dataset, args.train_way), KShots(train_dataset, args.train_query + args.shot), LoadData(train_dataset), RemapLabels(train_dataset), ] train_tasks = l2l.data.TaskDataset(train_dataset, task_transforms=train_transforms) train_loader = DataLoader(train_tasks, pin_memory=True, shuffle=True) valid_dataset = l2l.data.MetaDataset(valid_dataset) valid_transforms = [ NWays(valid_dataset, args.test_way), KShots(valid_dataset, args.test_query + args.test_shot), 
LoadData(valid_dataset), RemapLabels(valid_dataset), ] valid_tasks = l2l.data.TaskDataset(valid_dataset, task_transforms=valid_transforms, num_tasks=200) valid_loader = DataLoader(valid_tasks, pin_memory=True, shuffle=True) test_dataset = l2l.data.MetaDataset(test_dataset) test_transforms = [ NWays(test_dataset, args.test_way), KShots(test_dataset, args.test_query + args.test_shot), LoadData(test_dataset), RemapLabels(test_dataset), ] test_tasks = l2l.data.TaskDataset(test_dataset, task_transforms=test_transforms, num_tasks=2000) test_loader = DataLoader(test_tasks, pin_memory=True, shuffle=True) optimizer = torch.optim.Adam(model.parameters(), lr=0.001) lr_scheduler = torch.optim.lr_scheduler.StepLR( optimizer, step_size=20, gamma=0.5) for epoch in range(1, args.max_epoch + 1): model.train() loss_ctr = 0 n_loss = 0 n_acc = 0 for i in range(100): batch = next(iter(train_loader)) loss, acc = fast_adapt(model, batch, args.train_way, args.shot, args.train_query, metric=pairwise_distances_logits, device=device) loss_ctr += 1 n_loss += loss.item() n_acc += acc optimizer.zero_grad() loss.backward() optimizer.step() lr_scheduler.step() print('epoch {}, train, loss={:.4f} acc={:.4f}'.format( epoch, n_loss/loss_ctr, n_acc/loss_ctr)) model.eval() loss_ctr = 0 n_loss = 0 n_acc = 0 for i, batch in enumerate(valid_loader): loss, acc = fast_adapt(model, batch, args.test_way, args.test_shot, args.test_query, metric=pairwise_distances_logits, device=device) loss_ctr += 1 n_loss += loss.item() n_acc += acc print('epoch {}, val, loss={:.4f} acc={:.4f}'.format( epoch, n_loss/loss_ctr, n_acc/loss_ctr)) loss_ctr = 0 n_acc = 0 for i, batch in enumerate(test_loader, 1): loss, acc = fast_adapt(model, batch, args.test_way, args.test_shot, args.test_query, metric=pairwise_distances_logits, device=device) loss_ctr += 1 n_acc += acc print('batch {}: {:.2f}({:.2f})'.format( i, n_acc/loss_ctr * 100, acc * 100))
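# Shape note for pairwise_distances_logits, added for illustration: given n
# query embeddings and m class prototypes it returns an (n, m) matrix of
# negative squared Euclidean distances, so the `argmax(dim=1)` inside
# `accuracy` selects the nearest prototype. A quick check:
#
#   queries = torch.randn(4, 8)
#   prototypes = torch.randn(3, 8)
#   assert pairwise_distances_logits(queries, prototypes).shape == (4, 3)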
nilq/baby-python
python
"""Shared pytest fixtures.""" import os import re import unittest.mock as mock from http.server import HTTPServer import pytest from pywemo import SubscriptionRegistry @pytest.fixture(scope='module') def vcr_config(): """VCR Configuration.""" def scrub_identifiers(response): body = response['body']['string'] body = re.sub( b'<serialNumber>[^<]+</serialNumber>', b'<serialNumber>SERIALNUMBER</serialNumber>', body, ) body = re.sub( b'<SerialNo>[^<]+</SerialNo>', b'<SerialNo>SERIALNUMBER</SerialNo>', body, ) body = re.sub( br'uuid:([A-Z][a-z]+-\d_\d)-[A-Za-z0-9]+', br'uuid:\1-SERIALNUMBER', body, ) body = re.sub( b'<macAddress>[^<]+</macAddress>', b'<macAddress>001122334455</macAddress>', body, ) body = re.sub( b'<MacAddr>[^<]+</MacAddr>', b'<MacAddr>001122334455</MacAddr>', body, ) body = re.sub( b'<friendlyName>[^<]+</friendlyName>', b'<friendlyName>WeMo Device</friendlyName>', body, ) body = re.sub( b'<hkSetupCode>[^<]+</hkSetupCode>', b'<hkSetupCode>012-34-567</hkSetupCode>', body, ) response['body']['string'] = body return response return { 'before_record_response': scrub_identifiers, 'match_on': [ 'method', 'scheme', 'host', 'port', 'path', 'query', 'body', ], } @pytest.fixture(scope='module') def vcr_cassette_dir(request): """Specify the location for the VCR cassettes.""" # Put all cassettes in tests/vcr/{module}/{test}.yaml return os.path.join('tests/vcr', request.module.__name__) @pytest.fixture def subscription_registry(): """Fixture to simulate HTTPServer for the SubscriptionRegistry.""" registry = SubscriptionRegistry() server = mock.create_autospec(HTTPServer, instance=True) server.server_address = ('localhost', 8989) with mock.patch("pywemo.subscribe._start_server", return_value=server): registry.start() yield registry registry.stop()
nilq/baby-python
python
import json
import pickle
import sqlite3
import time

import pandas as pd

from old.src.Model.Redis_connecter import RedisConn


def picklify(df):
    dt_bytes = pickle.dumps(df)
    return dt_bytes


def res_depicklify_to_list():
    # Despite the name, the queued payloads are JSON strings, so they are
    # decoded with json.loads before being collected into a DataFrame.
    r = RedisConn().r
    db = sqlite3.connect("t0419.db")
    res = []
    count = r.scard('res_dfs')
    temp_list = r.spop('res_dfs', count=count)
    print(temp_list)
    time_start = time.time()
    for item in temp_list:
        temp = json.loads(item)
        print(item)
        res.append(temp)
    time_end = time.time()
    print('de_json used time:', time_end - time_start)
    time_start = time.time()
    data_ = pd.DataFrame(res)
    time_end = time.time()
    print('dataframe used time:', time_end - time_start)
    data_.to_sql("dataset", db, if_exists="append")
    db.close()
    print('Successfully Written')
nilq/baby-python
python
""" gcae.py PyTorch-Lightning Module Definition for the No-Language Latent Actions GELU Conditional Auto-Encoding (GCAE) Model. """ from pathlib import Path from typing import Any, List, Tuple import pytorch_lightning as pl import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim class GCAE(pl.LightningModule): def __init__( self, state_dim: int = 7, action_dim: int = 7, latent_dim: int = 2, hidden_dim: int = 30, lr: float = 0.01, lr_step_size: int = 200, lr_gamma: float = 0.1, zaug: bool = True, zaug_lambda: float = 10.0, run_dir: Path = None, ): super(GCAE, self).__init__() # Save Hyperparameters self.state_dim, self.action_dim = state_dim, action_dim self.latent_dim, self.hidden_dim = latent_dim, hidden_dim self.lr, self.lr_step_size, self.lr_gamma = lr, lr_step_size, lr_gamma # If True, Train Dataset will have augmented data batch --> combine losses! self.zaug, self.zaug_lambda = zaug, zaug_lambda # Pointer to Run Directory (just in case) self.run_dir = run_dir # Build Model self.build_model() def build_model(self) -> None: # Encoder --> Takes (State, Action) --> Encodes to `z` latent space self.enc = nn.Sequential( nn.Linear(self.state_dim + self.action_dim, self.hidden_dim), nn.GELU(), nn.Linear(self.hidden_dim, self.hidden_dim), nn.GELU(), nn.Linear(self.hidden_dim, self.latent_dim), ) # Decoder --> Takes State + Latent Action --> Decodes to Action Space self.dec = nn.Sequential( nn.Linear(self.state_dim + self.latent_dim, self.hidden_dim), nn.GELU(), nn.Linear(self.hidden_dim, self.hidden_dim), nn.GELU(), nn.Linear(self.hidden_dim, self.action_dim), ) def configure_optimizers(self) -> Tuple[List[optim.Optimizer], List[optim.lr_scheduler.StepLR]]: optimizer = optim.Adam(self.parameters(), lr=self.lr) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.lr_step_size, gamma=self.lr_gamma) return [optimizer], [scheduler] def decoder(self, s: torch.Tensor, z: torch.Tensor) -> torch.Tensor: # Create Input to Decoder --> (s, z) y = torch.cat([s, z], 1) # Return Predicted Action return self.dec(y) def forward(self, s: torch.Tensor, a: torch.Tensor) -> torch.Tensor: """ Default forward pass --> encode (s, a) --> z; decode (s, z) --> a. """ x = torch.cat([s, a], 1) z = self.enc(x) # Return Predicted Action via Decoder return self.decoder(s, z) def training_step(self, batch: Any, batch_idx: int) -> torch.Tensor: # Regular Pipeline if not self.zaug: # Extract Batch state, action = batch # Get Predicted Action predicted_action = self.forward(state, action) # Measure MSE Loss loss = F.mse_loss(predicted_action, action) # Log Loss self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) return loss # Augmentation Pipeline else: # Extract Batches (state, action), (aug_state, zero_action) = batch # First, "regular" pipeline predicted_action = self.forward(state, action) loss = F.mse_loss(predicted_action, action) # Next, "augmented" (decoder-only) pipeline predicted_zero_action = self.decoder(aug_state, torch.zeros_like(aug_state)[:, : self.latent_dim]) loss += self.zaug_lambda * F.mse_loss(predicted_zero_action, zero_action) # Log Loss self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, batch: Any, batch_idx: int) -> None: # Extract Batch state, action = batch # Get Predicted Action predicted_action = self.forward(state, action) # Measure MSE Loss loss = F.mse_loss(predicted_action, action) # Log Loss self.log("val_loss", loss, prog_bar=True)
nilq/baby-python
python
# Sample file to deploy Cubes slicer as a WSGI application import sys import os.path import ConfigParser CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) CONFIG_PATH = os.path.join(CURRENT_DIR, "slicer.ini") try: config = ConfigParser.SafeConfigParser() config.read(CONFIG_PATH) except Exception as e: raise Exception("Unable to load configuration: %s" % e) import cubes.server application = cubes.server.slicer(config)
nilq/baby-python
python
# # PySNMP MIB module ADAPTECSCSI-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADAPTECSCSI-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:13:33 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") enterprises, Counter32, Gauge32, ModuleIdentity, Unsigned32, Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, ObjectIdentity, MibIdentifier, TimeTicks, NotificationType, IpAddress, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "Counter32", "Gauge32", "ModuleIdentity", "Unsigned32", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "ObjectIdentity", "MibIdentifier", "TimeTicks", "NotificationType", "IpAddress", "Counter64") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") class DmiCounter(Counter32): pass class DmiInteger(Integer32): pass class DmiDisplaystring(DisplayString): pass class DmiComponentIndex(Integer32): pass adaptec = MibIdentifier((1, 3, 6, 1, 4, 1, 795)) products = MibIdentifier((1, 3, 6, 1, 4, 1, 795, 2)) scsi = MibIdentifier((1, 3, 6, 1, 4, 1, 795, 2, 6)) dmtfGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 795, 2, 6, 1)) tComponentid = MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1), ) if mibBuilder.loadTexts: tComponentid.setStatus('mandatory') if mibBuilder.loadTexts: tComponentid.setDescription('This group defines the attributes common to all components. 
This group is required.') eComponentid = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", "DmiComponentIndex")) if mibBuilder.loadTexts: eComponentid.setStatus('mandatory') if mibBuilder.loadTexts: eComponentid.setDescription('') a1Manufacturer = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1, 1), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a1Manufacturer.setStatus('mandatory') if mibBuilder.loadTexts: a1Manufacturer.setDescription('Manufacturer of this system.') a1Product = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1, 2), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a1Product.setStatus('mandatory') if mibBuilder.loadTexts: a1Product.setDescription('Product name for this system.') a1Version = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1, 3), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a1Version.setStatus('mandatory') if mibBuilder.loadTexts: a1Version.setDescription('Version number of this system.') a1SerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1, 4), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a1SerialNumber.setStatus('mandatory') if mibBuilder.loadTexts: a1SerialNumber.setDescription('Serial number for this system.') a1Installation = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1, 5), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a1Installation.setStatus('mandatory') if mibBuilder.loadTexts: a1Installation.setDescription('The time and date for the last time this component was installed ') a1Verify = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("vAnErrorOccurredCheckStatusCode", 0), ("vThisComponentDoesNotExist", 1), ("vTheVerificationIsNotSupported", 2), ("vReserved", 3), ("vThisComponentExistsButTheFunctionalityI", 4), ("vThisComponentExistsButTheFunctionality1", 5), ("vThisComponentExistsAndIsNotFunctioningC", 6), ("vThisComponentExistsAndIsFunctioningCorr", 7)))).setMaxAccess("readonly") if mibBuilder.loadTexts: a1Verify.setStatus('mandatory') if mibBuilder.loadTexts: a1Verify.setDescription('A code that provides a level of verification that the component is still installed and working.') tOperationGroup = MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 2), ) if mibBuilder.loadTexts: tOperationGroup.setStatus('mandatory') if mibBuilder.loadTexts: tOperationGroup.setDescription('The Operation group controls the stystem. ') eOperationGroup = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 2, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", "DmiComponentIndex")) if mibBuilder.loadTexts: eOperationGroup.setStatus('mandatory') if mibBuilder.loadTexts: eOperationGroup.setDescription('') a2PollDevices = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 2, 1, 1), DmiInteger()).setMaxAccess("readwrite") if mibBuilder.loadTexts: a2PollDevices.setStatus('mandatory') if mibBuilder.loadTexts: a2PollDevices.setDescription('Writing a non-zero value to this variable causes an immediate one time poll of all currently known devices. 
This variable will always return the value zero when it is read.') a2ScanDevices = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 2, 1, 2), DmiInteger()).setMaxAccess("readwrite") if mibBuilder.loadTexts: a2ScanDevices.setStatus('mandatory') if mibBuilder.loadTexts: a2ScanDevices.setDescription('Writing a non-zero value to this variable causes an immediate one time scan of the SCSI busses for all possible devices. This variable will always return the value zero when it is read.') a2IndicationControl = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vOff", 0), ("vOn", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: a2IndicationControl.setStatus('mandatory') if mibBuilder.loadTexts: a2IndicationControl.setDescription('Controls whether any indications are enabled') tHostAdapterGroup = MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3), ) if mibBuilder.loadTexts: tHostAdapterGroup.setStatus('mandatory') if mibBuilder.loadTexts: tHostAdapterGroup.setDescription('The Host Adapter Description group describes the logical Host Adapters installed in the system . ') eHostAdapterGroup = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", "DmiComponentIndex"), (0, "ADAPTECSCSI-MIB", "a3HostAdapterIndex")) if mibBuilder.loadTexts: eHostAdapterGroup.setStatus('mandatory') if mibBuilder.loadTexts: eHostAdapterGroup.setDescription('') a3HostAdapterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1, 1), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a3HostAdapterIndex.setStatus('mandatory') if mibBuilder.loadTexts: a3HostAdapterIndex.setDescription('An index into the host adatper table') a3HostAdapterDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1, 2), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a3HostAdapterDescription.setStatus('mandatory') if mibBuilder.loadTexts: a3HostAdapterDescription.setDescription('The description string returned from an SCSI Inquiry Command.') a3HostAdapterVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1, 3), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a3HostAdapterVersion.setStatus('mandatory') if mibBuilder.loadTexts: a3HostAdapterVersion.setDescription('The version string returned from an SCSI Inquiry Command.') a3ChannelCount = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1, 4), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a3ChannelCount.setStatus('mandatory') if mibBuilder.loadTexts: a3ChannelCount.setDescription('The number of SCSI channels provided by this host adapter.') a3Errorcontrolid = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1, 5), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a3Errorcontrolid.setStatus('mandatory') if mibBuilder.loadTexts: a3Errorcontrolid.setDescription('Identifies the row in the errorControl table providing error control & status for this group.') a3EventStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("vDiscovered", 0), ("vChanged", 1), ("vFailed", 2), ("vRecovered", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: a3EventStatus.setStatus('mandatory') if mibBuilder.loadTexts: a3EventStatus.setDescription('Identifies the reason an indication was sent.') tLogicalUnitGroup = 
MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4), ) if mibBuilder.loadTexts: tLogicalUnitGroup.setStatus('mandatory') if mibBuilder.loadTexts: tLogicalUnitGroup.setDescription('The logical units attached to host adapters, ') eLogicalUnitGroup = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", "DmiComponentIndex"), (0, "ADAPTECSCSI-MIB", "a4HostAdapterIndex"), (0, "ADAPTECSCSI-MIB", "a4ScsiId"), (0, "ADAPTECSCSI-MIB", "a4LogicalUnitId")) if mibBuilder.loadTexts: eLogicalUnitGroup.setStatus('mandatory') if mibBuilder.loadTexts: eLogicalUnitGroup.setDescription('') a4HostAdapterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 1), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a4HostAdapterIndex.setStatus('mandatory') if mibBuilder.loadTexts: a4HostAdapterIndex.setDescription('The index of the host adapter to which this Logical Unit is attached.') a4ScsiId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 2), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a4ScsiId.setStatus('mandatory') if mibBuilder.loadTexts: a4ScsiId.setDescription('The SCSI target ID of the Logical Unit') a4LogicalUnitId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 3), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a4LogicalUnitId.setStatus('mandatory') if mibBuilder.loadTexts: a4LogicalUnitId.setDescription('The ID of this Logical Unit.') a4LogicalUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("vDirectaccess", 1), ("vTape", 2), ("vPrinter", 3), ("vProcessor", 4), ("vWriteonce", 5), ("vCdrom", 6), ("vScanner", 7), ("vOpticalmemory", 8), ("vJukebox", 9), ("vComdevice", 10), ("vHostadapter", 11), ("vOther", 12)))).setMaxAccess("readonly") if mibBuilder.loadTexts: a4LogicalUnitType.setStatus('mandatory') if mibBuilder.loadTexts: a4LogicalUnitType.setDescription('The type of this Logical Unit.') a4LogicalUnitDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 5), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a4LogicalUnitDescription.setStatus('mandatory') if mibBuilder.loadTexts: a4LogicalUnitDescription.setDescription('The description string returned from an SCSI Inquiry Command.') a4Errorcontrolid = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 6), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a4Errorcontrolid.setStatus('mandatory') if mibBuilder.loadTexts: a4Errorcontrolid.setDescription('Identifies the row in the errorControl table providing error control & status for this group.') a4EventStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("vDiscovered", 0), ("vChanged", 1), ("vFailed", 2), ("vRecovered", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: a4EventStatus.setStatus('mandatory') if mibBuilder.loadTexts: a4EventStatus.setDescription('Identifies the reason an indication was sent.') tErrorcontrol = MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5), ) if mibBuilder.loadTexts: tErrorcontrol.setStatus('mandatory') if mibBuilder.loadTexts: tErrorcontrol.setDescription('Indication control and status for the parent group') eErrorcontrol = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", 
"DmiComponentIndex"), (0, "ADAPTECSCSI-MIB", "a5Selfid")) if mibBuilder.loadTexts: eErrorcontrol.setStatus('mandatory') if mibBuilder.loadTexts: eErrorcontrol.setDescription('') a5Selfid = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 1), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a5Selfid.setStatus('mandatory') if mibBuilder.loadTexts: a5Selfid.setDescription('Instance identifer. A unique number that identifies this row.') a5Fatalcount = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 2), DmiCounter()).setMaxAccess("readonly") if mibBuilder.loadTexts: a5Fatalcount.setStatus('mandatory') if mibBuilder.loadTexts: a5Fatalcount.setDescription('Count of all fatal errors since system startup.') a5Majorcount = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 3), DmiCounter()).setMaxAccess("readonly") if mibBuilder.loadTexts: a5Majorcount.setStatus('mandatory') if mibBuilder.loadTexts: a5Majorcount.setDescription('Count of all major errors since system startup.') a5Warningcount = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 4), DmiCounter()).setMaxAccess("readonly") if mibBuilder.loadTexts: a5Warningcount.setStatus('mandatory') if mibBuilder.loadTexts: a5Warningcount.setDescription('Count of all warning errors since system startup.') a5Errstatus = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("vOk", 0), ("vWarning", 1), ("vMajor", 2), ("vFatal", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: a5Errstatus.setStatus('mandatory') if mibBuilder.loadTexts: a5Errstatus.setDescription('current error status') a5Errstatustype = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("vPost", 0), ("vRuntime", 1), ("vDiagnosticTest", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: a5Errstatustype.setStatus('mandatory') if mibBuilder.loadTexts: a5Errstatustype.setDescription('Indicates the type of detection that set the current error status.') a5Indicationcontrol = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vOff", 0), ("vOn", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: a5Indicationcontrol.setStatus('mandatory') if mibBuilder.loadTexts: a5Indicationcontrol.setDescription('Enables or disables generation of indications') tMiftomib = MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 99), ) if mibBuilder.loadTexts: tMiftomib.setStatus('mandatory') if mibBuilder.loadTexts: tMiftomib.setDescription('This group defines attributes required for DMI to SNMP translati n.') eMiftomib = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 99, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", "DmiComponentIndex")) if mibBuilder.loadTexts: eMiftomib.setStatus('mandatory') if mibBuilder.loadTexts: eMiftomib.setDescription('') a99MibName = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 99, 1, 1), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a99MibName.setStatus('mandatory') if mibBuilder.loadTexts: a99MibName.setDescription('The MIB name that defines this MIF') a99MibOid = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 99, 1, 2), DmiDisplaystring()).setMaxAccess("readonly") if mibBuilder.loadTexts: a99MibOid.setStatus('mandatory') if mibBuilder.loadTexts: 
a99MibOid.setDescription('The MIB Object Identifier that corresponds to this MIF') a99DisableTrap = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 99, 1, 3), DmiInteger()).setMaxAccess("readwrite") if mibBuilder.loadTexts: a99DisableTrap.setStatus('mandatory') if mibBuilder.loadTexts: a99DisableTrap.setDescription('This attribute can be changed to disable sending of traps from this component') tTrapGroup = MibTable((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999), ) if mibBuilder.loadTexts: tTrapGroup.setStatus('mandatory') if mibBuilder.loadTexts: tTrapGroup.setDescription('This group defines attributes needed for Trap definition. This group does not exist in MIF file') eTrapGroup = MibTableRow((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1), ).setIndexNames((0, "ADAPTECSCSI-MIB", "DmiComponentIndex")) if mibBuilder.loadTexts: eTrapGroup.setStatus('mandatory') if mibBuilder.loadTexts: eTrapGroup.setDescription('') a9999ErrorTime = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 1), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ErrorTime.setStatus('mandatory') if mibBuilder.loadTexts: a9999ErrorTime.setDescription('The Date & Time when the error occured') a9999ErrorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 2), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ErrorStatus.setStatus('mandatory') if mibBuilder.loadTexts: a9999ErrorStatus.setDescription('Error Status Code') a9999ErrorGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 3), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ErrorGroupId.setStatus('mandatory') if mibBuilder.loadTexts: a9999ErrorGroupId.setDescription('Group ID of the errorControl Group') a9999ErrorInstanceId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 4), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ErrorInstanceId.setStatus('mandatory') if mibBuilder.loadTexts: a9999ErrorInstanceId.setDescription('Instance ID of the errorControl Group') a9999ComponentId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 5), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ComponentId.setStatus('mandatory') if mibBuilder.loadTexts: a9999ComponentId.setDescription('Component ID of the component that caused this error') a9999GroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 6), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999GroupId.setStatus('mandatory') if mibBuilder.loadTexts: a9999GroupId.setDescription('Group ID of the Group that caused this error') a9999InstanceId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 7), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999InstanceId.setStatus('mandatory') if mibBuilder.loadTexts: a9999InstanceId.setDescription('Instance ID of the Group that caused this error') a9999VendorCode1 = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 8), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999VendorCode1.setStatus('mandatory') if mibBuilder.loadTexts: a9999VendorCode1.setDescription('Vendor specific code 1') a9999VendorCode2 = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 9), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999VendorCode2.setStatus('mandatory') if mibBuilder.loadTexts: a9999VendorCode2.setDescription('Vendor specific code 2') a9999VendorText = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 10), OctetString()).setMaxAccess("readonly") if 
mibBuilder.loadTexts: a9999VendorText.setStatus('mandatory') if mibBuilder.loadTexts: a9999VendorText.setDescription('Vendor specific octet string info') a9999ParentGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 11), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ParentGroupId.setStatus('mandatory') if mibBuilder.loadTexts: a9999ParentGroupId.setDescription('Group ID of parent of Group that caused this error') a9999ParentInstanceId = MibTableColumn((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1, 12), DmiInteger()).setMaxAccess("readonly") if mibBuilder.loadTexts: a9999ParentInstanceId.setStatus('mandatory') if mibBuilder.loadTexts: a9999ParentInstanceId.setDescription('Instance ID of parent of Group that caused this error') AdaptecEventError = NotificationType((1, 3, 6, 1, 4, 1, 795, 2, 6, 1, 9999, 1) + (0,1)).setObjects(("ADAPTECSCSI-MIB", "a9999ErrorTime"), ("ADAPTECSCSI-MIB", "a9999ErrorStatus"), ("ADAPTECSCSI-MIB", "a9999ErrorGroupId"), ("ADAPTECSCSI-MIB", "a9999ErrorInstanceId"), ("ADAPTECSCSI-MIB", "a9999ComponentId"), ("ADAPTECSCSI-MIB", "a9999GroupId"), ("ADAPTECSCSI-MIB", "a9999InstanceId"), ("ADAPTECSCSI-MIB", "a9999VendorCode1"), ("ADAPTECSCSI-MIB", "a9999VendorCode2"), ("ADAPTECSCSI-MIB", "a9999VendorText"), ("ADAPTECSCSI-MIB", "a9999ParentGroupId"), ("ADAPTECSCSI-MIB", "a9999ParentInstanceId")) if mibBuilder.loadTexts: AdaptecEventError.setDescription('DMI Service Layer generated event for Adaptec Scsi') mibBuilder.exportSymbols("ADAPTECSCSI-MIB", a1Verify=a1Verify, a9999ErrorGroupId=a9999ErrorGroupId, eErrorcontrol=eErrorcontrol, a5Errstatustype=a5Errstatustype, a5Indicationcontrol=a5Indicationcontrol, a9999InstanceId=a9999InstanceId, a4HostAdapterIndex=a4HostAdapterIndex, eOperationGroup=eOperationGroup, a5Errstatus=a5Errstatus, eComponentid=eComponentid, dmtfGroups=dmtfGroups, a5Selfid=a5Selfid, a9999GroupId=a9999GroupId, a4ScsiId=a4ScsiId, a3HostAdapterDescription=a3HostAdapterDescription, a4EventStatus=a4EventStatus, tMiftomib=tMiftomib, a1SerialNumber=a1SerialNumber, tHostAdapterGroup=tHostAdapterGroup, a5Warningcount=a5Warningcount, DmiComponentIndex=DmiComponentIndex, scsi=scsi, a9999ErrorTime=a9999ErrorTime, tOperationGroup=tOperationGroup, eLogicalUnitGroup=eLogicalUnitGroup, a2PollDevices=a2PollDevices, a9999ErrorInstanceId=a9999ErrorInstanceId, a9999VendorText=a9999VendorText, a99MibOid=a99MibOid, eHostAdapterGroup=eHostAdapterGroup, adaptec=adaptec, DmiCounter=DmiCounter, a3EventStatus=a3EventStatus, a2ScanDevices=a2ScanDevices, a1Manufacturer=a1Manufacturer, a4LogicalUnitId=a4LogicalUnitId, a99DisableTrap=a99DisableTrap, a1Version=a1Version, a3Errorcontrolid=a3Errorcontrolid, a9999ComponentId=a9999ComponentId, a9999ParentGroupId=a9999ParentGroupId, DmiInteger=DmiInteger, a4LogicalUnitDescription=a4LogicalUnitDescription, a2IndicationControl=a2IndicationControl, a4Errorcontrolid=a4Errorcontrolid, a5Majorcount=a5Majorcount, a9999VendorCode1=a9999VendorCode1, eTrapGroup=eTrapGroup, AdaptecEventError=AdaptecEventError, products=products, a3ChannelCount=a3ChannelCount, a9999ParentInstanceId=a9999ParentInstanceId, a3HostAdapterIndex=a3HostAdapterIndex, a1Product=a1Product, a1Installation=a1Installation, eMiftomib=eMiftomib, tErrorcontrol=tErrorcontrol, a99MibName=a99MibName, DmiDisplaystring=DmiDisplaystring, a4LogicalUnitType=a4LogicalUnitType, tComponentid=tComponentid, a5Fatalcount=a5Fatalcount, tTrapGroup=tTrapGroup, a9999VendorCode2=a9999VendorCode2, a3HostAdapterVersion=a3HostAdapterVersion, a9999ErrorStatus=a9999ErrorStatus, 
tLogicalUnitGroup=tLogicalUnitGroup)
nilq/baby-python
python
import requests, zipfile, io, subprocess, os, sys
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()
ZIP_FILE_URL = os.getenv('ZIP_FILE_URL')
if not ZIP_FILE_URL:
    sys.exit('Error getting zip file url from env')

try:
    r = requests.get(ZIP_FILE_URL)
    r.raise_for_status()
except requests.exceptions.RequestException:
    sys.exit('Error getting file from dropbox. Make sure that ZIP_FILE_URL is a valid download URL')

z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path='Report')

commit_message = 'thesis backup: ' + datetime.now().isoformat()
subprocess.call(["git", "add", "."])
subprocess.call(["git", "commit", "-m", commit_message])
subprocess.call(["git", "push"])
nilq/baby-python
python
from behave import then @then("The response status is {response_status:d}") def check_response_status(context, response_status): assert context.status == response_status
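# For this step to pass, an earlier step must set `context.status`. A minimal
# companion @when step is sketched below; the use of `requests` and of a
# `context.base_url` attribute are assumptions of this sketch, not part of
# the original suite.
import requests
from behave import when


@when('I request "{path}"')
def make_request(context, path):
    response = requests.get(context.base_url + path)
    context.status = response.status_code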
nilq/baby-python
python
import tensorflow as tf
import tensorflow.keras.backend as K

from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import (Input, Concatenate, Dense, Activation,
                                     BatchNormalization, Reshape, Dropout,
                                     Flatten, LeakyReLU,
                                     Conv2D, Conv3D,
                                     UpSampling2D, UpSampling3D)
from tensorflow.keras.optimizers import Adam

from ..utilities import InstanceNormalization

import numpy as np
import os

import matplotlib.pyplot as plot

import ants


class CycleGanModel(object):
    """
    Cycle GAN model

    Cycle generative adversarial network from the paper:

      https://arxiv.org/pdf/1703.10593

    and ported from the Keras (python) implementation:

      https://github.com/eriklindernoren/Keras-GAN/blob/master/cyclegan/cyclegan.py

    Arguments
    ---------
    input_image_size : tuple
        Used for specifying the input tensor shape.  The shape (or dimension)
        of that tensor is the image dimensions followed by the number of
        channels (e.g., red, green, and blue).

    lambda_cycle_loss_weight : float
        Weight of the cycle-consistency (reconstruction) loss terms.

    lambda_identity_loss_weight : float
        Weight of the identity-mapping loss terms.

    number_of_filters_at_base_layer : tuple
        Number of filters at the base layer of the (generator, discriminator).

    Returns
    -------
    Keras model
        A Keras model defining the network.
    """

    def __init__(self, input_image_size,
                 lambda_cycle_loss_weight=10.0,
                 lambda_identity_loss_weight=1.0,
                 number_of_filters_at_base_layer=(32, 64)):
        super(CycleGanModel, self).__init__()

        self.input_image_size = input_image_size
        self.number_of_channels = self.input_image_size[-1]
        self.discriminator_patch_size = None
        self.lambda_cycle_loss_weight = lambda_cycle_loss_weight
        self.lambda_identity_loss_weight = lambda_identity_loss_weight
        self.number_of_filters_at_base_layer = number_of_filters_at_base_layer

        self.dimensionality = None
        if len(self.input_image_size) == 3:
            self.dimensionality = 2
        elif len(self.input_image_size) == 4:
            self.dimensionality = 3
        else:
            raise ValueError("Incorrect size for input_image_size.")

        # `learning_rate` replaces the deprecated `lr` keyword argument.
        optimizer = Adam(learning_rate=0.0002, beta_1=0.5)

        # Build discriminators for domains A and B

        self.discriminatorA = self.build_discriminator()
        self.discriminatorA.compile(loss='mse', optimizer=optimizer,
                                    metrics=['acc'])
        self.discriminatorA.trainable = False

        self.discriminatorB = self.build_discriminator()
        self.discriminatorB.compile(loss='mse', optimizer=optimizer,
                                    metrics=['acc'])
        self.discriminatorB.trainable = False

        # Build u-net like generators

        self.generatorAtoB = self.build_generator()
        self.generatorBtoA = self.build_generator()

        imageA = Input(shape=input_image_size)
        imageB = Input(shape=input_image_size)

        fake_imageA = self.generatorBtoA(imageB)
        fake_imageB = self.generatorAtoB(imageA)

        reconstructed_imageA = self.generatorBtoA(fake_imageB)
        reconstructed_imageB = self.generatorAtoB(fake_imageA)

        identity_imageA = self.generatorBtoA(imageA)
        identity_imageB = self.generatorAtoB(imageB)

        # Check images

        validityA = self.discriminatorA(fake_imageA)
        validityB = self.discriminatorB(fake_imageB)

        # Combined models

        self.combined_model = Model(inputs=[imageA, imageB],
                                    outputs=[validityA, validityB,
                                             reconstructed_imageA, reconstructed_imageB,
                                             identity_imageA, identity_imageB])
        self.combined_model.compile(loss=['mse', 'mse',
                                          'mae', 'mae',
                                          'mae', 'mae'],
                                    loss_weights=[1.0, 1.0,
                                                  self.lambda_cycle_loss_weight,
                                                  self.lambda_cycle_loss_weight,
                                                  self.lambda_identity_loss_weight,
                                                  self.lambda_identity_loss_weight],
                                    optimizer=optimizer)

    def build_generator(self):

        def build_encoding_layer(input, number_of_filters, kernel_size=4):
            encoder = input
            if self.dimensionality == 2:
                encoder = Conv2D(filters=number_of_filters,
                                 kernel_size=kernel_size,
                                 strides=2,
                                 padding='same')(encoder)
            else:
                encoder = Conv3D(filters=number_of_filters,
                                 kernel_size=kernel_size,
                                 strides=2,
                                 padding='same')(encoder)
            encoder = LeakyReLU(alpha=0.2)(encoder)
            encoder = InstanceNormalization()(encoder)
            return(encoder)

        def build_decoding_layer(input, skip_input, number_of_filters,
                                 kernel_size=4, dropout_rate=0.0):
            decoder = input
            if self.dimensionality == 2:
                decoder = UpSampling2D(size=2)(decoder)
                decoder = Conv2D(filters=number_of_filters,
                                 kernel_size=kernel_size,
                                 strides=1,
                                 padding='same',
                                 activation='relu')(decoder)
            else:
                decoder = UpSampling3D(size=2)(decoder)
                decoder = Conv3D(filters=number_of_filters,
                                 kernel_size=kernel_size,
                                 strides=1,
                                 padding='same',
                                 activation='relu')(decoder)
            if dropout_rate > 0.0:
                # Dropout takes its probability as `rate`, not `dropout_rate`.
                decoder = Dropout(rate=dropout_rate)(decoder)
            decoder = LeakyReLU(alpha=0.2)(decoder)
            decoder = Concatenate()([decoder, skip_input])
            return(decoder)

        input = Input(shape=self.input_image_size)

        encoding_layers = list()
        encoding_layers.append(build_encoding_layer(input,
            int(self.number_of_filters_at_base_layer[0])))
        encoding_layers.append(build_encoding_layer(encoding_layers[0],
            int(self.number_of_filters_at_base_layer[0] * 2)))
        encoding_layers.append(build_encoding_layer(encoding_layers[1],
            int(self.number_of_filters_at_base_layer[0] * 4)))
        encoding_layers.append(build_encoding_layer(encoding_layers[2],
            int(self.number_of_filters_at_base_layer[0] * 8)))

        decoding_layers = list()
        decoding_layers.append(build_decoding_layer(encoding_layers[3],
            encoding_layers[2], int(self.number_of_filters_at_base_layer[0] * 4)))
        decoding_layers.append(build_decoding_layer(decoding_layers[0],
            encoding_layers[1], int(self.number_of_filters_at_base_layer[0] * 2)))
        decoding_layers.append(build_decoding_layer(decoding_layers[1],
            encoding_layers[0], int(self.number_of_filters_at_base_layer[0])))

        if self.dimensionality == 2:
            decoding_layers.append(UpSampling2D(size=2)(decoding_layers[-1]))
            decoding_layers[-1] = Conv2D(filters=self.number_of_channels,
                                         kernel_size=4,
                                         strides=1,
                                         padding='same',
                                         activation='tanh')(decoding_layers[-1])
        else:
            decoding_layers.append(UpSampling3D(size=2)(decoding_layers[-1]))
            # The 3-D branch must use Conv3D (the original used Conv2D here).
            decoding_layers[-1] = Conv3D(filters=self.number_of_channels,
                                         kernel_size=4,
                                         strides=1,
                                         padding='same',
                                         activation='tanh')(decoding_layers[-1])

        generator = Model(inputs=input, outputs=decoding_layers[-1])
        return(generator)

    def build_discriminator(self):

        def build_layer(input, number_of_filters, kernel_size=4,
                        normalization=True):
            layer = input
            if self.dimensionality == 2:
                layer = Conv2D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=2,
                               padding='same')(layer)
            else:
                layer = Conv3D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=2,
                               padding='same')(layer)
            layer = LeakyReLU(alpha=0.2)(layer)
            if normalization == True:
                layer = InstanceNormalization()(layer)
            return(layer)

        input = Input(shape=self.input_image_size)

        layers = list()
        layers.append(build_layer(input,
            int(self.number_of_filters_at_base_layer[1])))
        layers.append(build_layer(layers[0],
            int(self.number_of_filters_at_base_layer[1] * 2)))
        layers.append(build_layer(layers[1],
            int(self.number_of_filters_at_base_layer[1] * 4)))
        layers.append(build_layer(layers[2],
            int(self.number_of_filters_at_base_layer[1] * 8)))

        validity = None
        if self.dimensionality == 2:
            validity = Conv2D(filters=1,
                              kernel_size=4,
                              strides=1,
                              padding='same')(layers[3])
        else:
            validity = Conv3D(filters=1,
                              kernel_size=4,
                              strides=1,
                              padding='same')(layers[3])

        if self.discriminator_patch_size is None:
            self.discriminator_patch_size = K.int_shape(validity)[1:]

        discriminator = Model(inputs=input, outputs=validity)
        return(discriminator)

    def train(self, X_trainA, X_trainB, number_of_epochs, batch_size=128,
              sample_interval=None, sample_file_prefix='sample'):

        valid = np.ones((batch_size, *self.discriminator_patch_size))
        fake = np.zeros((batch_size, *self.discriminator_patch_size))

        for epoch in range(number_of_epochs):
            indicesA = np.random.randint(0, X_trainA.shape[0] - 1, batch_size)
            imagesA = X_trainA[indicesA]

            indicesB = np.random.randint(0, X_trainB.shape[0] - 1, batch_size)
            imagesB = X_trainB[indicesB]

            # train discriminator
            # generatorAtoB maps domain A to domain B, so its output is a
            # fake B image (the original had these two assignments swapped).
            fake_imagesB = self.generatorAtoB.predict(imagesA)
            fake_imagesA = self.generatorBtoA.predict(imagesB)

            dA_loss_real = self.discriminatorA.train_on_batch(imagesA, valid)
            dA_loss_fake = self.discriminatorA.train_on_batch(fake_imagesA, fake)

            dB_loss_real = self.discriminatorB.train_on_batch(imagesB, valid)
            dB_loss_fake = self.discriminatorB.train_on_batch(fake_imagesB, fake)

            d_loss = list()
            for i in range(len(dA_loss_real)):
                d_loss.append(0.25 * (dA_loss_real[i] + dA_loss_fake[i] +
                                      dB_loss_real[i] + dB_loss_fake[i]))

            # train generator
            g_loss = self.combined_model.train_on_batch([imagesA, imagesB],
                                                         [valid, valid,
                                                          imagesA, imagesB,
                                                          imagesA, imagesB])

            # The identity terms are g_loss[5] and g_loss[6].
            print("Epoch ", epoch, ": [Discriminator loss: ", d_loss[0],
                  " acc: ", d_loss[1], "] ", "[Generator loss: ", g_loss[0],
                  ", ", np.mean(g_loss[1:3]), ", ", np.mean(g_loss[3:5]),
                  ", ", np.mean(g_loss[5:7]), "]")

            if self.dimensionality == 2:
                if sample_interval is not None:
                    if epoch % sample_interval == 0:
                        # Do a 2x3 grid
                        #
                        # imageA | translated( imageA ) | reconstructed( imageA )
                        # imageB | translated( imageB ) | reconstructed( imageB )

                        indexA = np.random.randint(0, X_trainA.shape[0] - 1, 1)
                        indexB = np.random.randint(0, X_trainB.shape[0] - 1, 1)

                        imageA = X_trainA[indexA,:,:,:]
                        imageB = X_trainB[indexB,:,:,:]

                        X = list()
                        X.append(imageA)
                        X.append(self.generatorAtoB.predict(X[0]))
                        X.append(self.generatorBtoA.predict(X[1]))
                        X.append(imageB)
                        X.append(self.generatorAtoB.predict(X[3]))
                        X.append(self.generatorBtoA.predict(X[4]))

                        plot_images = np.concatenate(X)
                        plot_images = 0.5 * plot_images + 0.5

                        titles = ['Original', 'Translated', 'Reconstructed']
                        figure, axes = plot.subplots(2, 3)
                        count = 0
                        for i in range(2):
                            for j in range(3):
                                axes[i, j].imshow(plot_images[count])
                                axes[i, j].set_title(titles[j])
                                axes[i, j].axis('off')
                                count += 1
                        image_file_name = (sample_file_prefix + "_iteration" +
                                           str(epoch) + ".jpg")
                        dir_name = os.path.dirname(sample_file_prefix)
                        # Guard against an empty dirname when the prefix has
                        # no directory component.
                        if dir_name and not os.path.exists(dir_name):
                            os.mkdir(dir_name)
                        figure.savefig(image_file_name)
                        plot.close()
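
# Illustrative smoke test (hypothetical, not part of the original module):
# drives the class with random arrays in place of real images.  Shapes and
# epoch counts are arbitrary; the tanh output layer implies inputs scaled
# to [-1, 1].  Requires the surrounding package (for InstanceNormalization)
# to be importable.
if __name__ == "__main__":
    model = CycleGanModel(input_image_size=(64, 64, 1))  # 2-D, one channel

    X_A = np.random.uniform(-1.0, 1.0, (16, 64, 64, 1)).astype('float32')
    X_B = np.random.uniform(-1.0, 1.0, (16, 64, 64, 1)).astype('float32')

    model.train(X_A, X_B, number_of_epochs=2, batch_size=4)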
nilq/baby-python
python
import os import sys import dicom import numpy as np # import SimpleITK as sitk from matplotlib import use use("Qt4Agg") from matplotlib import pyplot as plt from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.figure import Figure from matplotlib.patches import Polygon from matplotlib.lines import Line2D from matplotlib.mlab import dist_point_to_segment from PyQt4 import QtCore from PyQt4 import QtGui import io import algorithm from pprint import pprint class MainFrame(QtGui.QWidget): _tidx = 0 # active t slice index _zidx = 0 # active z slice index _loadflag = False _tslicenum = 100 # original index range [0, _tslicenum) _zslicenum = 100 # original index range [0, _zslicenum) _tmin, _tmax = 0, 100 # index range [_tmin, _tmax) for t index in use _zmin, _zmax = 0, 100 # index range [_zmin, _zmax) for z index in use cine_img = None cine_mask = None mask_slice = None img_slice = None # gui-variables btn = {} spinbox = {} slider = {} title = {} # ClickerClass connected with given axis cc = None valueChanged = QtCore.pyqtSignal(int) def __init__(self, master=None): super(MainFrame, self).__init__() self.grid = QtGui.QGridLayout() self.fig1 = Figure(figsize=(6, 6), dpi=65) self.ax1 = self.fig1.add_subplot(111) self.canvas1 = FigureCanvas(self.fig1) self.canvas1.setParent(self) self.fig2 = Figure(figsize=(6, 6), dpi=65) self.ax2 = self.fig2.add_subplot(111) self.canvas2 = FigureCanvas(self.fig2) self.canvas2.setParent(self) # connect axis activities self.cc = ClickerClass(self.ax1, self.ax2, self.canvas1, self.canvas2) # gui setup self.set_button() self.set_title() self.set_slider() self.set_spinbox() self.add_widget() self.connect_activity() self.setLayout(self.grid) def set_button(self): self.btn["load"] = QtGui.QPushButton("Load Subject Directory") self.btn["save"] = QtGui.QPushButton("Save") self.btn["endo1"] = QtGui.QPushButton("Multiple") self.btn["endo2"] = QtGui.QPushButton("Singular") self.btn["epic1"] = QtGui.QPushButton("Multiple") self.btn["epic2"] = QtGui.QPushButton("Singular") def set_title(self): self.setWindowTitle("Border Detection") self.title["endo"] = QtGui.QLabel("Endocardial detection: ") self.title["endo"].setStyleSheet("font: bold") self.title["endo"].setAlignment(QtCore.Qt.AlignCenter) self.title["epic"] = QtGui.QLabel("Epicardial detection: ") self.title["epic"].setStyleSheet("font: bold") self.title["epic"].setAlignment(QtCore.Qt.AlignCenter) self.title["tslice"] = QtGui.QLabel("Time slice [0, 30): ") self.title["tslice"].setStyleSheet("font: bold") self.title["tslice"].setAlignment(QtCore.Qt.AlignCenter) self.title["zslice"] = QtGui.QLabel("Z slice [0, 15): ") self.title["zslice"].setStyleSheet("font: bold") self.title["zslice"].setAlignment(QtCore.Qt.AlignCenter) self.title["tmax"] = QtGui.QLabel("T maximum: ") self.title["tmax"].setStyleSheet("font: bold") self.title["tmax"].setAlignment(QtCore.Qt.AlignCenter) self.title["tmin"] = QtGui.QLabel("T minimum: ") self.title["tmin"].setStyleSheet("font: bold") self.title["tmin"].setAlignment(QtCore.Qt.AlignCenter) self.title["zmax"] = QtGui.QLabel("Z maximum: ") self.title["zmax"].setStyleSheet("font: bold") self.title["zmax"].setAlignment(QtCore.Qt.AlignCenter) self.title["zmin"] = QtGui.QLabel("Z minimum: ") self.title["zmin"].setStyleSheet("font: bold") self.title["zmin"].setAlignment(QtCore.Qt.AlignCenter) def set_slider(self): # slides on the time-axis self.slider["tidx"] = QtGui.QSlider(QtCore.Qt.Horizontal) self.slider["tidx"].setFocusPolicy(QtCore.Qt.StrongFocus) 
self.slider["tidx"].setTickPosition(QtGui.QSlider.TicksBothSides) self.slider["tidx"].setTickInterval(5) self.slider["tidx"].setSingleStep(1) self.slider["tidx"].setTracking(True) self.slider["tidx"].setRange(0, 29) # slides on the z-axis self.slider["zidx"] = QtGui.QSlider(QtCore.Qt.Horizontal) self.slider["zidx"].setFocusPolicy(QtCore.Qt.StrongFocus) self.slider["zidx"].setTickPosition(QtGui.QSlider.TicksBothSides) self.slider["zidx"].setTickInterval(5) self.slider["zidx"].setSingleStep(1) self.slider["zidx"].setTracking(True) self.slider["zidx"].setRange(0, 14) def set_spinbox(self): # sets active t indices of self.cine_img self.spinbox["tidx"] = QtGui.QSpinBox() self.spinbox["tidx"].setRange(0, 29) self.spinbox["tidx"].setSingleStep(1) # sets active z indices of self.cine_img self.spinbox["zidx"] = QtGui.QSpinBox() self.spinbox["zidx"].setRange(0, 14) self.spinbox["zidx"].setSingleStep(1) # sets lower t-index limit of slices in effect self.spinbox["tmin"] = QtGui.QSpinBox() self.spinbox["tmin"].setRange(0, 29) self.spinbox["tmin"].setSingleStep(1) # sets upper t-index limit of slices in effect self.spinbox["tmax"] = QtGui.QSpinBox() self.spinbox["tmax"].setRange(0, 29) self.spinbox["tmax"].setSingleStep(1) self.spinbox["tmax"].setValue(1) # sets lower z-index limit of slices in effect self.spinbox["zmin"] = QtGui.QSpinBox() self.spinbox["zmin"].setRange(0, 14) self.spinbox["zmin"].setSingleStep(1) # sets upper z-index limit of slices in effect self.spinbox["zmax"] = QtGui.QSpinBox() self.spinbox["zmax"].setRange(0, 14) self.spinbox["zmax"].setSingleStep(1) self.spinbox["zmax"].setValue(1) def connect_activity(self): # connect buttons self.btn["load"].clicked.connect(self.load_directory) self.btn["save"].clicked.connect(self.save_img) self.btn["endo1"].clicked.connect(self.multiple_endocardial_detection) self.btn["endo2"].clicked.connect(self.singular_endocardial_detection) self.btn["epic1"].clicked.connect(self.multiple_epicardial_detection) self.btn["epic2"].clicked.connect(self.singular_epicardial_detection) # connect spinboxes self.spinbox["tidx"].valueChanged.connect(self.slider["tidx"].setValue) self.spinbox["zidx"].valueChanged.connect(self.slider["zidx"].setValue) self.spinbox["tmin"].valueChanged.connect(self.update_tmin) self.spinbox["tmax"].valueChanged.connect(self.update_tmax) self.spinbox["zmin"].valueChanged.connect(self.update_zmin) self.spinbox["zmax"].valueChanged.connect(self.update_zmax) # connect sliders self.slider["tidx"].valueChanged.connect(self.spinbox["tidx"].setValue) self.slider["tidx"].valueChanged.connect(self.update_tidx) self.slider["zidx"].valueChanged.connect(self.spinbox["zidx"].setValue) self.slider["zidx"].valueChanged.connect(self.update_zidx) def add_widget(self): # add buttons self.grid.addWidget(self.btn["load"], 0, 0) self.grid.addWidget(self.btn["save"], 1, 0) self.grid.addWidget(self.btn["endo1"], 0, 2) self.grid.addWidget(self.btn["endo2"], 0, 3) self.grid.addWidget(self.btn["epic1"], 1, 2) self.grid.addWidget(self.btn["epic2"], 1, 3) # add titles self.grid.addWidget(self.title["endo"], 0, 1) self.grid.addWidget(self.title["epic"], 1, 1) self.grid.addWidget(self.title["tslice"], 7, 0) self.grid.addWidget(self.title["zslice"], 8, 0) self.grid.addWidget(self.title["tmin"], 9, 0) self.grid.addWidget(self.title["tmax"], 9, 2) self.grid.addWidget(self.title["zmin"], 10, 0) self.grid.addWidget(self.title["zmax"], 10, 2) # add sliders self.grid.addWidget(self.slider["tidx"], 7, 2, 1, 2) self.grid.addWidget(self.slider["zidx"], 8, 2, 1, 2) # add 
spinboxes self.grid.addWidget(self.spinbox["tidx"], 7, 1) self.grid.addWidget(self.spinbox["zidx"], 8, 1) self.grid.addWidget(self.spinbox["tmin"], 9, 1) self.grid.addWidget(self.spinbox["tmax"], 9, 3) self.grid.addWidget(self.spinbox["zmin"], 10, 1) self.grid.addWidget(self.spinbox["zmax"], 10, 3) # add canvas for image display self.grid.addWidget(self.canvas1, 2, 0, 5, 2) self.grid.addWidget(self.canvas2, 2, 2, 5, 2) def reset_setting(self): self._tslicenum = self.cine_img.shape[2] self._zslicenum = self.cine_img.shape[3] self._tidx, self._zidx = 0, 0 self._tmin, self._zmin = 0, 0 self._tmax = self._tslicenum-1 self._zmax = self._zslicenum-1 self.slider["tidx"].setRange(self._tmin, self._tmax) self.slider["zidx"].setRange(self._zmin, self._zmax) self.spinbox["tidx"].setRange(self._tmin, self._tmax) self.spinbox["zidx"].setRange(self._zmin, self._zmax) self.spinbox["tmin"].setRange(0, self._tmax-1) self.spinbox["zmin"].setRange(0, self._zmax-1) self.spinbox["tmax"].setRange(self._tmin+1, self._tslicenum-1) self.spinbox["zmax"].setRange(self._zmin+1, self._zslicenum-1) self.slider["tidx"].setValue(self._tidx) self.slider["zidx"].setValue(self._zidx) self.spinbox["tidx"].setValue(self._tidx) self.spinbox["zidx"].setValue(self._zidx) self.spinbox["tmin"].setValue(0) self.spinbox["zmin"].setValue(0) self.spinbox["tmax"].setValue(self._tmax) self.spinbox["zmax"].setValue(self._zmax) # update slider titles to fit current slicenums self.grid.removeWidget(self.title["tslice"]) self.grid.removeWidget(self.title["zslice"]) self.title["tslice"].deleteLater() self.title["zslice"].deleteLater() del self.title["tslice"] del self.title["zslice"] # set new titles self.title["tslice"] = QtGui.QLabel("Time slice [0, {}): ".format(self._tslicenum)) self.title["tslice"].setStyleSheet("font: bold") self.title["tslice"].setAlignment(QtCore.Qt.AlignCenter) self.title["zslice"] = QtGui.QLabel("Z slice [0, {}): ".format(self._zslicenum)) self.title["zslice"].setStyleSheet("font: bold") self.title["zslice"].setAlignment(QtCore.Qt.AlignCenter) # add title widgets self.grid.addWidget(self.title["tslice"], 7, 0) self.grid.addWidget(self.title["zslice"], 8, 0) # update cc settings self.cc.reset_setting() self.cc.init_mask(self.cine_mask) self.cc.init_img(self.cine_img) self.cc.init_vertex() self.cc.update_tlimit(self._tmin, self._tmax) self.cc.update_zlimit(self._zmin, self._zmax) # self.canvas1.draw() # self.canvas2.draw() def update_tidx(self, value): if self._loadflag == True: self._tidx = value self.update_slice() self.cc.update_index(self._tidx, self._zidx) self.redraw_img() def update_zidx(self, value): if self._loadflag == True: self._zidx = value self.update_slice() self.cc.update_index(self._tidx, self._zidx) self.redraw_img() def update_tmin(self, value): self._tmin = value self.spinbox["tmin"].setValue(value) self.spinbox["tmin"].setRange(0, self._tmax-1) self.slider["tidx"].setRange(self._tmin, self._tmax) self.spinbox["tidx"].setRange(self._tmin, self._tmax) self.cc.update_tlimit(self._tmin, self._tmax) def update_tmax(self, value): self._tmax = value self.spinbox["tmax"].setValue(value) self.spinbox["tmax"].setRange(self._tmin+1, self._tslicenum-1) self.slider["tidx"].setRange(self._tmin, self._tmax) self.spinbox["tidx"].setRange(self._tmin, self._tmax) self.cc.update_tlimit(self._tmin, self._tmax) def update_zmin(self, value): self._zmin = value self.spinbox["zmin"].setValue(value) self.spinbox["zmin"].setRange(0, self._zmax-1) self.slider["zidx"].setRange(self._zmin, self._zmax) 
self.spinbox["zidx"].setRange(self._zmin, self._zmax) self.cc.update_zlimit(self._zmin, self._zmax) def update_zmax(self, value): self._zmax = value self.spinbox["zmax"].setValue(value) self.spinbox["zmax"].setRange(self._zmin+1, self._zslicenum-1) self.slider["zidx"].setRange(self._zmin, self._zmax) self.spinbox["zidx"].setRange(self._zmin, self._zmax) self.cc.update_zlimit(self._zmin, self._zmax) def update_slice(self): self.img_slice = self.cine_img[:, :, self._tidx, self._zidx] self.mask_slice = self.cine_mask[:, :, self._tidx, self._zidx] def load_directory(self): dirname = io.get_directory() # directory not chosen if len(dirname) == 0: return # invalid directory chosen if "cine" not in os.listdir(dirname): print("Subject directory must contain 'cine/'\n") return # print("\n======start of new session") print("\nSubject directory: [%s]" % dirname) cinedir = dirname + "/cine/" temp = io.load_cine_from_directory(cinedir) if(len(temp.shape) != 4): print("Inavlid cine image") return elif(temp is None): print("Failed to load cine image") return self.cine_img = temp self._loadflag = True self.cine_img = algorithm.resize(self.cine_img, mode=256) self.img_slice = self.cine_img[:, :, 0, 0] self.cine_mask = np.zeros(self.cine_img.shape) self.mask_slice = self.cine_mask[:, :, 0, 0] self.reset_setting() self.redraw() def redraw_img(self): self.ax1.imshow(self.img_slice, cmap=plt.cm.gray) self.canvas1.draw() def redraw_mask(self): self.ax2.imshow(self.mask_slice, cmap=plt.cm.gray) self.canvas2.draw() def redraw(self): self.redraw_img() self.redraw_mask() def save_img(self): if self._loadflag == False: return fname = io.save_file_dialog() print(fname) def singular_endocardial_detection(self): if self._loadflag == False: return print("\nInitializing singular endocardial detection..... ", end="") self.cc.set_singular() self.cc.switch2seed() # print("complete") def multiple_endocardial_detection(self): if self._loadflag == False: return print("\nInitializing multiple endocardial detection..... 
", end="") self.cc.set_multiple() self.cc.switch2seed() # print("complete") def singular_epicardial_detection(self): if self._loadflag == False: return self.cc.set_singular() print("sin_epi") def multiple_epicardial_detection(self): if self._loadflag == False: return self.cc.set_multiple() print("com_epi") class ClickerClass(object): _title = {"plot": "LEFT: add landmark, RIGHT: delete landmark\n" "Press 'm' to switch modes", "connect": "'i': insert, 't': toggle vertex, 'RIGHT': delete\n" "Press 'Enter' to crop, 'm' to switch modes", "seed": "LEFT: select seed\n" "Press 'enter' to complete", "mask": "Binary mask\n", "init": "Cine image\n"} _tidx, _zidx = 0, 0 # active slice index _tmin, _tmax = 0, 100 # index range [_tmin, _tmax] for detection _zmin, _zmax = 0, 100 # index range [_zmin, _zmax] for detection _detectionflag = None _loadflag = False _showverts = True _epsilon = 5 # cursor sensitivity in pixels _modes = "init" # True: Place landmarks, False: Connect landmarks _alpha = 0.30 _ind = None # active vertex _seed = [] # seed point for endocardial detection _cid = [] cine_img = None # 4d numpy array cine_mask = None # 4d numpy array mask_slice = None # active mask slice cropped = None # 4d numpy array # artist objects line = None plot = None poly = None verts = None # active position: verts[_tidx][_zidx] position = None background = None def __init__(self, ax1, ax2, canvas1, canvas2): # get axis object self.ax1 = ax1 self.ax2 = ax2 # get figure object self.fig1 = ax1.get_figure() self.fig2 = ax2.get_figure() # get canvas object self.canvas1 = canvas1 self.canvas2 = canvas2 # quick solution for inactive key_press_event self.canvas1.setFocusPolicy(QtCore.Qt.ClickFocus) self.canvas1.setFocus() self.ax1.set_title(self._title["init"]) self.ax2.set_title(self._title["mask"]) # initiate artist objects self.plot = self.ax1.plot([], [], marker='o', markerfacecolor='b', linestyle='none', markersize=5)[0] self.poly = Polygon([(0, 0)], animated=True, alpha=self._alpha) self.line = Line2D([], [], marker='o', markerfacecolor='r', animated=True, markersize=5) # add artist objects to the axis self.ax1.add_patch(self.poly) self.ax1.add_line(self.line) self.connect_activity() def init_vertex(self): tl = self.cine_mask.shape[2] zl = self.cine_mask.shape[3] # access: position[tl][zl] self.position = [[[] for i in range(zl)] for j in range(tl)] self.verts = self.position[self._tidx][self._zidx] def init_img(self, img): self.cine_img = img def init_mask(self, mask): self.cine_mask = mask self.mask_slice = self.cine_mask[:, :, self._tidx, self._zidx] self.cropped = np.zeros((self.cine_mask.shape[2], self.cine_mask.shape[3])) def reset_setting(self): self._showverts = True self._modes = "plot" self.ax1.set_title(self._title[self._modes]) self.cine_mask = None self.mask_slice = None self._seed = [] self._tidx, self._zidx = 0, 0 self._tmin, self._zmax = 0, 100 self._tmin, self._zmax = 0, 100 self._loadflag = True self._detectionflag = None def update_index(self, tidx, zidx): self._tidx = tidx self._zidx = zidx self.switch_slice() def update_tlimit(self, tmin, tmax): self._tmin = tmin self._tmax = tmax def update_zlimit(self, zmin, zmax): self._zmin = zmin self._zmax = zmax def redraw(self): self.ax1.draw_artist(self.poly) self.ax1.draw_artist(self.line) self.canvas1.blit(self.ax1.bbox) def replot(self): if self._modes == "seed": verts = self._seed[:] else: verts = self.verts[:] if len(verts) > 0: x, y = zip(*verts) else: x, y = [], [] if not self._modes == "connect": self.plot.set_xdata(x) 
self.plot.set_ydata(y) def switch_slice(self): self.verts = self.position[self._tidx][self._zidx] self.mask_slice = self.cine_mask[:, :, self._tidx, self._zidx] self.ax2.imshow(self.mask_slice, cmap=plt.cm.gray) if self._modes == "connect": if len(self.verts) <= 1: self.switch_modes() else: self.poly.xy = np.array(self.verts[:]) self.line.set_data(zip(*self.poly.xy)) else: self.replot() self.poly.xy = [(0, 0)] self.canvas1.draw() self.canvas2.draw() def switch_modes(self): if not self._loadflag: return if not self._showverts: return if self._modes == "seed": return if self._modes == "plot": self.switch2poly() elif self._modes == "connect": self.switch2plot() def switch2seed(self): self._modes = "seed" self.ax1.set_title(self._title["seed"]) self.ax1.set_ylabel("") # clears the existing plot # self.verts.clear() self.replot() if self.poly: self.poly.xy = [(0, 0)] self.canvas1.draw() def switch2plot(self): self._modes = "plot" self.ax1.set_title(self._title["plot"]) self.ax1.set_ylabel("") self.replot() if self.poly: self.poly.xy = [(0, 0)] def switch2poly(self): if len(self.verts) == 0: return self._modes = "connect" self.ax1.set_title(self._title["connect"]) self.ax1.set_ylabel("Alpha: %.2f" %self._alpha) self.poly.xy = np.array(self.verts[:]) self.line.set_data(zip(*self.poly.xy)) self.plot.set_data([], []) def connect_activity(self): self.canvas1.mpl_connect('button_press_event', self.button_press_callback) self.canvas1.mpl_connect('button_release_event', self.button_release_callback) self.canvas1.mpl_connect('scroll_event', self.scroll_callback) self.canvas1.mpl_connect('motion_notify_event', self.motion_notify_callback) self.canvas1.mpl_connect('draw_event', self.draw_callback) self.canvas1.mpl_connect('key_press_event', self.key_press_callback) def button_press_callback(self, event): if not self._showverts: return if not event.inaxes: return if not self._loadflag: return self._ind = self.get_nearest_vertex_idx(event) # Do whichever action corresponds to the mouse button clicked if event.button == 1: self.add_vertex(event) elif event.button == 3: self.remove_vertex(event) # Re-plot the landmarks on canvas self.replot() self.canvas1.draw() def button_release_callback(self, event): if not self._loadflag: return if not self._showverts: return self._ind = None def scroll_callback(self, event): if not self._loadflag: return if not self._showverts: return if not self._modes == "connect": return if event.button == 'up': if self._alpha < 1.00: self._alpha += 0.05 elif event.button == 'down': self._alpha -= 0.05 if self._alpha <= 0.00: self._alpha = 0.00 #print("alpha changed") self.ax1.set_ylabel("Alpha: %.2f" % self._alpha) self.poly.set_alpha(self._alpha) # self.ax1.draw_artist(self.ax1.yaxis) self.canvas1.draw() def motion_notify_callback(self, event): # on mouse movement if self._ind is None: return if not self._showverts: return if self._modes == "seed": return if not self._loadflag: return if event.button != 1: return if not event.inaxes: return self.move_vertex_to(event) self.canvas1.restore_region(self.background) self.redraw() def draw_callback(self, event): if not self._loadflag: return if self._modes == "connect": self.background = self.canvas1.copy_from_bbox(self.ax1.bbox) self.redraw() def key_press_callback(self, event): if not self._loadflag: return if not event.inaxes: return # print("key_press active") if event.key == 't': # self.switch_vis() pass elif event.key == 'm': self.switch_modes() elif event.key == 'i': self.insert_vertex(event) elif event.key == 'enter': if 
self._modes == "connect": self.poly2mask() elif self._detectionflag == "singular": self.singular_endocardial_detection() elif self._detectionflag == "multiple": self.multiple_endocardial_detection() self.canvas1.draw() def poly2mask(self): if not self._modes == "connect": return for x in range(self.cine_mask.shape[1]): for y in range(self.cine_mask.shape[0]): if self.poly.get_path().contains_point((x,y)): #self.covered_pixels.append((x,y)) self.mask_slice[y][x] = 1 else: self.mask_slice[y][x] = 0 if(len(self.verts) > 2): self.cropped[self._tidx][self._zidx] = True else: self.cropped[self._tidx][self._zidx] = False self.ax2.imshow(self.mask_slice, cmap=plt.cm.gray) self.canvas2.draw() def add_vertex(self, event): # Adds a point at cursor if self._modes == "connect": return if not self._loadflag: return if self._modes == "seed": verts = self._seed verts.clear() else: verts = self.verts verts.append((int(event.xdata), int(event.ydata))) def insert_vertex(self, event): if not self._modes == "connect": return if not self._showverts: return if not self._loadflag: return p = event.xdata, event.ydata # display coords mod = len(self.verts) for i in range(len(self.verts)): s0 = self.verts[i % mod] s1 = self.verts[(i + 1) % mod] d = dist_point_to_segment(p, s0, s1) if d <= 5: self.poly.xy = np.array( list(self.poly.xy[: i+1]) + [(event.xdata, event.ydata)] + list(self.poly.xy[i+1 :])) self.line.set_data(zip(*self.poly.xy)) self.verts = [tup for i, tup in enumerate(self.poly.xy) if i != len(self.poly.xy)-1] break self.position[self._tidx][self._zidx] = self.verts def remove_vertex(self, event): # Removes the point closest to the cursor if not self._loadflag: return if self._modes == "seed": return index = self._ind if not index is None: del self.verts[index] if self._modes == "connect": if len(self.verts) <= 1: self.switch_modes() else: self.poly.xy = [x for x in self.verts] self.line.set_data(zip(*self.poly.xy)) def get_nearest_vertex_idx(self, event): if len(self.verts) > 0: distance = [(v[0] - event.xdata) ** 2 + (v[1] - event.ydata) ** 2 for v in self.verts] if np.sqrt(min(distance)) <= self._epsilon: return distance.index(min(distance)) return None def move_vertex_to(self, event): x, y = event.xdata, event.ydata self.poly.xy[self._ind] = x, y self.verts[self._ind] = x, y if self._ind == 0: self.poly.xy[-1] = self.poly.xy[self._ind] self.line.set_data(zip(*self.poly.xy)) def singular_endocardial_detection(self): if not self._modes == "seed": return img_slice = self.cine_img[:, :, self._tidx, self._zidx] if len(self._seed) == 0: return print("complete") print("seed set at", (int(self._seed[0][0]),\ int(self._seed[0][1]))) print("segmenting mask..... ", end="") self.mask_slice[:, :] = \ algorithm.endocardial_detection(img_slice, (int(self._seed[0][0]), int(self._seed[0][1])))[:, :] # if valid mask if int(np.sum(self.mask_slice)) != 0: self.cropped[self._tidx][self._zidx] = True print("complete") print("calculating hull..... 
", end="") try: self.verts[:] = algorithm.convex_hull(self.mask_slice) except: print("failure") print("complete") self.switch2poly() self.poly2mask() else: print("segmentation failure") self.switch2plot() self.cropped[self._tidx][self._zidx] = False self._seed = [] self.canvas1.draw() def multiple_endocardial_detection(self): if not self._modes == "seed": return if len(self._seed) == 0: return print("complete") print("seed set at", (int(self._seed[0][0]),\ int(self._seed[0][1]))) print("segmenting mask ", end="") # mod = int(((self._tmin+self._tmax)/5)+0.5) for t in range(self._tmin, self._tmax+1): # status bar # if mod != 0 && t%mod == 0: # print(".", end="", flush=True) for z in range(self._zmin, self._zmax+1): img_slice = self.cine_img[:, :, t, z] self.mask_slice = self.cine_mask[:, :, t, z] self.mask_slice[:, :] = \ algorithm.endocardial_detection(img_slice, (int(self._seed[0][0]), int(self._seed[0][1])))[:, :] if int(np.sum(self.mask_slice)) != 0: self.cropped[t][z] = True else: self.cropped[t][z] = False print("complete") print("calculating hull", end="") for t in range(self._tmin, self._tmax): # status bar #if t%(mod) == 0: # print(".", end="", flush=True) for z in range(self._zmin, self._zmax): if self.cropped[t][z] == False: continue self.mask_slice = self.cine_mask[:, :, t, z] # self.verts = self.position[t][z] self.position[t][z] = algorithm.convex_hull(self.mask_slice) self.poly.xy = np.array(self.position[t][z]) for x in range(self.mask_slice.shape[1]): for y in range(self.mask_slice.shape[0]): if self.poly.get_path().contains_point((x,y)): self.mask_slice[y][x] = 1 else: self.mask_slice[y][x] = 0 print(" complete") # print("") self.verts = self.position[self._tidx][self._zidx] self.mask_slice = self.cine_mask[:, :, self._tidx, self._zidx] self.ax2.imshow(self.mask_slice, cmap=plt.cm.gray) self._seed = [] if len(self.verts) <= 2: self.switch2plot() else: self.switch2poly() self.canvas1.draw() self.canvas2.draw() def set_singular(self): self._detectionflag = "singular" def set_multiple(self): self._detectionflag = "multiple" class Window(QtGui.QMainWindow): def __init__(self): super(Window, self).__init__() ''' self.fig = MainFrame() _widget = QtGui.QWidget() _layout = QtGui.QVBoxLayout(_widget) _layout.addWidget(self.fig) self.setCentralWidget(_widget) ''' self.setWindowTitle("Fuzzy Rocks!") extractAction = QtGui.QAction("sdg", self) extractAction.setShortcut("Ctrl+K") extractAction.setStatusTip("leave the App") extractAction.triggered.connect(self.close_application) self.statusBar() mainMenu = self.menuBar() fileMenu = mainMenu.addMenu('&File') fileMenu.addMenu("&work") fileMenu.addAction(extractAction) self.show() def download(self): self.completed = 0 while self.completed < 100: self.completed += 0.00001 self.progress.setValue(self.completed) def home(self): btn = QtGui.QPushButton("Quit", self) btn.resize(100, 100) btn.move(100, 100) btn.clicked.connect(self.close_application) self.show() def close_application(self): self.setWindowTitle("QUit") sys.exit() def main(): app = QtGui.QApplication(sys.argv) GUI = Window() sys.exit(app.exec_()) main()
nilq/baby-python
python
import os import torch import shutil import pickle import numpy as np from tqdm import tqdm from pathlib import Path from torch.utils.data import Dataset class P3B3(Dataset): """P3B3 Synthetic Dataset. Args: root: str Root directory of dataset where CANDLE loads P3B3 data. partition: str dataset partition to be loaded. Must be either 'train' or 'test'. """ training_data_file = 'train_X.npy' training_label_file = 'train_Y.npy' test_data_file = 'test_X.npy' test_label_file = 'test_Y.npy' def __init__(self, root, partition, subsite=True, laterality=True, behavior=True, grade=True, transform=None, target_transform=None): self.root = root self.partition = partition self.transform = transform self.target_transform = target_transform self.subsite = subsite self.laterality = laterality self.behavior = behavior self.grade = grade if self.partition == 'train': data_file = self.training_data_file label_file = self.training_label_file elif self.partition == 'test': data_file = self.test_data_file label_file = self.test_label_file else: raise ValueError("Partition must either be 'train' or 'test'.") self.data = np.load(os.path.join(self.root, data_file)) self.targets = self.get_targets(label_file) def __repr__(self): fmt_str = 'Dataset ' + self.__class__.__name__ + '\n' fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) tmp = self.partition fmt_str += ' Split: {}\n'.format(tmp) fmt_str += ' Root Location: {}\n'.format(self.root) return fmt_str def __len__(self): return len(self.data) def load_data(self): return self.data, self.targets def get_targets(self, label_file): """Get dictionary of targets specified by user.""" targets = np.load(os.path.join(self.root, label_file)) tasks = {} if self.subsite: tasks['subsite'] = targets[:, 0] if self.laterality: tasks['laterality'] = targets[:, 1] if self.behavior: tasks['behavior'] = targets[:, 2] if self.grade: tasks['grade'] = targets[:, 3] return tasks def __getitem__(self, idx): """ Parameters ---------- index : int Index of the data to be loaded. Returns ------- (document, target) : tuple where target is index of the target class. """ document = self.data[idx] if self.transform is not None: document = self.transform(document) targets = {} for key, value in self.targets.items(): subset = value[idx] if self.target_transform is not None: subset = self.target_transform(subset) targets[key] = subset return document, targets class Vocabulary: def __init__(self): self.word2idx = {} self.idx2word = [] def add_word(self, word): if word not in self.word2idx: self.idx2word.append(word) self.word2idx[word] = len(self.idx2word) - 1 return self.word2idx[word] def __len__(self): return len(self.idx2word) class Tokenizer: def __init__(self, train, valid): self.vocab = Vocabulary() self.train = self.tokenize(train) self.valid = self.tokenize(valid) self.inverse_tokenize() def tokenize(self, data): """Tokenize a dataset""" # Build the vocabulary for doc in tqdm(data): for token in doc: self.vocab.add_word(token) # Tokenize idss = [] for doc in data: ids = [] for token in doc: ids.append(self.vocab.word2idx[token]) idss.append(torch.tensor(ids).type(torch.int64)) return torch.stack(idss) def inverse_tokenize(self): self.vocab.inverse = {v: k for k, v in self.vocab.word2idx.items()} class Egress(Dataset): r"""Static split from HJ's data handler Targets have six classes, with the following number of classes: site: 70, subsite: 325, laterality: 7, histology: 575, behaviour: 4, grade: 9 Args: root: path to store the data split: Split to load. 
Either 'train' or 'valid' """ store = Path('/gpfs/alpine/proj-shared/med107/NCI_Data/yngtodd/dat.pickle') def __init__(self, root, split): self._check_split(split) self._check_download(root) self._load_data(split) self._load_vocab() def __repr__(self): return f"Egress(root={self.root}, split={self.split})" def _check_split(self, split): assert split in ["train", "valid"], \ f"Split must be in {'train', 'valid'}, got {split}" self.split = split def _check_download(self, root): self.root = Path(root) if not self.root.exists(): self._download() def _download(self): raw = self.root.joinpath("raw") raw.mkdir(parents=True) raw_data = raw.joinpath("raw.pickle") shutil.copy(self.store, raw_data) self._preprocess(raw_data) def _preprocess(self, raw_data): print(f"Preprocessing data...") self._make_processed_dirs() with open(raw_data, 'rb') as f: x_train = np.flip(pickle.load(f), 1) y_train = pickle.load(f) x_valid = np.flip(pickle.load(f), 1) y_valid = pickle.load(f) corpus = Tokenizer(x_train, x_valid) self.num_vocab = len(corpus.vocab) self._save_split('train', corpus.train, y_train) self._save_split('valid', corpus.valid, y_valid) self._save_vocab(corpus.vocab) print(f"Done!") def _save_split(self, split, data, target): target = self._create_target(target) split_path = self.root.joinpath(f'processed/{split}') torch.save(data, split_path.joinpath('data.pt')) torch.save(target, split_path.joinpath('target.pt')) def _save_vocab(self, vocab): torch.save(vocab, self.root.joinpath("vocab.pt")) def _make_processed_dirs(self): processed = self.root.joinpath("processed") processed.joinpath("train").mkdir(parents=True) processed.joinpath("valid").mkdir() def _create_target(self, arry): r"""Convert target dictionary""" target = { 'site': arry[:, 0], 'subsite': arry[:, 1], 'laterality': arry[:, 2], 'histology': arry[:, 3], 'behaviour': arry[:, 4], 'grade': arry[:, 5] } return {task: torch.tensor(arry, dtype=torch.long) for task, arry in target.items()} def _load_data(self, split): split_path = self.root.joinpath(f'processed/{split}') self.data = torch.load(split_path.joinpath('data.pt')) self.target = torch.load(split_path.joinpath('target.pt')) def _load_vocab(self): self.vocab = torch.load(self.root.joinpath("vocab.pt")) self.num_vocab = len(self.vocab) def _index_target(self, idx): return {task: target[idx] for task, target in self.target.items()} def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx], self._index_target(idx)
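
# Illustrative round trip (hypothetical): synthesize the four-column label
# file and token matrix that the P3B3 loader expects, then iterate one
# batch.  The directory, shapes, and label ranges are made up; only the
# file names are dictated by the class above.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    root = '/tmp/p3b3_demo'  # hypothetical location
    os.makedirs(root, exist_ok=True)
    np.save(os.path.join(root, 'train_X.npy'),
            np.random.randint(0, 100, size=(32, 20)))  # 32 docs, 20 tokens
    np.save(os.path.join(root, 'train_Y.npy'),
            np.random.randint(0, 4, size=(32, 4)))     # 4 task columns

    train = P3B3(root, partition='train')
    docs, targets = next(iter(DataLoader(train, batch_size=8, shuffle=True)))
    print(docs.shape, sorted(targets))  # torch.Size([8, 20]) and task names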
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Natural convection heat transfer calculation based on the Churchill and Chu
correlation
"""


def Churchill_Chu(D, rhof, Prf, kf, betaf, alphaf, muf, Ts, Tinf):
    """
    Natural convection heat transfer calculation based on the Churchill and
    Chu correlation

    :param D:      [m]         Pipe inside diameter
    :param rhof:   [kg/m^3]    Fluid density
    :param Prf:    [-]         Prandtl number
    :param kf:     [W/(m K)]   Thermal conductivity
    :param betaf:  [1/K]       Volumetric expansivity (beta)
    :param alphaf: [m^2/s]     Thermal diffusivity
    :param muf:    [N s/m^2]   Fluid dynamic viscosity
    :param Ts:     [°C]        Surface temperature
    :param Tinf:   [°C]        Fluid temperature
    :return hconv_out: [W/(m^2 K)] Convection heat transfer coefficient
    """
    g = 9.81  # [m/s^2] gravitational acceleration
    # [-] Rayleigh number (floored at 1000 to keep the correlation in range)
    RaD = max(g * betaf * rhof * abs(Ts - Tinf) * D ** 3 / (muf * alphaf), 1000)
    # [-] Nusselt number
    NuD = (0.60 + 0.387 * RaD ** (1 / 6) / ((1 + (0.559 / Prf) ** (9 / 16)) ** (8 / 27))) ** 2
    # [W/(m^2 K)] Convection heat transfer coefficient
    hconv_out = NuD * kf / D
    return hconv_out
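
# Illustrative usage (not from the original module): rough air properties
# near 300 K.  The numbers below are approximate textbook values, chosen
# only to show the call signature and expected magnitude.
if __name__ == "__main__":
    h = Churchill_Chu(
        D=0.05,           # [m] pipe diameter
        rhof=1.16,        # [kg/m^3]
        Prf=0.707,        # [-]
        kf=0.0263,        # [W/(m K)]
        betaf=1.0 / 300,  # [1/K] ideal-gas approximation at ~300 K
        alphaf=2.25e-5,   # [m^2/s]
        muf=1.85e-5,      # [N s/m^2]
        Ts=60.0,          # [°C]
        Tinf=20.0,        # [°C]
    )
    print("h =", round(h, 2), "W/(m^2 K)")  # a few W/(m^2 K) is typical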
nilq/baby-python
python
import functools import numpy as np import unittest from scipy.stats import kendalltau, pearsonr, spearmanr from sacrerouge.data import Metrics from sacrerouge.stats import convert_to_matrices, summary_level_corr, system_level_corr, global_corr, \ bootstrap_system_sample, bootstrap_input_sample, bootstrap_both_sample, bootstrap_ci, fisher_ci, corr_ci, \ random_bool_mask, permute_systems, permute_inputs, permute_both, bootstrap_diff_test, permutation_diff_test, \ williams_diff_test, corr_diff_test, bonferroni_partial_conjunction_pvalue_test class TestStats(unittest.TestCase): def test_convert_to_matrices(self): metrics_list = [ Metrics('1', 'A', 'peer', {'m1': 1, 'm2': 2, 'm3': 3}), Metrics('2', 'A', 'peer', {'m1': 4, 'm2': 5}), Metrics('1', 'B', 'peer', {'m1': 6, 'm2': 7, 'm3': 8}), Metrics('2', 'B', 'peer', {'m1': 9, 'm2': 10, 'm3': 11}), ] m1 = convert_to_matrices(metrics_list, 'm1') np.testing.assert_array_equal(m1, [[1, 4], [6, 9]]) m1, m2 = convert_to_matrices(metrics_list, 'm1', 'm2') np.testing.assert_array_equal(m1, [[1, 4], [6, 9]]) np.testing.assert_array_equal(m2, [[2, 5], [7, 10]]) m3 = convert_to_matrices(metrics_list, 'm3') np.testing.assert_array_equal(m3, [[3, np.nan], [8, 11]]) metrics_list = [ Metrics('1', 'A', 'peer', {'m1': 1, 'm2': 2}), Metrics('2', 'A', 'peer', {'m1': 4, 'm2': 5}), Metrics('1', 'B', 'peer', {'m1': 6, 'm2': 7}), Metrics('3', 'B', 'peer', {'m1': 2, 'm2': 9}), ] m1 = convert_to_matrices(metrics_list, 'm1') np.testing.assert_array_equal(m1, [[1, 4, np.nan], [6, np.nan, 2]]) def test_summary_level_corr(self): # This will end up skipping the last column because the scores are identical, # so the correlation is NaN X = np.array([ [1, 9, 2], [4, 5, 2], [6, 7, 2] ]) Y = np.array([ [11, 12, 13], [14, 15, 16], [17, 18, 19] ]) r = summary_level_corr(pearsonr, X, Y) self.assertAlmostEqual(r, 0.2466996339, places=4) X = np.array([ [1, 2], [1, 2], ]) Y = np.array([ [11, 12], [14, 15], ]) # This shouldn't have any correlations because both are NaN assert summary_level_corr(pearsonr, X, Y) is None X = np.array([ [1, 9, 2], [np.nan, 5, 4], [6, 7, 7] ]) Y = np.array([ [11, 12, 13], [np.nan, 15, 16], [17, 18, 19] ]) self.assertAlmostEqual(summary_level_corr(pearsonr, X, Y), 0.4977997559) # Fails because they do not have parallel nans X = np.array([ [1, 9, 2], [4, np.nan, 2], [6, 7, 2] ]) Y = np.array([ [11, 12, np.nan], [14, 15, 16], [17, 18, 19] ]) with self.assertRaises(Exception): summary_level_corr(pearsonr, X, Y) def test_system_level_corr(self): X = np.array([ [1, 9, 2], [4, 5, 2], [6, 7, 2] ]) Y = np.array([ [11, 12, 13], [14, 15, 16], [17, 18, 19] ]) r = system_level_corr(pearsonr, X, Y) self.assertAlmostEqual(r, 0.7205766921, places=4) r, pvalue = system_level_corr(pearsonr, X, Y, return_pvalue=True) self.assertAlmostEqual(r, 0.7205766921, places=4) self.assertAlmostEqual(pvalue, 0.48775429164459994, places=4) X = np.array([ [1, 9, 2], [4, 5, np.nan], [6, np.nan, 2] ]) Y = np.array([ [11, 12, 13], [14, 15, np.nan], [17, np.nan, 19] ]) r = system_level_corr(pearsonr, X, Y) self.assertAlmostEqual(r, -0.09578262852, places=4) r, pvalue = system_level_corr(pearsonr, X, Y, return_pvalue=True) self.assertAlmostEqual(r, -0.09578262852, places=4) self.assertAlmostEqual(pvalue, 0.938929260614949, places=4) X = np.array([ [1, 2], [1, 2], ]) Y = np.array([ [11, 12], [14, 15], ]) # This shouldn't have any correlations because the average of X is all the same assert system_level_corr(pearsonr, X, Y) is None assert system_level_corr(pearsonr, X, Y, return_pvalue=True) == 
(None, None) # Fails because they do not have parallel nans X = np.array([ [1, 9, 2], [4, np.nan, 2], [6, 7, 2] ]) Y = np.array([ [11, 12, np.nan], [14, 15, 16], [17, 18, 19] ]) with self.assertRaises(Exception): system_level_corr(pearsonr, X, Y) def test_global_corr(self): X = np.array([ [1, 9, 2], [4, 5, 2], [6, 7, 2] ]) Y = np.array([ [11, 12, 13], [14, 15, 16], [17, 18, 19] ]) r = global_corr(pearsonr, X, Y) self.assertAlmostEqual(r, 0.06691496051, places=4) r, pvalue = global_corr(pearsonr, X, Y, return_pvalue=True) self.assertAlmostEqual(r, 0.06691496051, places=4) self.assertAlmostEqual(pvalue, 0.8641895868792804, places=4) X = np.array([ [1, 9, 2], [np.nan, 5, 2], [6, 7, np.nan] ]) Y = np.array([ [11, 12, 13], [np.nan, 15, 16], [17, 18, np.nan] ]) r = global_corr(pearsonr, X, Y) self.assertAlmostEqual(r, 0.2897249422, places=4) r, pvalue = global_corr(pearsonr, X, Y, return_pvalue=True) self.assertAlmostEqual(r, 0.2897249422, places=4) self.assertAlmostEqual(pvalue, 0.5285282548518477, places=4) X = np.array([ [1, 1], [1, 1], ]) Y = np.array([ [11, 12], [14, 15], ]) # This shouldn't have any correlations because X is identical assert global_corr(pearsonr, X, Y) is None assert global_corr(pearsonr, X, Y, return_pvalue=True) == (None, None) # Fails because they do not have parallel nans X = np.array([ [1, 9, 2], [4, np.nan, 2], [6, 7, 2] ]) Y = np.array([ [11, 12, np.nan], [14, 15, 16], [17, 18, 19] ]) with self.assertRaises(Exception): global_corr(pearsonr, X, Y) def test_bootstrap_system_sample(self): A = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12] ]) B = np.array([ [13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24] ]) # We check what sample should be taken with this random seed np.random.seed(4) np.testing.assert_array_equal(np.random.choice(3, 3, replace=True), [2, 2, 1]) np.random.seed(4) A_s = bootstrap_system_sample(A) np.testing.assert_array_equal(A_s, [[9, 10, 11, 12], [9, 10, 11, 12], [5, 6, 7, 8]]) np.random.seed(4) A_s, B_s = bootstrap_system_sample(A, B) np.testing.assert_array_equal(A_s, [[9, 10, 11, 12], [9, 10, 11, 12], [5, 6, 7, 8]]) np.testing.assert_array_equal(B_s, [[21, 22, 23, 24], [21, 22, 23, 24], [17, 18, 19, 20]]) def test_bootstrap_input_sample(self): A = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12] ]) B = np.array([ [13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24] ]) # We check what sample should be taken with this random seed np.random.seed(4) np.testing.assert_array_equal(np.random.choice(4, 4, replace=True), [2, 2, 3, 1]) np.random.seed(4) A_s = bootstrap_input_sample(A) np.testing.assert_array_equal(A_s, [[3, 3, 4, 2], [7, 7, 8, 6], [11, 11, 12, 10]]) np.random.seed(4) A_s, B_s = bootstrap_input_sample(A, B) np.testing.assert_array_equal(A_s, [[3, 3, 4, 2], [7, 7, 8, 6], [11, 11, 12, 10]]) np.testing.assert_array_equal(B_s, [[15, 15, 16, 14], [19, 19, 20, 18], [23, 23, 24, 22]]) def test_bootstrap_both_sample(self): A = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12] ]) B = np.array([ [13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24] ]) # We check what sample should be taken with this random seed np.random.seed(4) np.testing.assert_array_equal(np.random.choice(3, 3, replace=True), [2, 2, 1]) np.testing.assert_array_equal(np.random.choice(4, 4, replace=True), [1, 0, 3, 0]) np.random.seed(4) A_s = bootstrap_both_sample(A) np.testing.assert_array_equal(A_s, [[10, 9, 12, 9], [10, 9, 12, 9], [6, 5, 8, 5]]) np.random.seed(4) A_s, B_s = bootstrap_both_sample(A, B) np.testing.assert_array_equal(A_s, [[10, 9, 12, 9], [10, 9, 
12, 9], [6, 5, 8, 5]]) np.testing.assert_array_equal(B_s, [[22, 21, 24, 21], [22, 21, 24, 21], [18, 17, 20, 17]]) def test_bootstrap_ci(self): # Regression test np.random.seed(3) X = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) Y = np.array([ [5, 2, 7], [1, 7, 3], [4, 2, 2] ]) corr_func = functools.partial(global_corr, pearsonr) lower, upper = bootstrap_ci(corr_func, X, Y, bootstrap_system_sample) self.assertAlmostEqual(lower, -0.8660254037844388, places=4) self.assertAlmostEqual(upper, 0.39735970711951324, places=4) lower, upper = bootstrap_ci(corr_func, X, Y, bootstrap_system_sample, alpha=0.1) self.assertAlmostEqual(lower, -0.5773502691896258, places=4) self.assertAlmostEqual(upper, 0.32732683535398865, places=4) lower, upper = bootstrap_ci(corr_func, X, Y, bootstrap_input_sample) self.assertAlmostEqual(lower, -0.9449111825230679, places=4) self.assertAlmostEqual(upper, 0.0, places=4) lower, upper = bootstrap_ci(corr_func, X, Y, bootstrap_both_sample) self.assertAlmostEqual(lower, -1.0, places=4) self.assertAlmostEqual(upper, 1.0, places=4) def test_fisher_ci(self): pearson_global = functools.partial(global_corr, pearsonr) spearman_global = functools.partial(global_corr, spearmanr) kendall_global = functools.partial(global_corr, kendalltau) pearson_system = functools.partial(system_level_corr, pearsonr) spearman_system = functools.partial(system_level_corr, spearmanr) kendall_system = functools.partial(system_level_corr, kendalltau) pearson_summary = functools.partial(summary_level_corr, pearsonr) spearman_summary = functools.partial(summary_level_corr, spearmanr) kendall_summary = functools.partial(summary_level_corr, kendalltau) np.random.seed(12) X = np.random.rand(5, 7) Y = np.random.rand(5, 7) self.assertAlmostEqual(fisher_ci(pearson_global, X, Y), (-0.02763744135012373, 0.5818846438651135), places=4) self.assertAlmostEqual(fisher_ci(spearman_global, X, Y), (-0.06733469087453943, 0.5640758668009686), places=4) self.assertAlmostEqual(fisher_ci(kendall_global, X, Y), (-0.029964677270600665, 0.4098565164085108), places=4) self.assertAlmostEqual(fisher_ci(pearson_system, X, Y), (-0.6445648014599665, 0.9644395142168088), places=4) self.assertAlmostEqual(fisher_ci(spearman_system, X, Y), (-0.6708734441360908, 0.9756771001362685), places=4) self.assertAlmostEqual(fisher_ci(kendall_system, X, Y), (-0.7023910748254728, 0.9377789575997956), places=4) self.assertAlmostEqual(fisher_ci(pearson_summary, X, Y), (-0.808376631595968, 0.9287863878043723), places=4) self.assertAlmostEqual(fisher_ci(spearman_summary, X, Y), (-0.7262127280589684, 0.9653646507719408), places=4) self.assertAlmostEqual(fisher_ci(kendall_summary, X, Y), (-0.684486849088761, 0.9418063314024349), places=4) def test_corr_ci(self): # Regression test np.random.seed(3) X = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) Y = np.array([ [5, 2, 7], [1, 7, 3], [4, 2, 2] ]) corr_func = functools.partial(global_corr, pearsonr) # Make sure we get the same result going through bootstrap_ci and corr_ci expected_lower, expected_upper = bootstrap_ci(corr_func, X, Y, bootstrap_system_sample) lower, upper = corr_ci(corr_func, X, Y, 'bootstrap-system') self.assertAlmostEqual(lower, expected_lower, places=4) self.assertAlmostEqual(upper, expected_upper, places=4) expected_lower, expected_upper = bootstrap_ci(corr_func, X, Y, bootstrap_input_sample) lower, upper = corr_ci(corr_func, X, Y, 'bootstrap-input') self.assertAlmostEqual(lower, expected_lower, places=4) self.assertAlmostEqual(upper, expected_upper, places=4) expected_lower, 
expected_upper = bootstrap_ci(corr_func, X, Y, bootstrap_both_sample) lower, upper = corr_ci(corr_func, X, Y, 'bootstrap-both') self.assertAlmostEqual(lower, expected_lower, places=4) self.assertAlmostEqual(upper, expected_upper, places=4) # If we do a single tail, the result should be the same with alpha / 2 expected_lower, expected_upper = bootstrap_ci(corr_func, X, Y, bootstrap_system_sample) lower, upper = corr_ci(corr_func, X, Y, 'bootstrap-system', alpha=0.025, two_tailed=False) self.assertAlmostEqual(lower, expected_lower, places=4) self.assertAlmostEqual(upper, expected_upper, places=4) # None cases assert corr_ci(corr_func, X, Y, None) == (None, None) assert corr_ci(corr_func, X, Y, 'none') == (None, None) with self.assertRaises(Exception): corr_ci(corr_func, X, Y, 'does-not-exist') def test_random_bool_mask(self): np.random.seed(7) expected_rand = [ [0.07630829, 0.77991879, 0.43840923, 0.72346518], [0.97798951, 0.53849587, 0.50112046, 0.07205113], [0.26843898, 0.4998825, 0.67923, 0.80373904] ] np.testing.assert_array_almost_equal(np.random.rand(3, 4), expected_rand) np.random.seed(7) expected_mask = [ [False, True, False, True], [True, True, True, False], [False, False, True, True] ] mask = random_bool_mask(3, 4) np.testing.assert_array_equal(mask, expected_mask) def test_permute_systems(self): X = np.arange(1, 13).reshape(3, 4) Y = -np.arange(1, 13).reshape(3, 4) np.random.seed(7) expected_mask = [[False], [True], [False]] mask = random_bool_mask(3, 1) np.testing.assert_array_equal(mask, expected_mask) np.random.seed(7) expected_X = [ [1, 2, 3, 4], [-5, -6, -7, -8], [9, 10, 11, 12] ] expected_Y = [ [-1, -2, -3, -4], [5, 6, 7, 8], [-9, -10, -11, -12] ] X_p, Y_p = permute_systems(X, Y) np.testing.assert_array_equal(X_p, expected_X) np.testing.assert_array_equal(Y_p, expected_Y) np.testing.assert_array_equal(X, np.arange(1, 13).reshape(3, 4)) np.testing.assert_array_equal(Y, -np.arange(1, 13).reshape(3, 4)) def test_permute_inputs(self): X = np.arange(1, 13).reshape(3, 4) Y = -np.arange(1, 13).reshape(3, 4) np.random.seed(7) expected_mask = [[False, True, False, True]] mask = random_bool_mask(1, 4) np.testing.assert_array_equal(mask, expected_mask) np.random.seed(7) expected_X = [ [1, -2, 3, -4], [5, -6, 7, -8], [9, -10, 11, -12] ] expected_Y = [ [-1, 2, -3, 4], [-5, 6, -7, 8], [-9, 10, -11, 12] ] X_p, Y_p = permute_inputs(X, Y) np.testing.assert_array_equal(X_p, expected_X) np.testing.assert_array_equal(Y_p, expected_Y) np.testing.assert_array_equal(X, np.arange(1, 13).reshape(3, 4)) np.testing.assert_array_equal(Y, -np.arange(1, 13).reshape(3, 4)) def test_permute_both(self): X = np.arange(1, 13).reshape(3, 4) Y = -np.arange(1, 13).reshape(3, 4) np.random.seed(7) expected_mask = [ [False, True, False, True], [True, True, True, False], [False, False, True, True] ] mask = random_bool_mask(3, 4) np.testing.assert_array_equal(mask, expected_mask) # The True values should swap and the original matrices should be unchanged np.random.seed(7) expected_X = [ [1, -2, 3, -4], [-5, -6, -7, 8], [9, 10, -11, -12] ] expected_Y = [ [-1, 2, -3, 4], [5, 6, 7, -8], [-9, -10, 11, 12] ] X_p, Y_p = permute_both(X, Y) np.testing.assert_array_equal(X_p, expected_X) np.testing.assert_array_equal(Y_p, expected_Y) np.testing.assert_array_equal(X, np.arange(1, 13).reshape(3, 4)) np.testing.assert_array_equal(Y, -np.arange(1, 13).reshape(3, 4)) def test_bootstrap_diff_test(self): # Regression test np.random.seed(12) X = np.random.random((9, 5)) Y = np.random.random((9, 5)) Z = np.random.random((9, 5)) 
corr_func = functools.partial(global_corr, pearsonr) np.random.seed(2) assert bootstrap_diff_test(corr_func, X, Y, Z, bootstrap_system_sample, False) == 0.958 np.random.seed(2) assert bootstrap_diff_test(corr_func, Y, X, Z, bootstrap_system_sample, False) == 0.042 def test_permutation_diff_test(self): # Regression test np.random.seed(12) X = np.random.random((9, 5)) Y = np.random.random((9, 5)) Z = np.random.random((9, 5)) corr_func = functools.partial(global_corr, pearsonr) np.random.seed(2) self.assertAlmostEqual(permutation_diff_test(corr_func, X, Y, Z, permute_both, False), 0.97002997002997, places=4) np.random.seed(2) self.assertAlmostEqual(permutation_diff_test(corr_func, Y, X, Z, permute_both, False), 0.030969030969030968, places=4) def test_williams_diff_test(self): # This test verifies that the output is the same as the psych package for # several different randomly generated inputs N, M = 9, 5 corr_func = functools.partial(global_corr, pearsonr) np.random.seed(12) X = np.random.random((N, M)) Y = np.random.random((N, M)) Z = np.random.random((N, M)) # These are used as input to r.test # effective_N = N * M # r12 = corr_func(X, Z) # r13 = corr_func(Y, Z) # r23 = corr_func(X, Y) # One tail expected_pvalue = 0.2716978 actual_pvalue = williams_diff_test(corr_func, X, Y, Z, False) self.assertAlmostEqual(expected_pvalue, actual_pvalue, places=5) # The opposite order should produce 1-0.2716978. r.test does not do this and # will return 0.2716978 because it assumes that r12 > r13. actual_pvalue = williams_diff_test(corr_func, Y, X, Z, False) self.assertAlmostEqual(1.0 - expected_pvalue, actual_pvalue, places=5) # Two tails expected_pvalue = 0.5433956 actual_pvalue = williams_diff_test(corr_func, X, Y, Z, True) self.assertAlmostEqual(expected_pvalue, actual_pvalue, places=5) # Should not matter the order for two tails actual_pvalue = williams_diff_test(corr_func, Y, X, Z, True) self.assertAlmostEqual(expected_pvalue, actual_pvalue, places=5) X = np.random.random((N, M)) Y = np.random.random((N, M)) Z = np.random.random((N, M)) corr_func = functools.partial(system_level_corr, spearmanr) # These are used as input to r.test # effective_N = N # r12 = corr_func(X, Z) # r13 = corr_func(Y, Z) # r23 = corr_func(X, Y) # One tail # Since r12 < r13, r.test will only replicate this result with the reversed input order expected_pvalue = 0.4658712 actual_pvalue = williams_diff_test(corr_func, Y, X, Z, False) self.assertAlmostEqual(expected_pvalue, actual_pvalue, places=5) # r.test would return the same result here, but we return 1.0 - expected actual_pvalue = williams_diff_test(corr_func, X, Y, Z, False) self.assertAlmostEqual(1.0 - expected_pvalue, actual_pvalue, places=5) # Two tails expected_pvalue = 0.9317423 actual_pvalue = williams_diff_test(corr_func, X, Y, Z, True) self.assertAlmostEqual(expected_pvalue, actual_pvalue, places=5) # Order doesn't matter actual_pvalue = williams_diff_test(corr_func, Y, X, Z, True) self.assertAlmostEqual(expected_pvalue, actual_pvalue, places=5) def test_corr_diff_test(self): # Regression test np.random.seed(12) X = np.random.random((20, 10)) Y = np.random.random((20, 10)) Z = np.random.random((20, 10)) corr_func = functools.partial(global_corr, pearsonr) # Ensure it's the same result going through bootstrap_diff_test and corr_diff_test np.random.seed(2) expected = bootstrap_diff_test(corr_func, X, Y, Z, bootstrap_system_sample, False) np.random.seed(2) assert corr_diff_test(corr_func, X, Y, Z, 'bootstrap-system', False) == expected np.random.seed(2) expected = 
bootstrap_diff_test(corr_func, X, Y, Z, bootstrap_input_sample, False) np.random.seed(2) assert corr_diff_test(corr_func, X, Y, Z, 'bootstrap-input', False) == expected np.random.seed(2) expected = bootstrap_diff_test(corr_func, X, Y, Z, bootstrap_both_sample, False) np.random.seed(2) assert corr_diff_test(corr_func, X, Y, Z, 'bootstrap-both', False) == expected # Ensure it's the same result going through permutation_diff_test and corr_diff_test np.random.seed(2) expected = permutation_diff_test(corr_func, X, Y, Z, permute_systems, False) np.random.seed(2) assert corr_diff_test(corr_func, X, Y, Z, 'permutation-system', False) == expected np.random.seed(2) expected = permutation_diff_test(corr_func, X, Y, Z, permute_inputs, False) np.random.seed(2) assert corr_diff_test(corr_func, X, Y, Z, 'permutation-input', False) == expected np.random.seed(2) expected = permutation_diff_test(corr_func, X, Y, Z, permute_both, False) np.random.seed(2) assert corr_diff_test(corr_func, X, Y, Z, 'permutation-both', False) == expected # None cases assert corr_diff_test(corr_func, X, Y, Z, 'none', False) is None assert corr_diff_test(corr_func, X, Y, Z, None, False) is None with self.assertRaises(Exception): corr_diff_test(corr_func, X, Y, Z, 'does-not-exist', False) def test_bonferroni_partial_conjunction_pvalue_test(self): # Tests against https://github.com/rtmdrr/replicability-analysis-NLP/blob/master/Replicability_Analysis.py pvalues = [0.168, 0.297, 0.357, 0.019, 0.218, 0.001] assert bonferroni_partial_conjunction_pvalue_test(pvalues, alpha=0.05) == (1, [5]) assert bonferroni_partial_conjunction_pvalue_test(pvalues, alpha=0.10) == (2, [5, 3]) assert bonferroni_partial_conjunction_pvalue_test(pvalues, alpha=0.70) == (6, [5, 3, 0, 4, 1, 2])
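
# Illustrative use of the API exercised by the tests above (hypothetical
# data): two (systems x inputs) score matrices, one correlation at each
# level, and a bootstrap confidence interval.  Signatures follow the test
# cases, not any documentation.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.rand(5, 8)  # e.g., an automatic metric's scores
    Y = np.random.rand(5, 8)  # e.g., human judgments

    corr_func = functools.partial(global_corr, pearsonr)
    r = corr_func(X, Y)
    lower, upper = corr_ci(corr_func, X, Y, 'bootstrap-system')
    print('global r = %.3f, 95%% CI = (%.3f, %.3f)' % (r, lower, upper))
    print('system-level r = %.3f' % system_level_corr(pearsonr, X, Y))
    print('summary-level r = %.3f' % summary_level_corr(pearsonr, X, Y))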
nilq/baby-python
python
from argparse import ArgumentParser


def parse_args():
    parser = ArgumentParser(description="An auto downloader and uploader for TikTok videos.")
    parser.add_argument("user")
    parser.add_argument(
        "--no-delete", action="store_false", help="don't delete files when done"
    )
    parser.add_argument(
        "--hashtag", action="store_true", help="download hashtag instead of username"
    )
    parser.add_argument(
        "--limit", help="set limit on amount of TikToks to download"
    )
    parser.add_argument(
        "--use-download-archive",
        action="store_true",
        help=(
            "record the video url to the download archive. "
            "This will download only videos not listed in the archive file. "
            "Record the IDs of all downloaded videos in it."
        ),
    )
    parser.add_argument(
        "--id", action="store_true", help="download this video ID"
    )
    parser.add_argument(
        "--liked", action="store_true", help="download the user's liked posts"
    )

    args = parser.parse_args()
    return args
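
# Illustrative invocation (hypothetical), equivalent to running e.g.
#   python script.py some_user --limit 10 --use-download-archive
# Note that --no-delete uses store_false, so args.no_delete defaults to
# True, meaning "delete files when done" despite the flag's name.
if __name__ == "__main__":
    import sys

    sys.argv = ["tiktok-dl", "some_user", "--limit", "10",
                "--use-download-archive"]
    args = parse_args()
    print(args.user, args.limit, args.use_download_archive, args.no_delete)
    # -> some_user 10 True True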
nilq/baby-python
python
from random import randint

from lotto import getLotto
from wc.wc import WC


def getBoard(width=5, height=5, extra=75 - 5 * 5):
    lotto = getLotto(width, height, extra)
    board = [[lotto.draw() for _ in range(width)] for __ in range(height)]
    # TODO free spaces
    return Board(board, width, height)


def transpose(board):
    return Board(WC.transpose(board.board), board.height, board.width)


def reverse(board):
    return Board(WC.reverse(board.board), board.width, board.height)


class Board(object):
    FREE_SPACE = -1

    def __init__(self, board, width, height):
        self.board = board
        self.width = width
        self.height = height

    #def __str__(self):
        #return str(self.board)
        #return "\n".join((" ".join(("%3s" % (x if x is not Board.FREE_SPACE else 'X') for x in row)) for row in self.board))

    def toString(self, isSelected):
        s = "\n".join((" ".join(("%3s" % (x if not isSelected(x) else 'X') for x in row)) for row in self.board))
        print(s)
        return s

    #def __iter__(self): return iter(self.board)
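
# Illustrative use of Board directly (hypothetical data), bypassing
# getBoard() since the `lotto` and `wc` modules are not shown here.
if __name__ == "__main__":
    grid = [[1, 2, 3],
            [4, Board.FREE_SPACE, 6],
            [7, 8, 9]]
    board = Board(grid, width=3, height=3)

    drawn = {2, 7, Board.FREE_SPACE}
    board.toString(lambda x: x in drawn)  # drawn cells render as 'X'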
nilq/baby-python
python
from infra.controllers.contracts.http import HttpRequest from cerberus import Validator from infra.controllers.validators.ports import CerberusErrors, PayloadValidator from utils.result import Error, Ok, Result class AddNewDebtValidator(PayloadValidator): def __init__(self) -> None: self.schema = { 'description': {'type': 'string', 'required': True}, 'part_value': {'type': 'number', 'required': True}, 'total_parts': {'type': 'integer', 'required': True}, 'paid_parts': {'type': 'integer', 'required': True}, 'start_date': {'type': 'dict', 'required': True, 'schema': { 'month': { 'type': 'integer', 'required': True }, 'year': { 'type': 'integer', 'required': True } }}, } self.validator = Validator(self.schema) def validate(self, http_request: HttpRequest) -> Result[HttpRequest, CerberusErrors]: is_valid = self.validator.validate(http_request.body) if not is_valid: return Error(self.validator.errors) return Ok(http_request)
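
# Hedged usage sketch: HttpRequest is this project's wrapper (its constructor
# is not shown here); validate() only relies on its .body dict.
#
#   payload = {'description': 'car loan', 'part_value': 350.0, 'total_parts': 48,
#              'paid_parts': 12, 'start_date': {'month': 3, 'year': 2020}}
#   result = AddNewDebtValidator().validate(HttpRequest(body=payload))
#   # -> Ok(request) on success, Error({field: [messages]}) on schema violations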
nilq/baby-python
python
# Generated by Django 2.2.13 on 2020-10-27 04:49 from django.db import migrations import wagtail.core.blocks import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ("navigation", "0002_remove_pri_sec_footer_navs"), ] operations = [ migrations.AddField( model_name="navigationsettings", name="footer_columns", field=wagtail.core.fields.StreamField( [ ( "column", wagtail.core.blocks.StructBlock( [ ( "heading", wagtail.core.blocks.CharBlock( help_text="Leave blank if no header required.", required=False, ), ), ( "content", wagtail.core.blocks.RichTextBlock( features=[ "bold", "italic", "ol", "ul", "link", "document-link", ] ), ), ] ), ) ], default="", ), preserve_default=False, ), ]
nilq/baby-python
python
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Zookeeper based fetch synchronizer
import abc
import logging
from typing import Callable, List, Optional, Tuple

from bai_kafka_utils.events import DownloadableContent, BenchmarkEvent, FetcherStatus, ContentSizeInfo
from bai_kafka_utils.utils import md5sum
from bai_zk_utils.states import FetcherResult
from bai_zk_utils.zk_locker import RWLockManager, RWLock
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError, BadVersionError
from kazoo.protocol.states import WatchedEvent, EventType

from preflight.estimator import estimate_fetch_size


class DownloadDispatcher(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def dispatch_fetch(self, task: DownloadableContent, event: BenchmarkEvent, zk_node_path: str):
        pass

    @abc.abstractmethod
    def cancel_all(self, client_id: str, action_id: str = None):
        pass

    @abc.abstractmethod
    def cleanup(self, task: DownloadableContent, event: BenchmarkEvent):
        pass


# client_id/action_id/downloadable_content
NodePathSource = Callable[[str, Optional[str], Optional[DownloadableContent]], str]

DownloadOnDone = Callable[[DownloadableContent], None]
ContentSizeEstimator = Callable[[str], ContentSizeInfo]

logger = logging.getLogger(__name__)


def get_lock_name(content: DownloadableContent) -> str:
    return md5sum(content.src)


class DownloadManager:
    @staticmethod
    def __get_node_path(client_id: str, action_id: str = None, content: DownloadableContent = None) -> str:
        # MD5 has impact on the node - so different locks etc.
path = f"/downloads/{client_id}" if action_id: path += f"/{action_id}" if content: path += f"/{md5sum(str(content))}" return path INITIAL_DATA = FetcherResult(FetcherStatus.PENDING).to_binary() @staticmethod def _set_failed(content: DownloadableContent, message: str): content.message = message content.status = FetcherStatus.FAILED content.dst = None def __init__( self, zk: KazooClient, download_dispatcher: DownloadDispatcher, lock_manager: RWLockManager, get_node_path: NodePathSource = None, size_estimator: ContentSizeEstimator = None, ): self._zk = zk self._download_dispatcher = download_dispatcher self._get_node_path = get_node_path or DownloadManager.__get_node_path self._lock_manager = lock_manager self._size_estimator = size_estimator or estimate_fetch_size def start(self) -> None: logger.info("Start") self._zk.start() def fetch(self, content: DownloadableContent, event: BenchmarkEvent, on_done: DownloadOnDone) -> None: logger.info("Fetch request %s", content) def on_content_locked(content: DownloadableContent, lock: RWLock): def _on_done_and_unlock(content: DownloadableContent): on_done(content) self._download_dispatcher.cleanup(content, event) lock.release() try: content.size_info = self._size_estimator(content.src) except Exception as e: msg = f"Failed to estimate the size of content {content.src}: {str(e)}" logger.exception(f"{msg}") FetcherResult(FetcherStatus.FAILED, None, msg).update(content) on_done(content) lock.release() return # This node will be killed if I die zk_node_path = self._get_node_path(event.client_id, event.action_id, content) self._zk.create(zk_node_path, DownloadManager.INITIAL_DATA, ephemeral=True, makepath=True) self.__handle_node_state(zk_node_path, _on_done_and_unlock, content) content.size_info = self._size_estimator(content.src) self._download_dispatcher.dispatch_fetch(content, event, zk_node_path) self._lock_manager.acquire_write_lock(content, on_content_locked) def __on_zk_changed(self, event: WatchedEvent, on_done: DownloadOnDone, content: DownloadableContent): if event.type == EventType.DELETED: if not content.status: # Something not final - and deleted??? logger.error("Deleted node %s for the not finalized content %s", event.path, content) # TODO More sophisticated handling of that? 
            return

        self.__handle_node_state(event.path, on_done, content)

    def __handle_node_state(self, zk_node_path: str, on_done: DownloadOnDone, content: DownloadableContent):
        def _on_zk_changed(evt):
            self.__on_zk_changed(evt, on_done, content)

        data, _ = self._zk.get(zk_node_path, _on_zk_changed)

        result: FetcherResult = FetcherResult.from_binary(data)

        logger.info("Fetch request %s result = %s", content, result)

        if result.status.final:
            result.update(content)

            # We clean up
            self._zk.delete(zk_node_path)

            on_done(content)

    def stop(self) -> None:
        logger.info("Stop")
        self._zk.stop()

    def cancel(self, client_id: str, action_id: str) -> Tuple[List[str], int]:
        logger.info(f"Canceling action {client_id}/{action_id}")

        return (
            self._download_dispatcher.cancel_all(client_id, action_id),
            self._update_nodes_to_cancel(client_id, action_id),
        )

    def _update_nodes_to_cancel(self, client_id: str, action_id: str) -> int:
        # As always with stop-flags, we can face a bunch of race conditions
        zk_node_path = self._get_node_path(client_id, action_id)

        number_of_nodes_updated = 0

        try:
            for child in self._zk.get_children(zk_node_path):
                abs_path = zk_node_path + "/" + child

                logger.info(f"Updating node {abs_path}")

                try:
                    while True:
                        data, zk_stat = self._zk.get(abs_path)

                        result: FetcherResult = FetcherResult.from_binary(data)

                        # The state is already final - no point canceling it, the job is finished.
                        # So now we are in a race with a zookeeper listener that will pass the results downstream.
                        if result.status.final:
                            logger.info(f"{abs_path}: not to be canceled - already finished")
                            break

                        result.status = FetcherStatus.CANCELED

                        new_data = result.to_binary()

                        try:
                            self._zk.set(abs_path, new_data, version=zk_stat.version)
                            number_of_nodes_updated += 1
                        except BadVersionError:
                            logger.info(f"{abs_path}: the node was updated meanwhile")
                            continue

                        logger.info(f"{abs_path}: canceled")
                        break
                except NoNodeError:
                    logger.info(f"{abs_path}: the node was deleted meanwhile")
                    # The task was just finished - status was reported to the customer and the node got deleted.
                    # OK. It's not our concern anymore.
                    continue
        except NoNodeError:  # Absorb NoNodeError
            logger.info(f"{zk_node_path}: node not found")

        return number_of_nodes_updated
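
# Hedged wiring sketch (the dispatcher and lock manager are project-specific
# types whose construction is not shown in this module; illustrative only):
#
#   zk = KazooClient(hosts="127.0.0.1:2181")
#   manager = DownloadManager(zk, download_dispatcher=my_dispatcher, lock_manager=my_locks)
#   manager.start()
#   manager.fetch(content, event, on_done=lambda c: logger.info("done: %s", c.status))
#   manager.stop()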
nilq/baby-python
python
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2018, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import unittest import tempfile import os.path import numpy as np import pandas as pd from pandas.testing import assert_frame_equal import qiime2 from q2_feature_table import heatmap from q2_feature_table._heatmap._visualizer import _munge_metadata class TestHeatmap(unittest.TestCase): def setUp(self): self.table = pd.DataFrame(data=[[0, 10], [10, 12], [10, 11]], columns=['O1', 'O2'], index=['S1', 'S2', 'S3']) self.output_dir_obj = tempfile.TemporaryDirectory( prefix='q2-feature-table-test-temp-') self.output_dir = self.output_dir_obj.name def tearDown(self): self.output_dir_obj.cleanup() def assertBasicVizValidity(self, viz_dir, normalize=True): index_fp = os.path.join(viz_dir, 'index.html') self.assertTrue(os.path.exists(index_fp)) with open(index_fp) as fh: index_html = fh.read() normalize_str = '(normalized)' if normalize else '(not normalized)' self.assertTrue(normalize_str in index_html) for ext in ['png', 'svg']: fp = os.path.join(viz_dir, 'feature-table-heatmap.%s' % ext) self.assertTrue(os.path.exists(fp)) def test_defaults(self): heatmap(self.output_dir, self.table) self.assertBasicVizValidity(self.output_dir) def test_with_title(self): heatmap(self.output_dir, self.table, title='foo') self.assertBasicVizValidity(self.output_dir) def test_with_metadata(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['milo', 'summer', 'russ'], name='pet', index=pd.Index(['S1', 'S2', 'S3'], name='id'))) heatmap(self.output_dir, self.table, metadata=md) self.assertBasicVizValidity(self.output_dir) def test_empty_table(self): empty_table = pd.DataFrame([], [], []) with self.assertRaisesRegex(ValueError, 'empty'): heatmap(self.output_dir, empty_table) def test_table_ids_are_subset_of_metadata_ids(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['milo', 'russ'], name='pet', index=pd.Index(['S1', 'S3'], name='id'))) with self.assertRaisesRegex(ValueError, 'not present.*S2'): heatmap(self.output_dir, self.table, metadata=md) def test_extra_metadata_ids(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['milo', 'summer', 'russ', 'peanut'], name='pet', index=pd.Index(['S1', 'S2', 'S3', 'S4'], name='id'))) heatmap(self.output_dir, self.table, metadata=md) self.assertBasicVizValidity(self.output_dir) def test_no_normalization(self): heatmap(self.output_dir, self.table, normalize=False) self.assertBasicVizValidity(self.output_dir, normalize=False) def test_no_sample_cluster(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['milo', 'summer', 'russ'], name='pet', index=pd.Index(['S1', 'S2', 'S3'], name='id'))) heatmap(self.output_dir, self.table, metadata=md, cluster='features') self.assertBasicVizValidity(self.output_dir) class TestPrivateHelpers(unittest.TestCase): def setUp(self): self.table = pd.DataFrame(data=[[0, 10], [10, 12], [10, 11]], columns=['O1', 'O2'], index=['S1', 'S2', 'S3']) def test_munge_metadata_simple(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['milo', 'russ', 'russ'], name='pet', index=pd.Index(['S1', 'S2', 'S3'], name='id'))) obs = _munge_metadata(md, self.table, 'both') exp_idx = pd.Index(['milo | S1', 'russ | S2', 'russ | S3'], name='pet | id') exp = pd.DataFrame([[0, 10], [10, 12], [10, 11]], 
columns=['O1', 'O2'], index=exp_idx) assert_frame_equal(exp, obs) def test_munge_metadata_ids_different_order(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['russ', 'milo', 'russ'], name='pet', index=pd.Index(['S2', 'S1', 'S3'], name='id'))) obs = _munge_metadata(md, self.table, 'both') exp_idx = pd.Index(['milo | S1', 'russ | S2', 'russ | S3'], name='pet | id') exp = pd.DataFrame([[0, 10], [10, 12], [10, 11]], columns=['O1', 'O2'], index=exp_idx) assert_frame_equal(exp, obs) def test_munge_metadata_missing_samples(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['milo', 'russ'], name='pet', index=pd.Index(['S1', 'S3'], name='id'))) with self.assertRaisesRegex(ValueError, 'not present.*S2'): _munge_metadata(md, self.table, 'both') def test_munge_metadata_empty_values(self): md = qiime2.CategoricalMetadataColumn( pd.Series([None, 'russ', np.nan], name='pet', index=pd.Index(['S1', 'S2', 'S3'], name='id'))) obs = _munge_metadata(md, self.table, 'both') exp_idx = pd.Index(['[No Value] | S1', 'russ | S2', '[No Value] | S3'], name='pet | id') exp = pd.DataFrame([[0, 10], [10, 12], [10, 11]], columns=['O1', 'O2'], index=exp_idx) assert_frame_equal(exp, obs) def test_munge_metadata_sort_samples(self): md = qiime2.CategoricalMetadataColumn( pd.Series(['peanut', 'milo', 'russ'], name='pet', index=pd.Index(['S1', 'S2', 'S3'], name='id'))) obs = _munge_metadata(md, self.table, 'features') exp_idx = pd.Index(['milo | S2', 'peanut | S1', 'russ | S3'], name='pet | id') exp = pd.DataFrame([[10, 12], [0, 10], [10, 11]], columns=['O1', 'O2'], index=exp_idx) assert_frame_equal(exp, obs) if __name__ == "__main__": unittest.main()
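
# Hedged usage sketch mirroring the tests above (the output directory must
# already exist, as with the TemporaryDirectory used in setUp):
#
#   table = pd.DataFrame([[0, 10], [10, 12]], columns=['O1', 'O2'], index=['S1', 'S2'])
#   heatmap('/tmp/q2-heatmap-demo', table)
#   # writes index.html plus feature-table-heatmap.png/.svg into that directory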
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct  4 22:53:41 2016

@author: midhununnikrishnan
"""

import numpy as np
import combinatorics as cb


def sumofdigits(G, k=1) -> int:
    """Return the sum of the digits of G, or of the k-th powers of its digits."""
    su = 0
    while G > 0:
        if k == 1:
            su += (G % 10)
        else:
            su += (G % 10)**k
        G //= 10
    return su


_mrpt_num_trials = 10  # number of bases to test


def is_probable_prime(n, numtrials=10):
    """
    Miller-Rabin primality test.
    CODE PLAGIARIZED!!!! minor modification of the code in:
    https://rosettacode.org/wiki/Miller%E2%80%93Rabin_primality_test#Python

    A return value of False means n is certainly not prime. A return value of
    True means n is very likely a prime.
    """
    _mrpt_num_trials = numtrials
    assert n >= 2
    # special case 2,3
    if n == 2 or n == 3:
        return True
    # ensure n is odd
    if n % 2 == 0:
        return False
    # quick divisibility-by-3 check via the digit sum
    su = 0
    t = n
    while t > 0:
        su += t % 10
        t //= 10
    if su % 3 == 0:
        return False
    # write n-1 as 2**s * d
    # repeatedly try to divide n-1 by 2
    s = 0
    d = n - 1
    while True:
        quotient, remainder = divmod(d, 2)
        if remainder == 1:
            break
        s += 1
        d = quotient
    assert(2**s * d == n - 1)

    # test the base a to see whether it is a witness for the compositeness of n
    def try_composite(a):
        if pow(a, d, n) == 1:
            return False
        for i in range(s):
            if pow(a, 2**i * d, n) == n - 1:
                return False
        return True  # n is definitely composite

    for i in range(_mrpt_num_trials):
        a = np.random.randint(2, n)
        if try_composite(a):
            return False

    return True  # no base tested showed n as composite


def sieve(lessthan: int = -1, numprimes: int = -1):
    """list of prime numbers using a simple Eratosthenes sieve

    numprimes := the number of consecutive primes from 2 to be computed
    lessthan := strict upper bound on the largest prime to be computed
    If both numprimes and lessthan are specified, lessthan is given precedence
    """
    if numprimes < 1 and lessthan < 3:
        raise Exception('invalid specifications')
    by_count = lessthan < 3  # only numprimes was specified
    if by_count:
        # upper bound for the numprimes-th prime via Rosser's theorem:
        # p_n < n*(ln n + ln ln n) for n >= 6
        n = max(numprimes, 6)
        lessthan = int(n * (np.log(n) + np.log(np.log(n)))) + 1
    if lessthan > 1e18:  # your computer can easily crash for less
        raise Exception('are you trying to crash your computer?')

    q = np.zeros(lessthan + 1)
    for j in range(2, (lessthan + 1)//2):
        if q[j] == 0:
            for k in range(2, 1 + (lessthan - 1)//j):
                q[int(k*j)] = 1
    primes = [x for x in range(2, lessthan) if q[x] == 0]
    if by_count:
        primes = primes[:numprimes]
    return primes


def isprime(N: int) -> bool:
    """Naive primality test by trial division.
    """
    if N > 1 and all(N % j for j in range(2, 1 + int(np.sqrt(N)))):
        return True
    else:
        return False


def PrimeFactors(N):
    d = 2
    factors = []
    while N > 1:
        if N % d == 0:
            i = 0
            while N % d == 0:
                N //= d
                i += 1
            factors.append((d, i))
        d += 1
        if d*d > N:
            if N > 1:
                factors.append((int(N), 1))
            break
    return factors


class assistedPF:
    """ facility to efficiently factorize when multiple factorizations
    need to be done in sequence
    """
    __Numprimes = 10
    __sieve = []
    __nbool = []

    def __init__(self, N):
        self.__Numprimes = N
        self.__sieve = sieve(N)
        self.__nbool = [False]*N
        for i in self.__sieve:
            self.__nbool[i] = True

    def factorize(self, N):
        """ factorize w.r.t the primes constructed - prime factors larger
        than the sieved bound are not captured
        """
        pfs = []
        if self.__nbool[N]:
            return [(N, 1)]
        for d in self.__sieve:
            i = 0
            while N % d == 0:
                i += 1
                N //= d
            if i > 0:
                pfs.append((d, i))
            if d > N:
                break
        return pfs


def factorcombine(factors):
    prod = 1
    for x in factors:
        prod *= x[0]**x[1]
    return prod


def sumofFactors(N: int) -> int:
    """ finds the sum of all proper divisors of N
    """
    pf = PrimeFactors(N)
    prod = 1
    for q in pf:
        prod *= (q[0]**(q[1] + 1) - 1)//(q[0] - 1)
    return prod - N


def gcd(a: int, b: int) -> int:
    """ Euclid's algorithm (subtraction form) for the GCD of two integers
    """
    if a <= 0 or b <= 0:
        raise Exception('only positive integers as input to gcd')
    while True:
        if a == b:
            return a
        elif b == 1 or a == 1:
            return 1
        elif a > b:
            b = a - b
            a = a - b
        else:
            b = b - a
            a = b + a


def coprime(N):
    """ Cheap generator to iterate across all coprime pairs of integers
    ordered by the product of the pair. Generates only pairs comprised of
    numbers whose product is below N.
    """
    F = assistedPF(N)
    for i in range(1, N):
        P = F.factorize(i)
        for j in range(2**len(P)):
            bits = cb.int2list(2**len(P) + j, 2)[1:]
            f1 = factorcombine([P[x] for x in range(len(P)) if bits[x] == 0])
            f2 = factorcombine([P[x] for x in range(len(P)) if bits[x] == 1])
            yield (f1, f2)


def sqrtiter(N):
    """ generates an infinite iterator for the continued fraction coefficients
    of \\sqrt{N}. i.e., ${a_0,a_1,a_2...}$ is yielded by this iterator where
    \\sqrt{N} = a_0 + \\frac{1}{a_1 + \\frac{1}{a_2 + \\dots}}
    """
    b, c = 0, 1
    sqrt = np.sqrt(N)
    if int(sqrt) == sqrt:  # perfect square: no continued-fraction expansion
        return
    history = []  # visited (a, b, c) triples (kept for debugging, unused)
    a = int((b + sqrt)/c)
    yield a
    while True:
        history.append((a, b, c))
        b = c*a - b
        c = (N - b**2)/c
        assert int(c) == c
        assert c > 0
        a = int((b + sqrt)/c)
        yield a
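
# Quick smoke-test sketch for the helpers above (illustrative values only):
if __name__ == '__main__':
    import itertools
    print(sieve(lessthan=30))                      # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(sieve(numprimes=5))                      # [2, 3, 5, 7, 11]
    print(is_probable_prime(2**61 - 1))            # True (a Mersenne prime)
    print(list(itertools.islice(sqrtiter(2), 5)))  # [1, 2, 2, 2, 2], i.e. sqrt(2) = [1; 2, 2, ...]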
nilq/baby-python
python
from lacore.adf.persist import make_adf from lacore.archive import restore_archive as _restore_archive from lacli.nice import with_low_priority from lacli.hash import HashIO def archive_handle(docs): h = HashIO() make_adf(docs, out=h) return h.getvalue().encode('hex') restore_archive = with_low_priority(_restore_archive)
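
# Hedged usage note: make_adf (from lacore) serializes 'docs' (presumably the
# ADF documents defined in lacore.adf) into the HashIO buffer, so
# archive_handle returns the hex-encoded digest of that serialization;
# restore_archive is lacore's restore wrapped to run at low process priority.
# Note that .encode('hex') is a Python 2 idiom (Python 3 would need
# binascii.hexlify).
#
#   handle = archive_handle(docs)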
nilq/baby-python
python
import pandas as pd


def find_related_cols_by_name(dataframe_list, relationship_dict=None):
    # dataframe_list
    #   List of pandas dataframe objects
    #
    # relationship_dict
    #   This is an existing relationship_dict. If None, a new
    #   relationship_dict should be created

    ###
    # Student code (create additional functions as necessary)
    ###

    # mock-up for demonstration - remove after development
    relationship_dict['airlines']['carrier']['relationships'] = [{'flights.carrier': {}}]
    relationship_dict['airports']['dest']['relationships'] = [{'flights.dest': {}}]
    relationship_dict['flights']['dest']['relationships'] = [{'airports.dest': {}}]
    relationship_dict['flights']['carrier']['relationships'] = [{'airlines.carrier': {}}]
    relationship_dict['flights']['flight_id']['relationships'] = [{'trip_logs.flight_id': {}}]
    relationship_dict['trip_logs']['flight_id']['relationships'] = [{'flights.flight_id': {}}]

    # return relationship structure
    return relationship_dict


def find_related_cols_by_content(dataframe_list, relationship_dict=None):
    # dataframe_list
    #   List of pandas dataframe objects
    #
    # relationship_dict
    #   This is an existing relationship_dict. If None, a new
    #   relationship_dict should be created

    ###
    # Student code (create additional functions as necessary)
    ###

    # return relationship structure
    return relationship_dict


def find_parent_child_relationships(dataframe_list, relationship_dict, hints=None):
    # dataframe_list
    #   List of pandas dataframe objects
    #
    # relationship_dict
    #   An existing relationship_dict is required
    #
    # hints
    #   Structure containing hints in cases where the data is ambiguous such
    #   as when two columns are related and appear to be primary key candidates
    #   in both tables. Format is:
    #   [{parent table.column: child table.column}, ...]

    ###
    # Student code (create additional functions as necessary)
    ###

    # mock-up for demonstration - remove after development
    relationship_dict['airlines']['carrier']['relationships'] = [{'flights.carrier': {'type': 'Parent'}}]
    relationship_dict['airports']['dest']['relationships'] = [{'flights.dest': {'type': 'Parent'}}]
    relationship_dict['flights']['dest']['relationships'] = [{'airports.dest': {'type': 'Child'}}]
    relationship_dict['flights']['carrier']['relationships'] = [{'airlines.carrier': {'type': 'Child'}}]
    relationship_dict['flights']['flight_id']['relationships'] = [{'trip_logs.flight_id': {'type': 'Parent'}}]
    relationship_dict['trip_logs']['flight_id']['relationships'] = [{'flights.flight_id': {'type': 'Child'}}]

    # return relationship structure
    return relationship_dict
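
# Shape of relationship_dict implied by the mock-ups above (illustrative only):
#
#   {'flights':  {'carrier':   {'relationships': [{'airlines.carrier': {'type': 'Child'}}]},
#                 'flight_id': {'relationships': [{'trip_logs.flight_id': {'type': 'Parent'}}]}},
#    'airlines': {'carrier':   {'relationships': [{'flights.carrier': {'type': 'Parent'}}]}},
#    ...}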
nilq/baby-python
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Mar 5 10:15:25 2021 @author: lenakilian """ import pandas as pd import copy as cp import geopandas as gpd wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/' years = list(range(2007, 2018, 2)) geog = 'MSOA' yr = 2015 dict_cat = 'category_8' cat_dict = pd.read_excel(wd + '/data/processed/LCFS/Meta/lcfs_desc_anne&john.xlsx') ghg_list = cat_dict[[dict_cat]].drop_duplicates()[dict_cat].tolist() ghg_list.remove('other') ghg_list.remove('Other transport') ghg_list = [x[:10].replace('/', '').replace(' ', '') for x in ghg_list] var_list = ['AI2015ln', 'lim', 'pop65', 'pop14', 'bame', 'totalwork', 'totalinc'] var_list2 = ['AI2015_ln', 'lim', 'pop_65.', 'pop_14.', 'bame', 'total_work', 'total_inc'] model_fit = ['RSS.gw', 'AIC', 'AICc', 'enp', 'edf', 'gw.R2', 'gwR2.adj', 'BIC'] global_results = {} for ghg in ghg_list: for var in var_list: global_results[ghg + '_' + var] = pd.read_csv(wd + 'Spatial_Emissions/outputs/GWR/global_coeffs/global_coef_london_' + ghg + '_' + var + '_' + str(yr) + '.csv') fit = global_results[ghg + '_' + var][model_fit].drop_duplicates().T.reset_index() fit['Summary'] = 'Model fit' fit.columns = ['Measure', 'Value', 'Summary'] global_results[ghg + '_' + var] = global_results[ghg + '_' + var].set_index(['Unnamed: 0']).drop(model_fit, axis=1).\ stack().reset_index().drop_duplicates() global_results[ghg + '_' + var].columns = ['Summary', 'Measure', 'Value'] global_results[ghg + '_' + var] = global_results[ghg + '_' + var].append(fit) global_results[ghg + '_' + var]['income_control'] = False if var != 'totalinc': temp = pd.read_csv(wd + 'Spatial_Emissions/outputs/GWR/global_coeffs/global_coef_london_' + ghg + '_' + var + '_' + str(yr) + '_w-inc.csv') fit = temp[model_fit].drop_duplicates().T.reset_index() fit['Summary'] = 'Model fit' fit.columns = ['Measure', 'Value', 'Summary'] temp = temp.set_index(['Unnamed: 0']).drop(model_fit, axis=1).\ stack().reset_index().drop_duplicates() temp.columns = ['Summary', 'Measure', 'Value'] temp = temp.append(fit) temp['income_control'] = True global_results[ghg + '_' + var] = global_results[ghg + '_' + var].append(temp) global_results[ghg + '_' + var] = global_results[ghg + '_' + var].set_index(['Summary', 'Measure', 'income_control']) global_results[ghg + '_' + var] = global_results[ghg + '_' + var].unstack(level='income_control') all_results = pd.DataFrame(index = global_results[ghg_list[0] + '_' + var_list[0]].rename(index={var_list2[0]:'predictor'}).index) for ghg in ghg_list: for i in range(len(var_list)): var = var_list[i] temp = cp.copy(global_results[ghg + '_' + var]).rename(index={var_list2[i]:'predictor'}) temp.columns = pd.MultiIndex.from_arrays([[ghg + '_' + var] * len(temp.columns), temp.columns.levels[1].tolist()]) all_results = all_results.join(temp, how='left') all_results = all_results.dropna(how='all') # Make tidy table check = all_results.loc[['Max.', 'Min.', 'Median', 'Global Estimate', 'Global pval', 'Global tval']].swaplevel(axis=0).loc['predictor'] check = all_results.loc[['Max.', 'Min.', 'Median', 'Global Estimate', 'Global pval', 'Global tval', 'Model fit']].T for item in check['Global pval'].columns.tolist(): check[('Global pval str', item)] = ' ' check.loc[check[('Global pval', item)] < 0.05, ('Global pval str', item)] = '*' check.loc[check[('Global pval', item)] < 0.01, ('Global pval str', item)] = '**' keep = [# Model fit ('Model fit', 'AIC'), ('Model fit', 'gwR2.adj'), # Global coefficients w7 pvalues ('Global Estimate', 
'predictor'), ('Global pval str', 'predictor'), ('Global Estimate', 'Intercept'), ('Global pval str', 'Intercept'), ('Global Estimate', 'population'), ('Global pval str', 'population'), ('Global Estimate', 'total_inc'), ('Global pval str', 'total_inc'), # Local coefficient summary (predictor only) ('Min.', 'predictor'), ('Median', 'predictor'), ('Max.', 'predictor') ] check = check[keep] check[('Desc.', 'DV')] = [x[0].split('_')[0] for x in check.index.tolist()] check[('Desc.', 'Pred.')] = [x[0].split('_')[1] for x in check.index.tolist()] check[('Desc.', 'Income controlled')] = [x[1] for x in check.index.tolist()] check = check.set_index([('Desc.', 'DV'), ('Desc.', 'Pred.'), ('Desc.', 'Income controlled')]).reset_index() order = dict(zip(var_list, [1, 2, 3, 4, 5, 6, 0])) order2 = dict(zip([0, 1, 2, 3, 4, 5, 6], ['Income', 'Public Transport Density', 'Pop. limited in day-to-day activities', 'Pop. aged 65 or older', 'Pop. aged 14 or younger', 'Pop. identifying as BAME', 'Distance to workplace'])) check[('index', 'Pred.')] = check[('Desc.', 'Pred.')].map(order) check[('Desc.', 'Pred.')] = check[('index', 'Pred.')].map(order2) check[('index', 'DV')] = check[('Desc.', 'DV')].map(dict(zip(ghg_list, [0, 2, 3, 4, 1]))) check = check.sort_values([('index', 'DV'), ('index', 'Pred.'), ('Desc.', 'Income controlled')]) check.loc[check[('Desc.', 'Income controlled')] == True, ('Desc.', 'Income controlled')] = 'Yes' check.loc[check[('Desc.', 'Income controlled')] == False, ('Desc.', 'Income controlled')] = 'No' check.loc[check[('Desc.', 'Pred.')] == 'Income', ('Desc.', 'Income controlled')] = 'Yes' check.to_csv(wd + 'Spatial_Emissions/outputs/GWR/summary_table.csv')
nilq/baby-python
python
""" Provides helping function for issues. """ import copy from json import JSONDecodeError from math import ceil from typing import Optional, List, Collection, Dict import arrow from pyramid.request import Request from slugify import slugify from dbas.database import DBDiscussionSession from dbas.database.discussion_model import User, Issue, Language, sql_timestamp_pretty_print, \ ClickedStatement, StatementToIssue, Statement, TextVersion from dbas.handler.language import get_language_from_header from dbas.helper.query import generate_short_url from dbas.helper.url import UrlManager from dbas.lib import get_enabled_issues_as_query, nick_of_anonymous_user, get_enabled_statement_as_query from dbas.strings.keywords import Keywords as _ from dbas.strings.translator import Translator def prepare_json_of_issue(db_issue: Issue, db_user: User) -> Dict: """ Prepares slug, info, argument count and the date of the issue as dict :param db_issue: Issue :param db_user: User :return: Issue-dict() """ slug = db_issue.slug title = db_issue.title info = db_issue.info long_info = db_issue.long_info stat_count = len(db_issue.all_arguments) lang = db_issue.lang date_pretty = sql_timestamp_pretty_print(db_issue.date, lang) duration = (arrow.utcnow() - db_issue.date) days, seconds = duration.days, duration.seconds duration = ceil(days * 24 + seconds / 3600) date_ms = int(db_issue.date.format('X')) * 1000 date = db_issue.date.format('DD.MM.YY') time = db_issue.date.format('HH:mm') all_array = [get_issue_dict_for(issue, db_issue.uid, lang) for issue in db_user.accessible_issues if issue.uid != db_issue.uid] _t = Translator(lang) tooltip = _t.get(_.discussionInfoTooltipSg) if stat_count == 1 else _t.get(_.discussionInfoTooltipPl) tooltip = tooltip.format(date, time, stat_count) decision_process = db_issue.decision_process return { 'slug': slug, 'lang': lang, 'info': info, 'long_info': long_info, 'title': title, 'uid': db_issue.uid, 'stat_count': stat_count, 'date': date, 'date_ms': date_ms, 'date_pretty': date_pretty, 'all': all_array, 'tooltip': tooltip, 'intro': _t.get(_.currentDiscussion), 'duration': duration, 'read_only': db_issue.is_read_only, 'decidotron_budget': decision_process.to_dict() if decision_process else None } def get_number_of_authors(issue_uid: int) -> int: """ Returns number of active users for the issue :param issue_uid: Issue Issue.uid :return: Integer """ issues_statements_uids = [el.statement_uid for el in DBDiscussionSession.query(StatementToIssue).filter_by(issue_uid=issue_uid).all()] active_statements_uids = [el.uid for el in get_enabled_statement_as_query().filter(Statement.uid.in_(issues_statements_uids)).all()] active_users = [el.author_uid for el in DBDiscussionSession.query(TextVersion).filter( TextVersion.statement_uid.in_(active_statements_uids))] return len(set(active_users)) def get_issue_dict_for(db_issue: Issue, uid: int, lang: str) -> dict: """ Creates an dictionary for the issue :param db_issue: Issue :param uid: current selected Issue.uid :param lang: ui_locales :return: dict() """ _um = UrlManager(db_issue.slug) issue_dict = { 'uid': str(db_issue.uid), 'slug': db_issue.slug, 'title': db_issue.title, 'url': '/' + db_issue.slug, 'review_url': _um.get_review_url() if str(uid) != str(db_issue.uid) else '', 'info': db_issue.info, 'stat_count': len(db_issue.statements), 'date': sql_timestamp_pretty_print(db_issue.date, lang), 'author': db_issue.author.public_nickname, 'error': '', 'author_url': '/user/{}'.format(db_issue.author.uid), 'enabled': 'disabled' if str(uid) == 
str(db_issue.uid) else 'enabled' } return issue_dict def get_id_of_slug(slug: str) -> Issue: """ Returns the uid of the issue with given slug :param slug: slug :return: uid """ return get_enabled_issues_as_query().filter_by(slug=slug).first() def save_issue_in_session(issue: Issue, request: Request): """ :param issue: :param request: :return: """ request.session['issue'] = issue.uid def get_issue_id(request) -> Optional[int]: """ Returns issue uid saved in request. If there is no uid, we will choose an issue based on the language from the requests header :param request: self.request :return: uid """ issue_uid = None try: issue_uid = request.json_body.get('issue') except (JSONDecodeError, AttributeError): pass if not issue_uid: issue_uid = request.matchdict.get('issue') if not issue_uid: issue_uid = request.params.get('issue') if not issue_uid: issue_uid = request.session.get('issue') # no issue found if not issue_uid: return None # save issue in session request.session['issue'] = issue_uid return issue_uid def get_issue_based_on_header(request): """ :param request: :return: """ # logger('IssueHelper', 'get_issue_based_on_header', 'no saved issue found') ui_locales = get_language_from_header(request) db_issues = get_enabled_issues_as_query() db_lang = DBDiscussionSession.query(Language).filter_by(ui_locales=ui_locales).first() db_issue = db_issues.filter_by(lang_uid=db_lang.uid).first() if not db_issue: db_issue = db_issues.first() return db_issue.uid def get_title_for_slug(slug) -> Optional[str]: """ Returns the issues title for a given slug :param slug: String :return: String """ db_issues = DBDiscussionSession.query(Issue).all() for issue in db_issues: if str(slugify(issue.title)) == str(slug): return issue.title return None def get_issues_overview_for(db_user: User, app_url: str) -> Dict[str, Collection]: """ Returns dictionary with keywords 'user' and 'others', which got lists with dicts with infos IMPORTANT: URL's are generated for the frontend! 
:param db_user: User :param app_url: current applications url :return: dict """ if not db_user or db_user.nickname == nick_of_anonymous_user: return { 'user': [], 'other': [] } if db_user.is_admin(): db_issues_other_users = DBDiscussionSession.query(Issue).filter(Issue.author != db_user).all() else: db_issues_other_users = [issue for issue in db_user.accessible_issues if issue.author != db_user] db_issues_of_user = DBDiscussionSession.query(Issue).filter_by(author=db_user).order_by( Issue.uid.asc()).all() return { 'user': [__create_issue_dict(issue, app_url) for issue in db_issues_of_user], 'other': [__create_issue_dict(issue, app_url) for issue in db_issues_other_users] } def get_issues_overview_on_start(db_user: User) -> dict: """ Returns list with title, date, and count of statements for each visible issue :param db_user: User :return: """ db_issues: List[Issue] = db_user.accessible_issues db_issues.sort(key=lambda issue: issue.uid) readable = [] writable = [] featured = [] for index, db_issue in enumerate(db_issues): issue_dict = { 'uid': db_issue.uid, 'url': '/discuss/' + db_issue.slug, 'statements': len(db_issue.statements), 'active_users': get_number_of_authors(db_issue.uid), 'title': db_issue.title, 'date': db_issue.date.format('DD.MM.YY HH:mm'), 'lang': { 'is_de': db_issue.lang == 'de', 'is_en': db_issue.lang == 'en', }, 'featured': db_issue.is_featured } if db_issue.is_read_only: readable.append(issue_dict) else: writable.append(issue_dict) if db_issue.is_featured: featured_issue_dict = copy.deepcopy(issue_dict) featured_issue_dict['info'] = db_issue.info featured.append(featured_issue_dict) return { 'issues': { 'readable': readable, 'writable': writable, 'featured': featured } } def set_discussions_properties(db_user: User, db_issue: Issue, value, iproperty, translator) -> dict: """ :param db_user: User :param db_issue: Issue :param value: The value which should be assigned to property :param iproperty: Property of Issue, e.g. 
        is_disabled
    :param translator: Translator for error messages
    :return:
    """
    if db_issue.author_uid != db_user.uid and not db_user.is_admin():
        return {'error': translator.get(_.noRights)}

    if iproperty == 'enable':
        db_issue.set_disabled(not value)
    elif iproperty == 'public':
        db_issue.set_private(not value)
    elif iproperty == 'writable':
        db_issue.set_read_only(not value)
    else:
        return {'error': translator.get(_.internalKeyError)}

    return {'error': ''}


def __create_issue_dict(db_issue: Issue, app_url: str) -> dict:
    """
    Returns a dictionary with information about the given issue

    :param db_issue: database row of issue
    :param app_url: current application's URL
    :return: dict()
    """
    short_url_dict = generate_short_url(app_url + '/discuss/' + db_issue.slug)
    url = short_url_dict['url'] if len(short_url_dict['url']) > 0 else app_url + '/discuss/' + db_issue.slug

    # we do not have to check for clicked arguments, because arguments consist of statements
    statements = [el.statement_uid for el in
                  DBDiscussionSession.query(StatementToIssue).filter_by(issue_uid=db_issue.uid).all()]
    db_clicked_statements = DBDiscussionSession.query(ClickedStatement).filter(
        ClickedStatement.statement_uid.in_(statements)).all()
    authors_clicked_statement = [click.author_uid for click in db_clicked_statements]
    db_authors_len = DBDiscussionSession.query(User).filter(User.uid.in_(authors_clicked_statement)).count()

    prepared_dict = {
        'uid': db_issue.uid,
        'title': db_issue.title,
        'url': '/' + db_issue.slug,
        'short_url': url,
        'date': db_issue.date.format('DD.MM.YY HH:mm'),
        'count_of_statements': len(statements),
        'is_enabled': not db_issue.is_disabled,
        'is_public': not db_issue.is_private,
        'is_writable': not db_issue.is_read_only,
        'participants': db_authors_len,
        'lang': {
            'is_de': db_issue.lang == 'de',
            'is_en': db_issue.lang == 'en',
        }
    }
    return prepared_dict
nilq/baby-python
python
# Generated by Django 3.0.7 on 2020-07-12 09:58 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('admin_app', '0004_order_arsip'), ] operations = [ migrations.AddField( model_name='order', name='p1_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p2_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p3_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p4_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p5_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p6_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p7_a', field=models.BooleanField(default=False), ), migrations.AddField( model_name='order', name='p8_a', field=models.BooleanField(default=False), ), ]
nilq/baby-python
python
import torch import torch.nn as nn from torchvision import models from torchvision import transforms from bench_press.models.modules.spatial_softmax import SpatialSoftmax pretrained_model_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def get_vgg_encoder(vgg_type, num_features): """ :param vgg_type: classname of desired vgg model, e.g. torchvision.models.vgg16 :param num_features: number of output features for encoder :return: vgg model (nn.Module type) """ model = vgg_type(pretrained=True, progress=True) model.classifier[-1] = nn.Linear(in_features=4096, out_features=num_features) return model def print_hook(self, input, output): print(f'output size: {output.data.size()}') print(f'output norm: {output.data.norm()}') def get_resnet_encoder(resnet_type, num_features, freeze=False): model = resnet_type(pretrained=True, progress=True) for param in model.parameters(): param.requires_grad = not freeze model.fc = nn.Linear(in_features=model.fc.in_features, out_features=num_features) return model def get_resnet_spatial_encoder(resnet_type, num_features, freeze=False): model = get_resnet_encoder(resnet_type, num_features, freeze=freeze) model_list = list(model.children())[:-2] model = nn.Sequential(*model_list) spatial_softmax = SpatialSoftmax(6, 8, 512) model.add_module('spatial_softmax', spatial_softmax) model.add_module('fc', nn.Linear(512*2, num_features)) return model
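
# Hedged usage sketch. Shape reasoning: SpatialSoftmax(6, 8, 512) matches the
# 512-channel feature map of ResNet-18/34, which is 6x8 for roughly 192x256
# inputs after the network's 32x downsampling. Model names are real
# torchvision models; the input tensor below is illustrative only.
#
#   enc = get_resnet_encoder(models.resnet18, num_features=64)
#   x = pretrained_model_normalize(torch.rand(3, 192, 256)).unsqueeze(0)
#   feats = enc(x)  # -> torch.Size([1, 64])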
nilq/baby-python
python
import arcade import math import random import settings # default window SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = "WeFly X Charlie" BULLET_SPEED = 2 Score = 0 INSTRUCTIONS_PAGE_0 = 0 INSTRUCTIONS_PAGE_1 = 1 GAME_RUNNING = 2 GAME_OVER = 3 WIN = 4 position_y_1 = 600 position_y_2 = 0 # default boss' properties explode = 0 explode_x = 0 explode_y = 0 fps = 0 boss_create_fps = 0 level = 0 # boss level prompt prompt = False prompt_time = 0 boss_sound_on = 0 game_sound_on = 0 boss_hp = 0 boss_hp_current = 0 # default boss laser laser_bomb = False laser_effect = 0 laser_fps = 0 # Calculate the remaining missile laser_counter = 0 laser_counter_update = 0 try: background_sound = arcade.sound.load_sound("music/bgm_zhuxuanlv.mp3") missile_sound_1 = arcade.load_sound("music/rocketswitch.wav") hp_bonus_sound = arcade.load_sound("music/supply.wav") button_sound = arcade.load_sound("music/button.wav") bomb_sound = arcade.load_sound("music/all_bomb.wav") game_sound = arcade.sound.load_sound("music/bgm_zhandou2.mp3.wav") game_sound_1 = arcade.sound.load_sound("music/bgm_zhandou2.mp3.wav") game_sound_2 = arcade.sound.load_sound("music/bgm_zhandou2.mp3.wav") game_sound_3 = arcade.sound.load_sound("music/bgm_zhandou2.mp3.wav") boss_sound_1 = arcade.sound.load_sound("music/boss_sound.wav") boss_sound_2 = arcade.sound.load_sound("music/boss_sound.wav") boss_sound_3 = arcade.sound.load_sound("music/boss_sound.wav") boss_sound_4 = arcade.sound.load_sound("music/boss_sound.wav") except Exception as e: print("Error loading sound.", e) class Enemy(arcade.Sprite): # pass attribute to enemy def __init__(self, image, scale, ehp, score, speed, boss): """ Initialize an enemy with information passed in. :param image: enemy image :param scale: enemy scale :param ehp: enemy hit points :param score: kill enemy score :param speed: enemy speed :param boss: enemy type, True when he is boss """ arcade.Sprite.__init__(self, image, scale) self.ehp = ehp self.score = score self.speed = speed self.boss = boss self.left_boss = True # self armo damage, hhp def hitted(self, hhp): """ Enemy hit by self bullet. Return boss kill information and killed coordinates. :param hhp: self bullet damage to the enemy :return: Tuple, represents boss killed(1), otherwise(0); killed xy coordinates in order. """ global Score self.ehp = max(0, self.ehp - hhp) if self.ehp == 0: self.kill() Score += self.score if self.boss: return (1, self.center_x, self.center_y) return (0, 0, 0) def drop(self): """ Update enemy location :return: None """ if self.boss and self.center_y <= 450: if self.center_x <= 100: self.left_boss = False if self.center_x >= 700: self.left_boss = True if self.left_boss: self.center_x -= 2 else: self.center_x += 2 if self.center_x == 100: self.left_boss = False if self.center_x == 700: self.left_boss = True else: self.center_y -= self.speed if self.center_y < 0: self.kill() class Chapter1View(arcade.View): def __init__(self): super().__init__() self.frame_count = 0 self.hp = 100 self.boss = False self.laser_player = 0 self.enemy_list = None self.bullet_list = None self.bullet_self_list = None self.player_list = None self.player = None self.assist = None self.bonus = None self.instructions = [] texture = arcade.load_texture("images/fm.jpeg") self.instructions.append(texture) texture = arcade.load_texture("images/intro.jpeg") self.instructions.append(texture) self.current_state = INSTRUCTIONS_PAGE_0 def setup(self): """ Initialize game interface. Default schedule is 60 fps. 
:return: None """ self.frame_count = 0 self.hp = 100 self.boss = False self.laser_player = 0 self.enemy_list = None self.bullet_list = None self.bullet_self_list = None self.player_list = None self.player = None self.assist = None self.bonus = None arcade.schedule(self.on_update, 1 / 60) self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() self.bullet_self_list = arcade.SpriteList() self.assist = arcade.SpriteList() self.bonus = arcade.SpriteList() # Add player ship self.player = arcade.Sprite("images/SeHero.png", 0.6) self.player_list.append(self.player) # draw instruction page def draw_instructions_page(self, page_number): """ Draw an instruction page. Load the page as an image. """ page_texture = self.instructions[page_number] arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2, page_texture.width, page_texture.height, page_texture, 0) if self.current_state == INSTRUCTIONS_PAGE_0: page_texture = arcade.load_texture("images/play.png") arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, 200, page_texture.width, page_texture.height, page_texture, 0) # draw game over page def draw_game_over(self): """ Draw "Game over" across the screen. """ output = "Game Over" arcade.draw_text(output, 220, 350, arcade.color.WHITE, 54) output = "Click anywhere to quit" arcade.draw_text(output, 245, 260, arcade.color.WHITE, 24) def draw_game_win(self): texture = arcade.load_texture("images/win_page.jpeg") arcade.draw_texture_rectangle(400, 300, 800, 600, texture) def draw_game(self): # Draw background and boss for each level if level == 0: texture_1 = arcade.load_texture("images/bg_0.jpg") arcade.draw_texture_rectangle(400, position_y_1, 800, 600, texture_1) texture_2 = arcade.load_texture("images/bg_0.jpg") arcade.draw_texture_rectangle(400, position_y_2, 800, 600, texture_1) texture_0 = arcade.load_texture("images/boss_2.png") if level == 1: texture_1 = arcade.load_texture("images/bg_new.jpg") arcade.draw_texture_rectangle(400, position_y_1, 800, 600, texture_1) texture_2 = arcade.load_texture("images/bg_new.jpg") arcade.draw_texture_rectangle(400, position_y_2, 800, 600, texture_1) texture_0 = arcade.load_texture("images/boss_4.png") if level == 2: texture_1 = arcade.load_texture("images/bg_1.jpg") arcade.draw_texture_rectangle(400, position_y_1, 800, 600, texture_1) texture_2 = arcade.load_texture("images/bg_1.jpg") arcade.draw_texture_rectangle(400, position_y_2, 800, 600, texture_1) texture_0 = arcade.load_texture("images/boss_1.png") if level == 3: texture_1 = arcade.load_texture("images/bg_new_1.jpg") arcade.draw_texture_rectangle(400, position_y_1, 800, 600, texture_1) texture_2 = arcade.load_texture("images/bg_new_1.jpg") arcade.draw_texture_rectangle(400, position_y_2, 800, 600, texture_1) texture_0 = arcade.load_texture("images/boss_5.png") # draw images self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() self.bullet_self_list.draw() self.assist.draw() self.bonus.draw() # boss killed explode animation if explode == 1: arcade.draw_texture_rectangle(explode_x, explode_y, 240, 180, texture_0) texture_1 = arcade.load_texture("images/bigairplane3.png") arcade.draw_texture_rectangle(explode_x, explode_y, 90, 90, texture_1) elif explode == 2: arcade.draw_texture_rectangle(explode_x, explode_y, 240, 180, texture_0) texture_1 = arcade.load_texture("images/bigairplane4.png") arcade.draw_texture_rectangle(explode_x, explode_y, 90, 90, texture_1) elif explode == 3: arcade.draw_texture_rectangle(explode_x, 
explode_y, 240, 180, texture_0) texture_1 = arcade.load_texture("images/bigairplane5.png") arcade.draw_texture_rectangle(explode_x, explode_y, 90, 90, texture_1) elif explode == 4: texture_0 = arcade.load_texture("images/bg_road.png") arcade.draw_texture_rectangle(400, 300, 450, 430, texture_0) # Draw different boss lasers for b in self.enemy_list: if level == 0: if laser_effect == 1: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) elif laser_effect == 2: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) elif laser_effect == 3: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) elif laser_effect == 4: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) elif laser_effect == 5: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) elif laser_effect == 6: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) if level == 1: if laser_effect == 1: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) elif laser_effect == 2: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) elif laser_effect == 3: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) elif laser_effect == 4: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) elif laser_effect == 5: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) elif laser_effect == 6: arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) if level == 2: if laser_effect == 1: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) arcade.draw_texture_rectangle(b.center_x + 30, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) elif laser_effect == 2: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) arcade.draw_texture_rectangle(b.center_x + 30, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) elif laser_effect == 3: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) arcade.draw_texture_rectangle(b.center_x + 30, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) elif laser_effect == 4: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) arcade.draw_texture_rectangle(b.center_x + 30, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) elif laser_effect == 5: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) arcade.draw_texture_rectangle(b.center_x + 30, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) elif laser_effect == 6: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) 
arcade.draw_texture_rectangle(b.center_x + 30, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) if level == 3: if laser_effect == 1: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) arcade.draw_texture_rectangle(b.center_x + 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser6.png")) elif laser_effect == 2: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) arcade.draw_texture_rectangle(b.center_x + 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser7.png")) elif laser_effect == 3: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) arcade.draw_texture_rectangle(b.center_x + 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser8.png")) elif laser_effect == 4: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) arcade.draw_texture_rectangle(b.center_x + 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser9.png")) elif laser_effect == 5: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) arcade.draw_texture_rectangle(b.center_x + 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser10.png")) elif laser_effect == 6: arcade.draw_texture_rectangle(b.center_x - 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) arcade.draw_texture_rectangle(b.center_x, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) arcade.draw_texture_rectangle(b.center_x + 40, b.center_y - 300, 30, 600, arcade.load_texture("images/bomb_laser11.png")) if prompt: arcade.draw_texture_rectangle(400, 350, 300, 200, arcade.load_texture("images/boss_prompt.png")) if self.boss: arcade.draw_lrtb_rectangle_outline(300, 500, 580, 560, arcade.color.BLACK, 2) arcade.draw_lrtb_rectangle_filled(302, 302 + (198 * boss_hp_current) // boss_hp, 578, 562, arcade.color.RADICAL_RED) # show hp, current score, and remaining laser times on the screen arcade.draw_text("Score: {0:10.2f}".format(Score), 610, 560, arcade.color.WHITE, 12) arcade.draw_lrtb_rectangle_outline(60, 170, 580, 560, arcade.color.WHITE, 2) arcade.draw_lrtb_rectangle_filled(62, 62 + (106 * self.hp) // 100, 578, 562, arcade.color.WHITE) arcade.draw_text("HP: {0:10.2f}%".format(self.hp), 180, 562, arcade.color.WHITE, 12) if self.laser_player >= 1: for i in range(self.laser_player): arcade.draw_texture_rectangle(760 - i * 50, 520, 50, 40, arcade.load_texture("images/missile_icon.png")) def on_show(self): arcade.set_background_color(arcade.color.BLUE_SAPPHIRE) def dead(self): """ Clear the screen when dead :return: None """ self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() 
self.player_list = arcade.SpriteList() self.bullet_self_list = arcade.SpriteList() self.current_state = GAME_OVER def on_draw(self): arcade.start_render() # arcade.draw_text("Chapter 1", settings.WIDTH/2, settings.HEIGHT/2, # arcade.color.BLACK, font_size=30, anchor_x="center") # page_texture = arcade.load_texture("Icon-57.png") # arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, 200, page_texture.width, page_texture.height, page_texture, # 0) # # arcade.start_render() if self.current_state == GAME_RUNNING: self.draw_game() elif self.current_state == INSTRUCTIONS_PAGE_0: self.draw_instructions_page(0) elif self.current_state == INSTRUCTIONS_PAGE_1: self.draw_instructions_page(1) elif self.current_state == GAME_OVER: self.draw_game() self.draw_game_over() elif self.current_state == WIN: self.draw_game_win() def update(self, delta_time): """All the logic to move, and the game logic goes here. """ global explode, explode_x, explode_y, fps, position_y_1, position_y_2, level, prompt, prompt_time, boss_hp, boss_hp_current global up_pressed, down_pressed, left_pressed, right_pressed, laser_bomb, laser_effect, laser_fps, laser_counter, laser_counter_update global boss_create_fps, boss_sound_on, game_sound_on, game_sound_1, game_sound_2, game_sound_3, boss_sound_1, boss_sound_2, boss_sound_3, game_sound, boss_sound_4 if self.current_state != GAME_RUNNING and self.frame_count % 3480 == 0: try: arcade.play_sound(background_sound) except Exception as e: print("Error playing sound.", e) pass if self.current_state == GAME_RUNNING: try: arcade.stop_sound(background_sound) except Exception as e: print("Error pausing sound.", e) pass if level == 4: self.current_state = WIN return if self.current_state == GAME_RUNNING: if self.boss and boss_sound_on == 0: boss_sound_on = 1 try: if level == 0: arcade.stop_sound(game_sound) arcade.play_sound(boss_sound_1) if level == 1: game_sound_1.pause() arcade.play_sound(boss_sound_2) if level == 2: game_sound_2.pause() arcade.play_sound(boss_sound_3) if level == 3: game_sound_3.pause() arcade.play_sound(boss_sound_4) except Exception as e: print("Error pausing sound.", e) pass if not self.boss: try: if level == 0: boss_sound_1.pause() if level == 1: boss_sound_2.pause() if level == 2: boss_sound_3.pause() if level == 3: boss_sound_4.pause() except Exception as e: print("Error pausing sound.", e) pass boss_sound_on = 0 # if (self.frame_count - fps) == 180 and fps != 0: # game_sound_on = 0 if game_sound_on == 0: try: if level == 0: arcade.play_sound(game_sound) if level == 1: arcade.play_sound(game_sound_1) if level == 2: arcade.play_sound(game_sound_2) if level == 3: arcade.play_sound(game_sound_3) except Exception as e: print("Error playing sound.", e) pass game_sound_on = 1 # update remaining laser based on current score laser_counter = Score // 1000 + 1 if laser_counter + laser_counter_update == 1: arcade.play_sound(missile_sound_1) self.laser_player += 1 laser_counter_update -= 1 if self.hp <= 0: game_sound_on = 10 try: arcade.stop_sound(game_sound) # game_sound_1.pause() # game_sound_2.pause() # game_sound_3.pause() # boss_sound_1.pause() # boss_sound_2.pause() # boss_sound_3.pause() # boss_sound_4.pause() except Exception as e: print("Error pausing sound.", e) self.dead() else: # drop hp bonus every 60s if self.frame_count % 3600 == 3599: bonus_hp = arcade.Sprite("images/hp_bonus.png", 0.45) bonus_hp.center_x = random.randrange(0, SCREEN_WIDTH) bonus_hp.center_y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT * 1.25) self.bonus.append(bonus_hp) if self.frame_count % 
240 == 0 and not self.boss and not 1 <= explode <= 4: for _ in range(2 + level): # generate randomly enemy planes of different levels ranNum = random.randint(0, 1000) if ranNum < 500: enemy = Enemy("images/plane_small.png", 0.8, 2, 10, 4, False) elif ranNum < 850: enemy = Enemy("images/bigplane0.png", 0.7, 3, 50, 3, False) else: enemy = Enemy("images/boss0.png", 0.35, 5, 100, 2, False) enemy.center_x = random.randrange(0, SCREEN_WIDTH) enemy.center_y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT * 1.25) enemy.angle = 180 self.enemy_list.append(enemy) # create a boss and ensure no small enemies appear during the boss battle elif self.frame_count - fps == (1799 * (level + 1)) and not self.boss and not 1 <= explode <= 4: # 提示 boss_create_fps = self.frame_count prompt = True prompt_time = self.frame_count # update boss image based on game level if level == 0: enemy = Enemy("images/boss_2.png", 0.8, 25, 500, 2, True) elif level == 1: enemy = Enemy("images/boss_4.png", 0.8, 35, 1000, 3, True) elif level == 2: enemy = Enemy("images/boss_1.png", 0.8, 50, 2000, 3, True) elif level == 3: enemy = Enemy("images/boss_5.png", 0.8, 70, 4000, 3, True) enemy.center_x = random.randrange(0, SCREEN_WIDTH) enemy.center_y = SCREEN_HEIGHT * 2 enemy.angle = 180 self.enemy_list.append(enemy) self.boss = True boss_hp = enemy.ehp # set time for boss prompt to be 3s if self.frame_count - prompt_time == 180 and prompt: prompt = False # update player's hp based on different damage levels from boss for boss in self.enemy_list: if 1 <= laser_effect <= 6: # realize the disappearance of self bullet when it hits boss for e in self.bullet_self_list: if boss.center_x - 20 <= e.center_x <= boss.center_x + 20: e.kill() # calculate different damage levels of laser from boss if level == 0: if self.player.center_x - 36 < boss.center_x < self.player.center_x + 36: self.hp = max(0, self.hp - 0.8) if level == 1: if self.player.center_x - 36 < boss.center_x < self.player.center_x + 36: self.hp = max(0, self.hp - 0.9) if level == 2: if self.player.center_x - 36 < boss.center_x - 45 < self.player.center_x + 36 or self.player.center_x - 36 < boss.center_x + 15 < self.player.center_x + 36: self.hp = max(0, self.hp - 1) if level == 3: if self.player.center_x - 36 < boss.center_x - 45 < self.player.center_x + 36 or self.player.center_x - 36 < boss.center_x < self.player.center_x + 36 or self.player.center_x - 36 < boss.center_x + 15 < self.player.center_x + 36: self.hp = max(0, self.hp - 1.1) # update the background position position_y_1 -= 1 position_y_2 -= 1 if position_y_1 == -300: position_y_1 = 900 if position_y_2 == -300: position_y_2 = 900 # collision with bullet bullet_collide_list = arcade.check_for_collision_with_list(self.player, self.bullet_list) for collide_bullet in bullet_collide_list: collide_bullet.kill() self.hp = max(0, self.hp - 5) # collision with enemy enemy_collide_list = arcade.check_for_collision_with_list(self.player, self.enemy_list) for collide_enemy in enemy_collide_list: collide_enemy.kill() if self.boss: self.hp = 0 self.hp = max(0, self.hp - 30) # calculate different damage of player's bullet or bomb makes on enemy or boss for e in self.enemy_list: if e.boss: boss_hp_current = e.ehp bullet_hit_list = arcade.check_for_collision_with_list(e, self.bullet_self_list) for bullet_hit in bullet_hit_list: bullet_hit.kill() boss_hit = e.hitted(1) if boss_hit[0] == 1: self.boss = False explode = 1 explode_x = boss_hit[1] explode_y = boss_hit[2] fps = self.frame_count for bomb in self.assist: bullet_hit_list = 
arcade.check_for_collision_with_list(bomb, self.bullet_list)
                for b in bullet_hit_list:
                    b.kill()
            for e in self.enemy_list:
                if e.boss:
                    boss_hp_current = e.ehp
                bullet_hit_list = arcade.check_for_collision_with_list(e, self.assist)
                for bullet_hit in bullet_hit_list:
                    boss_hit = e.hitted(0.3)
                    if boss_hit[0] == 1:
                        self.boss = False
                        explode = 1
                        explode_x = boss_hit[1]
                        explode_y = boss_hit[2]
                        fps = self.frame_count
            # boss explode animation
            if explode == 1 and self.frame_count - fps == 20:
                arcade.play_sound(bomb_sound)
                explode += 1
            elif explode == 2 and self.frame_count - fps == 40:
                explode += 1
            elif explode == 3 and self.frame_count - fps == 60:
                explode += 1
            elif explode == 4 and self.frame_count - fps == 180:
                explode += 1
                level += 1
                # bomb_sound.pause()
                game_sound_on = 0
            # use a loop to make all enemies face the player
            for enemy in self.enemy_list:
                # First, calculate the angle to the player. We could do this
                # only when the bullet fires, but in this case we will rotate
                # the enemy to face the player each frame, so we'll do this
                # each frame.
                # Position the start at the enemy's current location
                start_x = enemy.center_x
                start_y = enemy.center_y
                # Get the destination location for the bullet
                dest_x = self.player.center_x
                dest_y = self.player.center_y
                # Do math to calculate how to get the bullet to the destination.
                # Calculate the angle in radians between the start points
                # and end points. This is the angle the bullet will travel.
                x_diff = dest_x - start_x
                y_diff = dest_y - start_y
                angle = math.atan2(y_diff, x_diff)
                # use if statement to exclude the boss angle
                if enemy.boss:
                    enemy.angle = 0
                else:
                    enemy.angle = math.degrees(angle) - 270
                # determine the shooting characteristics of enemy / boss planes
                if enemy.boss and self.frame_count % ((120 - 20 * level) // 2) == 0:
                    bullet = arcade.Sprite("images/boss_bullet.png", 0.5)
                    bullet.center_x = start_x
                    bullet.center_y = start_y
                    bullet.angle = 0
                    bullet.change_x = 0
                    bullet.change_y = - BULLET_SPEED * (level // 3 + 1)
                    self.bullet_list.append(bullet)
                elif self.frame_count % (120 - 20 * level) == 0:
                    bullet = arcade.Sprite("images/enemy_bullet.png", 0.5)
                    bullet.center_x = start_x
                    bullet.center_y = start_y
                    bullet.angle = math.degrees(angle)
                    bullet.change_x = math.cos(angle) * BULLET_SPEED * (level // 3 + 1)
                    bullet.change_y = math.sin(angle) * BULLET_SPEED * (level // 3 + 1)
                    self.bullet_list.append(bullet)
            # determine the shooting frequency of the player airplane
            if self.frame_count % (15 - 2 * level) == 0:
                bullet = arcade.Sprite("images/Bomb2.png", 0.7)
                bullet.center_x = self.player.center_x
                bullet.center_y = self.player.center_y
                # Angle the bullet sprite
                bullet.angle = 0
                # Taking into account the angle, calculate our change_x
                # and change_y. Velocity is how fast the bullet travels.
bullet.change_x = 0 bullet.change_y = BULLET_SPEED * 3 self.bullet_self_list.append(bullet) # arcade.play_sound(bullet_sound) # use loops to remove the bullet when it flies off-screen for bullet in self.bullet_self_list: if bullet.bottom > 600: bullet.kill() for bullet in self.assist: if bullet.bottom > 600: bullet.kill() for bullet in self.bullet_list: if bullet.top < 0: bullet.kill() # use loops to control the dropping of hp_bonus for hp_bonus in self.bonus: hp_bonus.center_y -= 5 # update player's hp when it catches hp_bonus if arcade.check_for_collision(self.player, hp_bonus): self.hp = min(100, self.hp + 30) arcade.play_sound(hp_bonus_sound) hp_bonus.kill() # remove hp_bonus when it gets out of windows if hp_bonus.top < 0: hp_bonus.kill() # keyboard control the movement of the player if up_pressed: self.player.center_y = min(552, self.player.center_y + 5) if down_pressed: self.player.center_y = max(48, self.player.center_y - 5) if left_pressed: self.player.center_x = max(36, self.player.center_x - 5) if right_pressed: self.player.center_x = min(764, self.player.center_x + 5) # trigger the missile if laser_bomb and self.laser_player > 0 and len(self.assist) <= 1: assist_bomb = arcade.Sprite("images/assisent1_1.png", 1) assist_bomb.center_x = self.player.center_x - 25 assist_bomb.center_y = self.player.center_y assist_bomb.angle = 0 assist_bomb.change_x = 0 assist_bomb.change_y = 10 self.assist.append(assist_bomb) assist_bomb = arcade.Sprite("images/assisent1_1.png", 1) assist_bomb.center_x = self.player.center_x + 25 assist_bomb.center_y = self.player.center_y assist_bomb.angle = 0 assist_bomb.change_x = 0 assist_bomb.change_y = 10 self.assist.append(assist_bomb) self.laser_player -= 1 # use if statement to set the laser shooting period to be 8s if self.boss and (self.frame_count - boss_create_fps) % 480 == 0 and ( self.frame_count - boss_create_fps) != 0: laser_effect = 1 laser_fps = self.frame_count # use if statement to animate laser if laser_effect == 1 and self.frame_count - laser_fps == 20: laser_effect += 1 elif laser_effect == 2 and self.frame_count - laser_fps == 40: laser_effect += 1 elif laser_effect == 3 and self.frame_count - laser_fps == 60: laser_effect += 1 elif laser_effect == 4 and self.frame_count - laser_fps == 80: laser_effect += 1 elif laser_effect == 5 and self.frame_count - laser_fps == 100: laser_effect += 1 elif laser_effect == 6 and self.frame_count - laser_fps == 120: laser_effect += 1 # realize the dropping of boss and enemy planes for e in self.enemy_list: e.drop() if level == 4: self.current_state = WIN self.set_mouse_visible(True) self.bullet_list.update() self.bullet_self_list.update() self.assist.update() # update the frame_count self.frame_count += 1 # def on_key_press(self, key, modifiers): # self.director.next_view() def on_mouse_motion(self, x, y, delta_x, delta_y): """ Called whenever the mouse moves. :param x: player x-location :param y: player y-location :param delta_x: player delta x :param delta_y: player delta y :return: None """ if self.current_state == GAME_RUNNING: self.player.center_x = x self.player.center_y = y def on_mouse_press(self, x, y, button, modifiers): global level, Score, prompt, prompt_time, boss_hp, boss_hp_current, laser_bomb, laser_effect, laser_fps, laser_counter, laser_counter_update global game_sound_on """ Called when the user presses a mouse button. """ # Change states as needed. 
if self.current_state == INSTRUCTIONS_PAGE_0 and x >= 280 and x <= 520 and y >= 102 and y <= 198: arcade.play_sound(button_sound) # Next page of instructions. self.current_state = INSTRUCTIONS_PAGE_1 elif self.current_state == INSTRUCTIONS_PAGE_1: # Start the game self.current_state = GAME_RUNNING self.setup() elif self.current_state == GAME_OVER: self.close() # The addition of sound effect would mess up our page transfer # Restart the game. # level = 0 # Score = 0 # prompt = False # prompt_time = 0 # # boss_hp = 0 # boss_hp_current = 0 # # laser_bomb = False # laser_effect = 0 # laser_fps = 0 # # laser_counter = 0 # laser_counter_update = 0 # # self.setup() # self.current_state = GAME_RUNNING # game_sound_on = 0 elif self.current_state == WIN: self.close() # Restart the game. # level = 0 # Score = 0 # prompt = False # prompt_time = 0 # # boss_hp = 0 # boss_hp_current = 0 # # laser_bomb = False # laser_effect = 0 # laser_fps = 0 # # laser_counter = 0 # laser_counter_update = 0 # self.setup() # self.current_state = GAME_RUNNING # def on_mouse_press(self, x, y, button, modifiers): # if x >= 280 and x <= 520 and y >= 102 and y <= 198: # game_main() # Variables to record if certain keys are being pressed. up_pressed = False down_pressed = False left_pressed = False right_pressed = False if __name__ == "__main__": """This section of code will allow you to run your View independently from the main.py file and its Director. You can ignore this whole section. Keep it at the bottom of your code. It is advised you do not modify it unless you really know what you are doing. """ from utils import FakeDirector window = arcade.Window(settings.WIDTH, settings.HEIGHT) my_view = Chapter1View() my_view.director = FakeDirector(close_on_next_view=True) window.show_view(my_view) arcade.run()
nilq/baby-python
python
from Crypto.Cipher import AES
import base64
import hashlib


def jm_sha256(data):
    sha256 = hashlib.sha256()
    sha256.update(data.encode("utf-8"))
    res = sha256.digest()
    # print("sha256 digest:", res)
    return res


def pkcs7padding(text):
    bs = AES.block_size
    length = len(text)
    bytes_length = len(bytes(text, encoding='utf-8'))
    # note: in UTF-8, an ASCII character takes 1 byte while a Chinese character takes 3 bytes
    padding_size = length if(bytes_length == length) else bytes_length
    padding = bs - padding_size % bs
    # note: chr(padding) is the PKCS#7 convention; implementations in some other languages pad with '\0' instead
    padding_text = chr(padding) * padding
    return text + padding_text


def aes_encrypt_v2(content, key):
    key_bytes = jm_sha256(key)
    iv = "\0".encode("utf-8") * 16
    aes = AES.new(key_bytes, AES.MODE_CBC, iv)
    content_padding = pkcs7padding(content)
    encrypt_bytes = aes.encrypt(bytes(content_padding, encoding='utf-8'))
    result = str(base64.b64encode(encrypt_bytes), encoding='utf-8')
    return result


mystr1 = "123"
mykey1 = "12345678"
# 3gVLeGnili1JBTYLHAk8pQ==
print(aes_encrypt_v2(mystr1, mykey1))

mystr2 = "你好abcd1234"
mykey2 = "1234567812345678"
# Qkz+MXCIESJZVgHJffouTQ==
print(aes_encrypt_v2(mystr2, mykey2))
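
# --- Added example (not in the original snippet): a minimal decryption counterpart. ---
# Sketch only, assuming the same key derivation (SHA-256 of the key string), the
# same all-zero IV, and standard PKCS#7 unpadding; the name aes_decrypt_v2 is
# ours, not part of the original code.
def aes_decrypt_v2(ciphertext_b64, key):
    key_bytes = jm_sha256(key)
    iv = "\0".encode("utf-8") * 16
    aes = AES.new(key_bytes, AES.MODE_CBC, iv)
    decrypted = aes.decrypt(base64.b64decode(ciphertext_b64))
    padding = decrypted[-1]  # PKCS#7: the last byte encodes the pad length
    return decrypted[:-padding].decode("utf-8")

# round-trip check: should print the original plaintexts
print(aes_decrypt_v2(aes_encrypt_v2(mystr1, mykey1), mykey1))
print(aes_decrypt_v2(aes_encrypt_v2(mystr2, mykey2), mykey2))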
nilq/baby-python
python
import os

import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

DEBUG = False
DOMAIN_NAME = "deeptipandey.site"

AWS_STORAGE_BUCKET_NAME = AWS_BUCKET_NAME = os.getenv("AWS_BUCKET_NAME", "")
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY", "")
S3_DOMAIN = AWS_S3_CUSTOM_DOMAIN = str(AWS_BUCKET_NAME) + ".s3.amazonaws.com"
AWS_SES_REGION_NAME = os.getenv("AWS_SES_REGION_NAME", "")
AWS_SES_REGION_ENDPOINT = os.getenv("AWS_SES_REGION_ENDPOINT", "")

AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": "max-age=86400",
}

STATICFILES_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
DEFAULT_S3_PATH = "media"
STATIC_S3_PATH = "static"
COMPRESS_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
COMPRESS_JS_FILTERS = ["compressor.filters.jsmin.JSMinFilter"]

MEDIA_ROOT = "/%s/" % DEFAULT_S3_PATH
MEDIA_URL = "//%s/%s/" % (S3_DOMAIN, DEFAULT_S3_PATH)
STATIC_ROOT = "/%s/" % STATIC_S3_PATH
STATIC_URL = "https://%s/" % (S3_DOMAIN)
ADMIN_MEDIA_PREFIX = STATIC_URL + "admin/"

CORS_ORIGIN_ALLOW_ALL = True
AWS_IS_GZIPPED = True
AWS_ENABLED = True
AWS_S3_SECURE_URLS = True
COMPRESS_URL = STATIC_URL

EMAIL_BACKEND = "django_ses.SESBackend"
SESSION_COOKIE_DOMAIN = ".deeptipandey.site"

ELASTIC_APM = {
    "SERVICE_NAME": os.getenv("ELASTIC_APM_SERVICE_NAME"),
    "SECRET_TOKEN": os.getenv("ELASTIC_APM_SECRET_TOKEN"),
    "SERVER_URL": os.getenv("ELASTIC_APM_SERVER_URL"),
}

sentry_sdk.init(
    dsn=os.getenv("SENTRY_DSN"),
    integrations=[DjangoIntegration()],
    # If you wish to associate users to errors (assuming you are using
    # django.contrib.auth) you may enable sending PII data.
    send_default_pii=True,
)
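
# --- Added example (not part of the original settings): a minimal sanity check. ---
# Sketch only: the settings above read several values from the environment and
# default to empty strings, so a deployment can silently end up with unusable
# AWS credentials. The variable names below come from this file; emitting a
# warning (rather than raising) at import time is our assumption.
import warnings

for _name in ("AWS_BUCKET_NAME", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"):
    if not os.getenv(_name):
        warnings.warn("environment variable %s is not set" % _name)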
nilq/baby-python
python
'''
Task 1
Use a loop to print five lines of zeros, numbering each line.
'''
print("Task 1")
for i in range(1, 6):
    print(i, 0)

'''
Task 2
The user enters 10 digits in a loop. Count how many of the entered digits are 5.
'''
print("Task 2")
count = 0
for i in range(1, 11):
    print('Enter digit number ', i, ': ', end='')
    num = input()
    while len(num) != 1 or not num.isdigit():
        print(num, ' is not a digit')
        print('Enter digit number ', i, ': ', end='')
        num = input()
    dig = int(num)
    if dig == 5:
        count += 1
print('Number of 5s entered by the user:', count)

'''
Task 3
Find the sum of the numbers from 1 to 100 and print the result.
'''
print("Task 3")
total = 0
for i in range(1, 101):
    total += i
print('Sum of the numbers from 1 to 100: ', total)

'''
Task 4
Find the product of the numbers from 1 to 10 and print the result.
'''
print("Task 4")
p = 1
for i in range(1, 11):
    p *= i
print('Product of the numbers from 1 to 10: ', p)

'''
Task 5
Print each digit of a number on its own line.
'''
print("Task 5")
integer_number = 5689
while integer_number > 0:
    print(integer_number % 10)
    integer_number = integer_number // 10

'''
Task 6
Find the sum of the digits of a number.
'''
print("Task 6")
integer_number = 123
print('Given number: ', integer_number)
total = 0
while integer_number > 0:
    dig = integer_number % 10
    total += dig
    integer_number = integer_number // 10
print('Sum of the digits: ', total)

'''
Task 7
Find the product of the digits of a number.
'''
print("Task 7")
integer_number = 234
print('Given number: ', integer_number)
p = 1
while integer_number > 0:
    dig = integer_number % 10
    p *= dig
    integer_number = integer_number // 10
print('Product of the digits: ', p)

'''
Task 8
Answer the question: does the number contain the digit 5?
'''
print("Task 8")
integer_number = 213553
print('Given number: ', integer_number)
while integer_number > 0:
    if integer_number % 10 == 5:
        print('The number contains the digit 5')
        break
    integer_number = integer_number // 10
else:
    print('The number does not contain the digit 5')

'''
Task 9
Find the largest digit in a number.
'''
print("Task 9")
largest = 0
num = int(input('Enter a number: '))
while num > 0:
    if num % 10 > largest:
        largest = num % 10
    num = num // 10
print("The largest digit in the number is ", largest)

'''
Task 10
Count how many 5s a number contains.
'''
print("Task 10")
num = int(input('Enter a number: '))
con = 0
while num > 0:
    if num % 10 == 5:
        con += 1
    num = num // 10
print('Number of 5s in the number:', con)
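
# --- Added note (not part of the original exercises): a string-based idiom. ---
# Sketch only: tasks 6 and 9 can also be solved by treating the number as a
# string, which avoids the manual divmod loop. Shown here as an alternative.
n = 123
print('Sum of the digits:', sum(int(d) for d in str(n)))
print('Largest digit:', max(int(d) for d in str(n)))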
nilq/baby-python
python
#coding=utf-8
import os, re, sys
import json
import datetime
from openpyxl import load_workbook


def parse_cell_value(value):
    # boolean type
    if isinstance(value, bool):
        return value
    # int type
    if isinstance(value, int):
        return value
    # float type
    if isinstance(value, float):
        return value
    if not value:
        return ''
    # date type
    if isinstance(value, datetime.datetime):
        return value.ctime()
    value = value.replace(' ', '')
    # array-of-objects type
    if value.find(';') != -1:
        obj_list = value.split(';')
        parsed_list = []
        for i in obj_list:
            if i:
                i = parse_cell_value(i)
                parsed_list.append(i)
        return parsed_list
    value_list = value.split(',')
    # object type
    if value.find(':') != -1:
        obj = {}
        for i in value_list:
            i = i.split(':')
            obj[i[0]] = parse_cell_value(i[1])
        return obj
    # array type
    if len(value_list) > 1:
        parsed_list = []
        for i in value_list:
            if i:
                i = parse_cell_value(i)
                parsed_list.append(i)
        return parsed_list
    # boolean type (from string)
    if re.match('true', value, re.IGNORECASE):
        return True
    if re.match('false', value, re.IGNORECASE):
        return False
    # numeric string (recursive check): int or float
    if re.match(r'^\d+(\.\d+)?$', value):
        if value.find('.') != -1:
            return float(value)
        return int(value)
    # string type
    return value.encode('utf-8')


def get_workbooks(dir_name = 'excel'):
    excel_path = os.path.join(os.getcwd(), dir_name)
    # print os.listdir(excel_path)
    file_list = os.listdir(excel_path)
    if not file_list:
        print 'no excel file !'
        return
    workbooks = []
    for i in file_list:
        # On Windows, an .xlsx file that is currently open leaves a lock file
        # whose name starts with ~$ in the same directory; skip such files when reading
        if i.find('~$') == -1:
            file_path = os.path.join(excel_path, i)
            # print file_path
            wb = load_workbook(file_path)
            workbooks.append(wb)
    return workbooks


def save_json(file_name, json_data, dir_name = 'json'):
    json_path = os.path.join(os.getcwd(), dir_name)
    file_path = os.path.join(json_path, file_name + '.json')
    with open(file_path, 'w') as f:
        json.dump(json_data, f, ensure_ascii = False, indent = 4)


def xlsx2json(head_row = 2):
    workbooks = get_workbooks()
    if workbooks:
        for wb in workbooks:
            for sheet in wb:
                # print sheet.title
                if sheet.rows:
                    head = sheet.rows[head_row - 1]
                    # print head
                    json_list = []
                    for row in sheet.rows[head_row:]:
                        row_dic = {}
                        for head_cell, cell in zip(head, row):
                            # print head_cell.value, cell.value, type(cell.value)
                            row_dic[head_cell.value] = parse_cell_value(cell.value)
                        json_list.append(row_dic)
                    save_json(sheet.title, json_list)


if __name__ == '__main__':
    head_row = 2
    if len(sys.argv) != 1:
        try:
            head_row = int(sys.argv[1])
        except ValueError:
            print 'please pass a valid head row number, for example: python xlsx2json.py 3'
            sys.exit()
    xlsx2json(head_row)
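
# --- Added usage note (not in the original script): expected layout. ---
# Sketch of how this converter is meant to be run; the folder names come from
# the defaults above (excel/ for input .xlsx files, json/ for output), and the
# file name workbook.xlsx is just an illustrative placeholder:
#
#   project/
#     xlsx2json.py
#     excel/workbook.xlsx   # one output file per sheet, header on row 2
#     json/                 # <sheet title>.json files are written here
#
#   $ python xlsx2json.py      # header row defaults to 2
#   $ python xlsx2json.py 3    # header is on row 3 instead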
nilq/baby-python
python
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
import numpy as np


class ConditionalDataset(BaseDataset):
    """
    This dataset class can load unaligned/unpaired datasets with classes.

    It requires two directories to host training images from domain A '/path/to/data/trainA'
    and from domain B '/path/to/data/trainB' respectively.
    You can train the model with the dataset flag '--dataroot /path/to/data'.
    Similarly, you need to prepare two directories:
    '/path/to/data/testA' and '/path/to/data/testB' during test time.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')  # create a path '/path/to/data/trainB'
        #self.dir_Seg = os.path.join(opt.dataroot, opt.phase + 'Seg')

        if opt.phase == "test" and not os.path.exists(self.dir_A) \
           and os.path.exists(os.path.join(opt.dataroot, "valA")):
            self.dir_A = os.path.join(opt.dataroot, "valA")
            self.dir_B = os.path.join(opt.dataroot, "valB")
            # self.dir_Seg = os.path.join(opt.dataroot, "valSeg")

        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
        # self.Seg_paths = sorted(make_dataset(self.dir_Seg, opt.max_dataset_size))
        self.A_size = len(self.A_paths)  # get the size of dataset A
        self.B_size = len(self.B_paths)  # get the size of dataset B
        #self.Seg_size = len(self.Seg_paths)
        classes = []
        for path in self.B_paths:
            classes += [int(path.split("_")[-1][:-4])]
        self.classes = np.unique(np.array(classes))
        print(self.classes)

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int)      -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)       -- an image in the input domain
            B (tensor)       -- its corresponding image in the target domain
            A_paths (str)    -- image paths
            B_paths (str)    -- image paths
            Seg_paths (str)
        """
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
        if self.opt.serial_batches:   # make sure index is within the range
            index_B = index % self.B_size
        else:   # randomize the index for domain B to avoid fixed pairs.
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]
        A_img = Image.open(A_path).convert('RGB')
        B_img = Image.open(B_path).convert('RGB')

        # Apply image transformation
        # For FastCUT mode, if in finetuning phase (learning rate is decaying),
        # do not perform resize-crop data augmentation of CycleGAN.
# print('current_epoch', self.current_epoch) is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs modified_opt = util.copyconf(self.opt, no_flip=True, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size) transform = get_transform(modified_opt) A = transform(A_img) B = transform(B_img) """A_tmp = np.round((255 * (np.asarray(A) + 1) / 2)) Seg = np.zeros((256, 256, 1), dtype=np.uint8) Seg[np.where((A_tmp[1, :256, :] >= 90) & (A_tmp[1, :256, :] <= 120))] = [255] Seg = np.repeat(Seg, repeats=3, axis=-1) #A = np.asarray(A) Seg = np.transpose(Seg,[2,0,1]) Seg = np.float32((((Seg / 255.0)*2)-1)) #A = np.concatenate((A, Seg)) #B = np.asarray(B) #B = np.concatenate((B, np.transpose(Seg,[2,0,1]))) from matplotlib import pyplot as plt""" B_class = int(B_path.split("_")[-1][:-4]) B_class = np.eye(len(self.classes), dtype=np.float32)[B_class] return {'A': A, 'B': B, 'B_class': B_class, 'A_paths': A_path, 'B_paths': B_path} def __len__(self): """Return the total number of images in the dataset. As we have two datasets with potentially different number of images, we take a maximum of """ return max(self.A_size, self.B_size)
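
# --- Added usage note (not part of the original file): expected data layout. ---
# Sketch only; the directory names follow the class docstring above, and the
# class-label convention is inferred from the B_paths parsing in __init__ (the
# label is the integer between the last underscore and the file extension):
#
#   dataroot/
#     trainA/img001.png       # domain A, unlabeled
#     trainB/img001_3.png     # domain B, class 3
#
# __getitem__ then returns B_class as a one-hot float32 vector over the classes
# discovered in trainB.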
nilq/baby-python
python
import json
import urlparse
import sys

def handle(req):
    """handle a request to the function
    Args:
        req (str): request body
    """

    sys.stderr.write(req)

    qs = urlparse.parse_qs(req)
    if "user_name" in qs:
        if not qs["user_name"][0] == "slackbot":
            emoticons = ""
            msg = qs["text"][0]
            if "dockercon" in msg:
                emoticons = ":whale:"
            elif "serverless" in msg:
                emoticons = ":openfaas: :+1: :robot_face:"
            elif "azure" in msg:
                emoticons = ":cloud:"
            elif "sofia" in msg:
                emoticons = ":flag-bg: :flag-bg: :flag-bg:"
            elif "signup" in msg:
                emoticons = ":+1:"
            elif "lucas" in msg:
                emoticons = ":flag-de:"

            ret = {
                "text": qs["user_name"][0] + " sent a message with a length of... '" + str(len(req)) + "' " + emoticons
            }
            return json.dumps(ret)

    return req
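
# --- Added example (not in the original handler): a quick local check. ---
# Sketch for exercising handle() outside OpenFaaS with a hand-built
# form-encoded body; the user name and message text are made-up test values.
if __name__ == "__main__":
    print(handle("user_name=alice&text=dockercon+was+great"))
    print(handle("user_name=slackbot&text=ignored"))  # falls through to the raw body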
nilq/baby-python
python
import numpy as np
import matplotlib.pyplot as plt


class PlotDrawer:
    @staticmethod
    def draw(mfcc_data):
        PlotDrawer.__prepare_plot(mfcc_data)
        plt.show()
        # plt.close()

    @staticmethod
    def save(filename, mfcc_data):
        PlotDrawer.__prepare_plot(mfcc_data)
        plt.savefig(filename)
        plt.close()

    @staticmethod
    def __prepare_plot(mfcc_data):
        fig, ax = plt.subplots()
        data = np.swapaxes(mfcc_data, 0, 1)
        cax = ax.imshow(data, interpolation='nearest', origin='lower', aspect='auto')
        ax.set_title('MFCC')

    @staticmethod
    def save_without_frame_energy(filename, mfcc_data):
        mfcc = PlotDrawer.__remove_energy_from_mfcc(mfcc_data)
        PlotDrawer.__prepare_plot(mfcc)
        plt.savefig(filename)
        plt.close()

    @staticmethod
    def __remove_energy_from_mfcc(mfcc_data):
        new_mfcc = []
        for frame_id in range(len(mfcc_data)):
            new_mfcc.append(mfcc_data[frame_id][1:])
        return np.array(new_mfcc, dtype=float)
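
# --- Added example (not in the original file): saving a dummy MFCC matrix. ---
# Sketch using random data shaped (frames, coefficients); the output file names
# are arbitrary. Real MFCCs would come from an audio feature extractor.
if __name__ == "__main__":
    dummy_mfcc = np.random.rand(100, 13)  # 100 frames, 13 coefficients
    PlotDrawer.save("mfcc_demo.png", dummy_mfcc)
    PlotDrawer.save_without_frame_energy("mfcc_demo_no_energy.png", dummy_mfcc)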
nilq/baby-python
python
import datetime import io import operator import os import re from zipfile import ZipFile # def make_entry(entry): # if isinstance(entry, Entry): # return entry # mtime = os.path.getmtime(entry) # return Entry(entry, mtime) # handlers = { # ".zip": (lambda x: None) # } # class DirectoryHandler: # def __init__(self, file): # self.file = file # def open(self, file, mode="r"): # raise NotImplementedError() # def namelist(self): # return os.listdir(self.file) # class NestedIO: # """ # File-like capable of opening nested archives. # Parameters: # file: Can be a string, path-like or file-like # root_path: Path of this container # """ # _handlers = {} # @staticmethod # def register_handler(extension, handler): # NestedIO._handlers[extension] = handler # def __init__(self, file, mode="r", root_path=None, parent=None): # print("NestedIO({!r}, {!r}, {!r}, {!r})".format(file, mode, root_path, parent)) # self.fp = None # if isinstance(file, os.PathLike): # file = os.fspath(file) # if isinstance(file, str): # if root_path is not None: # if not os.path.commonpath((file, root_path)) == root_path: # raise ValueError("{} is not below {}.".format(file, root_path)) # rel_path = os.path.relpath(file, root_path) # else: # rel_path = file # # First, see if this "container" is actually a directory # if os.path.isdir(file): # print("{} is a dir.".format(file)) # self.fp = DirectoryHandler(file) # elif os.path.isfile(file): # print("{} is a file.".format(file)) # ext = os.path.splitext(file)[1] # try: # handler = NestedIO._handlers[ext] # except KeyError: # handler = io.open # print("Handler is {!r}.".format(handler)) # self.fp = handler(file, mode=mode) # else: # # Find occurences of container filenames in the path # # ".[ext]/" in file or file.endswith(".[ext]") # match = container_ext_re.search(rel_path) # if match is not None: # # TODO: Eliminate the possibility that this is just a folder with an extension # # (This can be handled implicitely) # ext = match[1] # # Open the path up to the match # parent_path, child_path = rel_path[:match.end(1)], rel_path[match.end(1)+1:] # print(parent_path, child_path) # parent_root_path = os.path.join(root_path, parent_path) if root_path is not None else parent_path # print("Recursion into {}.".format(parent_path)) # parent = NestedIO(parent_path, mode, root_path=parent_root_path, parent=self) # self.fp = parent.open(child_path) # # Easy case (fp is still None): # if self.fp is None: # raise ValueError("{!r} could not be opened.".format(file)) # else: # self.fp = file # self.name = root_path or getattr(file, 'name', None) # def __repr__(self): # return "NestedIO(fp={})".format(self.fp) # def open(self, member): # """ # Open a member. # """ # print("{!r}.open({})...".format(self, member)) # return self.fp.open(member) # def read(self): # pass # def write(self): # pass # def list(self): # """ # List members. 
# """ # pass # # ZipFile() # class ZipHandler: # def __init__(self, file, mode="r"): # self.file = ZipFile(file, mode) # def namelist(self): # return self.file.namelist() # def open(self, file, mode="r"): # print("ZipHandler.open({})".format(file)) # return self.file.open(file, mode) # NestedIO.register_handler(".zip", ZipHandler) # # ufo = NestedIO() # # with NestedIO("foo.zip") as root: # # with root.open("bar.txt") as f: # # print(f.read()) # # with NestedIO("foo.zip/bar.zip/baz.txt") as f: # # print(f.read()) # # with NestedIO("foo.zip/bar.zip") as f: # # f.read() # Read whole file # # f.list() # List file members # # def recurse(entries): # # working_table = list(entries) # # while working_table: # # entry = working_table.pop() # # ext = os.path.splitext(entry)[0] # # if ext in handlers: # # working_table.extend(handlers[ext](entry)) # # else: # # yield entry # NestedIO("/home/moi/Work/Work.zip/test.zip/test.c") # with ZipFile("/home/moi/Work/zip-test/Work.zip") as z1: # with z1.open("test.zip", "r") as z2: # buffer = io.BytesIO(z2.read()) # with ZipFile(buffer).open("test/test.c") as f: # print(f.read()) # # is equivalent to: # with open("/home/moi/Work/zip-test/Work.zip/test.zip/test/test.c", "r") as f: # print(f.read()) # # Fails because test2.zip is in fact only a directory inside Work.zip # with open("/home/moi/Work/zip-test/Work.zip/test2.zip/bar.txt", "r") as f: # print(f.read()) # with open("/home/moi/Work/zip-test/test2.zip", "r") as f1: # print(f1.members()) # with f1.open("bar.txt", "r") as f2: # print(f2.read()) with open("/home/moi/Work/zip-test") as root: for member in root.imembers(): print(member)
nilq/baby-python
python
# Copyright (c) 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from designate.objects.record import Record from designate.objects.record import RecordList class SOA(Record): """ SOA Resource Record Type Defined in: RFC1035 """ FIELDS = { 'mname': { 'schema': { 'type': 'string', 'format': 'domainname', 'maxLength': 255, }, 'required': True }, 'rname': { 'schema': { 'type': 'string', 'format': 'domainname', 'maxLength': 255, }, 'required': True }, 'serial': { 'schema': { 'type': 'integer', 'minimum': 1, 'maximum': 4294967295, }, 'required': True }, 'refresh': { 'schema': { 'type': 'integer', 'minimum': 0, 'maximum': 2147483647 }, 'required': True }, 'retry': { 'schema': { 'type': 'integer', 'minimum': 0, 'maximum': 2147483647 }, 'required': True }, 'expire': { 'schema': { 'type': 'integer', 'minimum': 0, 'maximum': 2147483647 }, 'required': True }, 'minimum': { 'schema': { 'type': 'integer', 'minimum': 0, 'maximum': 2147483647 }, 'required': True }, } def _to_string(self): return ("%(mname)s %(rname)s %(serial)s %(refresh)s %(retry)s " "%(expire)s %(minimum)s" % self) def _from_string(self, v): mname, rname, serial, refresh, retry, expire, minimum = v.split(' ') self.mname = mname self.rname = rname self.serial = int(serial) self.refresh = int(refresh) self.retry = int(retry) self.expire = int(expire) self.minimum = int(minimum) # The record type is defined in the RFC. This will be used when the record # is sent by mini-dns. RECORD_TYPE = 6 class SOAList(RecordList): LIST_ITEM_TYPE = SOA
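
# --- Added illustration (not part of the original module): the rdata layout. ---
# A standalone sketch of the space-separated SOA string that _to_string() emits
# and _from_string() parses; the host names and counter values are made up.
if __name__ == '__main__':
    rdata = "ns1.example.org. admin.example.org. 1452062394 3600 600 86400 300"
    mname, rname, serial, refresh, retry, expire, minimum = rdata.split(' ')
    print(mname, rname, int(serial), int(refresh), int(retry), int(expire),
          int(minimum))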
nilq/baby-python
python
#!/usr/bin/python import random import math import shelve import os #initialize constants STARTING_AMOUNT = 1000 MAX_BETS = 10000 #ROUND_LIMIT = 500000 ROUND_LIMIT = 5000 #whether or not to give verbose terminal output for each round of gambling #note: with short rounds, terminal output becomes a significant bottleneck # also, with short rounds, the information flashes on the screen too quickly # to be of use. VERBOSE = False #whether or not to graph each round #note: this can use up a lot of memory if MAX_BETS is a very high number GRAPHING = False #matplotlib is needed only if we are graphing each round if GRAPHING: import matplotlib.pyplot as plt #the fibonacci strategy uses the fibonacci sequence, so calculate it if VERBOSE: print 'calculating fibonacci sequence' #the first two numbers are both ones fibonacci_sequence = [1, 1] for i in range(500): #500 should be a safe place which no gambler will reach #each term is the two previous terms added together, a + b a = fibonacci_sequence[-2] b = fibonacci_sequence[-1] fibonacci_sequence.append(a + b) if VERBOSE: print 'done calculating fibonacci sequence' class Round: """Stores the variables of a round of gambling""" def __init__(self, strategy, max_amount, wins, losses, turns_lasted, losing_streaks, starting_bet, starting_amount, end_amount): self.strategy = strategy self.max_amount = max_amount self.wins = wins self.losses = losses self.turns_lasted = turns_lasted self.losing_streaks = losing_streaks self.starting_bet = starting_bet self.starting_amount = starting_amount self.end_amount = end_amount class Gambler: """Simulates one round of gambling. The update_bet method is overriden by different strategies""" money = STARTING_AMOUNT starting_amount = money bet = 1 # 1 betting unit starting_bet = 1 # used by some strategies strategy = 'flat' # the default update_bet strategy is flat betting def update_bet(self, win): #in flat betting, bet does not change pass def gamble(self): #output to terminal the strategy used and the round number print 'gambling, using ' + self.strategy + ', round number ' + \ str(len(rounds_list) + 1) bet_number = 0 max_money = 0 wins = 0 losses = 0 #this class also handles graphing and updating the graph's index if GRAPHING: global figindex #if graphing, the money the gambler has is stored in this list #after each bet, which is graphed at the end moneyovertime = [] unfortunate_losing_streaks = 0 i = 0 while i < MAX_BETS: i += 1 if GRAPHING: #append current money amount to moneyovertime #this is used only for graphing moneyovertime.append(self.money) #if gambler is out of money, end early if self.money == 0: break #track the maximum money achieved #if current money is higher, increase the max_money accordingly if self.money > max_money: max_money = self.money #gambler can't bet more than he has, can he? if self.bet > self.money: #if he is trying to, just make him bet all of his money self.bet = self.money #unfortunate losing streak: each time gambler bets all money unfortunate_losing_streaks += 1 #there is 50% chance of winning; flip a coin win = random.getrandbits(1) if win: #gambler has won! track the number of wins wins += 1 #and give him his money self.money += self.bet else: #gambler has lost! 
track the number of losses losses += 1 #and take money from him self.money -= self.bet #finally, update the gambler's bet based on if he won self.update_bet(win) #bet must always be over 0, if not, there is an error assert self.bet > 0 if VERBOSE: #lots of terminal output of verbose mode is on print "WINS=", wins print "LOSSES=", losses print "MAX=", max_money print "TURNS LASTED=", i print "UNFORTUNATE LOSING STREAKS=", unfortunate_losing_streaks print 'END AMOUNT=', self.money #add the tracked data to the rounds list rounds_list.append( Round(self.strategy, max_money, wins, losses, i, unfortunate_losing_streaks, gambler.starting_bet, self.starting_amount, self.money) ) if GRAPHING: #if graphing, plot the graph of moneyovertime print 'plotting the graph...' plt.plot(moneyovertime) #money is the Y variable plt.ylabel("Money") #number of gambles is the X variable plt.xlabel("Gambles") #the graph goes from 0 to the maximum money achieved plt.ylim(0,max_money) #finally, save the graph plt.savefig(graph_dir + str(figindex)) #increase the index of the graph figindex += 1 #clear the current figure plt.clf() print 'done\n' class FibonacciGambler(Gambler): fib_position = 0 strategy = 'fibonacci' def update_bet(self, win): if win: self.fib_position = max(self.fib_position - 2, 0) else: self.fib_position += 1 self.bet = fibonacci_sequence[self.fib_position] class ProgressiveFibonacciGambler(Gambler): fib_position = 0 strategy = 'progressive fibonacci' def update_bet(self, win): if win: self.fib_position += 1 else: self.fib_position = max(self.fib_position - 2, 0) self.bet = fibonacci_sequence[self.fib_position] class Doubler(Gambler): strategy = 'doubling' def update_bet(self, win): if win: self.bet = self.starting_bet else: self.bet = self.bet * 2 class ProgressiveDoubler(Gambler): strategy = 'progressive doubling' def update_bet(self, win): if win: self.bet = self.bet * 2 else: self.bet = self.starting_bet class Tripler(Gambler): strategy = 'tripling' def update_bet(self, win): if win: self.bet = self.starting_bet else: self.bet = self.bet * 3 class ProgressiveTripler(Gambler): strategy = 'progressive tripling' def update_bet(self, win): if win: self.bet = self.bet * 3 else: self.bet = self.starting_bet class OscarGrinder(Gambler): strategy = 'Oscar\'s Grind' goal = STARTING_AMOUNT + 1 def update_bet(self, win): if self.money == self.goal: self.goal = self.money + 1 if win: self.bet = self.bet + 1 if self.bet + self.money > self.goal: #rule 1: always drop bet just large enough to gain one unit self.bet = self.goal - self.money #dictionary with strategies as keys and their respective gamblers as values gamblers = { 'flat' : Gambler, 'fibonacci' : FibonacciGambler, 'progressive fibonacci' : ProgressiveFibonacciGambler, 'doubling' : Doubler, 'progressive doubling' : ProgressiveDoubler, 'tripling' : Tripler, 'progressive tripling' : ProgressiveTripler, 'Oscar\'s Grind' : OscarGrinder, } strategies = [ 'flat', 'fibonacci', 'progressive fibonacci', 'doubling', 'progressive doubling', 'tripling', 'progressive tripling', "Oscar's Grind", ] if __name__ == '__main__': #the keys of gamblers contain each strategy, so use them for strategy in strategies: print '\n', 'preparing to gamble using', strategy #if graphing, get the directory to store the graphs in #this is in data/[strategy]/graphs if GRAPHING: graph_dir = "data/" + strategy + "/graphs/" figindex = 0 #don't overwrite graphs already in the graph directory figindex += len(os.listdir(graph_dir)) #load the shelve databases with previous experimentation 
data try: print 'loading data file...' data_file = shelve.open("data/" + strategy + "/data.db") except: #if we can't load the file, make a new one print 'cannot load data file: creating new one' data_file = shelve.open("data/" + strategy + "/data.db", 'n') break try: #try to load the rounds from the shelve database rounds_list = data_file['rounds'] except KeyError: #if the database has no data, create a new rounds list #and add it in later print 'unable to load data' rounds_list = [] print 'done loading data file' #only simulate and save rounds if we need to if len(rounds_list) < ROUND_LIMIT: #now, simulate gambling rounds until we get to ROUND_LIMIT while len(rounds_list) < ROUND_LIMIT: #initialize a new gambler from the class for the strategy gambler = gamblers[strategy]() try: gambler.gamble() except KeyboardInterrupt: #if the user hits Ctrl+C, quit gambling with this strategy print 'stopping' break #finally, put all of the experiment data into the shelve database print 'saving data...' data_file['rounds'] = rounds_list data_file.close() print 'data saved'
nilq/baby-python
python
# Copyright 2016 Ericsson AB. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright (c) 2017 Wind River Systems, Inc. # import re from testtools import matchers from dcmanagerclient.tests import base_shell_test as base class TestCLIBashCompletionV1(base.BaseShellTests): def test_bash_completion(self): bash_completion, stderr = self.shell('bash-completion') self.assertIn('bash-completion', bash_completion) self.assertFalse(stderr) class TestCLIHelp(base.BaseShellTests): def test_help(self): required = [ '.*?^usage: ', '.*?^\s+help\s+print detailed help for another command' ] kb_help, stderr = self.shell('help') for r in required: self.assertThat((kb_help + stderr), matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
nilq/baby-python
python
import mfclient import mf_connect import csv import time projSummary = "MFProjectSummary" storeSummary = "MFStoreSummary" timestr = time.strftime("%Y%m%d-%H%M%S") with open(projSummary+timestr+".csv", 'ab') as f: header = ["project","allocation","usage"] writer = csv.writer(f) writer.writerow(header) f.close() with open(storeSummary+timestr+".csv", 'ab') as f: header = ["Store","Size","Used","Free"] writer = csv.writer(f) writer.writerow(header) f.close() # Create mediaflux connection cxn = mf_connect.connect() try: projsList = cxn.execute("vicnode.project.list") print projsList for proj in projsList: if proj.value() == "proj-cryoem_instrument_data-1128.4.51": namespace = "/projects/cryo-em/" + proj.value() projDetailsQuery = mfclient.XmlStringWriter('args') projDetailsQuery.add("namespace", namespace) projDetails = cxn.execute("asset.namespace.describe", projDetailsQuery.doc_text()) allocation = projDetails.element("namespace/quota/inherited/allocation") usage = projDetails.element("namespace/quota/inherited/used") else: namespace = "/projects/"+proj.value() projDetailsQuery = mfclient.XmlStringWriter('args') projDetailsQuery.add("namespace", namespace) projDetails = cxn.execute("asset.namespace.describe", projDetailsQuery.doc_text()) allocation = projDetails.element("namespace/quota/allocation") usage = projDetails.element("namespace/quota/used") print namespace # projDetailsQuery = mfclient.XmlStringWriter('args') # projDetailsQuery.add("namespace",namespace) # projDetails = cxn.execute("asset.namespace.describe",projDetailsQuery.doc_text()) print projDetails # allocation = projDetails.element("namespace/quota/allocation") # usage = projDetails.element("namespace/quota/used") # Build new line for CSV results file # Format project, allocation, used fields = [proj.value(), allocation.value(), usage.value()] # Write results to file with open(projSummary+timestr+".csv", 'ab') as f: writer = csv.writer(f) writer.writerow(fields) f.close() storesList = cxn.execute("asset.store.list") for store in storesList: print store name = store.value("name") print name w = mfclient.XmlStringWriter('args') w.add("name", name) storeDeets = cxn.execute("asset.store.describe",w.doc_text()) storeTotal = storeDeets.value("store/mount/max-size") storeUsed = storeDeets.value("store/mount/size") storeFree = storeDeets.value("store/mount/free") storeFields = [name, storeTotal, storeUsed, storeFree] with open(storeSummary + timestr + ".csv", 'ab') as f: writer = csv.writer(f) writer.writerow(storeFields) f.close() finally: cxn.close()
nilq/baby-python
python
from flask import jsonify, request, abort, Blueprint from ..auth0 import auth from ..auth0.authManagementAPI import * from datetime import * from ..db.models import * api = Blueprint('api', __name__) # build Auth0 Management API builder = Auth0ManagementAPIBuilder() auth0api = builder.load_base_url(). \ load_access_token(). \ load_users(). \ load_roles(). \ build() @api.route('/', methods=['GET']) @api.route('/login-results', methods=['GET']) def index(): # quick verification response to verify # that the server is active return jsonify({'success': True}) @api.route('/visits/create', methods=['POST']) @auth.requires_auth(permission='post:visits') def create_visit(payload): body = request.get_json() try: visit = Visit(nurse_id=body.get('nurse_id'), patient_id=body.get('patient_id'), visit_time=datetime.now()) # fetching names also verifies whether the user # id exist in auth0 names = auth0api.get_user_name([visit.nurse_id, visit.patient_id]) visit.insert() selection = Visit.query.get(visit.id) result = selection.long_format(names[0], names[1]) except exc.SQLAlchemyError: visit.reset() abort(422) except: abort(422) return jsonify({'success': True, 'data': result}) @api.route('/visits/<int:a_id>', methods=['PATCH']) @auth.requires_auth(permission='patch:visits') def update_visit(payload, a_id): body = request.get_json() try: visit = Visit.query.get(a_id) assert visit is not None, f'visit record not found {a_id}' if 'patient_id' in body: visit.patient_id = body.get('patient_id') if 'nurse_id' in body: visit.nurse_id = body.get('nurse_id') # fetching names also verifies whether the user # id exist in auth0 names = auth0api.get_user_name([visit.nurse_id, visit.patient_id]) visit.update() selection = Visit.query.get(a_id) result = selection.long_format(names[0], names[1]) except exc.SQLAlchemyError: visit.reset() abort(422) except: abort(422) return jsonify({'success': True, 'data': result}) @api.route('/visits/<int:a_id>', methods=['DELETE']) @auth.requires_auth(permission='delete:visits') def delete_visit(payload, a_id): try: visit = Visit.query.get(a_id) assert visit is not None, f'visit record not found {a_id}' visit.delete() except exc.SQLAlchemyError: visit.reset() abort(422) except: abort(422) return jsonify({'success': True, 'visit_id': a_id}) @api.route('/vital-signs/create', methods=['POST']) @auth.requires_auth(permission='post:vital-signs') def create_vitalsign(payload): body = request.get_json() try: vitalsign = VitalSign(visit_id=body.get('visit_id'), tempCelsius=body.get('tempCelsius')) vitalsign.insert() selection = VitalSign.query.get(vitalsign.id) result = selection.short_format() except exc.SQLAlchemyError: vitalsign.reset() abort(422) except: abort(422) return jsonify({'success': True, 'data': result}) @api.route('/vital-signs/<int:a_id>', methods=['PATCH']) @auth.requires_auth(permission='patch:vital-signs') def update_vitalsign(payload, a_id): body = request.get_json() try: vitalsign = VitalSign.query.get(a_id) assert vitalsign is not None, f'vital sign record not found {a_id}' if 'visit_id' in body: vitalsign.visit_id = body.get('visit_id') if 'tempCelsius' in body: vitalsign.tempCelsius = body.get('tempCelsius') vitalsign.update() selection = VitalSign.query.get(a_id) result = selection.short_format() except exc.SQLAlchemyError: vitalsign.reset() abort(422) except: abort(422) return jsonify({'success': True, 'data': result}) @api.route('/vital-signs/<int:a_id>', methods=['DELETE']) @auth.requires_auth(permission='delete:vital-signs') def delete_vitalsigns(payload, a_id): 
try: vitalsign = VitalSign.query.get(a_id) assert vitalsign is not None, f'vital sign record not found {a_id}' vitalsign.delete() except exc.SQLAlchemyError: vitalsign.reset() abort(422) except: abort(422) return jsonify({'success': True, 'vitalsign_id': a_id}) @api.route('/patients/search', methods=['GET']) @auth.requires_auth(permission='read:patient-data') def search_patient(payload): body = request.get_json() try: patient_id = body.get('patient_id') visits = Visit.query.filter_by(patient_id=patient_id).all() assert visits != [], \ f'no patients found in visit record with id: {patient_id}' result = format_visit_and_vital_sign_data(visits) except exc.SQLAlchemyError: visits.reset() abort(404) except: abort(404) return jsonify({'success': True, 'data': result}) @api.route('/patients/search/user', methods=['GET']) @auth.requires_auth(permission='read:restrictive-patient-data') def get_user_patient_record(payload): try: # use decoded payload data to get patient id (active user) patient_id = payload.get('sub') visits = Visit.query.filter_by(patient_id=patient_id).all() assert visits != [], \ f'no patients found in visit record with id: {patient_id}' result = format_visit_and_vital_sign_data(visits) except exc.SQLAlchemyError: visits.reset() abort(404) except: abort(404) return jsonify({'success': True, 'data': result}) ''' Packages visits and vital sign data INPUT: visits [list] : list of visit objects from Visit class OUTPUT: result [list] : Reformatted data ''' def format_visit_and_vital_sign_data(visits): result = [] for visit in visits: names = auth0api.get_user_name([visit.nurse_id, visit.patient_id]) visit_format = visit.long_format(names[0], names[1]) if not visit.vitalsigns: # no vital signs have been documented in this visit vitalsign_format = [] else: vitalsign_format = visit.vitalsigns[0].short_format() element = {"visit": visit_format, "vitalSign": vitalsign_format} result.append(element) return result @api.errorhandler(400) def bad_request(error): return jsonify({ "success": False, "error": 400, "message": "Bad Request" }), 400 @api.errorhandler(401) def unauthorized(error): return jsonify({ "success": False, "error": 401, "message": "Unauthorized" }), 401 @api.errorhandler(404) def not_found(error): return jsonify({ "success": False, "error": 404, "message": "Not Found" }), 404 @api.errorhandler(422) def unprocessable(error): return jsonify({ "success": False, "error": 422, "message": "Unprocessable" }), 422 @api.errorhandler(auth.AuthError) def handle_auth_error(ex): response = jsonify(ex.error) response.status_code = ex.status_code return response
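
# --- Added usage note (not part of the original module): request shapes. ---
# Hedged sketch of the JSON bodies the handlers above read via request.get_json();
# the identifier values are placeholders, and callers also need a bearer token
# carrying the permission named in each @auth.requires_auth decorator. Note that
# the search endpoints read a JSON body even though they are GET routes.
#
#   POST  /visits/create        {"nurse_id": "auth0|abc", "patient_id": "auth0|xyz"}
#   PATCH /visits/<id>          {"nurse_id": "...", "patient_id": "..."}  # both optional
#   POST  /vital-signs/create   {"visit_id": 1, "tempCelsius": 36.6}
#   GET   /patients/search      {"patient_id": "auth0|xyz"}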
nilq/baby-python
python
from collections import deque


def solution():
    people = deque()

    while True:
        name = input()
        if name == 'End':
            print(f'{len(people)} people remaining.')
            break
        elif name == 'Paid':
            while people:
                popped_person = people.popleft()
                print(popped_person)
        else:
            people.append(name)


solution()
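
# --- Added example (not part of the original solution): a sample session. ---
# Hedged illustration of the expected input/output; the names are made up.
#
#   input:  Alice / Bob / Paid / Carol / End
#   output: Alice
#           Bob
#           1 people remaining.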
nilq/baby-python
python
from numpywren import compiler, frontend, exceptions import unittest import astor import ast import inspect def F1(a: int, b: int) -> int: return a//b def F2(a: float, b: int) -> float: return a + b def F3(a: float, b: int) -> float: c = a + b d = log(c) e = ceiling(d) return c def F4(a: float, b: int) -> float: c = a + b d = log(c) if (c > d): e = log(d) else: e = d return e def F5(a: float, b: int) -> float: c = a + b d = log(c) e = d**c if (c > d): f = e else: f = d return f def F6(a: float, b: int, c: int) -> float: return ((a + b) * (b**a))/floor(c) def F7_err(N: int, M: int) -> float: c = a + b d = log(c) e = d**c if (c > d): f = c g = e else: f = e return f def F7_no_err(a: int, b: int) -> float: c = a + b d = log(c) e = d**c if (c > d): f = d else: f = e return d def F8(N: int, M: int): for i in range(N): for j in range(i+1, M): if (i < j/2): z = i + j else: z = i - j def F9(N: int, M: int): for i in range(N): for j in range(i+1, M): if (i < j/2): if (j > log(M)): z = i + j else: z = 2*i+4*j else: z = i - j class FrontEndTest(unittest.TestCase): def test_types_simple(self): parser, type_checker, f2_ast = compiler.lpcompile(F2) tree = astor.dump_tree(f2_ast) assert type_checker.decl_types['a'] == frontend.ConstFloatType assert type_checker.decl_types['b'] == frontend.ConstIntType def test_types_simple_2(self): parser, type_checker, f3_ast = compiler.lpcompile(F3) tree = astor.dump_tree(f3_ast) assert type_checker.decl_types['c'] == frontend.ConstFloatType assert type_checker.decl_types['d'] == frontend.ConstFloatType assert type_checker.decl_types['e'] == frontend.ConstIntType def test_types_simple_if(self): parser, type_checker, f_ast = compiler.lpcompile(F4) tree = astor.dump_tree(f_ast) assert type_checker.decl_types['c'] == frontend.ConstFloatType assert type_checker.decl_types['d'] == frontend.ConstFloatType assert type_checker.decl_types['e'] == frontend.ConstFloatType def test_types_compound_expr_3(self): parser, type_checker, f_ast = compiler.lpcompile(F6) assert type_checker.return_node_type == frontend.ConstFloatType def test_types_if_statement_err(self): try: parser, type_checker, f_ast = compiler.lpcompile(F7_err) except exceptions.LambdaPackParsingException: pass def test_types_if_statement_no_err(self): parser, type_checker, f_ast = compiler.lpcompile(F7_no_err) assert type_checker.decl_types['f'] == frontend.ConstFloatType assert type_checker.decl_types['d'] == frontend.ConstFloatType def test_types_for_loop_if_statment(self): parser, type_checker, f_ast = compiler.lpcompile(F8) assert type_checker.decl_types['z'] == frontend.LinearIntType assert type_checker.decl_types['i'] == frontend.LinearIntType def test_types_for_loop_nested_if_statment(self): parser, type_checker, f_ast = compiler.lpcompile(F9) assert type_checker.decl_types['z'] == frontend.LinearIntType assert type_checker.decl_types['i'] == frontend.LinearIntType
nilq/baby-python
python
from datetime import date from new_movies import movies_directory from new_movies.configuration import UNLIMITED_WATCHING_START_DATE, UNLIMITED_WATCHING_END_DATE from new_movies.exceptions import NoCreditsForMovieRent, MovieNotFound, ViewsLimitReached from new_movies.movie import Movie from new_movies.rented_movie import RentedMovie def rent_movie(user, movie): if user.credits_left < 1: raise NoCreditsForMovieRent() user.rented_movies.append(RentedMovie(movie)) user.credits_left -= 1 def watch_movie(user, movie): rented_movie = _get_rented_movie(user, movie) if not rented_movie: raise MovieNotFound() if _unlimited_watching_promo(): _watch_movie_during_unlimited_promo(user, rented_movie) else: _watch_movie_during_standard_period(user, rented_movie) def _get_rented_movie(user, movie): for rented_movie in user.rented_movies: if rented_movie.movie == movie: return rented_movie def _unlimited_watching_promo(): return UNLIMITED_WATCHING_START_DATE <= date.today() <= UNLIMITED_WATCHING_END_DATE def _watch_movie_during_unlimited_promo(user, rented_movie): _start_streaming(user, rented_movie.movie) def _watch_movie_during_standard_period(user, rented_movie): if rented_movie.views_left < 1: raise ViewsLimitReached() rented_movie.views_left -= 1 _start_streaming(user, rented_movie.movie) def _start_streaming(user, movie): datetime_format = user.datetime_preferences.value print(f"User: {user} is watching {movie.info_with_date_format(datetime_format)}") def add_movie(): print("Adding new movie") print("Provide movie's data") name = input("Title: ") category = input("Category: ") release_date_input = input("Release date (in YYYY-MM-DD format): ") release_date = date.fromisoformat(release_date_input) new_movie = Movie(name, category, release_date) movies_directory.add_movie(new_movie)
nilq/baby-python
python
################################################################################ # # Implementation of angular additive margin softmax loss. # # Adapted from: https://github.com/clovaai/voxceleb_trainer/blob/master/loss/aamsoftmax.py # # Author(s): Nik Vaessen ################################################################################ import torch import torch as t import torch.nn as nn import torch.nn.functional as F import math ################################################################################ # wrap around aam-loss implementation class AngularAdditiveMarginSoftMaxLoss(t.nn.Module): def __init__( self, margin: float = 0.2, scale: float = 30, ): super(AngularAdditiveMarginSoftMaxLoss, self).__init__() self.margin = margin self.scale = scale self.ce = nn.CrossEntropyLoss() # self.easy_margin = easy_margin self.cos_m = math.cos(self.margin) self.sin_m = math.sin(self.margin) # make the function cos(theta+m) monotonic decreasing while theta in [0°,180°] self.th = math.cos(math.pi - self.margin) self.mm = math.sin(math.pi - self.margin) * self.margin def forward(self, input_tensor: t.Tensor, speaker_labels: t.Tensor): assert input_tensor.size()[0] == speaker_labels.size()[0] # cos(theta) cosine = input_tensor # cos(theta + m) sine = torch.sqrt((1.0 - torch.mul(cosine, cosine)).clamp(0, 1)) phi = cosine * self.cos_m - sine * self.sin_m phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm) one_hot = torch.zeros_like(cosine) one_hot.scatter_(1, speaker_labels.view(-1, 1), 1) output = (one_hot * phi) + ((1.0 - one_hot) * cosine) output = output * self.scale loss = self.ce(output, speaker_labels) prediction = F.softmax(output, dim=1) return loss, prediction
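
# a small smoke test (not part of the original module): the loss expects the
# input tensor to already contain cosine similarities in [-1, 1], e.g. the
# product of L2-normalized embeddings and class weights; here we fake them
# with random values. The batch size and speaker count are arbitrary.
if __name__ == "__main__":
    loss_fn = AngularAdditiveMarginSoftMaxLoss(margin=0.2, scale=30)
    cosines = torch.rand(8, 100) * 2 - 1  # fake cosine scores for 100 speakers
    labels = torch.randint(0, 100, (8,))  # one speaker label per sample
    loss, prediction = loss_fn(cosines, labels)
    print(loss.item(), prediction.shape)  # scalar loss, softmax of shape (8, 100)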
nilq/baby-python
python
#!/usr/bin/env python import os import sqlite3 from datetime import datetime from getpass import getuser from bashhistory import db_connection class SQL: COLUMNS = [ "command", "at", "host", "pwd", "user", "exit_code", "pid", "sequence", ] CREATE_COMMANDS: str = """ DROP TABLE IF EXISTS commands ; CREATE TABLE commands ( command TEXT NOT NULL, at TIMESTAMP NOT NULL, host TEXT NOT NULL, pwd TEXT NOT NULL, user TEXT NOT NULL, exit_code INTEGER, pid INTEGER, sequence INTEGER ) ; CREATE INDEX commands_at ON commands (at) ; CREATE INDEX commands_pwd ON commands (pwd) ; CREATE INDEX commands_exit_code ON commands (exit_code) ; """ INSERT_COMMAND: str = """ INSERT INTO commands(command, at, host, pwd, user, exit_code, pid, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?); """ def create_db(): db_conn = db_connection.connect(create_if_missing=False) db_conn.executescript(SQL.CREATE_COMMANDS) db_conn.commit() db_conn.close() def insert_command( command: str, at: datetime = None, host: str = None, pwd: str = None, user: str = None, exit_code: int = None, pid: int = None, sequence: int = None, db_conn: sqlite3.Connection = None, commit: bool = True, ): if not at: at = datetime.utcnow() if not host: host = os.uname()[1] if not pwd: pwd = os.getcwd() if not user: user = getuser() close_after = False if not db_conn: close_after = True db_conn = db_connection.connect() db_conn.cursor().execute(SQL.INSERT_COMMAND, [ command, at.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], host, pwd, user, exit_code, pid, sequence, ]).close() if close_after: db_connection.close(db_conn, commit=True) elif commit: db_conn.commit()
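
# --- Added example (not part of the original module): recording a command. ---
# Sketch assuming the sibling db_connection module is importable; create_db()
# (defined above) rebuilds the SQLite schema, and the command string, pid, and
# sequence values are made up.
if __name__ == "__main__":
    create_db()
    insert_command("ls -la", exit_code=0, pid=12345, sequence=1)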
nilq/baby-python
python
import numpy as np import pandas as pd def print_matrix(matrix): pd.set_option('display.max_rows', len(matrix)) print() print(matrix) def normalize(matrix): return matrix.div(matrix.sum(axis=1), axis=0) def generate_matrix(data): key_set = set(data.keys()) for edges in data.values(): keys = edges.keys() key_set.update(keys) all_keys = sorted(list(key_set)) for key in all_keys: if key not in data: data[key] = {key: 1} matrix_list = [] for key in all_keys: edges = data[key] row = [] # sum_of_row = sum(edges.values()) for key2 in all_keys: # value = Fraction(edges.get(key2, 0), sum_of_row) value = edges.get(key2, 0) row.append(value) matrix_list.append(row) matrix = pd.DataFrame( data=matrix_list, index=all_keys, columns=all_keys, ) result = normalize(matrix).astype('float') return result def find_absorbing_rows(matrix): result = [] for index, row in enumerate(matrix.iterrows()): values = row[1].values if values[index] == 1: result.append(row[0]) return result def sort_states(matrix): all_states = list(matrix.index.values) absorbing = find_absorbing_rows(matrix) transition = [name for name in all_states if name not in absorbing] return transition, absorbing def sort_matrix(matrix): # sort the matrix transition, absorbing = sort_states(matrix) sorted_states = transition + absorbing sorted_matrix = matrix.reindex(index=sorted_states, columns=sorted_states) return sorted_matrix def decompose(matrix): # sort the matrix transition, absorbing = sort_states(matrix) sorted_states = transition + absorbing sorted_matrix = matrix.reindex(index=sorted_states, columns=sorted_states) matrix_size = len(matrix) t_size = len(transition) q_matrix = sorted_matrix.iloc[0:t_size, 0:t_size] r_matrix = sorted_matrix.iloc[0:t_size, t_size:matrix_size] return q_matrix, r_matrix # result = calculate_b(drunk_walk_example) def get_steady_state(matrix): q, r = decompose(matrix) i = np.identity(len(q)) q = q.mul(-1) q = q.add(i) v = np.linalg.inv(q) result = np.matmul(v, r) return result
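
# --- Added example (not part of the original module): a tiny absorbing chain. ---
# Sketch of a one-step gambler's-ruin chain: from state "1" the walker moves to
# "0" or "2" with equal weight; generate_matrix() fills in the missing states as
# absorbing self-loops. The expected absorption probabilities are 0.5 each.
if __name__ == "__main__":
    chain = {
        "1": {"0": 1, "2": 1},  # "0" and "2" become absorbing states automatically
    }
    matrix = generate_matrix(chain)
    print_matrix(matrix)
    print(get_steady_state(matrix))  # -> [[0.5, 0.5]] for the transient state "1"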
nilq/baby-python
python
from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse


class NegativeTargets(Client):
    """Amazon Advertising API for Sponsored Display

    Documentation: https://advertising.amazon.com/API/docs/en-us/sponsored-display/3-0/openapi#/Negative%20targeting

    This API enables programmatic access for campaign creation, management, and reporting for Sponsored Display campaigns. For more information on the functionality, see the `Sponsored Display Support Center <https://advertising.amazon.com/help#GTPPHE6RAWC2C4LZ>`_ . For API onboarding information, see the `account setup <https://advertising.amazon.com/API/docs/en-us/setting-up/account-setup>`_ topic.

    This specification is available for download from the `Advertising API developer portal <https://d3a0d0y2hgofx6.cloudfront.net/openapi/en-us/sponsored-display/3-0/openapi.yaml>`_.
    """

    @sp_endpoint('/sd/negativeTargets', method='GET')
    def list_negative_targets(self, **kwargs) -> ApiResponse:
        r"""
        list_negative_targets(self, \*\*kwargs) -> ApiResponse

        Gets a list of negative targeting clauses filtered by specified criteria.

        | query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value: 0.
        | query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.
        | query **stateFilter**:*string* | Optional. Restricts results to targets with state in the specified comma-delimited list. Available values: enabled, paused, archived (any comma-delimited combination). Default value: enabled, paused, archived.
        | query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.
        | query **adGroupIdFilter**:*string* | Optional. Restricts results to targets associated with ad groups specified by identifier in the comma-delimited list.
        | query **targetIdFilter**:*string* | Optional. A comma-delimited list of target identifiers. Missing in official Amazon documentation.

        Returns:
            ApiResponse
        """
        return self._request(kwargs.pop('path'), params=kwargs)

    @sp_endpoint('/sd/negativeTargets', method='PUT')
    def edit_negative_targets(self, **kwargs) -> ApiResponse:
        r"""
        edit_negative_targets(self, \*\*kwargs) -> ApiResponse

        Updates one or more negative targeting clauses. Negative targeting clauses are identified using their targetId. The only mutable field is state. Maximum length of the array is 100 objects.

        body: | REQUIRED {'description': 'A list of up to 100 negative targeting clauses. Note that the only mutable field is state.'}

            | '**state**': *string*, {'description': 'The resource state. [ enabled, paused, archived ]'}
            | '**targetId**': *integer($int64)*, {'description': 'The identifier of the target.'}

        Returns:
            ApiResponse
        """
        return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)

    @sp_endpoint('/sd/negativeTargets', method='POST')
    def create_negative_targets(self, **kwargs) -> ApiResponse:
        r"""
        create_negative_targets(self, \*\*kwargs) -> ApiResponse

        Creates one or more negative targeting expressions.

        body: | REQUIRED {'description': 'An array of negative targeting clauses.'}

            | '**state**': *string*, {'description': 'The current resource state. [ enabled, paused, archived ]'}
            | '**adGroupId**': *number*, {'description': 'The identifier of the ad group to which this negative target is associated.'}
            | '**expression**'
                | '**type**': *string*, {'description': 'The intent type. See the targeting topic in the Amazon Advertising support center for more information.', 'enum': '[ asinSameAs, asinBrandSameAs ]'}
                | '**value**': *string*, {'description': 'The value to be negatively targeted. Used only in manual expressions.'}
            | '**expressionType**': *string*, {'description': '[ auto, manual ]'}

        Returns:
            ApiResponse
        """
        return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)

    @sp_endpoint('/sd/negativeTargets/{}', method='GET')
    def get_negative_target(self, targetId, **kwargs) -> ApiResponse:
        r"""
        get_negative_target(self, targetId, \*\*kwargs) -> ApiResponse

        Gets a negative targeting clause specified by identifier. This call returns the minimal set of negative targeting clause fields, but is more efficient than getNegativeTargetsEx.

        path **negativeTargetId**:*integer* | Required. The negative targeting clause identifier.

        Returns:
            ApiResponse
        """
        return self._request(fill_query_params(kwargs.pop('path'), targetId), params=kwargs)

    @sp_endpoint('/sd/negativeTargets/{}', method='DELETE')
    def delete_negative_targets(self, targetId, **kwargs) -> ApiResponse:
        r"""
        delete_negative_targets(self, targetId, \*\*kwargs) -> ApiResponse

        Archives a negative targeting clause. Equivalent to using the updateNegativeTargetingClauses operation to set the state property of a targeting clause to archived. See Developer Notes for more information.

        path **negativeTargetId**:*integer* | Required. The negative targeting clause identifier.

        Returns:
            ApiResponse
        """
        return self._request(fill_query_params(kwargs.pop('path'), targetId), params=kwargs)

    @sp_endpoint('/sd/negativeTargets/extended', method='GET')
    def list_negative_targets_extended(self, **kwargs) -> ApiResponse:
        r"""
        list_negative_targets_extended(self, \*\*kwargs) -> ApiResponse

        Gets an array of NegativeTargetingClauseEx objects for a set of requested negative targets. Note that this call returns the full set of negative targeting clause extended fields, but is less efficient than getNegativeTargets.

        | query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value: 0.
        | query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.
        | query **stateFilter**:*string* | Optional. Restricts results to targets with state in the specified comma-delimited list. Available values: enabled, paused, archived (any comma-delimited combination). Default value: enabled, paused, archived.
        | query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.
        | query **adGroupIdFilter**:*string* | Optional. Restricts results to targets associated with ad groups specified by identifier in the comma-delimited list.
        | query **targetIdFilter**:*string* | Optional. A comma-delimited list of target identifiers. Missing in official Amazon documentation.

        Returns:
            ApiResponse
        """
        return self._request(kwargs.pop('path'), params=kwargs)

    @sp_endpoint('/sd/negativeTargets/extended/{}', method='GET')
    def get_negative_target_extended(self, targetId, **kwargs) -> ApiResponse:
        r"""
        get_negative_target_extended(self, targetId, \*\*kwargs) -> ApiResponse

        Gets a negative targeting clause with extended fields. Note that this call returns the full set of negative targeting clause extended fields, but is less efficient than getNegativeTarget.

        path **negativeTargetId**:*integer* | Required. The negative targeting clause identifier.

        Returns:
            ApiResponse
        """
        return self._request(fill_query_params(kwargs.pop('path'), targetId), params=kwargs)
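# Hedged usage sketch (not from the source): client construction and credential
# handling follow the general python-amazon-ad-api pattern and may differ by
# library version; treat the import path and constructor as assumptions.
#
#     from ad_api.api.sd import NegativeTargets
#
#     client = NegativeTargets()  # credentials typically come from env vars or a credentials dict
#     response = client.list_negative_targets(stateFilter='enabled', count=10)
#     print(response.payload)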
from vpython import *

scene.title = "VPython: Draw a sphere"

sphere()  # using defaults
# see http://www.vpython.org/contents/docs/defaults.html
# -*- coding: utf-8 -*-
import requests

# resp = requests.post("http://localhost:5000/predict", json={"raw_text": "how do you stop war?"})

# resp_prod = requests.post("http://213.159.215.173:5000/get_summary", json={"raw_text": "A significant number of executives from 151 financial institutions in 33 countries say that within the next two years they expect to become mass adopters of AI and expect AI to become an essential business driver across the financial industry."})
resp_prod = requests.post(
    "http://35.202.164.44:5000/get_summary",
    json={"raw_text": "A significant number of executives from 151 financial institutions in 33 countries say that within the next two years they expect to become mass adopters of AI and expect AI to become an essential business driver across the financial industry."}
)

# print(resp.json())
# print(str(resp))
print('prod:', resp_prod.json())
print('prod:', str(resp_prod))
# Generated by Django 3.2.6 on 2021-09-01 20:45

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('produtos', '0004_ajuste_produtos'),
    ]

    operations = [
        migrations.CreateModel(
            name='Fornecedor',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('ramo', models.CharField(blank=False, max_length=50, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TelefoneFornecedor',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('telefone', models.CharField(max_length=20)),
                ('fornecedor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='produtos.fornecedor')),
            ],
        ),
    ]
import os
dirpath = os.pardir
import sys
sys.path.append(dirpath)

import torch  # used directly below; added explicitly in case the wildcard import does not provide it
import numpy as np  # same: np.array / np.mean are used throughout
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
from torch.optim import lr_scheduler

import resnet_epi_fcr
import resnet_vanilla
import resnet_SNR
import resnet_se

from common.data_reader import BatchImageGenerator
from common.utils import *


class ModelAggregate:
    def __init__(self, flags):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

        self.setup(flags)
        self.setup_path(flags)
        self.configure(flags)

    def setup(self, flags):
        torch.backends.cudnn.deterministic = flags.deterministic
        print('torch.backends.cudnn.deterministic:', torch.backends.cudnn.deterministic)
        fix_all_seed(flags.seed)

        self.network = resnet_vanilla.resnet18(pretrained=False, num_classes=flags.num_classes)
        self.network = self.network.cuda()

        print(self.network)
        print('flags:', flags)
        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'flags_log.txt')
        write_log(flags, flags_log)

        self.load_state_dict(flags, self.network)

    def setup_path(self, flags):
        root_folder = flags.data_root
        train_data = ['art_painting_train.hdf5',
                      'cartoon_train.hdf5',
                      'photo_train.hdf5',
                      'sketch_train.hdf5']
        val_data = ['art_painting_val.hdf5',
                    'cartoon_val.hdf5',
                    'photo_val.hdf5',
                    'sketch_val.hdf5']
        test_data = ['art_painting_test.hdf5',
                     'cartoon_test.hdf5',
                     'photo_test.hdf5',
                     'sketch_test.hdf5']

        self.train_paths = []
        for data in train_data:
            path = os.path.join(root_folder, data)
            self.train_paths.append(path)

        self.val_paths = []
        for data in val_data:
            path = os.path.join(root_folder, data)
            self.val_paths.append(path)

        unseen_index = flags.unseen_index

        self.unseen_data_path = os.path.join(root_folder, test_data[unseen_index])
        self.train_paths.remove(self.train_paths[unseen_index])
        self.val_paths.remove(self.val_paths[unseen_index])

        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'path_log.txt')
        write_log(str(self.train_paths), flags_log)
        write_log(str(self.val_paths), flags_log)
        write_log(str(self.unseen_data_path), flags_log)

        self.batImageGenTrains = []
        for train_path in self.train_paths:
            batImageGenTrain = BatchImageGenerator(flags=flags, file_path=train_path, stage='train',
                                                   b_unfold_label=False)
            self.batImageGenTrains.append(batImageGenTrain)

        self.batImageGenVals = []
        for val_path in self.val_paths:
            batImageGenVal = BatchImageGenerator(flags=flags, file_path=val_path, stage='val',
                                                 b_unfold_label=False)
            self.batImageGenVals.append(batImageGenVal)

        self.batImageGenTest = BatchImageGenerator(flags=flags, file_path=self.unseen_data_path, stage='test',
                                                   b_unfold_label=False)

    def load_state_dict(self, flags, nn):
        if flags.state_dict:
            try:
                tmp = torch.load(flags.state_dict)
                if 'state' in tmp.keys():
                    pretrained_dict = tmp['state']
                else:
                    pretrained_dict = tmp
            except:
                pretrained_dict = model_zoo.load_url(flags.state_dict)

            model_dict = nn.state_dict()

            # 1. filter out unnecessary keys
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if
                               k in model_dict and v.size() == model_dict[k].size()}
            print('model dict keys:', len(model_dict.keys()), 'pretrained keys:', len(pretrained_dict.keys()))
            print('model dict keys:', model_dict.keys(), 'pretrained keys:', pretrained_dict.keys())

            # 2. overwrite entries in the existing state dict
            model_dict.update(pretrained_dict)

            # 3. load the new state dict
            nn.load_state_dict(model_dict)

    def configure(self, flags):
        for name, para in self.network.named_parameters():
            print(name, para.size())

        self.optimizer = sgd(parameters=self.network.parameters(),
                             lr=flags.lr,
                             weight_decay=flags.weight_decay,
                             momentum=flags.momentum)

        self.scheduler = lr_scheduler.StepLR(optimizer=self.optimizer, step_size=flags.step_size, gamma=0.1)
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def train(self, flags):
        self.network.train()
        self.network.bn_eval()
        self.best_accuracy_val = -1

        for ite in range(flags.loops_train):

            self.scheduler.step(epoch=ite)

            # get the inputs and labels from the data reader
            total_loss = 0.0
            for index in range(len(self.batImageGenTrains)):
                images_train, labels_train = self.batImageGenTrains[index].get_images_labels_batch()

                inputs, labels = torch.from_numpy(np.array(images_train, dtype=np.float32)), \
                                 torch.from_numpy(np.array(labels_train, dtype=np.float32))

                # wrap the inputs and labels in Variable
                inputs, labels = Variable(inputs, requires_grad=False).cuda(), \
                                 Variable(labels, requires_grad=False).long().cuda()

                # forward with the adapted parameters
                outputs, _ = self.network(x=inputs)

                # loss
                loss = self.loss_fn(outputs, labels)
                total_loss += loss

            # init the grad to zeros first
            self.optimizer.zero_grad()

            # backward your network
            total_loss.backward()

            # optimize the parameters
            self.optimizer.step()

            if ite < 500 or ite % 500 == 0:
                print('ite:', ite, 'total loss:', total_loss.cpu().item(), 'lr:', self.scheduler.get_lr()[0])
                flags_log = os.path.join(flags.logs, 'loss_log.txt')
                write_log(str(total_loss.item()), flags_log)

            # `ite is not 0` in the original relied on small-int interning; use != instead
            if ite % flags.test_every == 0 and ite != 0:
                self.test_workflow(self.batImageGenVals, flags, ite)

    def test_workflow(self, batImageGenVals, flags, ite):
        accuracies = []
        for count, batImageGenVal in enumerate(batImageGenVals):
            accuracy_val = self.test(batImageGenTest=batImageGenVal, flags=flags, ite=ite,
                                     log_dir=flags.logs, log_prefix='val_index_{}'.format(count))
            accuracies.append(accuracy_val)

        mean_acc = np.mean(accuracies)

        if mean_acc > self.best_accuracy_val:
            self.best_accuracy_val = mean_acc

            acc_test = self.test(batImageGenTest=self.batImageGenTest, flags=flags, ite=ite,
                                 log_dir=flags.logs, log_prefix='dg_test')

            f = open(os.path.join(flags.logs, 'Best_val.txt'), mode='a')
            f.write('ite:{}, best val accuracy:{}, test accuracy:{}\n'.format(ite, self.best_accuracy_val, acc_test))
            f.close()

            if not os.path.exists(flags.model_path):
                os.makedirs(flags.model_path)

            outfile = os.path.join(flags.model_path, 'best_model.tar')
            torch.save({'ite': ite, 'state': self.network.state_dict()}, outfile)

    def bn_process(self, flags):
        if flags.bn_eval == 1:
            self.network.bn_eval()

    def test(self, flags, ite, log_prefix, log_dir='logs/', batImageGenTest=None):

        # switch on the network test mode
        self.network.eval()

        if batImageGenTest is None:
            batImageGenTest = BatchImageGenerator(flags=flags, file_path='', stage='test', b_unfold_label=True)

        images_test = batImageGenTest.images
        labels_test = batImageGenTest.labels

        threshold = 50
        if len(images_test) > threshold:

            n_slices_test = int(len(images_test) / threshold)
            indices_test = []
            for per_slice in range(n_slices_test - 1):
                indices_test.append(int(len(images_test) * (per_slice + 1) / n_slices_test))
            test_image_splits = np.split(images_test, indices_or_sections=indices_test)

            # Verify the splits are correct
            test_image_splits_2_whole = np.concatenate(test_image_splits)
            assert np.all(images_test == test_image_splits_2_whole)

            # split the test data into splits and test them one by one
            test_image_preds = []
            for test_image_split in test_image_splits:
                images_test = Variable(torch.from_numpy(np.array(test_image_split, dtype=np.float32))).cuda()
                tuples = self.network(images_test)

                predictions = tuples[-1]['Predictions']
                predictions = predictions.cpu().data.numpy()
                test_image_preds.append(predictions)

            # concatenate the test predictions first
            predictions = np.concatenate(test_image_preds)
        else:
            images_test = Variable(torch.from_numpy(np.array(images_test, dtype=np.float32))).cuda()
            tuples = self.network(images_test)

            predictions = tuples[-1]['Predictions']
            predictions = predictions.cpu().data.numpy()

        accuracy = compute_accuracy(predictions=predictions, labels=labels_test)
        print('----------accuracy test----------:', accuracy)

        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        f = open(os.path.join(log_dir, '{}.txt'.format(log_prefix)), mode='a')
        f.write('ite:{}, accuracy:{}\n'.format(ite, accuracy))
        f.close()

        # switch on the network train mode
        self.network.train()
        self.bn_process(flags)

        return accuracy


class ModelAggregate_SNR_CausalityLoss:
    def __init__(self, flags):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

        self.setup(flags)
        self.setup_path(flags)
        self.configure(flags)

    def setup(self, flags):
        torch.backends.cudnn.deterministic = flags.deterministic
        print('torch.backends.cudnn.deterministic:', torch.backends.cudnn.deterministic)
        fix_all_seed(flags.seed)

        self.network = resnet_SNR.resnet18_snr_causality(pretrained=False, num_classes=flags.num_classes)
        self.network = self.network.cuda()

        # print(self.network)
        # print('flags:', flags)
        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'flags_log.txt')
        write_log(flags, flags_log)

        self.load_state_dict(flags, self.network)

    def setup_path(self, flags):
        root_folder = flags.data_root
        train_data = ['art_painting_train.hdf5',
                      'cartoon_train.hdf5',
                      'photo_train.hdf5',
                      'sketch_train.hdf5']
        val_data = ['art_painting_val.hdf5',
                    'cartoon_val.hdf5',
                    'photo_val.hdf5',
                    'sketch_val.hdf5']
        test_data = ['art_painting_test.hdf5',
                     'cartoon_test.hdf5',
                     'photo_test.hdf5',
                     'sketch_test.hdf5']

        self.train_paths = []
        for data in train_data:
            path = os.path.join(root_folder, data)
            self.train_paths.append(path)

        self.val_paths = []
        for data in val_data:
            path = os.path.join(root_folder, data)
            self.val_paths.append(path)

        unseen_index = flags.unseen_index

        self.unseen_data_path = os.path.join(root_folder, test_data[unseen_index])
        self.train_paths.remove(self.train_paths[unseen_index])
        self.val_paths.remove(self.val_paths[unseen_index])

        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'path_log.txt')
        write_log(str(self.train_paths), flags_log)
        write_log(str(self.val_paths), flags_log)
        write_log(str(self.unseen_data_path), flags_log)

        self.batImageGenTrains = []
        for train_path in self.train_paths:
            batImageGenTrain = BatchImageGenerator(flags=flags, file_path=train_path, stage='train',
                                                   b_unfold_label=False)
            self.batImageGenTrains.append(batImageGenTrain)

        self.batImageGenVals = []
        for val_path in self.val_paths:
            batImageGenVal = BatchImageGenerator(flags=flags, file_path=val_path, stage='val',
                                                 b_unfold_label=False)
            self.batImageGenVals.append(batImageGenVal)

        self.batImageGenTest = BatchImageGenerator(flags=flags, file_path=self.unseen_data_path, stage='test',
                                                   b_unfold_label=False)

    def load_state_dict(self, flags, nn):
        if flags.state_dict:
            try:
                tmp = torch.load(flags.state_dict)
                if 'state' in tmp.keys():
                    pretrained_dict = tmp['state']
                else:
                    pretrained_dict = tmp
            except:
                pretrained_dict = model_zoo.load_url(flags.state_dict)

            model_dict = nn.state_dict()

            # 1. filter out unnecessary keys
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if
                               k in model_dict and v.size() == model_dict[k].size()}
            # print('model dict keys:', len(model_dict.keys()), 'pretrained keys:', len(pretrained_dict.keys()))
            # print('model dict keys:', model_dict.keys(), 'pretrained keys:', pretrained_dict.keys())

            # 2. overwrite entries in the existing state dict
            model_dict.update(pretrained_dict)

            # 3. load the new state dict
            nn.load_state_dict(model_dict)

    def configure(self, flags):
        # for name, para in self.network.named_parameters():
        #     print(name, para.size())

        self.optimizer = sgd(parameters=self.network.parameters(),
                             lr=flags.lr,
                             weight_decay=flags.weight_decay,
                             momentum=flags.momentum)

        self.scheduler = lr_scheduler.StepLR(optimizer=self.optimizer, step_size=flags.step_size, gamma=0.1)
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def split_model_parameters(self):
        model_params = []
        ft_params = []
        for n, p in self.network.named_parameters():
            n = n.split('.')
            if n[-1] == 'gamma' or n[-1] == 'beta':
                ft_params.append(p)
            else:
                model_params.append(p)
        return model_params, ft_params

    def get_entropy(self, p_softmax):
        # exploit ENTropy minimization (ENT) to help DA
        mask = p_softmax.ge(0.000001)
        mask_out = torch.masked_select(p_softmax, mask)
        entropy = -(torch.sum(mask_out * torch.log(mask_out)))
        return (entropy / float(p_softmax.size(0)))

    def get_causality_loss(self, x_IN_entropy, x_useful_entropy, x_useless_entropy):
        self.ranking_loss = torch.nn.SoftMarginLoss()
        y = torch.ones_like(x_IN_entropy)
        return self.ranking_loss(x_IN_entropy - x_useful_entropy, y) + \
               self.ranking_loss(x_useless_entropy - x_IN_entropy, y)

    def train(self, flags):
        self.network.train()
        self.network.bn_eval()
        self.best_accuracy_val = -1

        for ite in range(flags.loops_train):

            self.scheduler.step(epoch=ite)

            # get the inputs and labels from the data reader
            # total_loss = 0.0
            for index in range(len(self.batImageGenTrains)):
                # clear fast weight,
                # Use fast weights to aid in learning associative tasks and store temporary memories of recent past.
                for weight in self.network.parameters():
                    weight.fast = None

                images_train, labels_train = self.batImageGenTrains[index].get_images_labels_batch()
                images_meta_train, labels_meta_train = \
                    self.batImageGenTrains[len(self.batImageGenTrains) - (index + 1)].get_images_labels_batch()

                inputs, labels = torch.from_numpy(np.array(images_train, dtype=np.float32)), \
                                 torch.from_numpy(np.array(labels_train, dtype=np.float32))
                inputs_meta, labels_meta = torch.from_numpy(np.array(images_meta_train, dtype=np.float32)), \
                                           torch.from_numpy(np.array(labels_meta_train, dtype=np.float32))

                # wrap the inputs and labels in Variable
                inputs, labels = Variable(inputs, requires_grad=False).cuda(), \
                                 Variable(labels, requires_grad=False).long().cuda()
                inputs_meta, labels_meta = Variable(inputs_meta, requires_grad=False).cuda(), \
                                           Variable(labels_meta, requires_grad=False).long().cuda()

                # forward with the original parameters
                outputs, _, \
                x_IN_1_prob, x_1_useful_prob, x_1_useless_prob, \
                x_IN_2_prob, x_2_useful_prob, x_2_useless_prob, \
                x_IN_3_prob, x_3_useful_prob, x_3_useless_prob, \
                x_IN_3_logits, x_3_useful_logits, x_3_useless_logits = self.network(x=inputs)

                # Causality loss:
                loss_causality = 0.01 * self.get_causality_loss(self.get_entropy(x_IN_1_prob),
                                                                self.get_entropy(x_1_useful_prob),
                                                                self.get_entropy(x_1_useless_prob)) + \
                                 0.01 * self.get_causality_loss(self.get_entropy(x_IN_2_prob),
                                                                self.get_entropy(x_2_useful_prob),
                                                                self.get_entropy(x_2_useless_prob)) + \
                                 0.01 * self.get_causality_loss(self.get_entropy(x_IN_3_prob),
                                                                self.get_entropy(x_3_useful_prob),
                                                                self.get_entropy(x_3_useless_prob)) + \
                                 0.01 * self.loss_fn(x_3_useful_logits, labels)

                # common loss
                loss = self.loss_fn(outputs, labels) + loss_causality

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            if ite < 500 or ite % 500 == 0:
                print('ite:', ite, 'common loss:', loss.cpu().item(), 'lr:', self.scheduler.get_lr()[0])
                flags_log = os.path.join(flags.logs, 'loss_log.txt')
                write_log(str(loss.item()), flags_log)

            # `ite is not 0` in the original relied on small-int interning; use != instead
            if ite % flags.test_every == 0 and ite != 0:
                self.test_workflow(self.batImageGenVals, flags, ite)

    def test_workflow(self, batImageGenVals, flags, ite):
        accuracies = []
        for count, batImageGenVal in enumerate(batImageGenVals):
            accuracy_val = self.test(batImageGenTest=batImageGenVal, flags=flags, ite=ite,
                                     log_dir=flags.logs, log_prefix='val_index_{}'.format(count))
            accuracies.append(accuracy_val)

        mean_acc = np.mean(accuracies)

        if mean_acc > self.best_accuracy_val:
            self.best_accuracy_val = mean_acc

            acc_test = self.test(batImageGenTest=self.batImageGenTest, flags=flags, ite=ite,
                                 log_dir=flags.logs, log_prefix='dg_test')

            f = open(os.path.join(flags.logs, 'Best_val.txt'), mode='a')
            f.write('ite:{}, best val accuracy:{}, test accuracy:{}\n'.format(ite, self.best_accuracy_val, acc_test))
            f.close()

            if not os.path.exists(flags.model_path):
                os.makedirs(flags.model_path)

            outfile = os.path.join(flags.model_path, 'best_model.tar')
            torch.save({'ite': ite, 'state': self.network.state_dict()}, outfile)

    def bn_process(self, flags):
        if flags.bn_eval == 1:
            self.network.bn_eval()

    def test(self, flags, ite, log_prefix, log_dir='logs/', batImageGenTest=None):

        # switch on the network test mode
        self.network.eval()

        if batImageGenTest is None:
            batImageGenTest = BatchImageGenerator(flags=flags, file_path='', stage='test', b_unfold_label=True)

        images_test = batImageGenTest.images
        labels_test = batImageGenTest.labels

        threshold = 50
        if len(images_test) > threshold:

            n_slices_test = int(len(images_test) / threshold)
            indices_test = []
            for per_slice in range(n_slices_test - 1):
                indices_test.append(int(len(images_test) * (per_slice + 1) / n_slices_test))
            test_image_splits = np.split(images_test, indices_or_sections=indices_test)

            # Verify the splits are correct
            test_image_splits_2_whole = np.concatenate(test_image_splits)
            assert np.all(images_test == test_image_splits_2_whole)

            # split the test data into splits and test them one by one
            test_image_preds = []
            for test_image_split in test_image_splits:
                images_test = Variable(torch.from_numpy(np.array(test_image_split, dtype=np.float32))).cuda()
                tuples = self.network(images_test)

                predictions = tuples[1]['Predictions']
                predictions = predictions.cpu().data.numpy()
                test_image_preds.append(predictions)

            # concatenate the test predictions first
            predictions = np.concatenate(test_image_preds)
        else:
            images_test = Variable(torch.from_numpy(np.array(images_test, dtype=np.float32))).cuda()
            tuples = self.network(images_test)

            predictions = tuples[1]['Predictions']
            predictions = predictions.cpu().data.numpy()

        accuracy = compute_accuracy(predictions=predictions, labels=labels_test)
        print('----------accuracy test----------:', accuracy)

        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        f = open(os.path.join(log_dir, '{}.txt'.format(log_prefix)), mode='a')
        f.write('ite:{}, accuracy:{}\n'.format(ite, accuracy))
        f.close()

        # switch on the network train mode
        self.network.train()
        self.bn_process(flags)

        return accuracy
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet

from .models import (
    Cart,
    Item
)
from .serializers import (
    CartSerializerDefault,
    CartSerializerPOST,
    ItemSerializerDefault,
    ItemSerializerPOST
)


class CartViewSet(ModelViewSet):
    """
    API endpoint that allows Cart to be viewed,
    created, deleted or edited.
    """
    queryset = Cart.objects.all()
    serializer_class = CartSerializerDefault
    permission_classes = (IsAuthenticated,)

    def get_serializer_class(self):
        if self.action == 'create':
            return CartSerializerPOST
        return CartSerializerDefault

    def list(self, request):
        """
        API endpoint that allows all carts to be viewed.
        ---
        Response example:
        Return a list of:
        ```
        {
            "pk": "integer",
            "creation_date": "date",
            "checked_out": "boolean"
        }
        ```
        """
        response = super(CartViewSet, self).list(request)
        return response

    def create(self, request):
        """
        API endpoint that allows a cart to be created.
        ---
        Body example:
        ```
        {
            "creation_date": "date",
            "checked_out": "boolean"
        }
        ```
        Response example:
        ```
        {
            "pk": 1,
            "creation_date": "date",
            "checked_out": "boolean"
        }
        ```
        """
        response = super(CartViewSet, self).create(request)
        return response

    def destroy(self, request, pk=None):
        """
        API endpoint that allows a cart to be deleted.
        """
        response = super(CartViewSet, self).destroy(request, pk)
        return response

    def retrieve(self, request, pk=None):
        """
        API endpoint that allows the return of a cart
        through the GET method.
        ---
        Response example:
        ```
        {
            "id": "integer",
            "creation_date": "date",
            "checked_out": "boolean"
        }
        ```
        """
        response = super(CartViewSet, self).retrieve(request, pk)
        return response

    def partial_update(self, request, pk=None, **kwargs):
        """
        API endpoint that allows a cart to be edited.
        ---
        Parameters: Cart ID and a JSON with one or more attributes of cart
        Example:
        ```
        {
            "creation_date": "date",
            "checked_out": "boolean"
        }
        ```
        """
        response = super(CartViewSet, self).\
            partial_update(request, pk, **kwargs)
        return response

    def update(self, request, pk=None, **kwargs):
        """
        API endpoint that allows a cart to be edited.
        ---
        Parameters: Cart ID and a JSON with all attributes
        Example:
        ```
        {
            "creation_date": "date",
            "checked_out": "boolean"
        }
        ```
        """
        response = super(
            CartViewSet, self).update(
                request, pk, **kwargs
        )
        return response


class ItemViewSet(ModelViewSet):
    """
    API endpoint that allows Item to be viewed,
    created, deleted or edited.
    """
    queryset = Item.objects.all()
    serializer_class = ItemSerializerDefault
    permission_classes = (IsAuthenticated,)

    def get_serializer_class(self):
        if self.action == 'create':
            return ItemSerializerPOST
        return ItemSerializerDefault

    def list(self, request):
        """
        API endpoint that allows all items to be viewed.
        ---
        Response example:
        Return a list of:
        ```
        {
            "id": "integer",
            "quantity": "integer",
            "object_id": "integer",
            "unit_price": "integer",
            "cart": "cart",
            "content_type": "content_type"
        }
        ```
        """
        response = super(ItemViewSet, self).list(request)
        return response

    def create(self, request):
        """
        API endpoint that allows an item to be created.
        ---
        Body example:
        ```
        {
            "quantity": "integer",
            "object_id": "integer",
            "unit_price": "integer",
            "cart": "cart",
            "content_type": "content_type"
        }
        ```
        Response example:
        ```
        {
            "pk": 1,
            "quantity": "integer",
            "object_id": "integer",
            "unit_price": "integer",
            "cart": "cart",
            "content_type": "content_type"
        }
        ```
        """
        response = super(ItemViewSet, self).create(request)
        return response

    def destroy(self, request, pk=None):
        """
        API endpoint that allows an item to be deleted.
        """
        # the original called super(CartViewSet, self) here, which breaks the MRO lookup
        response = super(ItemViewSet, self).destroy(request, pk)
        return response

    def retrieve(self, request, pk=None):
        """
        API endpoint that allows the return of an item
        through the GET method.
        ---
        Response example:
        ```
        {
            "id": "integer",
            "quantity": "integer",
            "object_id": "integer",
            "unit_price": "integer",
            "cart": "cart",
        }
        ```
        """
        response = super(ItemViewSet, self).retrieve(request, pk)
        return response

    def partial_update(self, request, pk=None, **kwargs):
        """
        API endpoint that allows an item to be edited.
        ---
        Parameters: Item ID and a JSON with one or more attributes of item
        Example:
        ```
        {
            "quantity": "integer",
            "object_id": "integer",
            "unit_price": "integer",
            "cart": "cart",
        }
        ```
        """
        response = super(ItemViewSet, self).\
            partial_update(request, pk, **kwargs)
        return response

    def update(self, request, pk=None, **kwargs):
        """
        API endpoint that allows an item to be edited.
        ---
        Parameters: Item ID and a JSON with all attributes
        Example:
        ```
        {
            "quantity": "integer",
            "object_id": "integer",
            "unit_price": "integer",
            "cart": "cart",
        }
        ```
        """
        response = super(
            ItemViewSet, self).update(
                request, pk, **kwargs
        )
        return response
#coding:utf-8
#
# id:           bugs.core_1056
# title:        A query could produce different results, depending on the presence of an index
# description:
# tracker_id:   CORE-1056
# min_versions: []
# versions:     2.0
# qmid:         bugs.core_1056

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 2.0
# resources: None

substitutions_1 = []

init_script_1 = """create table t (c varchar(10) character set win1250 collate pxw_csy);
insert into t values ('ch');
commit;
"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

test_script_1 = """set plan on;
select * from t where c starting with 'c';
commit;
create index t_c on t (c);
commit;
select * from t where c starting with 'c';
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
PLAN (T NATURAL)

C
==========
ch

PLAN (T INDEX (T_C))

C
==========
ch
"""


@pytest.mark.version('>=2.0')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-22 19:08
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('mimicon2016', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='signupextra',
            name='want_certificate',
            field=models.BooleanField(default=False, verbose_name='Haluan todistuksen ty\xf6skentelyst\xe4ni Mimiconissa'),
        ),
    ]
from django.contrib import admin

from models import *

# Register your models here.


class CategoryAdmin(admin.ModelAdmin):
    list_display = ['id', 'title']


class GoodsInfoAdmin(admin.ModelAdmin):
    list_display = ['id', 'title', 'price', 'unit', 'click', 'inventory', 'detail', 'desc', 'image']


admin.site.register(Category, CategoryAdmin)
admin.site.register(GoodsInfo, GoodsInfoAdmin)
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash

from app import db, login_manager


@login_manager.user_loader
def load_user(id):
    return User.query.get(int(id))


dictionary_table = db.Table(
    'dictionary',
    db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('word_id', db.Integer, db.ForeignKey('word.id'))
)


class User(UserMixin, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(70), nullable=False, unique=True)
    password = db.Column(db.String(94), nullable=False)
    first_name = db.Column(db.String(30), nullable=False)
    last_name = db.Column(db.String(30), nullable=False)
    dictionary = db.relationship(
        'Word',
        secondary=dictionary_table,
        lazy='dynamic',
        backref=db.backref('users', lazy='dynamic')
    )
    created_at = db.Column(db.DateTime, default=db.func.now(), nullable=False)

    def generate_password_hash(self, password):
        # calls the module-level werkzeug helper, not this method
        self.password = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password, password)
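# Hedged usage sketch (not from the source): assumes `app` exposes a configured
# Flask app with SQLAlchemy `db`; the values are illustrative.
#
#     user = User(email='ada@example.com', first_name='Ada', last_name='Lovelace')
#     user.generate_password_hash('s3cret')  # stores a salted hash, never the raw password
#     db.session.add(user)
#     db.session.commit()
#     assert user.check_password('s3cret')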
from typing import List, Tuple

import torch
from torch.utils.data import Dataset

from .feature import InputFeature


class FeaturesDataset(Dataset):
    def __init__(self, features: List[InputFeature]):
        self.features = features

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx: int):
        raise NotImplementedError()


class T5NERDataset(FeaturesDataset):
    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        feat = self.features[idx]
        input_ids = torch.tensor(feat.source_token_ids, dtype=torch.long)
        attention_mask = torch.tensor(feat.attention_mask, dtype=torch.long)
        lm_labels = torch.tensor(feat.target_token_ids, dtype=torch.long)
        outputs = (input_ids, attention_mask, lm_labels)
        return outputs
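# Hedged usage sketch (not from the source): only the three attributes read in
# __getitem__ are assumed on InputFeature; sequences are assumed pre-padded to
# equal length so the default collate function can stack them into batches.
#
#     from torch.utils.data import DataLoader
#
#     features = [...]  # List[InputFeature] produced by the project's tokenization step
#     loader = DataLoader(T5NERDataset(features), batch_size=8, shuffle=True)
#     for input_ids, attention_mask, lm_labels in loader:
#         ...  # feed into a T5-style model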
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic TFX ImportExampleGen executor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from typing import Any, Dict, Text, Union

from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.components.example_gen import utils
from tfx.proto import example_gen_pb2


@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(bytes)
def _ImportSerializedRecord(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read TFRecord files to PCollection of records.

  Note that each input split will be transformed by this function separately.

  Args:
    pipeline: Beam pipeline.
    exec_properties: A dict of execution properties.
      - input_base: input dir that contains input data.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of records (tf.Example, tf.SequenceExample, or bytes).
  """
  input_base_uri = exec_properties[utils.INPUT_BASE_KEY]
  input_split_pattern = os.path.join(input_base_uri, split_pattern)
  logging.info('Reading input TFRecord data %s.', input_split_pattern)

  # TODO(jyzhao): profile input examples.
  return (pipeline
          # TODO(jyzhao): support multiple input container format.
          | 'ReadFromTFRecord' >>
          beam.io.ReadFromTFRecord(file_pattern=input_split_pattern))


class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX import example gen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for importing records."""

    @beam.ptransform_fn
    @beam.typehints.with_input_types(beam.Pipeline)
    @beam.typehints.with_output_types(Union[tf.train.Example,
                                            tf.train.SequenceExample, bytes])
    def ImportRecord(pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
                     split_pattern: Text) -> beam.pvalue.PCollection:
      """PTransform to import records.

      The records are tf.train.Example, tf.train.SequenceExample,
      or serialized proto.

      Args:
        pipeline: Beam pipeline.
        exec_properties: A dict of execution properties.
          - input_base: input dir that contains input data.
        split_pattern: Split.pattern in Input config, glob relative file
          pattern that maps to input files with root directory given by
          input_base.

      Returns:
        PCollection of records (tf.Example, tf.SequenceExample, or bytes).
      """
      output_payload_format = exec_properties.get(utils.OUTPUT_DATA_FORMAT_KEY)

      serialized_records = (
          pipeline
          # pylint: disable=no-value-for-parameter
          | _ImportSerializedRecord(exec_properties, split_pattern))
      if output_payload_format == example_gen_pb2.PayloadFormat.FORMAT_PROTO:
        return serialized_records
      elif (output_payload_format ==
            example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
        return (serialized_records
                | 'ToTFExample' >> beam.Map(tf.train.Example.FromString))
      elif (output_payload_format ==
            example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
        return (serialized_records
                | 'ToTFSequenceExample' >>
                beam.Map(tf.train.SequenceExample.FromString))

      raise ValueError('output_payload_format must be one of FORMAT_TF_EXAMPLE,'
                       ' FORMAT_TF_SEQUENCE_EXAMPLE or FORMAT_PROTO')

    return ImportRecord
#!/usr/bin/env python3
"""Emulate a client by calling the EC2 instance directly."""

import os
import sys
import json
import logging

# AWS Lambda does not ship requests out of the box
# import requests
import urllib3

# Global configuration
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
http = urllib3.PoolManager()


def test_ec2_via_http(ip):
    """Call EC2 via HTTP."""
    try:
        r = http.request('GET', 'http://{0}'.format(ip), timeout=3.5, retries=0)
        response = r.data.decode('utf-8')
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            logging.debug('Correct response: %s...', response[:20])
        return (200 <= r.status < 300, r.status, response)
    except urllib3.exceptions.HTTPError as err:
        err_string = str(err)
        logging.error('Encountered error while accessing %s: %s ', ip, err_string)
        return (False, 500, err_string)


def lambda_handler(event, context):
    """Entrypoint to AWS Lambda execution."""
    ip_to_test = os.environ["IP_TO_TEST"]
    status, code, text = test_ec2_via_http(ip_to_test)
    # Lambda response should follow:
    # https://aws.amazon.com/premiumsupport/knowledge-center/malformed-502-api-gateway/
    # in order to be consumable via API Gateway
    return {
        'statusCode': code,
        'isBase64Encoded': False,
        'body': json.dumps({'status': status, 'text': text})
    }


def main():
    """Enter the program to test it locally."""
    # given
    ip_to_test = sys.argv[1]
    # when
    test_result = test_ec2_via_http(ip_to_test)
    # then
    logging.info("Status: {0}, Code: {1}, Text: {2}".format(*test_result))


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-

"""
Implement S3 Backed Binary and Unicode Attribute.

Since the content of big Binary or Unicode values is not stored in DynamoDB,
we cannot use a custom attribute ``pynamodb.attributes.Attribute`` to
implement it.
"""

import os
import zlib
from base64 import b64encode, b64decode

from pynamodb.models import Model
from six import string_types

try:
    import typing
except:
    pass


s3_endpoint = None
if 'S3_PORT' in os.environ:
    s3_endpoint = 'http://{}:{}'.format(
        os.environ['SLS_OFF_HOST'],
        os.environ['S3_PORT']
    )


def s3_key_safe_b64encode(text):
    return b64encode(text.encode("utf-8")).decode("utf-8").replace("=", "")


def s3_key_safe_b64decode(text):
    div, mod = divmod(len(text), 4)
    if mod != 0:
        text = text + "=" * (4 - mod)
    return b64decode(text.encode("utf-8")).decode("utf-8")


def parse_s3_uri(s3_uri):
    chunks = s3_uri.split("/", 3)
    bucket = chunks[2]
    key = chunks[3]
    return bucket, key


class BaseS3BackedAttribute(object):
    """
    Implement S3 relative operation for each attribute.

    :type s3_uri_getter: typing.Union[str, typing.Callable]
    :param s3_uri_getter: str or callable function, it takes the pynamodb
        orm object as input, returns the S3 URI string for this s3 backed
        attribute.
    """

    def __init__(self, s3_uri_getter, compress=False, name=None):
        self.s3_uri_getter = s3_uri_getter
        if isinstance(s3_uri_getter, string_types):
            self.s3_uri_getter_real = lambda obj: getattr(obj, s3_uri_getter)
        elif callable(s3_uri_getter):
            self.s3_uri_getter_real = s3_uri_getter
        else:
            raise Exception
        self.compress = compress
        self.name = name

    def serialize(self, data):
        raise NotImplementedError

    def deserialize(self, data):
        raise NotImplementedError

    def set_to(self, data):
        return (self, data)

    def head_object(self, model_obj):
        s3_uri = self.s3_uri_getter_real(model_obj)
        bucket, key = parse_s3_uri(s3_uri)
        return model_obj.get_s3_client().head_object(Bucket=bucket, Key=key)

    def _put_binary_data(self, model_obj, data):
        """
        Write binary data as it is to s3.

        :type model_obj: S3BackedMixin
        :type data: bytes
        """
        s3_uri = self.s3_uri_getter_real(model_obj)
        bucket, key = parse_s3_uri(s3_uri)
        res = model_obj.get_s3_client().put_object(
            Bucket=bucket, Key=key, Body=data)
        return res

    def put_object(self, model_obj, data):
        """
        :type model_obj: S3BackedMixin
        """
        if self.compress:
            body = zlib.compress(self.serialize(data))
        else:
            body = self.serialize(data)
        return self._put_binary_data(model_obj, body)

    def _read_binary_data(self, model_obj):
        """
        Read binary data as it is from s3.

        :type model_obj: S3BackedMixin
        """
        s3_uri = self.s3_uri_getter_real(model_obj)
        bucket, key = parse_s3_uri(s3_uri)
        res = model_obj.get_s3_client().get_object(
            Bucket=bucket, Key=key)
        return res["Body"].read()

    def read_data(self, model_obj):
        if self.compress:
            return self.deserialize(zlib.decompress(self._read_binary_data(model_obj)))
        else:
            return self.deserialize(self._read_binary_data(model_obj))

    def delete_object(self, model_obj):
        """
        :type model_obj: S3BackedMixin
        """
        s3_uri = self.s3_uri_getter_real(model_obj)
        bucket, key = parse_s3_uri(s3_uri)
        res = model_obj.get_s3_client().delete_object(Bucket=bucket, Key=key)
        return res


class S3BackedBinaryAttribute(BaseS3BackedAttribute):
    def serialize(self, data):
        return data

    def deserialize(self, data):
        return data


class S3BackedUnicodeAttribute(BaseS3BackedAttribute):
    def serialize(self, data):
        return data.encode("utf-8")

    def deserialize(self, data):
        return data.decode("utf-8")


class S3BackedMixin(object):  # type: typing.Type[Model]
    _s3_client = None
    _s3_backed_attr_mapper = None
    _s3_backed_value_mapper = None

    @classmethod
    def get_s3_backed_attr_mapper(cls):
        """
        :type cls: Model
        :rtype: dict
        """
        if cls._s3_backed_attr_mapper is None:
            cls._s3_backed_attr_mapper = dict()
            for attr, value in cls.__dict__.items():
                try:
                    if isinstance(value, BaseS3BackedAttribute):
                        value.name = attr
                        cls._s3_backed_attr_mapper[attr] = value
                except Exception as e:
                    pass
        return cls._s3_backed_attr_mapper

    @classmethod
    def get_s3_client(cls):
        """
        :type cls: Model
        """
        if cls._s3_client is None:
            pynamodb_connection = cls._get_connection().connection
            cls._s3_client = pynamodb_connection.session.create_client(
                "s3", pynamodb_connection.region, endpoint_url=s3_endpoint)
        return cls._s3_client

    def atomic_save(self, condition=None, s3_backed_data=None):
        """
        An ``atomic`` save operation for multiple S3 backed attributes.

        :type self: typing.Union[Model, S3BackedMixin]
        :type s3_backed_data: List[BaseS3BackedAttribute.set_to(data)]
        :param s3_backed_data: example
            ``[page.html_content.set_to("<html> ... </html>"), page.image_content.set_to(b"...")]``
        """
        if s3_backed_data is None:
            s3_backed_data = list()

        saved_data_list = list()
        for s3_backed_attr, data in s3_backed_data:
            try:
                s3_backed_attr.put_object(self, data)
                saved_data_list.append((s3_backed_attr, data))
            # if any s3.put_object failed, roll back and skip dynamodb.put_item
            except Exception as put_object_error:
                for s3_backed_attr, data in saved_data_list:
                    s3_backed_attr.delete_object(self)
                raise put_object_error

        try:
            res = self.save(condition=condition)
            del saved_data_list
            return res
        except Exception as dynamodb_save_error:
            # delete saved s3 objects if the dynamodb write operation failed
            for s3_backed_attr, data in saved_data_list:
                s3_backed_attr.delete_object(self)
            del saved_data_list
            raise dynamodb_save_error

    def atomic_update(self, actions=None, condition=None, s3_backed_data=None):
        """
        An ``atomic`` update operation for multiple S3 backed attributes.

        :type self: typing.Union[Model, S3BackedMixin]
        :type s3_backed_data: List[BaseS3BackedAttribute.set_to(data)]
        :param s3_backed_data: example
            ``[page.html_content.set_to("<html> ... </html>"), page.image_content.set_to(b"...")]``
        """
        if s3_backed_data is None:
            s3_backed_data = list()

        previous_data_list = list()
        for s3_backed_attr, data in s3_backed_data:
            try:
                previous_data_list.append(
                    (
                        s3_backed_attr,
                        s3_backed_attr._read_binary_data(self)
                    )
                )
                s3_backed_attr.put_object(self, data)
            # if any s3.put_object failed, roll back and skip dynamodb.put_item
            except Exception as put_object_error:
                for s3_backed_attr, data in previous_data_list:
                    # restore the raw bytes as-is; put_object (used in the
                    # original) would serialize, and possibly re-compress,
                    # data that is already in its stored binary form
                    s3_backed_attr._put_binary_data(self, data)
                raise put_object_error

        if actions is not None:
            return self.update(actions=actions, condition=condition)

    def atomic_delete(self, condition=None):
        """
        An ``atomic`` delete operation for multiple S3 backed attributes.

        :type self: typing.Union[Model, S3BackedMixin]
        """
        self.delete(condition=condition)
        for attr, value in self.get_s3_backed_attr_mapper().items():
            # check if the s3 object exists; if it exists, delete it
            try:
                value.head_object(self)
                value.delete_object(self)
            except Exception as e:
                pass
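# Hedged usage sketch (not from the source): model and attribute names are
# illustrative; `UnicodeAttribute` is the standard pynamodb attribute type.
#
#     from pynamodb.attributes import UnicodeAttribute
#
#     class Page(Model, S3BackedMixin):
#         class Meta:
#             table_name = 'pages'
#         url = UnicodeAttribute(hash_key=True)
#         html_s3_uri = UnicodeAttribute()
#         html_content = S3BackedUnicodeAttribute(s3_uri_getter='html_s3_uri', compress=True)
#
#     page = Page(url='https://example.com', html_s3_uri='s3://my-bucket/pages/example')
#     page.atomic_save(s3_backed_data=[page.html_content.set_to('<html>...</html>')])
#     html = page.html_content.read_data(page)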
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from touchdown.aws.common import Resource
from touchdown.core import argument, serializers
from touchdown.core.plan import Plan, Present

from ..account import BaseAccount
from .rule import Rule
from .waf import WafApply, WafDescribe, WafDestroy


class ActivatedRule(Resource):
    resource_name = "activated_rule"

    action = argument.String(
        field="Action",
        choices=["BLOCK", "ALLOW", "COUNT"],
        serializer=serializers.Dict(Type=serializers.String()),
    )
    priority = argument.Integer(field="Priority")
    rule = argument.Resource(Rule, field="RuleId")


class WebACL(Resource):
    resource_name = "web_acl"

    name = argument.String(field="Name")
    metric_name = argument.String(field="MetricName")
    default_action = argument.String(
        field="DefaultAction",
        choices=["BLOCK", "ALLOW", "COUNT"],
        serializer=serializers.Dict(Type=serializers.String()),
    )
    activated_rules = argument.ResourceList(
        ActivatedRule, field="ActivatedRules", create=False
    )
    account = argument.Resource(BaseAccount)


class Describe(WafDescribe, Plan):
    resource = WebACL
    service_name = "waf"
    api_version = "2015-08-24"
    describe_action = "list_web_acls"
    describe_envelope = "WebACLs"
    annotate_action = "get_web_acl"
    key = "WebACLId"

    container_update_action = "update_web_acl"
    container = "Rules"
    container_member = "ActivatedRule"
    local_container = "activated_rules"


class Apply(WafApply, Describe):
    create_action = "create_web_acl"
    signature = (Present("name"), Present("metric_name"), Present("default_action"))


class Destroy(WafDestroy, Describe):
    destroy_action = "delete_web_acl"
from __future__ import print_function, division

import sys
sys._running_pytest = True

import pytest

from sympy.core.cache import clear_cache


def pytest_report_header(config):
    from sympy.utilities.misc import ARCH
    s = "architecture: %s\n" % ARCH
    from sympy.core.cache import USE_CACHE
    s += "cache: %s\n" % USE_CACHE
    from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
    version = ''
    if GROUND_TYPES == 'gmpy':
        if HAS_GMPY == 1:
            import gmpy
        elif HAS_GMPY == 2:
            import gmpy2 as gmpy
        version = gmpy.version()
    s += "ground types: %s %s\n" % (GROUND_TYPES, version)
    return s


def pytest_addoption(parser):
    parser.addoption("--slow", action="store_true", help="allow slow tests to run")


def pytest_configure(config):
    # register an additional marker
    config.addinivalue_line("markers", "slow: slow test")


def pytest_runtest_setup(item):
    if not isinstance(item, pytest.Function):
        return
    if item.config.getoption("--slow"):
        if 'slow' not in item.keywords:
            pytest.skip()
    elif 'slow' in item.keywords:
        pytest.skip("slow test: pass --slow to run")


def pytest_terminal_summary(terminalreporter):
    if (terminalreporter.stats.get('error', None) or
            terminalreporter.stats.get('failed', None)):
        terminalreporter.write_sep(
            ' ', 'DO *NOT* COMMIT!', red=True, bold=True)


def pytest_runtest_teardown():
    clear_cache()
# -*- coding: utf-8 -*-

from .munsell import *  # noqa
from . import munsell

__all__ = []
__all__ += munsell.__all__
import argparse
import importlib
import time

from verify import mnist, cifar, imagenet


def verify(args):
    try:
        net_class_module = importlib.import_module(args.netclassfile)
        net_class = getattr(net_class_module, args.netclassname)
    except Exception as err:
        print('Error: Import model class failed.')
        print(err)
        exit(-1)

    if args.epsilon > 1. or args.epsilon < 0.:
        print('Error: error rate should be in [0,1]')
        exit(-1)  # the original only printed; bail out so an invalid rate cannot run
    if args.eta > 1. or args.eta < 0.:
        print('Error: significance level should be in [0,1]')
        exit(-1)

    start = time.time()
    if args.dataset == 'mnist':
        mnist.mnist_verify(net_class, args)
    elif args.dataset == 'cifar10':
        cifar.cifar_verify(net_class, args)
    elif args.dataset == 'imagenet':
        imagenet.imagenet_verify(net_class, args)
    print('Time: ', time.time() - start)


parser = argparse.ArgumentParser()
parser.add_argument('-ncf', '--netclassfile', type=str,
                    help='Python network class file containing the network class defined by PyTorch',
                    required=True)
parser.add_argument('-nc', '--netclassname', type=str,
                    help='Name of the network class', required=True)
parser.add_argument('-m', '--model', type=str,
                    help='Model file for the network class containing the PyTorch statedict',
                    required=True)
parser.add_argument('-d', '--dataset', type=str, choices=['mnist', 'cifar10', 'imagenet'],
                    help='The dataset of the model; can be either mnist, cifar10 or imagenet',
                    required=True)
parser.add_argument('-r', '--radius', type=int, choices=range(0, 256),
                    help='The verification radius of the L-inf ball (0-255)',
                    required=True, metavar='0-255')
parser.add_argument('-eps', '--epsilon', type=float,
                    help='The error rate of the PAC-model', required=True)
parser.add_argument('-eta', '--eta', type=float,
                    help='The significance level of the PAC-model (1-confidence)', required=True)
parser.add_argument('-img', '--image', type=str,
                    help='Path of the image file to be verified (required for Imagenet models)')
parser.add_argument('-ind', '--index', type=int, default=0,
                    help='The index of the image to be verified. (required for Mnist and Cifar10 models)')
parser.add_argument('-train', '--train', action='store_true',
                    help='Set if you want to verify images in the trainset. (optional, only effective for Mnist and Cifar10 models)')
parser.add_argument('-gpu', '--gpu', action='store_true',
                    help='Set to use GPU (optional, default False)')
parser.add_argument('-FT', '--FThreshold', type=int, default=2000,
                    help='The sampling threshold for the first focused learning phase. (optional, only effective for Mnist and Cifar10, default 2000)')
parser.add_argument('-ST', '--SThreshold', type=int, default=8000,
                    help='The sampling threshold for the second focused learning phase. (optional, only effective for Mnist and Cifar10, default 8000)')
parser.add_argument('-b', '--budget', type=int, default=20000,
                    help='The sampling budget for stepwise splitting. (optional, only effective for Imagenet, default=20000)')
parser.add_argument('-bsize', '--batchsize', type=int, default=200,
                    help='The batchsize of the sampling procedure (optional, only effective for Imagenet and Cifar10, default=200)')
parser.add_argument('-mean', '--mean', type=tuple,
                    help='The mean used to normalize the data. (optional, (0.485, 0.456, 0.406) for Imagenet, (0.4914, 0.4822, 0.4465) for Cifar10, (0.1307,) for Mnist, by default)')
parser.add_argument('-std', '--std', type=tuple,
                    help='The standard deviation used to normalize the data. (optional, (0.229, 0.224, 0.225) for Imagenet, (0.2023, 0.1994, 0.2010) for Cifar10, (0.3081,) for Mnist, by default)')
parser.add_argument('-l', '--label', type=int, choices=range(0, 1000),
                    help='The true label of the image according to the 1000-classes Imagenet dataset. (optional, will use the output label of the neural network if not provided, only effective for Imagenet)',
                    metavar='0-999')
parser.add_argument('-solver', '--lpsolver', choices=['gurobi', 'cbc'],
                    help='The Linear Programming solver. (Gurobi or CBC, cvxpy default LP solver if not assigned)')

imagenet_required = ['image']

args = parser.parse_args()
verify(args)
# print(args)
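# Hedged example invocation (module, class, and model file names are hypothetical):
#   python verify.py -ncf my_net_module -nc MyNet -m model_statedict.pth \
#       -d mnist -r 8 -eps 0.01 -eta 0.001 -ind 0 -gpu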
""" Produces template's named argument to article categories mapping """ from __future__ import print_function import logging import json import re from collections import defaultdict from mwclient.client import Site import requests logging.basicConfig(level=logging.INFO) def get_articles_from_top_categories(site, categories_limit=3, articles_limit=5): """ :type site Site :type categories_limit int :type articles_limit int :rtype: list[str,str] """ # http://muppet.sandbox-s6.wikia.com/api.php?action=query&list=querypage&qppage=Mostpopularcategories&qplimit=20 res = site.get(action='query', list='querypage', qppage='Mostpopularcategories', qplimit=categories_limit) categories = [result['title'] for result in res['query']['querypage']['results']] for category in categories: # get first X pages from the category # http://muppet.sandbox-s6.wikia.com/api.php?action=query&list=categorymembers&cmtitle=Category:Sesame%20Street%20Episodes&cmlimit=50 res = site.get(action='query', list='categorymembers', cmtitle='Category:{}'.format(category), cmlimit=articles_limit) for page in res['query']['categorymembers']: # we're interested in main namespace articles one if page['ns'] == 0: yield page['title'], category def get_infobox_arguments(site, title): """ :type site Site :type title str :rtype: list[str] """ logger = logging.getLogger('get_infobox_arguments') logger.info('Article: %s', title) # https://nfs.sandbox-s6.fandom.com/wikia.php?controller=TemplatesApiController&method=getMetadata&title=Ferrari_355_F1 res = json.loads(site.raw_call( http_method='GET', script='wikia', data={ 'controller': 'TemplatesApiController', 'method': 'getMetadata', 'title': title } )) infoboxes = [template for template in res['templates'] if template['type'] == 'infobox'] # print(infoboxes) # return a set of template arguments used on a given article arguments = set() for infobox in infoboxes: arguments.update(infobox['parameters'].keys()) return arguments def arguments_to_categories(wikis, env=None, proxy=None): """ :type wikis list[str] :type env str :type proxy str :rtype: dict """ logger = logging.getLogger('arguments_to_categories') # apply the environment if env: wikis = [re.sub(r'\.(wikia|fandom)', '.{}.\\1'.format(env), wiki) for wiki in wikis] logger.info('Gathering stats for %s domains', wikis) # we will emit results as (template argument) => (a set of article categories where this argument is used) res = defaultdict(set) # set up connection to MediaWiki backend via our internal proxy pool = requests.Session() if proxy: logger.info('Using HTTP proxy: %s', proxy) pool.proxies = {'http': proxy} # gather statistics for each wiki for wiki in wikis: site = Site(host=('http', wiki), path='/', pool=pool) # process each article for article, category in get_articles_from_top_categories(site): # update each template argument found with a category where this article is in for argument in get_infobox_arguments(site, article): res[argument].add(category) return res if __name__ == '__main__': mapping = arguments_to_categories( wikis=[ 'muppet.wikia.com', 'nfs.fandom.com', 'gta.wikia.com', ], env='sandbox-s6', proxy='border-http-s3:80' ) for arg, items in mapping.items(): print('{} -> {}'.format( arg, items))
""" BaMi_optimal.py - compares BaMiC with BaMiF and includes the (according to us) optimal integration strategies. """ import sys import matplotlib.pyplot as plt from pywmi.engines.xsdd.literals import LiteralInfo from _pywmi.vtree.bottomup_elimination import bottomup_balanced_minfill as bamif from _pywmi.vtree.topdown_balanced_mincut import topdown_balanced_mincut_hg as bamic from _pywmi.vtree.int_tree import * from _pywmi.vtree.topdown_mincut import conversion_tables from _pywmi.experiment import * from _pywmi.problems import * from pywmi.engines.pyxadd.algebra import PyXaddAlgebra full_reduce = True reduce_strategy = PyXaddAlgebra.FULL_REDUCE if full_reduce else PyXaddAlgebra.ONLY_INIT_INTEGRATION_REDUCE all_strats = [bamic, bamif] xadd = lambda: PyXaddAlgebra(reduce_strategy=reduce_strategy) # %% tpg_star_gen = lambda n: make_from_graph(tpg_star(n)) tpg_3ary_gen = lambda n: make_from_graph(tpg_3ary_tree(n)) tpg_path_gen = lambda n: make_from_graph(tpg_path(n)) # %% size_range = list(range(3, 41)) env_timeout.set(50) ordered = False algebra = xadd verbose = False sys.setrecursionlimit(10**6) # %% def splitpath_int_vtree_gen(literal_info: LiteralInfo): """ Creates an integration order in a split path form x0 - x1 - x2 - x3 - ... """ logic2cont, cont2logic = conversion_tables(literal_info) cont_vars = sorted(list(cont2logic.keys()), key=lambda n: int(n[1:])) assert len(cont_vars) >= 3 middle_index = math.floor(len(cont_vars)/2) # Create left line left_int_tree = IntTreeVar(cont_vars[0]) for cont in cont_vars[1:middle_index]: left_int_tree = IntTreeLine(cont, left_int_tree) # Create right line right_int_tree = IntTreeVar(cont_vars[-1]) for cont in reversed(cont_vars[middle_index+1:-1]): right_int_tree = IntTreeLine(cont, right_int_tree) # Middle split int_tree = IntTreeSplit(cont_vars[middle_index], left_int_tree, right_int_tree) return int_tree.create_vtree(logic2cont.keys(), logic2cont) def star_int_vtree_gen(literal_info: LiteralInfo): """ Creates an integration order for problems with a star primal (star, xor, mutex). """ logic2cont, cont2logic = conversion_tables(literal_info) middle_var, _ = max(cont2logic.items(), key=lambda x: len(x[1])) other_vars_int_trees = [IntTreeVar(v) for v in cont2logic.keys() if v != middle_var] if len(other_vars_int_trees) != 0: int_tree = IntTreeParallel(middle_var, other_vars_int_trees) else: int_tree = IntTreeVar(middle_var) return int_tree.create_vtree(logic2cont.keys(), logic2cont) def dual_int_vtree_gen(literal_info: LiteralInfo): """ Creates an integration order for the dual problem. 
""" logic2cont, cont2logic = conversion_tables(literal_info) cont_pairs = [list(pair) for pair in logic2cont.values() if len(pair) == 2] int_pairs = [IntTreeLine(x[0], IntTreeVar(x[1])) for x in cont_pairs] int_tree = IntTreeParallel(None, int_pairs) return int_tree.create_vtree(logic2cont.keys(), logic2cont) # %% # DUAL all_strats.append(dual_int_vtree_gen) dual_exp = CompareStrategies( algebra=algebra, problem_generator=dual, size=size_range, vtree_strategy=all_strats, verbose=verbose, ordered=ordered, ) print("Finished dual_exp") all_strats.pop() # XOR all_strats.append(star_int_vtree_gen) xor_exp = CompareStrategies( algebra=algebra, problem_generator=xor, size=size_range, vtree_strategy=all_strats, verbose=verbose, ordered=ordered, ) print("Finished xor_exp") all_strats.pop() # MUTEX all_strats.append(star_int_vtree_gen) mutex_exp = CompareStrategies( algebra=algebra, problem_generator=mutual_exclusive, size=size_range, vtree_strategy=all_strats, verbose=verbose, ordered=ordered, ) print("Finished mutex_exp") all_strats.pop() # STAR all_strats.append(star_int_vtree_gen) tpg_star_exp = CompareStrategies( algebra=algebra, problem_generator=tpg_star_gen, size=size_range, vtree_strategy=all_strats, verbose=verbose, ordered=ordered, ) print("Finished star_exp") all_strats.pop() # 3ARY all_strats.append(bamif) # TODO: Optimal strategy tpg_3ary_exp = CompareStrategies( algebra=algebra, problem_generator=tpg_3ary_gen, size=size_range, vtree_strategy=all_strats, verbose=verbose, ordered=ordered, ) print("Finished 3ary_exp") all_strats.pop() # PATH all_strats.append(splitpath_int_vtree_gen) tpg_path_exp = CompareStrategies( algebra=algebra, problem_generator=tpg_path_gen, size=size_range, vtree_strategy=all_strats, verbose=verbose, ordered=ordered, ) print("Finished path_exp") all_strats.pop() # %% md # Graph # %% all_data = [ ('dual', dual_exp), ('xor', xor_exp), ('mutex', mutex_exp), ('pg-star', tpg_star_exp), ('pg-3ary', tpg_3ary_exp), ('pg-path', tpg_path_exp) ] vtree_heuristics = [ #('implicit-balanced', 'black', '+'), #('implicit-leftlinear', 'green', 'o'), #('implicit-rightlinear', 'purple', 's'), ('balanced-mincut', 'red', '.'), ('balanced-minfill', 'blue', ','), ('optimal', 'green', 'x') ] # %% from matplotlib.ticker import MaxNLocator fig, axes = plt.subplots(2, 3) fig.set_size_inches(9, 6) fig.subplots_adjust(bottom=0.14, wspace=0.3, hspace=0.3) for i, (name, exp) in enumerate(all_data): i1 = i // 3 i2 = i % 3 ax = axes[i1][i2] ax.xaxis.set_major_locator(MaxNLocator(integer=True)) for (strat_name, color, marker), (_, times) in zip(vtree_heuristics, exp.all_experiments()): # vtree_times = list(times.get_all_results('vtree_time')) total_times = list(times.get_all_results('total_time')) sizes = times.values[:len(total_times)] ax.plot(sizes, total_times, color=color, marker=marker, linestyle='-', label=strat_name) # ax.plot(sizes, vtree_times, color=color, marker='o', linestyle='--') if i1 != 1: ax.set_xlabel(None) else: ax.set_xlabel("Problem size (n)") if i2 == 0: ax.set_ylabel("Time (s)") else: ax.set_ylabel(None) ax.set_title(f"{name}(n)") # Bug: fig.legend not included in pdf ax.legend(loc='lower center', ncol=2, bbox_to_anchor=(0.5, -0.04), bbox_transform=fig.transFigure) # %% filename = 'bami_comparison' if ordered: filename += '-ordered' if algebra == xadd: filename += '-xadd' filename += '-full' if full_reduce else '-init' fig.savefig(filename + '.pdf', bbox_inches='tight') # %%
nilq/baby-python
python
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Asr Model.""" import lingvo.compat as tf from lingvo.core import base_layer from lingvo.core import cluster_factory from lingvo.core import py_utils from lingvo.core import schedule from lingvo.core import summary_utils from lingvo.core import test_helper from lingvo.core import test_utils from lingvo.tasks.asr import decoder from lingvo.tasks.asr import input_generator from lingvo.tasks.asr import model from lingvo.tasks.asr import model_test_input_generator as tig import numpy as np class DecoderForTest(decoder.AsrDecoder): """Unit test class for AsrDecoder with functional.for based unrolling.""" @classmethod def Params(cls): p = super(DecoderForTest, cls).Params() p.use_while_loop_based_unrolling = False return p class AsrModelTest(test_utils.TestCase): def _testParams(self): input_shape = [2, 16, 8, 3] p = model.AsrModel.Params() p.decoder.target_seq_len = 5 p.encoder.input_shape = input_shape p.input = tig.TestInputGenerator.Params() p.input.target_max_length = 5 p.input.source_shape = input_shape p.input.target_shape = [2, 5] p.name = 'test_mdl' return p def testMakeDecoderTheta(self): # Test that decoder theta returns a copy of theta.decoder without changes. 
with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None) mdl.BProp() self.assertEqual(decoder_theta, mdl.theta.decoder) def testFProp(self): with self.session(use_gpu=False): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() self.evaluate(tf.global_variables_initializer()) test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) actual_var_names = [_.name for _ in tf.trainable_variables()] print('all vars \n', '\n'.join(actual_var_names)) expected_var_names = [ 'test_mdl/enc/conv_L0/w/var:0', 'test_mdl/enc/conv_L0/beta/var:0', 'test_mdl/enc/conv_L0/gamma/var:0', 'test_mdl/enc/conv_L1/w/var:0', 'test_mdl/enc/conv_L1/beta/var:0', 'test_mdl/enc/conv_L1/gamma/var:0', 'test_mdl/enc/f_conv_lstm_0/wm/var:0', 'test_mdl/enc/f_conv_lstm_0/b/var:0', 'test_mdl/enc/b_conv_lstm_0/wm/var:0', 'test_mdl/enc/b_conv_lstm_0/b/var:0', 'test_mdl/enc/conv_lstm_cnn_0/w/var:0', 'test_mdl/enc/conv_lstm_cnn_0/beta/var:0', 'test_mdl/enc/conv_lstm_cnn_0/gamma/var:0', 'test_mdl/enc/fwd_rnn_L0/wm/var:0', 'test_mdl/enc/fwd_rnn_L0/b/var:0', 'test_mdl/enc/bak_rnn_L0/wm/var:0', 'test_mdl/enc/bak_rnn_L0/b/var:0', 'test_mdl/enc/proj_L0/w/var:0', 'test_mdl/enc/proj_L0/beta/var:0', 'test_mdl/enc/proj_L0/gamma/var:0', 'test_mdl/enc/fwd_rnn_L1/wm/var:0', 'test_mdl/enc/fwd_rnn_L1/b/var:0', 'test_mdl/enc/bak_rnn_L1/wm/var:0', 'test_mdl/enc/bak_rnn_L1/b/var:0', 'test_mdl/enc/proj_L1/w/var:0', 'test_mdl/enc/proj_L1/beta/var:0', 'test_mdl/enc/proj_L1/gamma/var:0', 'test_mdl/enc/fwd_rnn_L2/wm/var:0', 'test_mdl/enc/fwd_rnn_L2/b/var:0', 'test_mdl/enc/bak_rnn_L2/wm/var:0', 'test_mdl/enc/bak_rnn_L2/b/var:0', 'test_mdl/dec/emb/var_0/var:0', 'test_mdl/dec/rnn_cell/wm/var:0', 'test_mdl/dec/rnn_cell/b/var:0', 'test_mdl/dec/atten/source_var/var:0', 'test_mdl/dec/atten/query_var/var:0', 'test_mdl/dec/atten/hidden_var/var:0', 'test_mdl/dec/softmax/weight_0/var:0', 'test_mdl/dec/softmax/bias_0/var:0', ] self.assertCountEqual(expected_var_names, actual_var_names) def testDecode(self): with self.session(use_gpu=False): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() input_batch = mdl.input_generator.GetPreprocessedInputBatch() dec_out_dict = mdl.DecodeWithTheta(mdl.theta, input_batch) self.evaluate(tf.global_variables_initializer()) dec_out = self.evaluate(dec_out_dict) print('dec_out', dec_out) metrics_dict = mdl.CreateDecoderMetrics() key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict) self.assertEqual(1.0, metrics_dict['wer'].value) self.assertEqual(1.0, metrics_dict['norm_wer'].value) self.assertEqual(1.0, metrics_dict['ter'].value) self.assertEqual(0, len(key_value_pairs)) def testPostProcessDecodeOut(self): p = self._testParams() p.decoder.beam_search.num_hyps_per_beam = 2 mdl = p.Instantiate() fake_dec_out = { 'utt_id': ['utt1', 'utt2'], 'transcripts': ['a b c d', 'a'], 'topk_decoded': [['a b c d', 'a b c d'], ['wrong', '']], 'topk_scores': [[1.0, 0.9], [1.0, 0.9]], 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7]], 'topk_lens': [2, 4, 4, 2], 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]], 'target_paddings': [[0, 0, 0, 1], [0, 0, 0, 1]], 'norm_wer_errors': [[0, 0], [1, 1]], 'norm_wer_words': [[4, 4], [1, 1]], } fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()} metrics_dict = mdl.CreateDecoderMetrics() key_value_pairs = 
mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict) self.assertEqual(0 + 1, metrics_dict['wer'].total_value) self.assertEqual(4 + 1, metrics_dict['wer'].total_weight) self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value) self.assertEqual(4 + 1, metrics_dict['norm_wer'].total_weight) self.assertEqual(4, metrics_dict['ter'].total_value) self.assertEqual(6, metrics_dict['ter'].total_weight) self.assertEqual(2, metrics_dict['num_samples_in_batch'].total_value) self.assertEqual(1.0, metrics_dict['num_samples_in_batch'].total_weight) self.assertEqual((4 / 5 * 3 / 3 * 2 / 2 * 1 / 1)**(1 / 4), metrics_dict['corpus_bleu'].value) self.assertEqual((0 + 1) / 2, metrics_dict['sacc'].value) self.assertEqual((0 + 1) / (4 + 1), metrics_dict['oracle_norm_wer'].value) self.assertEqual(0, len(key_value_pairs)) def testPostProcessDecodeOutFiltersEpsilonTokensForWER(self): p = self._testParams() p.decoder.beam_search.num_hyps_per_beam = 1 mdl = p.Instantiate() fake_dec_out = { 'utt_id': ['utt1', 'utt2'], 'transcripts': ['a b c d', 'a b c'], 'topk_decoded': [['a b<epsilon>c d'], ['<epsilon>a b<epsilon>']], 'topk_scores': [[1.0], [1.0]], 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]], 'topk_lens': [3, 4], 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]], 'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]], 'norm_wer_errors': [[0], [1]], 'norm_wer_words': [[4], [3]], } fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()} metrics_dict = mdl.CreateDecoderMetrics() kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict) self.assertEqual(0 + 1, metrics_dict['wer'].total_value) self.assertEqual(7, metrics_dict['wer'].total_weight) self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value) self.assertEqual(7, metrics_dict['norm_wer'].total_weight) self.assertEqual(0, len(kv_pairs)) def testPostProcessDecodeOutFiltersNoiseTokensForWER(self): p = self._testParams() p.decoder.beam_search.num_hyps_per_beam = 1 mdl = p.Instantiate() fake_dec_out = { 'utt_id': ['utt1', 'utt2'], 'transcripts': ['a b c d', 'a b c'], 'topk_decoded': [['a b <noise> c d'], ['<noise> a b <noise>']], 'topk_scores': [[1.0], [1.0]], 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]], 'topk_lens': [3, 4], 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]], 'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]], 'norm_wer_errors': [[0], [1]], 'norm_wer_words': [[4], [3]], } fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()} metrics_dict = mdl.CreateDecoderMetrics() kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict) self.assertEqual(0 + 1, metrics_dict['wer'].total_value) self.assertEqual(7, metrics_dict['wer'].total_weight) self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value) self.assertEqual(7, metrics_dict['norm_wer'].total_weight) self.assertEqual(0, len(kv_pairs)) def testPostProcessDecodeOutHandlesEmptyRef(self): p = self._testParams() p.decoder.beam_search.num_hyps_per_beam = 1 mdl = p.Instantiate() fake_dec_out = { 'utt_id': ['utt1', 'utt2'], 'transcripts': ['', 'a b c d'], 'topk_decoded': [['a'], ['a b c d']], 'topk_scores': [[1.0], [1.0]], 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]], 'topk_lens': [3, 4], 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]], 'target_paddings': [[1, 1, 1, 1], [0, 0, 1, 1]], 'norm_wer_errors': [[1], [0]], 'norm_wer_words': [[0], [4]], } fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()} metrics_dict = mdl.CreateDecoderMetrics() mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict) self.assertEqual(1 + 0, metrics_dict['wer'].total_value) self.assertEqual(0 + 4, 
metrics_dict['wer'].total_weight) self.assertEqual(1 + 0, metrics_dict['norm_wer'].total_value) self.assertEqual(0 + 4, metrics_dict['norm_wer'].total_weight) def testBProp(self): with self.session(use_gpu=False): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() self.evaluate(tf.global_variables_initializer()) test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) mdl.train_op.run() def testBPropSmoothDecay(self): with self.session(use_gpu=False): tf.random.set_seed(93820985) p = self._testParams() p.train.lr_schedule = ( schedule.ContinuousSchedule.Params().Set( start_step=350000, half_life_steps=45000)) mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() self.evaluate(tf.global_variables_initializer()) test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) mdl.train_op.run() def testAllLayerParams(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() lps = base_layer.RecursiveFindLayerParams(mdl.params) l_names = sorted([p.cls.__name__ for p in lps]) expected_layers = sorted([ 'Adam', 'AdditiveAttention', 'AsciiTokenizer', 'AsrDecoder', 'AsrEncoder', 'AsrModel', 'BatchNormLayer', 'BeamSearchHelper', 'GreedySearchHelper', 'TargetSequenceSampler', 'ConvLSTMCell', 'Conv2DLayer', 'Conv2DLayer', 'EmbeddingLayer', 'HighwaySkipLayer', 'LSTMCellSimple', 'LSTMCellSimple', 'NullContextualizer', 'NullFusion', 'NullLm', 'Learner', 'PiecewiseConstantSchedule', 'ProjectionLayer', 'SimpleFullSoftmax', 'SpectrumAugmenter', 'StackingOverTime', 'TestInputGenerator', ]) self.assertEqual(expected_layers, l_names) def testParamValueSumSquared(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() all_vars = tf.trainable_variables() py_utils.SumSquared(all_vars) def testCollectVarHistogram(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars) summary_utils.CollectVarHistogram(var_grads) def testGradientMult(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars) py_utils.ApplyGradMultiplier(var_grads, -1.1) def testLRDecay(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() tp = p.train tp.lr_schedule.boundaries = [300000, 400000, 500000] tp.lr_schedule.values = [1.0, 0.1, 0.01, 0.001] lrs = tp.lr_schedule.Instantiate() steps = [299999, 300001, 399999, 400001, 499999, 500001] fetches = [lrs.Value(_) for _ in steps] values = self.evaluate(fetches) self.assertAllClose([1.0, 0.1, 0.1, 0.01, 0.01, 0.001], values) def testBatchSplit(self): def Run(num_splits): p = self._testParams() with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(93820981) p.input.cur_iter_in_seed = False p.input.bucket_batch_limit = [ b * 2 / num_splits for b in p.input.bucket_batch_limit ] with cluster_factory.ForTestingWorker(gpus=num_splits, do_eval=True): mdl = p.Instantiate() metrics = mdl.FPropDefaultTheta()[0] self.evaluate(tf.global_variables_initializer()) return self.evaluate(metrics['loss']) res1, res2 = Run(1), Run(2) self.assertAllClose(res1[0], res2[0]) self.assertAllEqual(res1[1], res2[1]) def testInference(self): def _CreateModelParamsForTest(): p = model.AsrModel.Params() p.name 
= 'test_config' # Encoder params. ep = p.encoder ep.input_shape = [None, None, 80, 1] ep.lstm_cell_size = 16 ep.num_lstm_layers = 2 ep.conv_filter_shapes = [(3, 3, 1, 32), (3, 3, 32, 32)] ep.conv_filter_strides = [(2, 2), (2, 2)] ep.num_conv_lstm_layers = 0 # Initialize decoder params. dp = p.decoder dp.rnn_cell_dim = 16 dp.rnn_layers = 2 dp.source_dim = ep.lstm_cell_size * 2 # Use functional while based unrolling. dp.use_while_loop_based_unrolling = False p.input = input_generator.AsrInput.Params() ip = p.input ip.frame_size = 80 ip.append_eos_frame = True ip.pad_to_max_seq_length = False return p with self.session( use_gpu=False, graph=tf.Graph()) as sess, self.SetEval(True): p = _CreateModelParamsForTest() mdl = p.Instantiate() subgraphs = mdl.Inference() self.assertIn('default', subgraphs) fetches, feeds = subgraphs['default'] self.assertIn('wav', feeds) for name in ['hypotheses', 'scores', 'src_frames', 'encoder_frames']: self.assertIn(name, fetches) with open( test_helper.test_src_dir_path('tools/testdata/gan_or_vae.16k.wav'), 'rb') as f: wav = f.read() self.evaluate(tf.global_variables_initializer()) fetches = sess.run(fetches, {feeds['wav']: wav}) self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam), fetches['hypotheses'].shape) self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam), fetches['scores'].shape) self.assertAllEqual((1, 314, p.encoder.input_shape[2], 1), fetches['src_frames'].shape) self.assertAllEqual((80, 1, 2 * p.encoder.lstm_cell_size), fetches['encoder_frames'].shape) if __name__ == '__main__': tf.test.main()
nilq/baby-python
python
#!/usr/bin/env python3 # Copyright 2021 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.from setuptools import setup from requests.auth import HTTPBasicAuth from common import * import json import requests from datetime import datetime from collections import OrderedDict username_column_encoded = base64.b64encode(bytes(cf_name + ":" + username_column, 'utf-8')) message_column_encoded = base64.b64encode(bytes(cf_name + ":" + message_column, 'utf-8')) created_time_column_encoded = base64.b64encode(bytes(cf_name + ":" + created_time, 'utf-8')) # Delete table if it exists request = requests.get(baseurl + "/" + table_name + "/schema", auth=HTTPBasicAuth(DB_USER, DB_PASS)) if is_successful(request): request = requests.delete(baseurl + "/" + table_name + "/schema", auth=HTTPBasicAuth(DB_USER, DB_PASS)) if is_successful(request): print("Deleted table " + table_name) else: print("Error out. Status code was " + str(request.status_code) + "\n" + request.text) # Create Table content = '<?xml version="1.0" encoding="UTF-8"?>' content += '<TableSchema name="' + table_name + '">' content += ' <ColumnSchema name="' + cf_name + '" />' content += '</TableSchema>' request = requests.post(baseurl + "/" + table_name + "/schema", data=content, headers={"Content-Type": "text/xml", "Accept": "text/xml"}, auth=HTTPBasicAuth(DB_USER, DB_PASS)) if is_successful(request): print("Created table " + table_name) else: print("Error out while creating table. Status code was " + str(request.status_code) + "\n" + request.text) quit() def get_current_time(): now = datetime.now() # current date and time date_time = now.strftime("%m/%d/%Y, %H:%M:%S") return date_time rows = [] jsonOutput = {"Row": rows} print("Writing data to " + table_name) for i in range(0, 20): rowKey = username + "-" + str(i) rowKeyEncoded = base64.b64encode(bytes(rowKey, 'utf-8')) usernameEncoded = base64.b64encode(bytes(username + "-" + str(i), 'utf-8')) currentTime = get_current_time() currentTimeEncoded = base64.b64encode(bytes(currentTime, 'utf-8')) testMessage = "test message" + str(i) testMessageEncoded = base64.b64encode(bytes(testMessage, 'utf-8')) cell = OrderedDict([ ("key", rowKeyEncoded.decode('utf-8')), ("Cell", [ {"column": message_column_encoded.decode('utf-8'), "$": testMessageEncoded.decode('utf-8')}, {"column": username_column_encoded.decode('utf-8'), "$": usernameEncoded.decode('utf-8')}, {"column": created_time_column_encoded.decode('utf-8'), "$": currentTimeEncoded.decode('utf-8')}, ]) ]) print("Row key: " + rowKey + "; Username: " + rowKey + "; " + "Message: " + testMessage + "; Created time: " + currentTime) rows.append(cell) request = requests.post(baseurl + "/" + table_name + "/" + rowKey, data=json.dumps(jsonOutput), headers={"Content-Type": "application/json", "Accept": "application/json"}, auth=HTTPBasicAuth(DB_USER, DB_PASS)) if is_successful(request): print("Successfully added messages for " + table_name) else: print("Error out while loading data. Status code was " + str(request.status_code) + "\n" + request.text) quit()
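# Illustrative read-back of one row (a sketch using the same REST conventions
# as above; assumes the standard GET /<table>/<rowkey> endpoint):
request = requests.get(baseurl + "/" + table_name + "/" + username + "-0",
                       headers={"Accept": "application/json"},
                       auth=HTTPBasicAuth(DB_USER, DB_PASS))
if is_successful(request):
    print("Read back row: " + json.dumps(request.json()))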
nilq/baby-python
python
# by amounra 0216 : http://www.aumhaa.com # written against Live 9.6 release on 021516 from __future__ import absolute_import, print_function import Live import math from ableton.v2.base import inject, listens from ableton.v2.control_surface import ControlSurface, ControlElement, Layer, Skin, PrioritizedResource, Component, ClipCreator, DeviceBankRegistry from ableton.v2.control_surface.elements import ButtonMatrixElement from ableton.v2.control_surface.components import M4LInterfaceComponent, SessionRingComponent, SessionNavigationComponent, SessionComponent, TransportComponent, DeviceComponent, ViewControlComponent from ableton.v2.control_surface.components.mixer import simple_track_assigner from aumhaa.v2.base import initialize_debug from aumhaa.v2.control_surface import SendLividSysexMode from aumhaa.v2.control_surface.elements import MonoEncoderElement, MonoBridgeElement from aumhaa.v2.control_surface.elements.mono_button import * from aumhaa.v2.control_surface.components import DeviceNavigator, MonoMixerComponent from aumhaa.v2.livid import LividControlSurface, LividRGB from .Map import * debug = initialize_debug() MIDI_NOTE_TYPE = 0 MIDI_CC_TYPE = 1 MIDI_PB_TYPE = 2 MIDI_MSG_TYPES = (MIDI_NOTE_TYPE, MIDI_CC_TYPE, MIDI_PB_TYPE) MIDI_NOTE_ON_STATUS = 144 MIDI_NOTE_OFF_STATUS = 128 MIDI_CC_STATUS = 176 MIDI_PB_STATUS = 224 class GuitarWing(LividControlSurface): _sysex_id = 20 _model_name = 'GuitarWing' def __init__(self, *a, **k): super(GuitarWing, self).__init__(*a, **k) self._skin = Skin(GuitarWingColors) with self.component_guard(): self._setup_controls() self._setup_m4l_interface() self._setup_session_control() self._setup_mixer_control() self._setup_device_control() self._setup_transport_control() self._setup_view_control() def _setup_controls(self): is_momentary = True optimized = True resource = PrioritizedResource self._button = [MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = BUTTONS[index], name = 'Button_' + str(index), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(10)] self._fader = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = SLIDERS[index], name = 'Fader_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(3)] self._fader_button = [MonoEncoderElement(msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = SLIDERS[index], name = 'Fader_Button_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(3)] self._ccs = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = CCS[index], name = 'CCs_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(4)] self._pad = [MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = PADS[index], name = 'Pad_' + str(index), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(5)] self._padCC = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = PADS[index], name = 'PadCC_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, 
monobridge = self._monobridge) for index in range(5)] self._accel = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = ACCELS[index], name = 'Accel_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(3)] self._parameter_control_matrix = ButtonMatrixElement(rows = [ [ self._fader[0], self._fader[1], self._fader[2], self._accel[2], self._ccs[0], self._ccs[1], self._ccs[2], self._ccs[3] ]]) self._scene_launch_matrix = ButtonMatrixElement(rows = [self._pad[:4]]) def _setup_session_control(self): self._session_ring = SessionRingComponent(num_tracks = 1, num_scenes = 4, tracks_to_use = lambda : self.song.visible_tracks + self.song.return_tracks) self._session_ring.set_enabled(False) self._session = SessionComponent(session_ring = self._session_ring, auto_name = True) hasattr(self._session, '_enable_skinning') and self._session._enable_skinning() self._session.layer = Layer(scene_launch_buttons = self._scene_launch_matrix) self._session_navigation =SessionNavigationComponent(name = 'SessionNavigation', session_ring = self._session_ring) self._session_navigation._horizontal_banking.scroll_up_button.color = 'Session.NavigationButtonOn' self._session_navigation._horizontal_banking.scroll_down_button.color = 'Session.NavigationButtonOn' self._session_navigation.layer = Layer(left_button = self._button[1], right_button = self._button[0]) self._session_navigation.set_enabled(True) def _setup_mixer_control(self): self._mixer = MonoMixerComponent(name = 'Mixer', tracks_provider = self._session_ring, track_assigner = simple_track_assigner, invert_mute_feedback = True, auto_name = True, enable_skinning = True) self.song.view.selected_track = self._mixer.channel_strip(0)._track def _setup_transport_control(self): self._transport = TransportComponent() self._transport.layer = Layer(play_button = self._button[6], loop_button = self._button[7], seek_backward_button = self._button[8], record_button = self._button[9]) self._transport.set_enabled(True) def _setup_device_control(self): self._device = DeviceComponent(name = 'Device_Component', device_provider = self._device_provider, device_bank_registry = DeviceBankRegistry()) self._device.layer = Layer(parameter_controls = self._parameter_control_matrix) self._device.set_enabled(True) def _setup_m4l_interface(self): self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard) self.get_control_names = self._m4l_interface.get_control_names self.get_control = self._m4l_interface.get_control self.grab_control = self._m4l_interface.grab_control self.release_control = self._m4l_interface.release_control def _setup_view_control(self): self._view_control = ViewControlComponent() self._view_control.layer = Layer(prev_track_button = self._button[1], next_track_button = self._button[0]) # a
nilq/baby-python
python
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.> """Collection of utility functions for Matplotlib.""" from typing import Any, Dict, List, Optional, Tuple, Union import matplotlib.pyplot as plt import numpy as np from descartes.patch import PolygonPatch from matplotlib.animation import FuncAnimation from matplotlib.lines import Line2D from shapely.geometry import LineString, Polygon def draw_polygon_mpl( ax: plt.Axes, polygon: np.ndarray, color: Union[Tuple[float, float, float], str], linewidth: Optional[float] = None ) -> None: """Draw a polygon. The polygon's first and last point must be the same (repeated). Args: ax: Matplotlib axes instance to draw on polygon: Array of shape (N, 2) or (N, 3) color: Tuple of shape (3,) representing the RGB color or a single character 3-tuple, e.g. 'b' """ if linewidth is None: ax.plot(polygon[:, 0], polygon[:, 1], color=color) else: ax.plot(polygon[:, 0], polygon[:, 1], color=color, linewidth=linewidth) def draw_polygonpatch_matplotlib(points: Any, color: Union[Tuple[float, float, float], str]) -> None: """Draw a PolygonPatch. Args: points: Unused argument color: Tuple of shape (3,) representing the RGB color or a single character 3-tuple, e.g. 'b' """ fig = plt.figure(1, figsize=(10, 10), dpi=90) ax = fig.add_subplot(111) ext = [(0, 0), (0, 0.5), (0.5, 0.5), (0.5, 0), (0, 0)] int = [(0.2, 0.3), (0.3, 0.3), (0.3, 0.4), (0.2, 0.4)] polygon = Polygon(ext, [int]) patch = PolygonPatch(polygon, facecolor=color, alpha=0.5, zorder=2) ax.add_patch(patch) def draw_lane_polygons( ax: plt.Axes, lane_polygons: np.ndarray, color: Union[Tuple[float, float, float], str] = "y" ) -> None: """Draw a lane using polygons. Args: ax: Matplotlib axes lane_polygons: Array of (N,) objects, where each object is a (M,3) array color: Tuple of shape (3,) representing the RGB color or a single character 3-tuple, e.g. 'b' """ for i, polygon in enumerate(lane_polygons): ax.plot(polygon[:, 0], polygon[:, 1], color=color, alpha=0.3, zorder=1) def plot_bbox_2D( ax: plt.Axes, pts: np.ndarray, color: Union[Tuple[float, float, float], str], linestyle: str = "-" ) -> None: """Draw a bounding box. 2D bbox vertices should be arranged as:: 0----1 | | 2----3 i.e. the connectivity is 0->1, 1->3, 3->2, 2->0 Args: ax: Matplotlib axes pts: Array of shape (4, 2) representing the 4 points of the bounding box. color: Tuple of shape (3,) representing the RGB color or a single character 3-tuple, e.g. 'b' linestyle: The linestyle to use """ ax.plot(pts[0:2, 0], pts[0:2, 1], c=color, linestyle=linestyle) ax.plot(pts[2:4, 0], pts[2:4, 1], c=color, linestyle=linestyle) ax.plot(pts[np.array([1, 3]), 0], pts[np.array([1, 3]), 1], c=color, linestyle=linestyle) ax.plot(pts[np.array([0, 2]), 0], pts[np.array([0, 2]), 1], c=color, linestyle=linestyle) def animate_polyline(polyline: np.ndarray, axes_margin: int = 5, show_plot: bool = True) -> None: """Draw and animate a polyline on a plot. 
Args: polyline: Array of shape (N, 2) representing the points of the line axes_margin: How much margin for the axes show_plot: Whether to show the plot after rendering it """ xmin = np.amin(polyline[:, 0]) - axes_margin xmax = np.amax(polyline[:, 0]) + axes_margin ymin = np.amin(polyline[:, 1]) - axes_margin ymax = np.amax(polyline[:, 1]) + axes_margin fig, ax = plt.subplots() xdata, ydata = [], [] (ln,) = plt.plot([], [], "ro", animated=True) def init() -> Tuple[Line2D]: ax.set_xlim(xmin, xmax) ax.set_ylim(ymin, ymax) return (ln,) def update(frame: List[Any]) -> Tuple[Line2D]: xdata.append(frame[0]) ydata.append(frame[1]) ln.set_data(xdata, ydata) return (ln,) ani = FuncAnimation(fig, update, frames=polyline, init_func=init, blit=True) if show_plot: plt.show() def plot_lane_segment_patch( polygon_pts: np.ndarray, ax: plt.Axes, color: Union[Tuple[float, float, float], str] = "y", alpha: float = 0.3 ) -> None: """Plot a lane segment using a PolygonPatch. Args: polygon_pts: Array of shape (N, 2) representing the points of the polygon ax: Matplotlib axes color: Tuple of shape (3,) representing the RGB color or a single character 3-tuple, e.g. 'b' alpha: the opacity of the lane segment """ polygon = Polygon(polygon_pts) patch = PolygonPatch(polygon, facecolor=color, edgecolor=color, alpha=alpha, zorder=2) ax.add_patch(patch) def plot_nearby_centerlines( lane_centerlines: Dict[Any, Any], ax: plt.Axes, nearby_lane_ids: List[int], color: Union[Tuple[int, int, int], str] ) -> None: """Plot centerlines. Args: lane_centerlines: Python dictionary where key is lane ID, value is object describing the lane ax: Matplotlib axes nearby_lane_ids: List of integers representing lane IDs color: Tuple of shape (3,) representing the RGB color or a single character 3-tuple, e.g. 'b' """ for curr_lane_id in nearby_lane_ids: centerline = lane_centerlines[curr_lane_id]["centerline"] ax.plot(centerline[:, 0], centerline[:, 1], color=color, linestyle="--", alpha=0.4) def visualize_centerline(centerline: LineString) -> None: """Visualize the computed centerline. Args: centerline: Sequence of coordinates forming the centerline """ line_coords = list(zip(*centerline)) lineX = line_coords[0] lineY = line_coords[1] plt.plot(lineX, lineY, "--", color="grey", alpha=1, linewidth=1, zorder=0) plt.text(lineX[0], lineY[0], "s") plt.text(lineX[-1], lineY[-1], "e") plt.axis("equal")
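# Minimal usage sketch (not part of the original module): draw a unit square.
# draw_polygon_mpl expects the polygon's first point repeated at the end.
if __name__ == "__main__":
    fig, ax = plt.subplots()
    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
    draw_polygon_mpl(ax, square, color="b")
    plt.show()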
nilq/baby-python
python
from fastapi.testclient import TestClient

from app.main import app

client = TestClient(app)


def test_valid_input():
    """Return 200 Success when input is valid."""
    response = client.post(
        '/predict',
        json={
            'title': 'Water bike',
            'blurb': 'A bike that floats',
            'goal': '5000',
            'launch_date': '08/06/2020',
            'deadline': '10/20/2020',
            'category': 'sports'
        }
    )
    assert response.status_code == 200
    assert response.json() is not None


def test_invalid_input():
    """Return 422 Validation Error when the payload fails validation."""
    # A negative goal is assumed to fail validation here; adjust the field
    # to whatever the app's schema actually rejects.
    response = client.post(
        '/predict',
        json={
            'title': 'Water bike',
            'blurb': 'A bike that floats',
            'goal': '-5000',
            'launch_date': '08/06/2020',
            'deadline': '10/20/2020',
            'category': 'sports'
        }
    )
    assert response.status_code == 422
nilq/baby-python
python
encode,decode=lambda s:''.join(c//200*"🫂"+c%200//50*"💖"+c%50//10*"✨"+c%10//5*"🥺"+c%5*","+(c==0)*"❤️"+"👉👈"for c in s.encode()),lambda s:bytes([200*(c:=b.count)("🫂")+50*c("💖")+10*c("✨")+5*c("🥺")+c(",")for b in s.split("👉👈")[:-1]]).decode()
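# A readable equivalent of the golfed one-liner above (illustrative sketch of
# the same encoding: each byte is greedily decomposed into emoji worth
# 200/50/10/5/1, a zero byte becomes the heart, and every group ends with the
# pointing-hands separator).
def encode_verbose(s: str) -> str:
    out = []
    for byte in s.encode():
        group = ""
        for value, emoji in ((200, "🫂"), (50, "💖"), (10, "✨"), (5, "🥺"), (1, ",")):
            group += (byte // value) * emoji
            byte %= value
        if not group:  # the byte was 0
            group = "❤️"
        out.append(group + "👉👈")
    return "".join(out)


def decode_verbose(s: str) -> str:
    values = {"🫂": 200, "💖": 50, "✨": 10, "🥺": 5, ",": 1}
    data = bytes(
        sum(group.count(emoji) * value for emoji, value in values.items())
        for group in s.split("👉👈")[:-1]
    )
    return data.decode()


# Round-trip check:
assert decode_verbose(encode_verbose("hi")) == "hi"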
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""Sweep config interface."""

from .cfg import SweepConfig, schema_violations_from_proposed_config
from .schema import fill_validate_schema, fill_parameter, fill_validate_early_terminate

__all__ = [
    "SweepConfig",
    "schema_violations_from_proposed_config",
    "fill_validate_schema",
    "fill_parameter",
    "fill_validate_early_terminate",
]
nilq/baby-python
python
from typing import Callable

from fastapi import FastAPI

from app.db.init_db import init_db, create_engine


def create_startup_handler(app: FastAPI, db_url: str) -> Callable:
    async def startup() -> None:
        engine = create_engine(db_url)
        await init_db(engine)
        app.state.alchemy_engine = engine

    return startup


def create_shutdown_handler(app: FastAPI) -> Callable:
    async def shutdown() -> None:
        await app.state.alchemy_engine.dispose()

    return shutdown
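# Illustrative wiring (an assumed app-factory pattern, not part of the
# original module; db_url would come from configuration):
def create_app(db_url: str) -> FastAPI:
    app = FastAPI()
    app.add_event_handler("startup", create_startup_handler(app, db_url))
    app.add_event_handler("shutdown", create_shutdown_handler(app))
    return app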
nilq/baby-python
python
__author__ = 'socialmoneydev'

from jsonBase import JsonBase
from programlimit import ProgramLimit
from programinterestrate import ProgramInterestRate


class ProgramChecking(JsonBase):

    def isHashedPayload(self):
        return True

    def __init__(self):
        self.category = None
        self.type = None
        self.balanceLimit = None
        self.interestRates = []
        self.isExternalWithdrawEnabled = None
        self.isInterestEnabled = None
        self.isRecurringContributionEnabled = None
        self.perTransactionDepositLimit = None
        self.perTransactionWithdrawLimit = None

    def fromDict(self, dct, classDefs):
        classDefs = classDefs or dict()
        classDefs['interestRates'] = ProgramInterestRate
        classDefs['perTransactionWithdrawLimit'] = ProgramLimit
        classDefs['perTransactionDepositLimit'] = ProgramLimit
        super(ProgramChecking, self).fromDict(dct, classDefs)
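# Illustrative use (assumed payload shape; the keys mirror the attributes
# defined above):
#   pc = ProgramChecking()
#   pc.fromDict({'category': 'Checking', 'type': 'DDA', 'interestRates': []}, None)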
nilq/baby-python
python
#!/usr/local/bin/python3.5 -u

# Operator precedence: 7 * 7 evaluates first, so answer == 1 + 49 - 8 == 42.
answer = 1 + 7 * 7 - 8
print(answer)
nilq/baby-python
python
__version__ = '0.1.5'
name = "drf_scaffold"
nilq/baby-python
python
def count_prime_factors(n, c):
    # Accumulate the prime factorization of n into `c`
    # (argument `c` should be a Counter).
    if n < 2:
        return
    m = n
    i = 2
    while i <= m:
        while m % i == 0:
            m //= i
            c[i] += 1
        i += 1

from collections import Counter

n = int(input())
d = Counter()
for i in range(1, n + 1):
    count_prime_factors(i, d)

# The number of divisors of n! is the product of (exponent + 1) over the
# primes in its factorization.
ans = 1
mod = 10**9 + 7
for v in d.values():
    ans = ans * (v + 1) % mod
print(ans)
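# Quick check of the helper (illustrative): 12 = 2**2 * 3, so
#   check = Counter(); count_prime_factors(12, check)
# leaves check == Counter({2: 2, 3: 1}).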
nilq/baby-python
python