index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
16,000 | 81e1d55d476dca19ecbc262bc95398eb0ddacbcf | import os
import glob
import logging
import sys
import torch
import matplotlib.pyplot as plt
from .file_io import *
def set_gpu_devices(devices):
    """Expose only the given GPU ids (comma-separated string) to CUDA."""
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices
    visible = os.environ['CUDA_VISIBLE_DEVICES']
    count = torch.cuda.device_count()
    print(f'Setting GPU devices is done. '
          f'CUDA_VISIBLE_DEVICES: {visible}, '
          f'device count: {count}')
def show_num_params(model):
    """Print total and trainable parameter counts of a torch model."""
    all_counts = [p.numel() for p in model.parameters()]
    trainable_counts = [p.numel() for p in model.parameters() if p.requires_grad]
    print(f'Model total params: {sum(all_counts):,} - trainable params: {sum(trainable_counts):,}')
def files_with_suffix(directory, suffix, pure=False):
    """
    Recursively collect all files under *directory* whose names end with *suffix*.

    :param suffix: filename ending to match
    :param directory: root folder to search
    :param pure: if set to True, only filenames are returned (as opposed to absolute paths)
    """
    pattern = os.path.join(directory, '**', f'*{suffix}')
    matches = (os.path.abspath(p) for p in glob.glob(pattern, recursive=True))
    if pure:
        return [os.path.split(m)[-1] for m in matches]
    return list(matches)
def get_logger():
    """Return the root logger configured to log INFO+ to stdout.

    BUG FIX: the original unconditionally added a new StreamHandler on every
    call, so calling get_logger() twice printed every record twice. The
    handler is now only attached if an equivalent stdout handler is not
    already present, making the function idempotent.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    fmt = "[%(filename)s line %(lineno)d] %(message)s"  # also get the function name
    already_attached = any(
        isinstance(h, logging.StreamHandler) and getattr(h, 'stream', None) is sys.stdout
        for h in root.handlers
    )
    if not already_attached:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(fmt))
        root.addHandler(handler)
    return root
def waited_print(string):
    """Print *string*, then block until the user presses Enter."""
    for line in (string, '====== Waiting for input'):
        print(line)
    input()
def parse_log_file(file, mode='general'):
    """Parse a training log file into loss/accuracy curves.

    :param file: path of the log file (read via file_io.read_file_to_list)
    :param mode: 'general' parses every 'Epoch...' line; 'epoch_select'
        keeps only the last logged line of each epoch.
    :return: dict with six lists — instantaneous and running-average values
        for loss, acc@1 and acc@5, in log-line order.
    """
    lines = read_file_to_list(file)
    lines = [line for line in lines if line.startswith('Epoch')]  # remove initial lines
    acc_at1_list, acc_at1_avg_list = [], []
    acc_at5_list, acc_at5_avg_list = [], []
    loss_list, loss_avg_list = [], []
    if mode == 'epoch_select':
        lines_to_consider = []
        # NOTE(review): hard cap of 200 epochs — confirm against the training setup.
        for epoch in range(200):
            epoch_lines = [line for line in lines if line.startswith(f'Epoch: [{epoch}]')]
            # print(f'Epoch {epoch} lines: {len(epoch_lines)}')
            if len(epoch_lines) > 0:  # if there are any lines in the log file with that epoch
                lines_to_consider.append(epoch_lines[-1])  # last line for each epoch before saving checkpoint
    else:
        lines_to_consider = lines  # general mode, consider all lines
    # waited_print('')
    for line in lines_to_consider:
        # Log lines are tab-separated; fields 3, 4, 5 hold loss, acc@1, acc@5.
        the_list = line.split('\t')
        loss_part = the_list[3]
        # Second token is the value; third is the running average, with its
        # surrounding brackets stripped ([1:-1]) — presumably '(avg)'.
        loss, loss_avg = float(loss_part.split(' ')[1]), float(loss_part.split(' ')[2][1:-1])
        acc_at1_part = the_list[4]
        acc_at5_part = the_list[5]
        # Fixed-column slicing of the accuracy fields — assumes a rigid
        # 'Acc@1 12.345 (12.345)'-style layout; TODO confirm widths.
        acc_at_1, acc_at1_avg = float(acc_at1_part[6:12].strip()), float(acc_at1_part[14:20].strip())
        acc_at_5, acc_at5_avg = float(acc_at5_part[6:12].strip()), float(acc_at5_part[14:20].strip())
        loss_list.append(loss)
        loss_avg_list.append(loss_avg)
        acc_at1_list.append(acc_at_1)
        acc_at1_avg_list.append(acc_at1_avg)
        acc_at5_list.append(acc_at_5)
        acc_at5_avg_list.append(acc_at5_avg)
    return {
        'acc_at1_list': acc_at1_list,
        'acc_at1_avg_list': acc_at1_avg_list,
        'acc_at5_list': acc_at5_list,
        'acc_at5_avg_list': acc_at5_avg_list,
        'loss_list': loss_list,
        'loss_avg_list': loss_avg_list
    }
def visualize_log_file(file, metrics, title, parse_mode, vis_mode):
    """Summarize or plot curves parsed from a training log.

    :param metrics: iterable containing any of 'loss', 'acc_at1', 'acc_at5'
    :param parse_mode: forwarded to parse_log_file ('general' / 'epoch_select')
    :param vis_mode: 'do_prints' prints the best acc@1 running average and the
        indices where it occurs; anything else plots the selected curves.
    """
    dicts = parse_log_file(file, parse_mode)
    if vis_mode == 'do_prints':
        acc_at1_avg_list = dicts['acc_at1_avg_list']
        max_acc_at_1_avg = max(acc_at1_avg_list)
        # all indices achieving the maximum (ties are possible)
        inds = [i for i, j in enumerate(acc_at1_avg_list) if j == max_acc_at_1_avg]
        print(f'max_acc_at_1_avg: {max_acc_at_1_avg}, inds: {inds}')
    else:
        plt.title(title)
        if 'loss' in metrics:
            plt.plot(dicts['loss_list'], label='loss')
            plt.plot(dicts['loss_avg_list'], label='loss_avg')
        if 'acc_at1' in metrics:
            plt.plot(dicts['acc_at1_list'], label='acc_at1')
            plt.plot(dicts['acc_at1_avg_list'], label='acc_at1_avg')
        if 'acc_at5' in metrics:
            plt.plot(dicts['acc_at5_list'], label='acc_at5')
            plt.plot(dicts['acc_at5_avg_list'], label='acc_at5_avg')
        plt.legend()
        plt.grid()
        plt.show()
|
16,001 | 02b9fc8189e61242a35febea03da0a442db59fef | """
Module containing the Blueprint class.
This class allows to store an organization of handlers
to be used in the construction of a bot
"""
from .exception.Exceptions import *
from .Conversation import Conversation
from telegram.ext import Filters
class Blueprint():
    """Stores an organization of handlers (commands, messages, errors and
    conversations) to be used later in the construction of a bot."""

    def __init__(self):
        self.command_handlers = {}
        self.error_handler = None
        self.message_handlers = []
        self.conversations = []

    def add_command_handler(self, command, command_handler):
        """
        This method adds a handler to a command

        Parameters
        ----------
        command: String
            The command that will trigger the handler
        command_handler: Callable
            The Callable object that will execute. Must receive 2 parameters:
                bot : the bot object from python-telegram-bot
                update : the update object from python-telegram-bot

        Raises
        ------
        NotAStringException
            If command is not a string.
        NotCallableException
            If command_handler is not callable.
        """
        if callable(command_handler):
            if isinstance(command, str):
                self.command_handlers[command] = command_handler
            else:
                # BUG FIX: the original never called .format(), so the error
                # message always contained a literal '{}'.
                raise NotAStringException(
                    "{} isn't a valid command name. Command names must be string".format(command))
        else:
            raise NotCallableException("{} is not a function".format(command_handler))

    def get_command_handlers(self):
        """
        Returns a dict of command handlers in the structure
            command : handler

        Returns
        -------
        message_handlers : dict()
            dict object where the key is the command and the value is the handler function
        """
        return self.command_handlers

    def get_commands(self):
        """
        Returns the list of commands defined

        Returns
        -------
        commands : List(String)
            List of commands defined
        """
        return list(self.command_handlers.keys())

    def get_command(self, command):
        """
        Returns the command handler from a command

        Parameters
        ----------
        command : String
            The command that will be returned

        Returns
        -------
        handler : Callable
            Function that handle the command
        """
        return self.command_handlers[command]

    def set_error_handler(self, error_handler):
        """
        This method set the error handler

        Parameters
        ----------
        error_handler : Callable
            Function that will handle the error event
        """
        if callable(error_handler):
            self.error_handler = error_handler
        else:
            raise NotCallableException("{} object is not callable".format(type(error_handler)))

    def get_error_handler(self):
        """
        Returns the function to handle errors

        Returns
        -------
        error_handler : Callable
            Handler of error event
        """
        return self.error_handler

    def add_message_handler(self, message_handler, message_filter=Filters.text):
        """
        This method adds a handler to a message type

        Parameters
        ----------
        message_handler: Callable
            The Callable object that will execute. Must receive 2 parameters:
                bot : the bot object from python-telegram-bot
                update : the update object from python-telegram-bot
        message_filter: Filter from python-telegram-bot.
            A filter that defines which kind of message will trigger this event.
            The default is text.
        """
        if callable(message_handler):
            self.message_handlers.append((message_handler, message_filter))
        else:
            raise NotCallableException("{} is not callable".format(type(message_handler)))

    def get_message_handlers(self):
        """
        Returns a list of message handlers in the structure
            (message_handler, message_filter)

        Returns
        -------
        message_handlers : List((message_handler, message_filter))
            List of messages handlers and its filters
        """
        return self.message_handlers

    def add_conversation(self, conversation):
        """
        Add a conversation flow to the blueprint

        Parameters
        ----------
        conversation : Conversation object
            The conversation that will be added in the blueprint
        """
        if isinstance(conversation, Conversation):
            self.conversations.append(conversation)
        else:
            raise NotAConversation("Must pass Conversation object, not {}".format(type(conversation)))

    def get_conversations(self):
        """
        Returns the list of conversations on the blueprint

        Returns
        -------
        conversations : List(Conversation)
            List of conversations
        """
        return self.conversations
16,002 | 556ee4b5ee1790b693c0801ef7e985d4f0cec410 | #!env/bin/python
import time
from slackclient import SlackClient
from emoji_parser import EmojiParser
from command_handler import CommandHandler
# Starterbot's Slack user ID. (Hard-coded — the original comment claimed it
# came from an environment variable, but it does not.)
BOT_ID = 'U3YLPLY5C'
# constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "do"
# Read in the authentication token for the bot. The context manager
# guarantees the handle is closed even if readline() raises (the original
# used a bare open()/close() pair, leaking the handle on error).
with open('permissions.txt') as f:
    key = f.readline().rstrip()
slack_client = SlackClient(key)
"""
Determine which pipeline message should be sent to (Command or NLP).
"""
def determine_message_type(slack_rtm_output):
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
# print output_list
for output in output_list:
# slack output should be parsed as a command
if output and 'text' in output and AT_BOT in output['text']:
return 'command'
# slack output should be parsed by NLP engine
if output and 'text' in output:
return 'nlp'
return None, None, None, None
if __name__ == "__main__":
text_parser = EmojiParser(slack_client)
command_handler = CommandHandler(slack_client)
READ_WEBSOCKET_DELAY = 0.5 # 1 second delay between reading from data stream
if slack_client.rtm_connect():
print("ReactionAdder connected and running!")
while True:
output_list = slack_client.rtm_read()
msg_type = determine_message_type(output_list)
if msg_type == 'command':
__message, channel = command_handler.get_command_info(output_list)
command_handler.parse_command(__message.split(), channel)
elif msg_type == 'nlp':
print ("in nlp branch")
emoji_list, channel, timestamp, user = text_parser.parse_message(output_list)
print emoji_list
for emoji_text in emoji_list:
if emoji_text is not None:
slack_client.api_call("reactions.add", channel=channel, name=emoji_text, timestamp=timestamp, as_user=True)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
|
16,003 | 7e71849a1a6d0db36232e4562137ddd76a541205 | elemento=[]
def separarL(lista):
    """Prompt the user for integers (appending them to the module-level
    `elemento` list), then split *lista* — sorted in place — into even and
    odd values.

    Returns (pares, impares): the even and odd numbers of *lista*.

    NOTE(review): the function mixes two jobs — interactive input that
    mutates the global `elemento`, and the even/odd split of its argument.
    Since `elemento` is what gets passed in at module level, the freshly
    read values end up in the result as well.
    """
    n = int(input("Cuantos valores desea agregar: "))
    for i in range(n):
        no = int(input("Valor: "))
        elemento.append(no)
        i += 1  # no effect: `i` is rebound by the for loop each iteration
    print(elemento)
    lista.sort()  # sorts the caller's list in place
    pares = []
    impares = []
    for i in lista:
        if i % 2 == 0:
            pares.append(i)
        else:
            impares.append(i)
    return pares, impares
# Module-level driver: read values interactively, then print the split.
pares, impares = separarL(elemento)
print("Los números pares son: ", pares)
print("Los números impares son: ", impares)
# NOTE(review): this calls separarL a second time (prompting the user again)
# and discards the result; combined with the unconditional call above, the
# __main__ guard is ineffective as written.
if __name__ == "__main__":
    separarL(elemento)
|
16,004 | e219fd5e985f135428a236b5565397a4af403493 | # -*- coding: utf-8 -*-
import csv
from app.cobranza.models import Cobranza
from app.investigacion.models import Investigacion
init_row = 1
def cobranza_upload(file_path):
    """Read cobranza rows from the CSV at *file_path* and persist them."""
    save_items(get_items(file_path))
def get_items(file_path):
    """Load up to ~1000 data rows from the CSV at *file_path*.

    Rows before `init_row` (the header) are skipped; each remaining row is
    mapped through get_row().
    """
    limit = 1000
    items = []
    with open(file_path) as csv_file:
        for index, row in enumerate(csv.reader(csv_file, delimiter=',')):
            if index >= init_row:
                items.append(get_row(row))
                if index > limit:
                    break
    return items
def get_row(data):
    """Map a raw CSV row (indexable sequence) onto the named cobranza fields."""
    columns = (
        ("investigacion_id", 0),
        ("monto", 7),
        ("folio", 8),
        ("razon_social", 11),
        ("obs_cobranza", 13),
        ("tipo", 14),
    )
    return {name: data[idx] for name, idx in columns}
def parse_float(value):
    """Best-effort float conversion; prints the error and returns None on failure."""
    try:
        result = float(value)
    except Exception as e:
        print(e)
        return None
    return result
def parse_int(value):
    """Best-effort int conversion; prints the error and returns None on failure."""
    try:
        result = int(value)
    except Exception as e:
        print(e)
        return None
    return result
def parse_string(value):
    """Normalize a CSV text cell to UTF-8.

    NOTE(review): this is Python 2 code — `str.decode` does not exist on
    Python 3 strings, so under Python 3 both branches raise AttributeError.
    The "€?" -> "É" replacement repairs a cp1252 mojibake artifact.
    """
    if not value:
        return ""
    try:
        # Assume Windows-1252 input; re-encode as UTF-8 and patch 'É'.
        string_parsed = value.decode('cp1252').encode("utf-8").replace("€?", "É")
    except Exception as e:
        print(e)
        # Fall back to a lossy UTF-8 round-trip.
        string_parsed = value.decode('utf-8', 'ignore').encode("utf-8")
    return string_parsed
def update_cobranza(investigacion_id, monto, folio):
    """Update monto/folio on the Cobranza row linked to the investigacion."""
    record = Cobranza.objects.get(investigacion=investigacion_id)
    record.monto = parse_float(monto)
    record.folio = folio
    record.save()
def update_compania(investigacion_id, razon_social):
    """Set the razon_social of the company attached to the investigacion."""
    investigacion = Investigacion.objects.get(id=investigacion_id)
    compania = investigacion.compania
    compania.razon_social = razon_social
    compania.save()
def update_investigacion(investigacion_id, obs_cobranza, tipo):
    """Update the sucursal name and, when valid, the tipo_investigacion_status."""
    investigacion = Investigacion.objects.get(id=investigacion_id)
    sucursal = investigacion.sucursal
    if sucursal:
        sucursal.nombre = parse_string(obs_cobranza)
        sucursal.save()
    tipo_value = parse_int(tipo)
    valid_tipos = [option[0] for option in Investigacion.TIPO_INVESTIGACION_OPCIONES]
    if tipo_value in valid_tipos:
        investigacion.tipo_investigacion_status = tipo_value
        investigacion.save()
def save_items(items):
    """Persist each parsed row; rows without an investigacion_id are skipped."""
    for item in items:
        investigacion_id = item['investigacion_id']
        if not investigacion_id:
            continue
        update_cobranza(investigacion_id, item['monto'], item['folio'])
        update_compania(investigacion_id, item['razon_social'])
        update_investigacion(investigacion_id, item['obs_cobranza'], item["tipo"])
|
16,005 | f4c120ccffa9fd730c87339c89188138547cb06f |
class PStats():
    """Plain container for a character's battle statistics."""

    def __init__(self, hp=0, ep=0, attack=0, eattack=0, defense=0,
                 edefense=0, tough=0, level=0, XP=0, AP=0):
        stats = dict(hp=hp, ep=ep, attack=attack, eattack=eattack,
                     defense=defense, edefense=edefense, tough=tough,
                     level=level, XP=XP, AP=AP)
        for name, value in stats.items():
            setattr(self, name, value)
        # Always start status as 0. This will be filled later as needed with
        # values that represent things like poison, speed up, or whatever.
        self.status = 0
|
16,006 | 4a22339d4d84920ed6ef4cfda56c07270e5f5697 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the `product` app: article, articlePrice, brand,
    category, currencyControl and model tables, plus the links between
    category/article/model added afterwards.

    NOTE(review): ForeignKey(to=...) without on_delete — this migration
    targets Django < 2.0.
    """

    dependencies = [
    ]

    operations = [
        # Core product table.
        migrations.CreateModel(
            name='article',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('note', models.CharField(max_length=255)),
                ('create', models.DateField(auto_now_add=True)),
                ('update', models.DateField(auto_now=True)),
                ('article', models.CharField(max_length=50)),
                ('noSeries', models.CharField(max_length=75)),
                ('codeBar', models.CharField(max_length=75)),
                ('measureSystem', models.IntegerField(choices=[(1, 'Unidades'), (2, 'Libras'), (3, 'Kilos'), (4, 'Docenas')])),
                ('presentation', models.IntegerField(choices=[(1, 'Caja'), (2, 'Bolsa'), (3, 'Otro')])),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Price history rows pointing back at an article.
        migrations.CreateModel(
            name='articlePrice',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('note', models.CharField(max_length=255)),
                ('create', models.DateField(auto_now_add=True)),
                ('update', models.DateField(auto_now=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=4)),
                ('isActive', models.BooleanField(default=True)),
                ('article', models.ForeignKey(to='product.article')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='brand',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('note', models.CharField(max_length=255)),
                ('create', models.DateField(auto_now_add=True)),
                ('update', models.DateField(auto_now=True)),
                ('brand', models.CharField(max_length=45)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='category',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('note', models.CharField(max_length=255)),
                ('create', models.DateField(auto_now_add=True)),
                ('update', models.DateField(auto_now=True)),
                ('category', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='currencyControl',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('note', models.CharField(max_length=255)),
                ('create', models.DateField(auto_now_add=True)),
                ('update', models.DateField(auto_now=True)),
                ('currency', models.CharField(max_length=60)),
                ('Simbolo', models.CharField(max_length=5)),
                ('isPrincipal', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='model',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('note', models.CharField(max_length=255)),
                ('create', models.DateField(auto_now_add=True)),
                ('update', models.DateField(auto_now=True)),
                ('model', models.CharField(max_length=50)),
                ('brand', models.ForeignKey(to='product.brand')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Cross-model links, added once all models exist.
        migrations.AddField(
            model_name='category',
            name='model',
            field=models.ManyToManyField(to='product.model', through='product.article'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(to='product.category'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='article',
            name='model',
            field=models.ForeignKey(to='product.model'),
            preserve_default=True,
        ),
    ]
|
16,007 | a373f2f883b72e8c9f4061175ba0e8c96a198ea5 | import boto3
import json
import psycopg2
from django.conf import settings
class UploadToPostgres():
    """Persist a load-controller configuration plus its baseline/controlled
    load profiles into Postgres.

    Connection credentials are read from BASE_DIR/postgres_info.json.
    """

    def __init__(
        self,
        county,
        rate_energy_peak,
        rate_energy_partpeak,
        rate_energy_offpeak,
        rate_demand_peak,
        rate_demand_partpeak,
        rate_demand_overall
    ):
        # Load DB credentials from the JSON config in the project root.
        with open(settings.BASE_DIR + '/postgres_info.json') as json_file:
            postgres_info = json.load(json_file)
        self.db_host = postgres_info['DB_HOST']
        self.table_name = "script_config_load_controller"
        self.postgres_db = postgres_info['POSTGRES_DB']
        self.postgres_user = postgres_info['POSTGRES_USER']
        self.postgres_password = postgres_info['POSTGRES_PASSWORD']
        self.county = county
        self.rate_energy_peak = rate_energy_peak
        self.rate_energy_partpeak = rate_energy_partpeak
        self.rate_energy_offpeak = rate_energy_offpeak
        self.rate_demand_peak = rate_demand_peak
        self.rate_demand_partpeak = rate_demand_partpeak
        self.rate_demand_overall = rate_demand_overall
        # 1-based index of the simulation run whose column gets uploaded.
        self.num_of_run = 4

    def run(self, baseline_profiles, controlled_profiles):
        """Insert the config row, then both 15-minute load profiles as JSON.

        :param baseline_profiles: sequence of per-interval rows; column
            num_of_run-1 holds the load value for each 15-minute interval.
        :param controlled_profiles: same shape as baseline_profiles.
        """
        conn = psycopg2.connect(
            host=self.db_host,
            dbname=self.postgres_db,
            user=self.postgres_user,
            password=self.postgres_password,
            port='5432'
        )
        cur = conn.cursor()
        # Build the JSON-serializable time/load lists.
        baseline_profiles_list = []
        controlled_profiles_list = []
        start_hour = 0
        # BUG FIX: was `len(baseline_profiles / 4)` — a TypeError (list / int).
        # Each row is one 15-minute interval, so iterate over every row.
        lines = len(baseline_profiles)
        for line in range(lines):
            # BUG FIX: the hour must advance every 4 rows; the original used
            # `line % 4`, which kept the hour stuck while minutes cycled.
            hour_str = str((start_hour + line // 4) % 24)
            minute = 15 * (line % 4)
            # BUG FIX: `minute is 0` relied on CPython small-int identity; use ==.
            if minute == 0:
                minute_str = '00'
            else:
                minute_str = str(minute)
            baseline_profiles_list.append(
                {
                    'time': hour_str + ':' + minute_str,
                    'load': str(baseline_profiles[line][self.num_of_run - 1])
                }
            )
            controlled_profiles_list.append(
                {
                    'time': hour_str + ':' + minute_str,
                    'load': str(controlled_profiles[line][self.num_of_run - 1])
                }
            )
        # Insert the configuration row (values are parameterized).
        cur.execute("INSERT INTO " + self.table_name +
                    " (county, rate_energy_peak, rate_energy_partpeak, rate_energy_offpeak," +
                    " rate_demand_peak, rate_demand_partpeak, rate_demand_overall)" +
                    " VALUES (%s, %s, %s, %s, %s, %s, %s)",
                    (
                        self.county, str(self.rate_energy_peak), str(self.rate_energy_partpeak), str(self.rate_energy_offpeak),
                        str(self.rate_demand_peak), str(self.rate_demand_partpeak), str(self.rate_demand_overall)
                    )
                    )
        conn.commit()
        # Fetch the id of the row just inserted.
        # NOTE(review): ORDER BY id DESC LIMIT 1 is racy under concurrent
        # writers; `INSERT ... RETURNING id` would be safer — confirm schema.
        cur.execute("SELECT id FROM " + self.table_name + " ORDER BY id DESC LIMIT 1")
        config_id = cur.fetchone()
        cur.execute("INSERT INTO script_algorithm_load_controller" +
                    " (config, uncontrolled_load, controlled_load)" +
                    " VALUES (%s, %s, %s)",
                    (
                        config_id, json.dumps(baseline_profiles_list), json.dumps(controlled_profiles_list)
                    )
                    )
        print('Insertion finished...')
        # Make the changes to the database persistent
        conn.commit()
        # Close communication with the database
        cur.close()
        conn.close()
|
16,008 | 36282dcb01840b4e0e1ef803ec7f987e70c7eeaf | import gym
import ant_hrl_maze
from PIL import Image
env = gym.make("AntWaypointHierarchical-v4")
env.reset()
while True:
try:
env.render()
except KeyboardInterrupt:
break
while True:
inp = input("Action")
if inp == "r":
env.reset()
elif inp == "c":
env.render("rgb_array")
elif inp.isnumeric():
print(env.step(int(inp), render=True)[0])
env.render()
|
16,009 | 668a5fc9e22e71c11b50cc4697c3411cdaca6631 | a=int(input())
# Reverse the decimal digits of the non-negative integer `a` read above
# (e.g. 123 -> 321). Accumulates digits into k from least significant first.
k = 0
while a > 0:
    m = a % 10      # take the last digit
    k = k * 10 + m  # append it to the reversed number
    a = a // 10     # drop the last digit
print(k)
|
16,010 | f6b1464f09f596f42d5dd94b9dc27ad7d7c92b50 | from qt5 import *
def main():
    """Create the Qt application and show the Test window.

    NOTE(review): QApplication, Test and (presumably) sys all come from the
    wildcard `from qt5 import *` — confirm sys is re-exported there.
    """
    app = QApplication(sys.argv)
    # login = Login()
    # login.show()
    # window = MainUI()
    # window.show()
    test = Test()
    test.show()
    app.exec_()


if __name__ == "__main__":
    main()
16,011 | b444203b78a399a4d6fb7bca6b0d816d637d4f9e | #!/usr/bin/python3
# @File:.py
# -*- coding:utf-8 -*-
# @Author:von_fan
# @Time:2020年04月16日23时07分17秒
from rest_framework.views import APIView
from .api_response import APIResponse
from rest_framework import status
class ManyOrOne(APIView, APIResponse):
    """Decides whether incoming request data represents one object (dict)
    or many (list) — e.g. for a DRF serializer's `many=` flag."""

    def IsMany(self, request_data):
        # request_data=request_data.dict()
        # if("QuerySet" in type(request_data)):
        #     Many = True
        #     return Many
        # A single dict payload -> one object.
        if isinstance(request_data, dict) and request_data is not None:
            Many = False
            return Many
        # A list payload -> many objects.
        elif isinstance(request_data, list) and request_data is not None:
            Many = True
            return Many
        # Fallbacks for other sized containers (presumably QuerySets — confirm).
        elif len(request_data) > 1:
            Many = True
            return Many
        elif len(request_data) <= 1:
            Many = False
            return Many
        else:
            # Unreachable for sized inputs; kept as a defensive error response.
            print("草你爹")
            return APIResponse(
                401, "数据错误,无法新增",
                results=[],
                status=status.HTTP_400_BAD_REQUEST
            )


# NOTE(review): rebinding the class name to an instance shadows the class;
# importers of this module receive this singleton, not the class itself.
ManyOrOne = ManyOrOne()
|
16,012 | 0931117495102761435111dcd30fb386e6ecebe3 | # map() 함수와 filter() 함수
# map : puts each list element through a function and builds a list from the returned values
# filter : puts each list element through a function and builds a new list from the elements whose return value is True
def power(item):
    """Return item squared."""
    return item ** 2
def under_3(item):
    """Return True when item is less than 3."""
    return 3 > item
list_input_a = [1, 2, 3, 4, 5]

# Use the map() function: note the first print shows the lazy map object,
# the second materializes it with list().
output_a = map(power, list_input_a)
print("# map() 함수의 실행결과")
print("map(power, list_input_a):", output_a)
print("map(power, list_input_a):", list(output_a))
print()

# Use the filter() function (same lazy-object-then-list pattern).
output_b = filter(under_3, list_input_a)
print("# filter() 함수의 실행결과")
print("filter(under_3, list_input_a):", output_b)
print("filter(under_3, list_input_a):", list(output_b))
16,013 | db2d630b2825c5d52b8fae389b04f65a830b3120 | import argparse
import os
import json
from server import server_start
from client import client_start
def parse_args():
    """
    Parses command line arguments

    Returns:
        Namespace: Namespace of arguments.
    """
    description = 'Synchronizes files and directories between a client and a server.'
    parser = argparse.ArgumentParser(description=description)
    # Every option defaults to None so that configure() can tell which
    # values still need to be prompted for or read from the config file.
    parser.add_argument('--config', type=str, default=None,
                        help='Path to a configuration file. A valid configuration file will enable silent mode.')
    parser.add_argument('--root', type=str, default=None,
                        help='Path of directory to Synchronize')
    parser.add_argument('--host', type=str, default=None,
                        help='Server or Client')
    parser.add_argument('--hostname', type=str, default=None,
                        help='Hostname to connect to or broadcast as.')
    parser.add_argument('--port', type=int, default=None,
                        help='Port to connect to or bind to.')
    parser.add_argument('--timeout', type=int, default=None,
                        help='How long the connect will hang before timeing out.')
    # NOTE(review): argparse's type=bool does not parse 'False' — any
    # non-empty string is truthy. These boolean flags only behave as
    # expected when supplied via the config file; confirm before relying
    # on them from the command line.
    parser.add_argument('--encryption', type=bool,
                        default=None, help='Use TLS or not.')
    parser.add_argument('--cert', type=str, default=None,
                        help='Certificate file path for TLS handshake.')
    parser.add_argument('--key', type=str, default=None,
                        help='Key file path for TLS handshake.')
    parser.add_argument('--purge', type=bool, default=None,
                        help='Deletions are included in Syncs.')
    parser.add_argument('--purge_limit', type=int, default=None,
                        help='How long, in days, deleted items are still monitored before being forgotten.')
    parser.add_argument('--backup', type=bool, default=None,
                        help='Deletions are stored in backup location.')
    parser.add_argument('--backup_path', type=str, default=None,
                        help='Path to place backup files within.\nDefaults to ~/conf/pysync/{root}/backups')
    parser.add_argument('--backup_limit', type=int, default=None,
                        help='Length of time files are held in backup location. (days)')
    parser.add_argument('--ram', type=int, default=None,
                        help='Maximum amount of RAM to use for Syncs. (Bytes)\n-1 for unlimited.')
    parser.add_argument('--compression', type=int, default=None,
                        help='Compression level to use on large files. Follows'
                        + ' the zlib compression levels. 0 is no compression'
                        + ' and 9 is most compression.')
    parser.add_argument('--compression_min', type=int, default=None,
                        help='Minimum file size before compression is applied. (Bytes)')
    logging_help = 'Information will be kept in log file.\n0 - Nothing logged\
    \n1 - Only Errors are logged\n2 - Errors and Summary activity are logged\n3 - \
    Errors, Summary Activity, and Deletions are logged.\n4 - Nearly all activity is logged.'
    parser.add_argument('--logging', type=int, default=None, help=logging_help)
    parser.add_argument('--logging_limit', type=int, default=None,
                        help='Maximum size limit of log file. (Bytes)\n-1 for unlimited.')
    parser.add_argument('--gitignore', type=bool, default=None,
                        help='Read and exclude items from children gitignores in the sync.')
    return parser.parse_args()
def print_intro():
    """
    Welcome message
    """
    banner = ('Welcome to PySync', 'Version 0.1', 'Created by Maximilian Terenzi', '')
    for line in banner:
        print(line)
def check_for_config(conf, confs_path):
    """
    Checks if configuration file should be loaded.

    Args:
        conf (dict): Configuration dictionary
        confs_path (str): Default path configuration files

    Returns:
        dict: Configuration file
    """
    if yes_no('Is there a configuration file you would like to load?'):
        options = os.listdir(confs_path)
        if len(options) > 0:
            # Offer the known configs plus an escape hatch for a custom path.
            options.append('Specify a Path')
            option = ask_options(
                'Pick a configuration file', options, title=False)
            if option == 'Specify a Path':
                conf['config'] = ask_path(
                    'Enter the path for the configuration file')
            else:
                conf['config'] = os.path.join(confs_path, option)
        else:
            conf['config'] = ask_path(
                'Enter the path for the configuration file')
        # Load, confirm, then drop the transient 'config' path entry.
        conf = get_config_file(conf)
        conf, _ = confirm_conf(conf)
        conf.pop('config')
    return conf
def configure(conf):
    """
    Checks configuration for missing values and prompts for values.

    Args:
        conf (dict): Configuration dictionary

    Returns:
        dict: Configuration dictionary
    """
    # Shared size-unit help text and (symbol, converter) pairs used by the
    # RAM / compression / logging size prompts.
    unit_prompt = '\nUnits:\nUnit\t-\tExample\nGB\t-\t10GB\nMB\t-\t10MB\nKB\t-\t10KB\nB\t-\t10'
    units = [
        ('GB', lambda x: int(x * 1e9)),
        ('MB', lambda x: int(x * 1e6)),
        ('KB', lambda x: int(x * 1e3)),
        ('B', lambda x: int(x)),
    ]
    if conf.get('root', None) is None:
        conf['root'] = simple_response(
            'What is the path of the directory you wish to sync?')
    conf['root'] = os.path.abspath(conf['root'])
    # Fill each configuration section in turn.
    conf = configure_handshake(conf)
    conf = configure_deletes(conf)
    conf = configure_limits(conf, unit_prompt, units)
    conf = configure_logging(conf, unit_prompt, units)
    conf = configure_misc(conf)
    return conf
def configure_handshake(conf):
    """
    Checks configuration for missing handshake values and prompts for values.

    Args:
        conf (dict): Configuration dictionary

    Returns:
        dict: Configuration dictionary
    """
    print()
    if conf.get('host', None) is None or conf['host'] not in ['Server', 'Client']:
        conf['host'] = ask_options('Is this the Server or Client?',
                                   ['Server', 'Client']).title()
    if conf.get('hostname', None) is None:
        if conf['host'] == 'Server':
            conf['hostname'] = simple_response('What is your hostname?')
        else:
            conf['hostname'] = simple_response(
                'What is the hostname that you are connecting to?')
    if conf.get('port', None) is None:
        if conf['host'] == 'Server':
            conf['port'] = numeric_response('What port do you want to use?')
        else:
            conf['port'] = numeric_response(
                'What port on the host are you connecting to?')
    if conf.get('timeout', None) is None:
        conf['timeout'] = numeric_response('How long, in seconds, can a connection hang before timing out?',
                                           default=30)
    if conf.get('encryption', None) is None:
        conf['encryption'] = yes_no(
            'Would you like to use TLS encryption?', default=False)
    # The certificate is needed by both sides; the private key only by the server.
    if conf['encryption'] and conf.get('cert', None) is None:
        conf['cert'] = ask_path('Enter the path for the certificate file')
    if conf['encryption'] and conf['host'] == 'Server' \
            and conf.get('key', None) is None:
        conf['key'] = ask_path('Enter the path for the key file')
    return conf
def configure_deletes(conf):
    """
    Checks configuration for missing deletion related values and prompts
    for values.

    Args:
        conf (dict): Configuration dictionary

    Returns:
        dict: Configuration dictionary
    """
    print()
    if conf.get('purge', None) is None:
        conf['purge'] = yes_no(
            'Would you like the sync to be able to delete files between devices?', default=False)
    # The purge_limit / backup questions only matter when purging is enabled.
    if conf['purge'] and conf.get('purge_limit') is None:
        conf['purge_limit'] = numeric_response(
            'How long, in days, should deleted items still be monitored before being forgotten?', default=7)
    if conf['purge'] and conf.get('backup', None) is None:
        conf['backup'] = yes_no(
            'Would you like to backup deleted files?', default=False)
    # NOTE(review): assumes conf['backup'] exists even when purge is False —
    # presumably seeded by parse_args(); confirm.
    if conf['backup'] and conf.get('backup_path', None) is None:
        prompt = 'Provide a path for the backups'
        conf['backup_path'] = simple_response(prompt, default='DEFAULT')
    if conf['backup'] and conf.get('backup_limit', None) is None:
        prompt = 'How long, in days, would you like to keep backed up files? (-1 to never delete)'
        conf['backup_limit'] = numeric_response(prompt, default=7)
    return conf
def configure_limits(conf, unit_prompt, units):
    """
    Checks configuration for missing performance limitations related values and
    prompts for values.

    Args:
        conf (dict): Configuration dictionary

    Returns:
        dict: Configuration dictionary
    """
    print()
    if conf.get('ram', None) is None:
        if conf['host'] == 'Server':
            prompt = 'How much RAM would you like the Sync to use per thread?'
        else:
            prompt = 'How much RAM would you like the Sync to use?'
        prompt += unit_prompt + '\nEnter -1 for unlimited.'
        conf['ram'] = numeric_response(prompt, units, default='1MB')
    if conf.get('compression', None) is None:
        conf['compression'] = ask_range(
            prompt='How much would you like to compress large files?',
            min=0, max=9, tips=['No Compression', 'Max Compression'], default=0)
    # Level 0 disables compression, so the size threshold is skipped then.
    if conf['compression'] and conf.get('compression_min', None) is None:
        prompt = 'What is the minimum file sized that can be compressed?' + unit_prompt
        conf['compression_min'] = numeric_response(prompt, units, default=70)
    return conf
def configure_logging(conf, unit_prompt, units):
    """
    Checks configuration for missing logging related values and prompts
    for values.

    Args:
        conf (dict): Configuration dictionary

    Returns:
        dict: Configuration dictionary
    """
    print()
    if conf.get('logging', None) is None:
        prompt = 'Would you like to log information?'
        options = ['Nothing Logged', 'Errors Only', 'Errors and Summary Activity',
                   'Errors, Summary Activity, and Deletions', 'Nearly all Activity']
        # The chosen option's list index doubles as the numeric level (0-4).
        conf['logging'] = options.index(ask_options(
            prompt, options, default='Nothing Logged'))
    if conf['logging'] > 0 and conf.get('logging_limit', None) is None:
        prompt = 'What is the maximum file size of the log file?' + \
            unit_prompt + '\nEnter -1 for unlimited.'
        conf['logging_limit'] = numeric_response(prompt, units, default='10MB')
    return conf
def configure_misc(conf):
    """
    Checks configuration for missing miscellaneous values and prompts
    for values.

    Args:
        conf (dict): Configuration dictionary

    Returns:
        dict: Configuration dictionary
    """
    if conf.get('gitignore', None) is None:
        conf['gitignore'] = yes_no(
            'Would you like items from children gitignores to be excluded from the sync?', default=False)
    # Only the client loops between syncs; -1 means run once and exit.
    if conf['host'] == 'Client' and conf.get('sleep_time', None) is None:
        prompt = 'How long, in seconds, would you like the client to sleep before re-syncing? Enter -1 for single use.'
        conf['sleep_time'] = numeric_response(prompt, default=-1)
    return conf
def ask_options(prompt, options, confirm=True, title=True, default=None, hints=None):
    """
    Presents options for response from user. Response is checked and returned.

    Args:
        prompt (str): Question prompt.
        options (list): List of options to display.
        confirm (bool, optional): Echos user selections. Defaults to True.
        title (bool, optional): Capitalize first letter of each option. Defaults to True.
        default (object, optional): Default input value presented. Must be
            contained in the options parameter. Defaults to None.
        hints (list): List of hints to be displayed alongside choices.

    Raises:
        IndexError: Default value is not in options parameter.

    Returns:
        object: Returns user selection from options.
    """
    print(prompt + ':')
    for idx, option in enumerate(options):
        if title:
            option = str(option).title()
        if hints is None:
            print(f'{idx+1} - {option}')
        else:
            try:
                print(f'{idx+1} - {option}: {hints[idx]}')
            except IndexError:
                # Fewer hints than options: show the bare option.
                print(f'{idx+1} - {option}')
    if default is None:
        hint = f'Pick an option (1-{len(options)}): '
    else:
        hint = f'Pick an option (1-{len(options)}) [{options.index(default)+1}]: '
    option = input(hint)
    if option == '' and default is not None:
        return default
    try:
        option = int(option)
        try:
            if option < 1:
                # Reject 0/negatives so they don't wrap to the end of the list.
                raise IndexError
            option = options[option-1]
            if confirm:
                print(f'User selected: {option}')
            return option
        except IndexError:
            print(f'Invalid option. Must be between 1 and {len(options)}')
            # BUG FIX: re-ask with the same hints (they were silently dropped
            # from both recursive calls in the original).
            return ask_options(prompt, options, confirm, title, default, hints)
    except ValueError:
        print('Invalid option. Must be integer.')
        return ask_options(prompt, options, confirm, title, default, hints)
def simple_response(prompt, default=None):
    """
    Presents prompt and returns response.
    Args:
        prompt (str): Question to present.
        default (obj, optional): Value to present as default. Defaults to None.
    Returns:
        obj: Response string or default object.
    """
    suffix = ': ' if default is None else f' [{default}]: '
    response = input(prompt + suffix)
    if response != '':
        return response
    if default is not None:
        return default
    # Empty answer with no default: re-ask until we get something.
    print('Please enter a valid response')
    return simple_response(prompt, default)
def yes_no(prompt, default=None):
    """
    Presents yes or no question and returns response.
    Args:
        prompt (str): Question to be presented.
        default (bool, optional): Default value to be presented. Defaults to None.
    Raises:
        KeyError: Default value was not boolean (or None).
    Returns:
        bool: True for yes, False for no, or the default value.
    """
    if default is None:
        response = input(prompt + ' (y/n): ')
    elif default is True:
        response = input(prompt + ' ([y]/n): ')
    elif default is False:
        response = input(prompt + ' (y/[n]): ')
    else:
        # BUG FIX: this guard was unreachable before because the branches
        # above tested truthiness (`elif default` / `elif not default`)
        # rather than identity with True/False, so the documented KeyError
        # could never fire for non-boolean defaults.
        raise KeyError('Default must be True or False')
    if response.lower() == 'y':
        return True
    elif response.lower() == 'n':
        return False
    elif response == '' and default is not None:
        return default
    print('Please enter \'y\' or \'n\' as a valid response.')
    return yes_no(prompt, default)
def numeric_response(prompt, units=None, num_type=int, default=None):
    """
    Presents question that requires a numeric response.
    Args:
        units (list, optional): Units to evaluate answer with. List of
            (symbol, func) tuples. Defaults to None (no units).
        num_type (type, optional): Variable type to cast response to. Defaults to int.
        default (obj, optional): Default value to present. Defaults to None.
    Returns:
        obj: User or default response casted to num_type parameter.
    """
    # BUG FIX: the previous mutable default ([]) was shared across calls and
    # could be sorted in place downstream; use None as the sentinel instead.
    if units is None:
        units = []
    if default is None:
        response = input(prompt + ': ')
    else:
        response = input(prompt + f' [{default}]' + ': ')
    try:
        if response == '' and default is not None:
            return standardize_response(default, units, num_type)
        elif response == '':
            print('Please enter a response.')
            return numeric_response(prompt, units, num_type, default)
        return standardize_response(response, units, num_type)
    except ValueError:
        print('Number must be an integer or a unit was incorrectly entered.')
        return numeric_response(prompt, units, num_type, default)
def standardize_response(response, units, num_type):
    """
    Standardize response containing units to base unit.
    Args:
        response (str): User inputted response.
        units (list[Tuple(str, func)]): List of tuples containing unit symbol
            and conversion function.
        num_type (type): Type to cast response into.
    Returns:
        type: Response value casted to num_type parameter.
    """
    if len(units) > 0:
        response = str(response)
        # BUG FIX: the old `units.sort(key=len, reverse=True)` measured the
        # length of each (symbol, func) TUPLE (always 2), so longer symbols
        # such as 'KB' were not guaranteed to be tried before shorter
        # suffixes like 'B'. sorted() also avoids mutating the caller's list.
        ordered = sorted(units, key=lambda pair: len(pair[0]), reverse=True)
        for unit, callback in ordered:
            _slice = len(unit) * -1
            if response[_slice:].upper() == unit.upper():
                response = num_type(response[:_slice])
                return callback(response)
        # No unit suffix matched: plain numeric value.
        return num_type(response)
    else:
        return num_type(response)
def ask_path(prompt, default=None):
    """
    Presents question which requires a response that is a valid path.
    Args:
        prompt (str): Question to present.
        default (str, optional): Default path to presented. Defaults to None.
    Returns:
        str: User response or default path.
    """
    # Keep asking until an existing filesystem path is supplied.
    while True:
        response = simple_response(prompt, default)
        if os.path.exists(response):
            return response
        print('That path does not exist. Try again.')
def ask_range(prompt, min, max, tips=None, default=None):
    """
    Prompts user with a range of values to choose from. Formats differently
    depending on tips.
    Args:
        prompt (str): Question to present.
        min (int): Minimum integer value.
        max (int): Maximum integer value.
        tips (list, optional): List of tips to display. Defaults to None.
        default (int, optional): Default option to display. Defaults to None.
    Raises:
        KeyError: Raises if default is not in range.
    Returns:
        int: Integer response
    """
    # Avoid the shared mutable default; None behaves like the old [].
    if tips is None:
        tips = []
    print(prompt + ':')
    keys = [i for i in range(min, max+1)]
    if default is not None and default not in keys:
        raise KeyError('Default value not in range.')
    if len(keys) == len(tips):
        # BUG FIX: previously printed `key + '\t-\t' + tip`, which raised
        # TypeError (int + str) whenever per-option tips were supplied; the
        # loop variable also shadowed the `tips` parameter.
        for key, tip in zip(keys, tips):
            print(f'{key}\t-\t{tip}')
    elif len(tips) == 2 and len(keys) > 2:
        print(f'Range:\n{min} ({tips[0]}) - {max} ({tips[1]})')
    else:
        print(f'Range: {min} - {max}')
    if default is None:
        hint = f'Pick an option ({min}-{max}): '
    else:
        hint = f'Pick an option ({min}-{max}) [{default}]: '
    option = input(hint)
    try:
        if option == '' and default is not None:
            return default
        elif option == '' or int(option) not in keys:
            print(f'Invalid option. Must be between {min} and {max}')
            return ask_range(prompt, min, max, tips, default)
        else:
            return int(option)
    except ValueError:
        # Typo fix in the message ("and integer" -> "an integer").
        print(f'Response must be an integer between {min} and {max}')
        return ask_range(prompt, min, max, tips, default)
def confirm_conf(conf):
    """
    Asks user if configuration dictionary is correct.
    Args:
        conf (dict): Configuration dictionary.
    Returns:
        Tuple(dict, bool): Tuple containing configuration dictionary and
            whether confirmed.
    """
    print()
    print('Your configuration:')
    for name, setting in conf.items():
        print(f'{name.title()}: {setting}')
    if yes_no('Is this correct?'):
        return conf, True
    # Not confirmed: let the user pick one key to redo; clearing it to None
    # signals the configure step to re-ask for that value.
    chosen = ask_options('Which would you like to change?', list(conf.keys()),
                         hints=list(conf.values()))
    conf[chosen] = None
    return conf, False
def save_config(conf, default):
    """
    Saves configuration dictionary to file.
    Args:
        conf (dict): Configuration dictionary
        default (str): Default save location.
    """
    print()
    if not yes_no('Would you like to save your configuration?'):
        return
    name = simple_response(
        'What would you like to name your configuration?')
    path = ask_path(
        'Please enter the path you would like your configuration saved to',
        default=default)
    file_path = os.path.join(path, name)
    # Ensure the file carries a .json extension.
    if '.json' not in file_path:
        file_path += '.json'
    with open(file_path, 'w+') as f:
        json.dump(conf, f, indent=4)
def get_config_file(conf):
    """
    Get configuration dictionary from file specified.
    Args:
        conf (dict): Configuration dictionary; conf['config'] holds the path
            of the saved configuration file.
    Returns:
        dict: Saved configuration overridden by any non-None values in conf.
    """
    with open(conf['config'], 'r') as f:
        saved_conf = json.load(f)
    # Command-line / caller-supplied values (non-None) take precedence.
    overrides = {key: value for key, value in conf.items() if value is not None}
    saved_conf.update(overrides)
    return saved_conf
def main(conf=None):
    """
    Main function
    Args:
        conf (dict, optional): Configuration dictionary. If left to default
        command line arguments will be parsed. Defaults to None.
    """
    if conf is None:
        conf = vars(parse_args())
    # Ensure the ~/.conf/pysync/configs directory exists for saved configs.
    home_dir = os.path.expanduser('~')
    home_conf_path = os.path.join(home_dir, '.conf')
    pysync_path = os.path.join(home_conf_path, 'pysync')
    confs_path = os.path.join(pysync_path, 'configs')
    os.makedirs(confs_path, exist_ok=True)
    if conf.get('config', None) is None:
        # No saved config requested: run the interactive flow.
        print_intro()
        conf = check_for_config(conf, confs_path)
        print()
    else:
        # A config name/path was given; resolve it against the configs
        # directory, trying both with and without a .json extension.
        if not os.path.exists(conf['config']):
            test_path = os.path.join(confs_path, conf['config'])
            if not os.path.exists(test_path):
                test_path += '.json'
                if not os.path.exists(test_path):
                    print('The configuration file specified does not exist!')
                    return
            conf['config'] = test_path
        conf = get_config_file(conf)
    # Loop until the user confirms a configuration; configure() fills in any
    # values that are still None. If configure() changed nothing, the config
    # is already complete and we proceed without asking.
    while True:
        _conf = configure(conf.copy())
        if _conf != conf:
            conf, done = confirm_conf(_conf)
            if done:
                save_config(conf, confs_path)
                break
        else:
            conf = _conf
            break
    # Clear the terminal before starting the sync loop.
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
    if conf['host'] == 'Server':
        server_start(conf)
    else:
        client_start(conf)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop the sync loop cleanly instead of a traceback.
        print('\n')
        print('Keyboard Interrupt')
        print('Exiting...')
|
16,014 | 855bfc8dba2e4c13698c599509868b852697be14 | #!/usr/bin/python
# hashbang, для исполнения скрипта в *nix - системах, указывает интерпретатор
# -*- coding: utf-8 -*-
# Кодировка файла, необходимо для правильного отображения не англ. строк в интерпретаторе
""" Программа реализует примитивный метод шифрования - шифр Цезаря.
Сам алгоритм прост - циклициски сдвигаем буквы алфавита в строке на k позиций
Между тем, это - многострочный коментарий, он же docstring"""
import sys # используем чтобы подключить сторонний модуль. В момент подключения он интерпретируется.
# В таком виде, содержимое модуля sys доступно через точку - sys.exit() - функция exit() из модуля sys.
# Можно также использовать такой видм импорта:
from math import *
# Теперь все содержимое модуля datetime доступно напрямую. Вместо * можно указать конкетные функции\классы\т.д.
def encrypt(k):  # function taking a single argument: the shift k
    """Caesar-cipher the user's message by shifting each character k positions."""
    plaintext = input('Введите сообщение: ')  # simplest keyboard input; the argument is the prompt shown to the user
    cipher = ''  # declare a string variable; note that strings are immutable
    for each in plaintext:  # example of a for loop over a _string_
        c = (ord(each)+k) % 126  # ord() returns the character's ASCII code
        if (c < 32):
            # Wrapped below the printable range; push back into it.
            # NOTE(review): decrypt() adds 95 in its wrap case, not 31, so
            # the two functions are not exact inverses -- confirm intent.
            c += 31
        cipher += chr(c)  # despite appearances the string is not extended in place; a new one is created each time
    print('Шифротекст: ' + cipher)
def decrypt(k):
    """Reverse the Caesar shift by k positions and print the message."""
    cipher = input('Введите шифротекст: ')
    plaintext = ''
    for each in cipher:
        p = (ord(each)-k) % 126
        if (p < 32):
            # NOTE(review): encrypt() adds 31 in its wrap-around case but
            # decrypt() adds 95 here, so round-tripping is not guaranteed
            # for every character -- verify intended behavior.
            p+=95
        plaintext += chr(p)
    print('Ваше сообщение: ' + plaintext)
def math_example(first_positional_arg, second_positional_arg = 100, *unamed_args_list, **named_args_dict):
    """Demonstrate a function of n variables.

    The first argument is ordinary, the second has a default value, the
    third collects extra positional arguments into a tuple, and the fourth
    collects extra keyword arguments into a dict.
    Details: http://docs.python.org/3.3/library/math.html
    """
    assert type(first_positional_arg) is int
    a = first_positional_arg
    result = factorial(a)
    print(result)
    exponent_text = "Exponent: " + str(exp(second_positional_arg))
    print(exponent_text)
    log_sum = log(a) + log2(a)
    print(log_sum)
    print(sqrt(result))
    for extra in unamed_args_list:
        # end='' keeps extra positional args on one line separated by spaces
        print(str(extra) + ' ', end='')
    for key in named_args_dict:
        print("Key: {}, value: {}".format(key, named_args_dict[key]))
    return a, result
def main(argv):
    # NOTE(review): the argv parameter is ignored; the checks below read
    # sys.argv directly (the caller passes sys.argv[1:], so indices differ).
    if (len(sys.argv) != 3):
        sys.exit('Порядок запуска: ceaser.py <k> <mode>')
    # sys.argv[1] is the shift k; sys.argv[2] selects the mode.
    if sys.argv[2] == 'e':
        encrypt(int(sys.argv[1]))
    elif sys.argv[2] == 'd':
        decrypt(int(sys.argv[1]))
    elif sys.argv[2] == 'b':
        # Demo mode exercising math_example with extra positional/keyword args.
        math_example(3, 1, 3, 4 ,5, 6, gg=4, aa= 1, bb= 6)
    else:
        sys.exit('Несуществующий режим')
if __name__ == "__main__":
    main(sys.argv[1:])
|
16,015 | 42674473ebd49442278530e5285b717237f2bb0f | #__author__:"jcm"
import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
opts_dir=os.path.join(BASE_DIR,r'data\accounts')
def collect_cfg():
    """Return the full paths of the files in the top level of *opts_dir*."""
    # os.walk yields (root, dirs, files); the first tuple describes the
    # top-level directory itself, so its files entry is what we want.
    file_lists = [files for _root, _dirs, files in os.walk(opts_dir)]
    return [os.path.join(opts_dir, filename) for filename in file_lists[0]]
|
16,016 | bac892662b3b4c54f267f17db9254205933d46d6 | # -*- coding: utf-8 -*-
import numpy
import matplotlib.image as mpimg
from os import listdir
import operator
import copy
import time
def img2vector(filename):
    """Read an image file and return a writable copy of its pixel array."""
    return mpimg.imread(filename).copy()
def GetTrainData(train_dir="E:\\深度学习\\训练集数据\\手写字符\\numbers\\train\\"):
    """Load 16x16 training images and their labels from *train_dir*.

    Args:
        train_dir (str, optional): Directory containing the .png training
            images. Defaults to the original hard-coded path so existing
            callers are unaffected (generalization: the path was previously
            duplicated inline and impossible to override).
    Returns:
        tuple: (TrainData, Table) where TrainData is (N, 16, 16) and Table
            holds the class index per image, or -1 for non-png entries.
    """
    filelist = listdir(train_dir)
    TrainNum = len(filelist)
    TrainData = numpy.zeros((TrainNum, 16, 16))
    # -1 marks rows that are not .png images and were never labelled.
    Table = numpy.zeros(TrainNum) - 1
    for i in range(TrainNum):
        if filelist[i].find("png") != -1:
            img = mpimg.imread(train_dir + filelist[i])
            TrainData[i, :, :] = img.copy()
            # File names appear to encode the 1-based class in chars 4..5,
            # hence the -1. TODO confirm the naming convention.
            Table[i] = int(filelist[i][4:6]) - 1
    return TrainData, Table
def classfiy(inData, TrainData, Lable, k=50):
    """k-nearest-neighbour classification of one image.

    Args:
        inData (ndarray): Query image with the same (H, W) shape as rows of
            TrainData.
        TrainData (ndarray): (N, H, W) training images.
        Lable (ndarray): Per-row labels; a -1 label terminates the distance
            scan (rows from the first -1 onward keep distance 0, matching
            the original sentinel behaviour).
        k (int, optional): Number of nearest neighbours to vote.
            Defaults to 50 (the previously hard-coded value).
    Returns:
        The label value receiving the most votes among the k nearest rows.
    """
    # Renamed: the old local variable shadowed the builtin `len`.
    num_samples = Lable.shape[0]
    dis = numpy.zeros(num_samples)
    i = 0
    while i < num_samples and Lable[i] != -1:
        # PERF FIX: compute each squared distance into a small temporary
        # instead of deep-copying and mutating the entire training array per
        # query (the trailing timing notes show the copy cost ~7s; the
        # earlier "no-copy" attempt was wrong because it mutated TrainData
        # in place -- a temporary avoids both problems).
        diff = TrainData[i, :, :] - inData
        dis[i] = (diff ** 2).sum()
        i = i + 1
    SortedDistIndex = dis.argsort()
    classCount = {}
    for j in range(k):
        voteLable = Lable[SortedDistIndex[j]]
        classCount[voteLable] = classCount.get(voteLable, 0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
# Evaluate the k-NN classifier on the held-out test directory and report
# accuracy plus total wall-clock time.
(TrainData1,Table1)=GetTrainData()
filelist=listdir("E:\\深度学习\\训练集数据\\手写字符\\numbers\\test\\")
TrainNum=len(filelist)
all=right=0
ticks1=time.time()
for i in range(TrainNum):
    if(filelist[i].find("png")!=-1):
        filename="E:\\深度学习\\训练集数据\\手写字符\\numbers\\test\\"+filelist[i]
        indata1=img2vector(filename)
        c=classfiy(indata1,TrainData1,Table1)
        # Ground-truth class is encoded in the file name (chars 4..5, 1-based).
        d=int(filelist[i][4:6])-1
        #print(filelist[i], "recognized as", c, "ground truth", d, c==d, '\n')
        all+=1
        if c==d:
            right+=1
        else :
            # Print the names of misclassified files.
            print(filelist[i])
print("总共",all,"张,识别正确",right,"张,正确率为",right/all)
ticks2=time.time()
print(ticks2-ticks1)
# With one deep copy per query: 57.05s, accuracy 96%
# Without copying (in-place mutation): 50.63s but wrong results
16,017 | bac312bfe93abdbec4c061375200c2d60bea8ae5 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import pandas as pd
import tensorflow as tf
import random as rn
import numpy as np
import os
from sklearn.metrics import precision_recall_curve, roc_auc_score
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
import logging
def seed_everything(seed):
    """Seed numpy, random and TensorFlow RNGs for reproducibility.

    Args:
        seed (int): Seed value applied to every library.
    """
    np.random.seed(seed)
    rn.seed(seed)
    tf.set_random_seed(seed)  # TF1-style global graph-level seed
    # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
    # change str hashing for the current process -- it only affects child
    # processes that inherit the environment. Confirm this is the intent.
    os.environ['PYTHONHASHSEED'] = str(seed)
def auc_roc(y_true, y_pred):
    """Streaming ROC-AUC metric usable as a Keras metric (TF1 graph mode).

    Returns a tensor whose evaluation also runs the internal update op, so
    the AUC accumulators advance every time the metric is computed.
    """
    value, update_op = tf.contrib.metrics.streaming_auc(y_pred, y_true)
    # The streaming metric creates local accumulator variables; promote them
    # to GLOBAL_VARIABLES so standard initializers/savers pick them up.
    metric_vars = [i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]]
    for v in metric_vars:
        tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
    # Force the update op to run whenever the value tensor is evaluated.
    with tf.control_dependencies([update_op]):
        value = tf.identity(value)
    return value
def recall_at_precision10(y_true, y_pred):
    """Return recall at the operating point whose precision is closest to 0.1.

    Args:
        y_true: Ground-truth binary labels.
        y_pred: Predicted scores.
    Returns:
        float: Recall at the precision value nearest to 0.1.
    """
    precision, recall, _ = precision_recall_curve(y_true, y_pred)
    # The precision nearest to 0.1 may lie above or below it; list.index
    # raises ValueError when the "above" candidate is absent.
    try:
        idx = precision.tolist().index(0.1 + np.min(abs(precision - 0.1)))
    except ValueError:  # BUG FIX: was a bare `except:` that swallowed
        # everything, including KeyboardInterrupt/SystemExit.
        idx = precision.tolist().index(0.1 - np.min(abs(precision - 0.1)))
    return recall[idx]
|
16,018 | f636797571c850ed4eeed7a3372efca90bb450ab | '''
Copied from Julian Bautista to be used as a general script to execute eBOSS reconstruction
'''
#from ebosscat import Catalog
from recon import Recon
import argparse
from astropy.io import fits
import numpy as np
from cattools import *
dir = '/Users/ashleyross/fitsfiles/' #directory on Ashley's computer where catalogs are
'''
argument parser allows script to be run like
> python do_recon.py -d 'datafile' -r 'randomfile' ...
'''
# Command-line options controlling the reconstruction run.
parser = argparse.ArgumentParser()
parser.add_argument('-reg', '--region', help='SGC or NGC',default='SGC')
parser.add_argument('-v', '--version', help='version',default='test')
parser.add_argument('-o', '--output', help='Output catalogs root name',default='rec')
parser.add_argument('-t', '--type', help='Target class',default='ELG')
parser.add_argument('--nthreads', \
help='Number of threads', type=int, default=1)
parser.add_argument('--niter', \
help='Number of iterations', type=int, default=3)
parser.add_argument('--nbins', \
help='Number of bins for FFTs', type=int, default=512)
parser.add_argument('--padding', default=200., \
help='Size in Mpc/h of the zero padding region', type=float)
parser.add_argument('--zmin', help='Redshift lower bound', type=float,default=.6)
parser.add_argument('--zmax', help='Redshift upper bound', type=float,default=1.1)
parser.add_argument('--smooth', help='Smoothing scale in Mpc/h', \
type=float, default=15.)
#parser.add_argument('--bias', \
#    help='Estimate of the bias of the sample', type=float, required=True)
#parser.add_argument('--f', \
#    help='Estimate of the growth rate', type=float, required=True, default=0.817)
args = parser.parse_args()
# NOTE(review): Python 2 print statement -- this script is Python 2 only; it
# would need print(args) (and other changes) to run under Python 3.
print args
'''
First thing that is needed is data and randoms with ra,dec,z,weight columns
'''
if args.type == 'ELG':
    from mksimpELG import *
    # Build the simplified data/random catalogs for the requested region.
    mkgalELGsimp(args.region,zmin=args.zmin,zmax=args.zmax,vo=args.version)
    mkranELGsimp(args.region,vo=args.version)
# Load the freshly written FITS catalogs (HDU 1 holds the table data).
cat = fits.open(dir+args.type+args.region+args.version+'.dat.fits')[1].data
ran = fits.open(dir+args.type+args.region+args.version+'.ran.fits')[1].data
# Use the systematic-total weights as the working weight column.
cat.weight = cat.WEIGHT_SYSTOT
ran.weight = ran.WEIGHT_SYSTOT
bias = 1.4 #get good value for this
f = .82 #eventually calculate from Cosmo
nbins=args.nbins
nthreads=args.nthreads
padding =args.padding
zmin=args.zmin
zmax=args.zmax
smooth=args.smooth
#bias = args.bias
#f = args.f
opt_box = 1 #optimize box dimensions
#-- selecting galaxies
#w = (cat.IMATCH==1)|(cat.IMATCH==2)|(cat.IMATCH==101)|(cat.IMATCH==102)
#w = w & ((cat.Z>=zmin)&(cat.Z<=zmax))
#cat.cut(w)
#wr = ((ran.Z>=zmin)&(ran.Z<=zmax))
#ran.cut(wr)
#cat = cutrange(cat,'Z',zmin,zmax)
#ran = cutrange(ran,'Z',zmin,zmax)
# Run the iterative reconstruction, then shift galaxy/random positions.
rec = Recon(cat, ran, nbins=nbins, smooth=smooth, f=f, bias=bias, \
padding=padding, opt_box=opt_box, nthreads=nthreads)
for i in range(args.niter):
    rec.iterate(i)
rec.apply_shifts()
rec.summary()
cat.RA, cat.DEC, cat.Z = rec.get_new_radecz(rec.cat)
ran.RA, ran.DEC, ran.Z = rec.get_new_radecz(rec.ran)
# Write the reconstructed data catalog (RA/DEC/Z + weights).
cols = []
RAc = fits.Column(name='RA',format='D', array=cat.RA)
cols.append(RAc)
DECc = fits.Column(name='DEC',format='D', array=cat.DEC)
cols.append(DECc)
Zc = fits.Column(name='Z',format='D', array=cat.Z)
cols.append(Zc)
fkpc = fits.Column(name='WEIGHT_FKP',format='D', array=cat.WEIGHT_FKP)
cols.append(fkpc)
sysc = fits.Column(name='WEIGHT_SYSTOT',format='D', array=cat.WEIGHT_SYSTOT)
cols.append(sysc)
hdulist = fits.BinTableHDU.from_columns(cols)
header = hdulist.header
hdulist.writeto(dir+args.type+args.region+args.version+args.output+'.dat.fits', overwrite=True)
# Write the reconstructed random catalog with the same columns.
cols = []
RAc = fits.Column(name='RA',format='D', array=ran.RA)
cols.append(RAc)
DECc = fits.Column(name='DEC',format='D', array=ran.DEC)
cols.append(DECc)
Zc = fits.Column(name='Z',format='D', array=ran.Z)
cols.append(Zc)
fkpc = fits.Column(name='WEIGHT_FKP',format='D', array=ran.WEIGHT_FKP)
cols.append(fkpc)
sysc = fits.Column(name='WEIGHT_SYSTOT',format='D', array=ran.WEIGHT_SYSTOT)
cols.append(sysc)
hdulist = fits.BinTableHDU.from_columns(cols)
header = hdulist.header
hdulist.writeto(dir+args.type+args.region+args.version+args.output+'.ran.fits', overwrite=True)
#cat.export(args.output+'.dat.fits')
#ran.export(args.output+'.ran.fits')
|
16,019 | 670f86bec449206dd9aa1ca65bda6231ae4ee473 | # Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/ebf75b738a784d6dad28494e18113b31
import sys
def input():
    """Fast replacement for builtin input(): read one stdin line, strip the newline."""
    line = sys.stdin.readline()
    return line.rstrip()
# Print the prime factorization of N, one factor per line with multiplicity.
N = int(input())
X = N
for i in range(2, N + 1):
    if X == 1:
        # All prime factors have been extracted.
        break
    while X % i == 0:
        X //= i
        print(i)
|
16,020 | b572fa9f3df07bf5de0644ce8f1f8cc79cc339a0 |
def backtrack(word,i,j,row,column):
    """DFS word search from cell (i, j) on the module-level `board`.

    Shared state lives on function attributes: backtrack.l is the index of
    the next character to match, backtrack.found latches success; `visited`
    is a module-level set of cells on the current path.
    """
    if board[i][j]!=word[backtrack.l]:
        # Mismatch: nothing was pushed yet, so no cleanup is needed.
        return
    visited.add((i,j))
    #print(visited)
    backtrack.l = backtrack.l+1
    if backtrack.l == len(word):
        backtrack.found = True
        return
    # Explore the four orthogonal neighbours still inside the grid.
    for di,dj in [(1,0),(0,1),(-1,0),(0,-1)]:
        i1,j1 = i+di,j+dj
        if 0<=i1<row and 0<=j1<column and (i1,j1) not in visited:
            backtrack(word,i1,j1,row,column)
            if backtrack.found==True:
                # Success: keep path state intact and unwind immediately.
                return
    # Dead end: undo this cell before returning to the caller.
    visited.remove((i,j))
    backtrack.l = backtrack.l-1
# Test fixtures: each (board, word) pair overwrites the previous one, so
# only the last pair (the board searched for "AAB") is actually used.
board =[['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']]
word = "C"
board = [["c","c","f"],
["a","a","i"],
["c","d","e"]]
word = "fie"
board = [["C","A","A"],
["A","A","A"],
["B","C","D"]]
word = "AAB"
row ,column = len(board),len(board[0])
# Initialise the shared search state used by backtrack().
backtrack.found = False
visited = set()
backtrack.l = 0
# Try every cell as a starting point until a match is found.
for i in range(row):
    for j in range(column):
        backtrack(word,i,j,row,column)
        if backtrack.found == True:
            break
    if backtrack.found==True:
        break
if backtrack.found==True:
    print("True")
else:
    print("False")
|
16,021 | f79a8de5d4b5baa4ffffb2523e7bec85495cea3c | import math
# Read an integer from the user and print its factorial via math.factorial.
a=int (input("Enter the number"))
print(math.factorial(a))
|
16,022 | 3bf85aaf38f6919050d3601608cacab6fcead857 | import pickle
import torch
import torch.nn as nn
import numpy as np
from bpemb import BPEmb
from common.config import PATH
VOCAB_PATH = PATH['DATASETS']['COCO']['VOCAB']
EMBEDS_PATH = PATH['MODELS']['WORD_EMBEDS']
def save_vocab(vocab, file_path):
    """Pickle *vocab* to *file_path* using the highest protocol available."""
    with open(file_path, 'wb') as out_file:
        pickle.dump(vocab, out_file, pickle.HIGHEST_PROTOCOL)
def save_embeds(embeds, file_path):
    """Persist the embedding layer's parameters (its state_dict) to *file_path*."""
    state = embeds.state_dict()
    torch.save(state, file_path)
if __name__ == '__main__':
    # Byte-pair-encoding embeddings: 300-dim vectors, 5000-token vocabulary.
    dim = 300
    vs = 5000
    bpemb_en = BPEmb(lang="en", dim=dim, vs=vs)
    # Shift every token id up by one so that id 0 can serve as padding.
    vocab = dict((i, i + 1) for i in range(vs))
    embeds = bpemb_en.vectors
    # Prepend an all-zero row for the padding index.
    embeds = np.concatenate((np.zeros((1, dim)), embeds))
    embeds = torch.FloatTensor(embeds)
    embeds = nn.Embedding.from_pretrained(embeds)
    save_vocab(vocab, VOCAB_PATH)
    save_embeds(embeds, EMBEDS_PATH)
16,023 | bc5f5b15f68aa2e9d5edb5083b9b5c6e9dce8318 | from .base_parser import BaseParser
from .const import DIRECTIVE, NUMBER, SYMBOL, NAME, KEYWORD, BOOLEAN, EOF, ANY
from .nodes import (
EvalNode, SuiteNode, SkipNode, IfNode, WhileNode, AssignNode, VariableNode, NotNode,
ConstantNode, MulNode, SubNode, AddNode, CmpNode, EqNode, AndNode, OrNode,
TraceNode, ExitNode, PrintNode, HelpNode, ResetNode, NumericNode,
FromNumericNode, RunNumericNode
)
class Parser(BaseParser):
    """Recursive-descent parser producing an AST of *Node objects.

    Precedence layers, loosest binding first:
    expr_a (|) > expr_b (&) > expr_c (=) > expr_d (< <= > >=)
    > expr_e (+ -) > expr_f (* /) > factor.
    """
    def program(self):
        """Parse a whole program: one suite followed by end-of-input."""
        suite = self.suite()
        self.eat(EOF)
        return suite
    def suite(self):
        """Parse a sequence of ';'-separated statements, allowing
        parenthesised sub-suites."""
        statements = []
        while self._cur.type != EOF:
            if self.try_eat(SYMBOL, "("):
                statements.append(self.suite())
                self.eat(SYMBOL, ")")
            else:
                statements.append(self.statement())
            if self._cur.type == EOF:
                break
            if not self.try_eat(SYMBOL, ";"):
                # No separator: the suite ends here (e.g. before ')'/'else').
                break
        return SuiteNode(statements)
    def statement(self):
        """Parse one statement: skip / if / while / assignment, or fall
        back to a bare expression."""
        # A NAME only starts a statement when followed by ':=' (assignment);
        # otherwise it is parsed as an expression.
        if (
            self._cur.type not in (KEYWORD, NAME)
            or (
                self._cur.type == NAME
                and not (self._next.type == SYMBOL and self._next.meta == ":=")
            )
        ):
            return self.expr_a()
        token = self.eat_list({
            KEYWORD: ("skip", "if", "while"),
            NAME: ANY,
        })
        if token.type == KEYWORD:
            if token.meta == "skip":
                return SkipNode()
            elif token.meta == "if":
                # if <expr> then <suite> [else <suite>]
                if_condition = self.expr_a()
                self.eat(KEYWORD, "then")
                if_body = self.suite()
                if self.try_eat(KEYWORD, "else"):
                    if_else = self.suite()
                else:
                    if_else = None
                return IfNode(if_condition, if_body, if_else)
            elif token.meta == "while":
                # while <expr> do <suite>
                while_condition = self.expr_a()
                self.eat(KEYWORD, "do")
                while_body = self.suite()
                return WhileNode(while_condition, while_body)
        elif token.type == NAME:
            # <name> := <expr>
            name = token.meta
            self.eat(SYMBOL, ":=")
            value = self.expr_a()
            return AssignNode(name, value)
    def factor(self):
        """Parse an atom: optionally negated name/literal/parenthesised
        expression, or an interpreter directive."""
        negate = False
        if self.try_eat(SYMBOL, "!"):
            negate = True
        elif self.try_eat(SYMBOL, "¬"):
            negate = True
        token = self.eat_list({
            NAME: ANY,
            NUMBER: ANY,
            BOOLEAN: ANY,
            SYMBOL: ("(", ),
            DIRECTIVE: ANY,
        })
        if token.type == SYMBOL:
            token = self.expr_a()
            self.eat(SYMBOL, ")")
        elif token.type == NAME:
            token = VariableNode(token.meta)
        elif token.type == DIRECTIVE:
            # Directives return immediately and are never wrapped in NotNode.
            if token.meta == "trace":
                return TraceNode(token.location)
            elif token.meta == "exit":
                return ExitNode()
            elif token.meta == "help":
                return HelpNode()
            elif token.meta == "reset":
                return ResetNode()
            elif token.meta == "print":
                return PrintNode(self.eat(NAME).meta)
            elif token.meta == "numeric":
                return NumericNode(self.suite())
            elif token.meta == "from_numeric":
                return FromNumericNode(self.eat(NAME).meta, self.statement())
            elif token.meta == "run_numeric":
                return RunNumericNode(self.eat(NAME).meta, self.statement())
            elif token.meta == "eval":
                return EvalNode(self.eat(NAME).meta)
            else:
                self._error(f"Unknown directive '{token.meta}'", token)
        else:
            token = ConstantNode(token.meta)
        if negate:
            token = NotNode(token)
        return token
    def expr_f(self):
        """Parse multiplicative expressions: factor (('*' | '/') factor)*."""
        node = self.factor()
        while self._cur.type == SYMBOL and self._cur.meta in ("*", "/"):
            if self.try_eat(SYMBOL, "*"):
                node = MulNode(node, self.factor())
            if self.try_eat(SYMBOL, "/"):
                # NOTE(review): '/' builds a SubNode (subtraction), not a
                # division node -- no DivNode exists in the imports. Verify
                # whether this is intentional or a bug.
                node = SubNode(node, self.factor())
        return node
    def expr_e(self):
        """Parse additive expressions: expr_f (('+' | '-') expr_f)*."""
        node = self.expr_f()
        while self._cur.type == SYMBOL and self._cur.meta in ("+", "-"):
            if self.try_eat(SYMBOL, "+"):
                node = AddNode(node, self.expr_f())
            if self.try_eat(SYMBOL, "-"):
                node = SubNode(node, self.expr_f())
        return node
    def expr_d(self):
        """Parse comparison chains: expr_e (('<=' '<' '>' '>=') expr_e)*."""
        node = self.expr_e()
        while (
            self._cur.type == SYMBOL
            and self._cur.meta in ("<=", "<", ">", ">=")
        ):
            sym = self.eat(SYMBOL)
            node = CmpNode(node, sym.meta, self.expr_e())
        return node
    def expr_c(self):
        """Parse equality: expr_d ('=' expr_d)*."""
        node = self.expr_d()
        while self._cur.type == SYMBOL and self._cur.meta in ("=", ):
            if self.try_eat(SYMBOL, "="):
                node = EqNode(node, self.expr_d())
        return node
    def expr_b(self):
        """Parse conjunction: expr_c ('&' expr_c)*."""
        node = self.expr_c()
        while self._cur.type == SYMBOL and self._cur.meta in ("&", ):
            if self.try_eat(SYMBOL, "&"):
                node = AndNode(node, self.expr_c())
        return node
    def expr_a(self):
        """Parse disjunction (loosest-binding operator): expr_b ('|' expr_b)*."""
        node = self.expr_b()
        while self._cur.type == SYMBOL and self._cur.meta in ("|", ):
            if self.try_eat(SYMBOL, "|"):
                node = OrNode(node, self.expr_b())
        return node
|
16,024 | 4cc24de7cba58ab1794c02942198eff84db93f61 | #!/usr/bin/env python3
def main():
    """Read T test cases from stdin and print one 'Case #i: answer' line each."""
    # "The first line of the input gives the number of test cases, T."
    t = int(input())
    for case in range(1, t + 1):
        # Each test case is a single integer N, the last number counted.
        n = int(input())
        print("Case #%d: %s" % (case, solve(n)))
def solve(n):
    """Return the largest 'tidy' number (non-decreasing digits) <= n."""
    digits = [int(ch) for ch in str(n)]
    tidy_digits = recurse(digits)
    return int(''.join(str(d) for d in tidy_digits))
def recurse(digits):
    """Tidy a digit list: keep the head while it is <= the tidied tail's
    first digit; otherwise decrement it and fill the remainder with 9s."""
    head, *tail = digits
    if not tail:
        return [head]
    tail = recurse(tail)
    if head <= tail[0]:
        return [head, *tail]
    # head > tail[0]: lower the head by one and maximise the suffix.
    return [head - 1] + [9] * len(tail)
# Script entry point; note this runs unconditionally (no __main__ guard).
main()
|
16,025 | 062e61edb7552d6e916034fcbad18b6f740020e1 | import numpy as np
import cv2
import argparse
from features import HoGFeatures, HoGCirclesFeatures
from cpp_ottree.tree import Ensemble
from normalization import norm_grad, norm_max
from utils import DataManager
def iou(src, lab):
    """Intersection-over-union of two masks whose foreground pixels equal 255."""
    src_mask = (src == 255)
    lab_mask = (lab == 255)
    overlap = np.count_nonzero(np.logical_and(src_mask, lab_mask))
    combined = np.count_nonzero(np.logical_or(src_mask, lab_mask))
    return overlap / combined
def main(args):
    """Run pixel-wise detection with a tree ensemble over an image, or (in
    --iou mode) threshold an existing result and compare it with a label."""
    data_manager = DataManager()
    if args.iou:
        # IoU mode: binarize a previous result and measure overlap.
        if args.img is None:
            print('please specify img and label param')
            return 0
        img = cv2.imread(data_manager.get_path_to_img() + args.img)
        one_channel = img[:, :, 0]
        thresh, dst = cv2.threshold(one_channel, args.thresh, 255, cv2.THRESH_BINARY)
        label = cv2.imread(data_manager.get_path_to_labels() + args.lbl)
        lab_one_channel = label[:, :, 0]
        # Pad the thresholded result up to the label size before comparing.
        dst_ = np.zeros(lab_one_channel.shape)
        dst_[:dst.shape[0], :dst.shape[1]] = dst
        print("IoU = ", iou(dst_, lab_one_channel))
        cv2.imwrite(data_manager.get_path_to_results() + args.result, dst)
        return 0
    # Detection mode: load the trained ensemble and a feature descriptor.
    ensemble = Ensemble(data_manager.get_path_to_models() + args.model, 6, args.verbose)
    bbox_width = 64
    bbox_height = 64
    bbox_half_width = bbox_width // 2
    bbox_half_height = bbox_height // 2
    if args.descr_type == 'hog':
        descriptor = HoGFeatures(bbox_height, bbox_width)
    else:
        descriptor = HoGCirclesFeatures(bbox_height, bbox_width)
    img, label = data_manager.get_test_image()
    if args.img:
        img = cv2.imread(data_manager.get_path_to_img() + args.img)
    # One (bbox_height, bbox_width) probability map per half-overlapping
    # window position.
    probas = np.zeros(shape=(img.shape[0] // bbox_half_height,
                             img.shape[1] // bbox_half_width,
                             bbox_height, bbox_width))
    descriptor.set_image(img)
    # Slide the window with 50% overlap and score every pixel inside it.
    # NOTE: four nested loops -> one ensemble.predict per pixel per window;
    # this is very slow on large images.
    for i in np.arange(0, img.shape[0] - bbox_height, bbox_half_height):
        for j in np.arange(0, img.shape[1] - bbox_width, bbox_half_width):
            print(i, j)
            for x in range(bbox_width):
                for y in range(bbox_height):
                    vec = descriptor.apply(int(i) + x, int(j) + y)
                    # Sigmoid of the ensemble margin -> probability.
                    prob = 1. / (1. + np.exp(-ensemble.predict(vec)))
                    probas[i // bbox_half_height][j // bbox_half_width][x][y] = prob
    # Merge overlapping-window probabilities into one output image.
    if args.normalize == 'grad_desc':
        dst = norm_grad(probas, bbox_height, bbox_width)
    else:
        dst = norm_max(probas, bbox_height, bbox_width, img_height=img.shape[0], img_width=img.shape[1])
    cv2.imwrite(data_manager.get_path_to_results() + args.result, dst)
if __name__ == "__main__":
    # Command-line interface; --iou mode additionally requires
    # --thresh, --img and --lbl.
    parser = argparse.ArgumentParser()
    parser.add_argument('--descr_type', required=True, choices=['hog', 'circles'],
                        help='type of descriptor')
    parser.add_argument('--model', required=True, help='model file name')
    parser.add_argument('--result', required=True, help='name for result image')
    parser.add_argument('--iou', action='store_true', required=False, help='choose when need to compute iou metric')
    parser.add_argument('--thresh', type=int, required=False, help='required! when need to compute iou metric')
    parser.add_argument('--img', required=False, help='image to test')
    parser.add_argument('--lbl', required=False, help='label to test')
    parser.add_argument('--normalize', required=False, default='grad_desc',
                        choices=['max', 'grad_desc'],
                        help='specifies the type of probas merging')
    parser.add_argument('-v', '--verbose', required=False, action='store_true', help='verbose mode')
    args = parser.parse_args()
    main(args)
|
16,026 | 88396821e05c0f0d11094d26360e371a4a30f233 | # 24. Согласно древней индийской легенде создатель шахмат за своё изобретение попросил у раджи незначительную,
# на первый взгляд, награду: столько пшеничных зёрен, сколько окажется на шахматной доске, если на первую клетку
# положить одно зерно, на вторую — два зерна, на третью — четыре зерна и т. д. Оказалось, что такого количества
# зерна нет на всей планете (оно равно 2**64 − 1 зерен). Посчитайте, начиная с какой клетки по счету, общее количество
# зерен, которое должен был бы отдать раджа изобретателю было больше 1 000 000 зерен и сколько конкретно зерен он должен
# был бы отдать.
# def chess_reward(): # returns 2 ints
# pass
def chess_reward():
    """Find the first chessboard square where the cumulative grain count
    exceeds 1,000,000.

    Square k holds 2**(k-1) grains, so after k squares the total is 2**k - 1.

    Returns:
        tuple: (square number, total grains at that square) == (20, 1048575).
    """
    total = 0
    for square in range(1, 65):
        # Square `square` holds 2**(square - 1) grains.
        total += 2 ** (square - 1)
        # BUG FIX: the old code tested `2**square >= 1_000_000`, which is
        # `total >= 999_999`, not the stated "more than 1,000,000"; totals
        # of the form 2**k - 1 never hit the gap, so the result happened to
        # agree. Compare the running total directly against the spec.
        if total > 1000000:
            return square, total
print(chess_reward())
|
16,027 | 56f926c2b9b940a781e7525dda8985d849a17fd8 | import numpy as np
import pandas as pd
# title1
# 背景:数据清洗是数据机器学习中重要的流程之一,数据建模的效果好坏,很大程度上依赖于数据清洗的质量,本案例是银行风控项目中,数据预处理部分的需求,具体要求如下:
# 要求:
# 1. 读入bank数据表,删除列,列名为每月归还额(10分)
# 2. 对指定列做频数统计,列名为贷款期限(10分)
# 3. 对指定列做分组统计,列名为还款状态。在此基础上,计算各分组贷款金额列的均值(8分)
# 4. 统计贷款金额的均值、最小值、最大值、中位数、方差(6分)
# 5. 对数据进行排序,按照发放贷款日期(降序)贷款金额(升序)排序(6分)
# 6. 按照贷款金额除以贷款期限计算生成新列,并命名为每月归还额(5分)
# 7. 提取行(账户号在3000到4500之间)列(发放贷款日期和贷款金额)数据框(3分)
# data = pd.read_excel('bank.xls')
# print(data)
#
# print(data['贷款期限'].value_counts())
# print(data.groupby(by='还款状态')['贷款金额'].mean())
# print(data['贷款金额'].mean())
# print(data['贷款金额'].min())
# print(data['贷款金额'].max())
# print(data['贷款金额'].median())
# print(data['贷款金额'].var())
# # data = pd.DataFrame()
# print(data.sort_values(by=['发放贷款日期', '贷款金额'], ascending=[False, True]))
# print(data[(data['账户号'] >= 3000) & (data['账户号'] <= 4500)])
# title2
# 已知有如下数据集X,求此数据集的协方差矩阵并打印输出。
# X = [[2, 0, -1.4],
# [2.2, 0.2, -1.5],
# [2.4, 0.1, -1],
# [1.9, 0, -1.2]]
# X = [[2, 0, -1.4],
# [2.2, 0.2, -1.5],
# [2.4, 0.1, -1],
# [1.9, 0, -1.2]]
# print(np.cov(X, rowvar=True))
# title3
# Fisher1936年收集了三种鸢尾花分别50个样本数据(Iris Data):Setosa、Virginica、Versicolour。解释变量是花瓣(petals)和萼片(sepals)长度和宽度的测量值,响应变量是花的种类。鸢尾花数据集经常用于分类模型测试,scikit-learn中也有。
# 题目要求:
# 1.把iris数据集降成方便可视化的二维数据(10)
# 2.打印主成分的方差解释率(5分)
# 3.降维后的数据在二维空间可视化(3分)
from sklearn.datasets import load_iris
data = load_iris()
dataX = data.data
data_columns = data.target_names
target = data.target
print(data)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
# print(pca.fit_transform(dataX))
# print(pca.explained_variance_ratio_)
dataX_Dec = pca.fit_transform(dataX)
import matplotlib.pylab as plt
# print(data_columns)
for color, classification, target_name in zip('rgb', [0, 1, 2], data.target_names):
plt.scatter(dataX_Dec[target==classification, 0], dataX_Dec[target==classification, 1], c=color, label=target_name)
plt.show() |
16,028 | 6462e116ceeddd1f9a676f40c08663cd92a03708 | import os
import numpy as N
import scipy.ndimage.filters
from WEM.utils.exceptions import FormatError
class FSS:
    """Fractions Skill Score between a forecast and an observation grid.

    Populates self.MSE and self.FSS, keyed first by threshold and then by
    neighbourhood size n.
    """
    def __init__(self,data_fcst,data_obs,itime=False,ftime=False,
                    lv=False,thresholds=(0.5,1,2,4,8),ns=None,
                    ns_step=4):
        """ Fcst and ob data needs to be on the same grid.

        Args:
            data_fcst: 2D forecast field.
            data_obs: 2D observed field on the same grid.
            thresholds: values used to binarise both fields.
            ns: neighbourhood sizes; default is 1..max(dim) step ns_step.
        """
        self.data_fcst = data_fcst
        self.data_obs = data_obs
        self.thresholds = thresholds
        self.enforce_2D()
        self.do_grid_check()
        # Neighbourhoods
        if ns is None:
            ns = N.arange(1,max(self.xdim,self.ydim),ns_step)
        self.ns = ns
        # Computations
        self.compute_FSS()
        return
    def enforce_2D(self,):
        """Both data grids need to be 2D.

        NOTE(review): this method rebinds only the local name `data`, so the
        instance attributes are never actually squeezed, and the
        `shp[0] == 0` tests look like they were meant to be `== 1`.
        Behaviour preserved pending confirmation of intent.
        """
        for data in (self.data_obs,self.data_fcst):
            shp = data.shape
            if len(shp) == 2:
                pass
            elif len(shp) == 3:
                if shp[0] == 0:
                    data = data[0,:,:]
            elif len(shp) == 4:
                if (shp[0] == 0) and (shp[1] == 0):
                    data = data[0,0,:,:]
            else:
                raise FormatError("Data needs to be 2D.")
        return
    def do_grid_check(self,):
        """ Make sure grids are identical size."""
        self.ydim, self.xdim = self.data_fcst.shape
        if self.data_obs.shape != (self.ydim,self.xdim):
            raise FormatError("Obs and forecast data not same size.")
        return
    def compute_FSS(self):
        """Fill self.MSE and self.FSS for every threshold and neighbourhood."""
        maxlen = max(self.ydim,self.xdim)
        self.MSE = {}
        self.FSS = {}
        for th in self.thresholds:
            self.MSE[th] = {}
            self.FSS[th] = {}
            # Convert both fields to binary exceedance masks at threshold th.
            fc = N.copy(self.data_fcst)
            ob = N.copy(self.data_obs)
            fc[fc < th] = False
            fc[fc >= th] = True
            ob[ob < th] = False
            ob[ob >= th] = True
            for n in self.ns:
                self.MSE[th][n] = {}
                self.FSS[th][n] = {}
                # Fraction of exceedances inside each n x n neighbourhood.
                # (scipy.ndimage.uniform_filter replaces the removed
                # scipy.ndimage.filters namespace; identical behaviour.)
                pad = int((n-1)/2)
                On = scipy.ndimage.uniform_filter(ob,size=n,
                            mode='constant',cval=0)
                Mn = scipy.ndimage.uniform_filter(fc,size=n,
                            mode='constant',cval=0)
                # Delete meaningless smoothed data at the zero-padded borders.
                cutrangex = list(range(0,pad)) + list(range(self.xdim-pad,self.xdim))
                # BUG FIX: the y-axis cut previously ended at self.xdim, so
                # on non-square grids it deleted the wrong rows (or raised
                # an out-of-bounds error when ydim < xdim).
                cutrangey = list(range(0,pad)) + list(range(self.ydim-pad,self.ydim))
                On = N.delete(On,cutrangey,axis=0)
                Mn = N.delete(Mn,cutrangey,axis=0)
                On = N.delete(On,cutrangex,axis=1)
                Mn = N.delete(Mn,cutrangex,axis=1)
                cutlenx = On.shape[1]
                cutleny = On.shape[0]
                # Mean squared difference between the two fraction fields.
                sqdif = (On-Mn)**2
                self.MSE[th][n]['score'] = (1/(cutlenx*cutleny))*N.sum(sqdif)
                # Reference MSE: worst case with no spatial overlap.
                self.MSE[th][n]['ref'] = (1/(cutlenx*cutleny))*(N.sum(On**2)+N.sum(Mn**2))
                # FSS = 1 - MSE / MSE_ref
                self.FSS[th][n] = 1 - (self.MSE[th][n]['score'] / self.MSE[th][n]['ref'])
        return
|
16,029 | 1e394957f290ea4b908fede33f3afc72dbb5ce68 | from .folks import FolksListener, it_folks, it_attrs, it_changes
from kupfer.objects import Source, Action, TextLeaf
from kupfer.obj import contacts
from kupfer import utils
class FolksContact(contacts.ContactLeaf):
    """Kupfer contact leaf built from a libfolks individual.

    Collects the folk's attributes into the slot dict that ContactLeaf
    expects, picking a primary email and phone number.
    """
    def __init__(self, obj):
        # `obj` is the folks individual; kept for later method calls.
        self.folk = obj
        slots = {contacts.LABEL_KEY: obj.get_display_name()}
        # it_attrs yields (attribute-name, key, value) triples; group the
        # values per attribute into nested dicts.
        for attr, key, value in it_attrs(obj):
            slots[attr] = slots.get(attr, {})
            slots[attr][key] = value
        email_addresses = slots.get('email_addresses', None)
        im_addresses = slots.get('im_addresses', None)
        # Prefer a real email address; fall back to an IM address.
        # NOTE(review): "first value" order depends on dict iteration of
        # the collected attributes — confirm this picks the intended one.
        if email_addresses:
            slots[contacts.EMAIL_KEY] = next(iter(email_addresses.values()))
        elif im_addresses:
            slots[contacts.EMAIL_KEY] = next(iter(im_addresses.values()))
        phone_numbers = slots.get('phone_numbers', None)
        if phone_numbers:
            slots[contacts.PHONE_KEY] = next(iter(phone_numbers.values()))
        contacts.ContactLeaf.__init__(self, slots, obj.get_display_name(), None)

    def get_description(self):
        """Show the primary email and phone in the UI description."""
        email = self.object.get(contacts.EMAIL_KEY, '')
        phone = self.object.get(contacts.PHONE_KEY, '')
        return '{} {}'.format(email, phone)
class FolksSource(Source):
    """Kupfer source exposing libfolks contacts as FolksContact leaves."""

    def __init__(self):
        Source.__init__(self, _("Folks"))
        self.resource = None
        self.cached_items = None
        # BUG FIX: self.folks was never initialised before the listener
        # callbacks (on_ready / on_change) first used it, raising
        # AttributeError; create the mapping up front.
        self.folks = {}

    def get_items(self):
        """Yield every known contact leaf."""
        for contact in self.folks.values():
            yield contact

    def on_change(self, agg, changes):
        """Apply incremental add/remove updates from the aggregator."""
        for old_folk, new_folk in it_changes(changes):
            if new_folk:
                self.folks[new_folk.get_id()] = FolksContact(new_folk)
            elif old_folk:
                del self.folks[old_folk.get_id()]
        self.mark_for_update()

    def on_ready(self, agg, *args):
        """Initial population once the folks aggregator has loaded."""
        for folk in it_folks(agg):
            self.folks[folk.get_id()] = FolksContact(folk)

    def initialize(self):
        self.resource = FolksListener(self.on_ready, self.on_change)
        self.resource.initialize()

    def finalize(self):
        self.folks = {}
        self.resource = None

    def provides(self):
        yield FolksContact
class EmailSource(Source):
    """Secondary source listing every e-mail address of one contact."""

    def __init__(self, leaf):
        Source.__init__(self, _("Emails"))
        # The contact leaf stores its addresses under 'email_addresses'.
        self.resource = leaf.object['email_addresses']

    def item_types(self):
        yield TextLeaf

    def get_items(self):
        # Keys of the mapping are ignored; only addresses are exposed.
        for address in self.resource.values():
            yield TextLeaf(address)
class NewMailAction(Action):
    """Compose a new e-mail to one of a contact's addresses."""

    def __init__(self):
        Action.__init__(self, _('Compose Email Using...'))

    def activate(self, leaf, email_leaf=None):
        # Hand the chosen address to the system mail handler.
        if email_leaf:
            email = email_leaf.object
            utils.show_url("mailto:%s" % email)

    def item_types(self):
        yield FolksContact

    def valid_for_item(self, leaf):
        # BUG FIX: removed a leftover debug print() that spammed stdout
        # on every validity check.
        return 'email_addresses' in leaf.object

    def get_icon_name(self):
        return "mail-message-new"

    def requires_object(self):
        return True

    def object_source(self, for_item=None):
        if for_item:
            return EmailSource(for_item)

    def object_types(self, for_item=None):
        yield TextLeaf

    def valid_object(self, iobj, for_item=None):
        return type(iobj) is TextLeaf

    def has_result(self):
        return True
|
16,030 | 9440453f4de955bee68c4d08b367a8befcfc81c3 | #!/usr/bin/env python
# encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from bs4 import BeautifulSoup
import re
import urllib2
import xlwt
#得到页面全部内容
def askURL(url):
#发送请求
request = urllib2.Request(url)
try:
#取得响应
response = urllib2.urlopen(request)
#获取网页内容
html= response.read()
#print html
except urllib2.URLError, e:
if hasattr(e,"code"):
print e.code
if hasattr(e,"reason"):
print e.reason
return html
#获取相关内容
def getData(baseurl):
    """Scrape 10 pages (250 movies) of the Douban Top-250 list.

    Each returned row has 7 fields: [detail link, image link, Chinese
    title, foreign title (or ' '), rating, vote count, summary (or ' ')].

    BUG FIX: removed a dead ``len(data) != 12`` check (rows here always
    have exactly 7 fields; the check was left over from another scraper).
    """
    # Regexes for the fields of one movie entry.
    findLink=re.compile(r'<a href="(.*?)">')
    findImgSrc=re.compile(r'<img.*src="(.*jpg)"',re.S)
    findTitle=re.compile(r'<span class="title">(.*)</span>')
    findRating=re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
    findJudge=re.compile(r'<span>(\d*)人评价</span>')
    findInq=re.compile(r'<span class="inq">(.*)</span>')
    datalist=[]
    for i in range(0,10):
        # Each page is offset by 25 entries.
        url=baseurl+str(i*25)
        html=askURL(url)
        soup = BeautifulSoup(html,'html.parser')
        # One <div class="item"> per movie.
        for item in soup.find_all('div',class_='item'):
            data=[]
            item=str(item)
            link=re.findall(findLink,item)[0]
            data.append(link)
            imgSrc=re.findall(findImgSrc,item)[0]
            data.append(imgSrc)
            titles=re.findall(findTitle,item)
            # A movie may have only a Chinese title, no foreign one.
            if(len(titles)==2):
                data.append(titles[0])
                # Strip the " / " separator from the foreign title.
                otitle=titles[1].replace(" / ","")
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')  # placeholder for missing foreign title
            rating=re.findall(findRating,item)[0]
            data.append(rating)
            judgeNum=re.findall(findJudge,item)[0]
            data.append(judgeNum)
            inq=re.findall(findInq,item)
            # The one-line summary may be absent.
            if len(inq)!=0:
                # Drop the trailing full stop.
                data.append(inq[0].replace("。",""))
            else:
                data.append(' ')  # no summary available
            datalist.append(data)
    return datalist
#将相关数据写入excel中
# Write the scraped rows to an Excel workbook.
def saveData(datalist, savepath):
    """Save `datalist` (rows of 7 fields) to `savepath` via xlwt.

    BUG FIX: the row count was hard-coded to 250, which raised
    IndexError when fewer movies were scraped; iterate the actual list.
    NOTE(review): xlwt writes legacy .xls content regardless of the
    .xlsx extension used by the caller — confirm the intended format.
    """
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    col = ('电影详情链接', '图片链接', '影片中文名', '影片外国名',
           '评分', '评价数', '概况')
    for i in range(0, 7):
        sheet.write(0, i, col[i])  # header row
    for i in range(0, len(datalist)):
        data = datalist[i]
        for j in range(0, 7):
            sheet.write(i + 1, j, data[j])  # data row
    book.save(savepath)
def main():
    """Scrape the Douban Top-250 list and save it as a spreadsheet."""
    base_url = 'https://movie.douban.com/top250?start='
    rows = getData(base_url)
    saveData(rows, u'豆瓣电影Top250.xlsx')

main()
|
16,031 | 2e07f784a9a9430f51359bf380aaeed3cf8fb3a1 | import argparse
import math
import subprocess
def read_pred_file(file_name, args):
    """Parse a TREC-style prediction file.

    Each line looks like ``qid Q0 docid rank score tag``; only fields
    0 (qid), 2 (docid) and 4 (score) are used.  When
    ``args.model_use_exp`` is true the raw score is exponentiated.

    Returns three parallel lists (qids, docids, scores) sorted by
    (qid, docid).

    BUG FIX: the input file handle was never closed; it is now managed
    by a context manager.
    """
    pair_list = []
    with open(file_name) as fin:
        for line in fin:
            items = line.strip().split()
            qid = int(items[0])
            docid = int(items[2])
            score = math.exp(float(items[4])) if args.model_use_exp else float(items[4])
            pair_list.append((qid, docid, score))
    pair_list.sort(key=lambda x: (x[0], x[1]))
    qid_list = [p[0] for p in pair_list]
    docid_list = [p[1] for p in pair_list]
    result_list = [p[2] for p in pair_list]
    return qid_list, docid_list, result_list
def evaluation(baseline_result_list, model_result_list, qid_list, docid_list, alpha, pred_file_name):
    """Interpolate model and baseline scores, write a TREC run file, and
    score it with the external trec_eval binary.

    Combined score per document: model_score + alpha * log(baseline_score)
    (baseline scores must therefore be strictly positive).

    Returns (P@30, MAP) parsed from trec_eval's stdout.
    NOTE(review): the hard-coded line indices (5, 9, 25) assume the
    exact output layout of trec_eval 9.0.5 — confirm before upgrading.
    """
    gold_fname = "data/qrels.txt"
    fout = open(pred_file_name, "w")
    for qid, docid, baseline_score, model_score in zip(qid_list, docid_list, baseline_result_list, model_result_list):
        score = model_score + alpha * math.log(baseline_score)
        fout.write("{} Q0 {} 0 {} Inter\n".format(qid, docid, score))
        fout.flush()
    fout.close()
    trec_eval_path = 'trec_eval-9.0.5/trec_eval'
    trec_out = subprocess.check_output([trec_eval_path, gold_fname, pred_file_name])
    trec_out_lines = str(trec_out, 'utf-8').split('\n')
    # Pick the MAP, MRR and P@30 rows out of the fixed-format report.
    mean_average_precision = float(trec_out_lines[5].split('\t')[-1])
    mean_reciprocal_rank = float(trec_out_lines[9].split('\t')[-1])
    p_30 = float(trec_out_lines[25].split('\t')[-1])
    return p_30, mean_average_precision
def parameter_selection(baseline_result_list, model_result_list, qid_list, docid_list, args):
    """Grid-search the interpolation weight alpha over [0, 0.2].

    Evaluates 2001 evenly spaced alphas and returns the pair
    (best alpha by P@30, best alpha by MAP).  Ties keep the smallest
    alpha, matching the strict ``>`` comparisons.
    """
    best_p30_alpha = 0
    best_map_alpha = 0
    best_map = 0
    best_p30 = 0
    print("ALPHA\tP30\tMAP")
    for i in range(2001):
        alpha = i / 10000
        # Renamed the result from `map` to avoid shadowing the builtin.
        p30, map_score = evaluation(baseline_result_list, model_result_list, qid_list, docid_list, alpha, "temp{}".format(args.train_dataset))
        print("{}\t{}\t{}".format(alpha, p30, map_score))
        if p30 > best_p30:
            best_p30_alpha = alpha
            best_p30 = p30
        if map_score > best_map:
            best_map_alpha = alpha
            best_map = map_score
    return best_p30_alpha, best_map_alpha
if __name__=="__main__":
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--baseline_data", type=str)
    argparser.add_argument("--model_data", type=str)
    argparser.add_argument("--baseline_eval", type=str)
    argparser.add_argument("--model_eval", type=str)
    argparser.add_argument("--model_use_exp", action="store_true", default=False)
    argparser.add_argument("--train_dataset", type=str)
    args = argparser.parse_args()
    # Training split: read both runs and verify they are aligned.
    baseline_qid_list, baseline_docid_list, baseline_result_list = read_pred_file(args.baseline_data, args)
    model_qid_list, model_docid_list, model_result_list = read_pred_file(args.model_data, args)
    assert(model_qid_list == baseline_qid_list)
    assert(model_docid_list == baseline_docid_list)
    # Choose the interpolation weight on the training data.
    best_p30_alpha, best_map_alpha = parameter_selection(baseline_result_list, model_result_list, model_qid_list, model_docid_list, args)
    print("Best P30 alpha:", best_p30_alpha, "Best map alpha:", best_map_alpha)
    # Evaluation split: score with the MAP-optimal alpha.
    eval_baseline_qid_list, eval_baseline_docid_list, eval_baseline_result_list = read_pred_file(args.baseline_eval, args)
    eval_model_qid_list, eval_model_docid_list, eval_model_result_list = read_pred_file(args.model_eval, args)
    #assert (eval_model_qid_list == eval_baseline_qid_list)
    #assert (eval_model_docid_list == eval_baseline_docid_list)
    p_30_eval, map_eval = evaluation(eval_baseline_result_list, eval_model_result_list, eval_model_qid_list, eval_model_docid_list, best_map_alpha, "interpolation/{}".format(args.train_dataset))
    print("result:", p_30_eval, map_eval)
|
16,032 | 68ec55811486be28d037b4cf4e69b792be745973 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 18 20:23:42 2021
@author: chanchanchan
"""
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
def app():
    """Streamlit page summarising bender-element shear-wave results.

    Renders static experiment parameters, two result tables (time-domain
    and frequency-domain interpretations) and a comparison line chart.
    All values are pre-computed measurements embedded as literals.
    """
    st.header('Summary of Shear Wave Velocity')
    st.write('The data used in this bender element anaylsis were obtained based on samples of stiff and overconsolidated London Clay.The measurements are taken in the vertical direction with horizontal polarisation.')
    st.subheader('Bender Element Parameters:')
    st.write('Soil mass density (kg/m3): 2000')
    st.write('Distance between two BE components (mm):98.36')
    st.write('Bender element length (mm):6')
    st.write('Travel distance (mm):93.36')
    st.subheader('Time Domain Interpretation:')
    st.write('The results obtained via the arrival time identification method and cross correlation method are shown below:')
    st.write('The time identification method is based on the start-to-start arrival time of the signal.')
    # Time-domain table: rows alternate arrival-time / cross-correlation
    # results for each input frequency (3..7 kHz).
    fig = go.Figure(data=[go.Table(
        header=dict(values=['Input signal frequency (kHz)', 'Interpretation method','Shear wave arrival time (ms)', 'Shear wave velocity (m/s)', 'Shear modulus (MPa)','L/ λ'],
                    line_color='darkslategray',
                    fill_color='light blue',
                    align='center'),
        cells=dict(values=[[3, 3, 4, 4, 5, 5, 6, 6, 7, 7], # 1st column
                    ['Arrival time', 'Cross correlation','Arrival time', 'Cross correlation','Arrival time', 'Cross correlation','Arrival time', 'Cross correlation','Arrival time', 'Cross correlation'],# 2nd column
                    [0.55, 0.533, 0.53, 0.525, 0.50, 0.518, 0.48, 0.518, 0.47, 0.516],
                    [169.75, 175.16, 176.15, 177.83, 186.72, 180.23, 193.50, 180.23, 198.64, 180.93],
                    [57.63, 61.36, 62.06, 63.25, 69.73, 64.97, 74.88, 64.97, 78.92, 65.47],
                    [1.65, 1.60, 2.12, 2.10, 2.50, 2.59, 2.88, 3.11, 3.29, 3.61]],
                   line_color='darkslategray',
                   fill_color='white',
                   align='center'))
    ])
    fig.update_layout(width=700, height=600,)
    st.write(fig)
    st.subheader('Frequency Domain Interpretation:')
    st.write('The results obtained via the transfer function are shown below:')
    # Frequency-domain table: one transfer-function row per frequency.
    fig = go.Figure(data=[go.Table(
        header=dict(values=['Input signal frequency (kHz)', 'Interpretation method','Shear wave arrival time (ms)', 'Shear wave velocity (m/s)', 'Shear modulus (MPa)','L/ λ'],
                    line_color='darkslategray',
                    fill_color='light blue',
                    align='center'),
        cells=dict(values=[[3, 4, 5, 6, 7], # 1st column
                    ['Transfer function', 'Transfer function','Transfer function','Transfer function','Transfer function'],# 2nd column
                    [0.396, 0.402, 0.402, 0.390, 0.406],
                    [235.76, 232.24, 232.24, 239.38, 229.95],
                    [111.17, 107.87, 107.87, 114.61, 105.75],
                    [1.58, 1.61, 1.61, 1.56, 1.62]],
                   line_color='darkslategray',
                   fill_color='white',
                   align='center'))
    ])
    fig.update_layout(width=700, height=500,)
    st.write(fig)
    st.subheader('Shear Wave Velocity Summary:')
    st.write('The shear wave velocity obtained via the TD and FD methods are shown below:')
    # Line chart comparing the three interpretation methods.
    fig = go.Figure()
    fig.add_trace(go.Scatter(
        x=[3, 4, 5 ,6, 7],
        y=[169.75, 176.15, 186.72, 193.50, 198.64],
        name="S-S arrival method"
    ))
    fig.add_trace(go.Scatter(
        x=[3, 4, 5 ,6, 7],
        y=[175.16, 177.83, 180.23, 180.23, 180.93],
        name="Cross correlation"
    ))
    fig.add_trace(go.Scatter(
        x=[3, 4, 5 ,6, 7],
        y=[235.76, 232.24, 232.24, 239.38, 229.95],
        name="Transfer function"
    ))
    fig.update_layout(
        xaxis_title="Input signal frequency (kHz)",
        yaxis_title="Shear wave velocity (m/s)",
    )
    st.write(fig)
|
16,033 | 484ccf8dac1bc814cbf6d9cab62d4f0ada0094f8 | """
Capstone Team Project. Code to run on a ROBOT (NOT a laptop).
This module intentionally has NO main function.
Instead, the one and only main function for ROBOT code is in module
m0_run_this_on_ROBOT
When m0_run_this_on_ROBOT runs, it calls its main to construct a robot
(with associated objects) and sits in an infinite loop waiting to RECEIVE
messages from the LAPTOP code. When the m0_run_this_on_ROBOT code receives
a message from the LAPTOP that is destined for YOUR "delegate" code, it calls
the relevant method which YOU define in the MyRobotDelegate class below.
See the doc-string in m0_run_this_on_ROBOT for details.
Your professor will explain further when talking about MQTT and this code.
Authors: Your professors (for the framework)
and PUT_YOUR_NAME_HERE.
Fall term, 2019-2020.
"""
# TODO: 1. Put your name in the above.
import m1_robot_code as m1
import m3_robot_code as m3
import m4_robot_code as m4
import mqtt_remote_method_calls as mqtt
import rosebot
class MyRobotDelegate(object):
    """
    Receives remote method calls (method name plus arguments) that the
    MQTT listener forwards from the LAPTOP code, and acts on this robot.
    """

    def __init__(self, robot):
        self.robot = robot  # type: rosebot.RoseBot
        self.mqtt_sender = None  # type: mqtt.MqttClient
        # Flip to True to make the robot's receive loop exit.
        self.is_time_to_quit = False

    def set_mqtt_sender(self, mqtt_sender):
        """Remember the MQTT client used to send replies to the laptop."""
        self.mqtt_sender = mqtt_sender

    # -------------------------------------------------------------------------
    # TODO: Add message-handling methods here as needed.
    # -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# TODO: Add more functions here as needed.
# -----------------------------------------------------------------------------
def print_message_received(method_name, arguments):
    # Diagnostic: announce on stdout that the MQTT listener handed this
    # robot a remote method call (method name plus its arguments).
    print()
    print("The robot's delegate has received a message")
    print("   for the ", method_name, " method")
    print("   with arguments", arguments)
def print_message_sent(method_name, arguments):
    # Diagnostic: announce on stdout that this robot sent a remote
    # method call to the laptop (method name plus its arguments).
    print()
    print("The robot has SENT a message to the LAPTOP")
    print("   for the ", method_name, " method")
    print("   with arguments", arguments)
|
16,034 | c9e8a25d61502618eee6ad656e1c2ce892264093 | from igf_data.igfdb.platformadaptor import PlatformAdaptor
from igf_data.utils.dbutils import read_dbconf_json, read_json_data
def load_new_platform_data(data_file, dbconfig):
  '''
  A method for loading new data for platform table

  :param data_file: JSON file listing the new platform records
  :param dbconfig: JSON file with database connection parameters
  '''
  formatted_data=read_json_data(data_file)
  dbparam=read_dbconf_json(dbconfig)
  pl=PlatformAdaptor(**dbparam)
  pl.start_session()
  try:
    pl.store_platform_data(data=formatted_data)
  finally:
    # BUG FIX: always release the DB session, even when the store
    # fails; the previous bare `except: raise` was a no-op and left
    # the session open on error.
    pl.close_session()
def load_new_flowcell_data(data_file, dbconfig):
  '''
  A method for loading new data to flowcell table

  :param data_file: JSON file listing the flowcell barcode rules
  :param dbconfig: JSON file with database connection parameters
  '''
  flowcell_rule_data=read_json_data(data_file)
  dbparam=read_dbconf_json(dbconfig)
  pl=PlatformAdaptor(**dbparam)
  pl.start_session()
  try:
    pl.store_flowcell_barcode_rule(data=flowcell_rule_data)
  finally:
    # BUG FIX: always release the DB session, even when the store
    # fails; the previous bare `except: raise` was a no-op and left
    # the session open on error.
    pl.close_session()
16,035 | 9150e8f24842ffe38e4f663352c8694c1beff9a7 | import numpy as np
import math
all_crd = []
center_crd = []
rotated_crd = []
prefix = []
appendix = []
matrixA = np.array([[],[],[]])
matrixB = np.array([[],[],[]])
def vector_gen(atomX, atomY):
    """Return the 3-vector from atom X to atom Y (1-based indices),
    reading positions from the flat global coordinate list ``all_crd``."""
    base_x = (atomX - 1) * 3
    base_y = (atomY - 1) * 3
    return [float(all_crd[base_y + k]) - float(all_crd[base_x + k])
            for k in range(3)]
def import_crd(file_name,center_atm):
    """Read an Amber-style .crd file and return (coordinates re-centred
    on atom `center_atm` (1-based), that atom's original xyz position).

    Side effects: appends the two header lines to the global `prefix`
    and any trailing lines (box info etc.) to the global `appendix`.
    NOTE(review): `total_atom_num/2` assumes 6 coordinate values
    (2 atoms) per line — confirm for files with an odd atom count.
    """
    _inp_crd = []
    inp_crd = []
    with open (file_name, 'r') as fref:
        for i, line in enumerate(fref.readlines()):
            if i == 0:
                prefix.extend(line)
            elif i == 1:
                # Second header line carries the atom count.
                prefix.extend(line)
                tmp = line.split()
                total_atom_num = int(tmp[0])
            elif (i < 2 + total_atom_num/2):
                _inp_crd.extend(line.split())
            else:
                appendix.extend(line)
    # Trim any overshoot, then translate so the centre atom sits at the origin.
    del _inp_crd[total_atom_num*3:]
    for i in range(len(_inp_crd)):
        tmp2 = float(_inp_crd[i]) - float(_inp_crd[int(center_atm)*3-3+i%3])
        inp_crd.append(tmp2)
    return inp_crd, _inp_crd[int(center_atm)*3-3:int(center_atm)*3]
def determine_rotation(atomA,atomB,atomC):
    """Build two rotation matrices that align the normal of the plane
    through atoms A, B, C with the z-axis.

    Returns (matrixA, matrixB): matrixB rotates about `axis1` by beta
    (Rodrigues' formula), matrixA then rotates about z by -alpha.
    Prints the doubly rotated normal as a sanity check; presumably it
    should come out close to (0, 0, 1) — TODO confirm.
    """
    vec1 = vector_gen(atomA,atomB)
    vec2 = vector_gen(atomA,atomC)
    # Unit normal of the plane spanned by A->B and A->C.
    _vert = np.cross(vec1,vec2)
    vert = [0]*3
    for i in range(len(vert)):
        vert[i] = _vert[i]/math.sqrt(_vert[0]*_vert[0]+_vert[1]*_vert[1]+_vert[2]*_vert[2])
    beta = math.asin(vert[2])
    alpha = math.asin(vert[1]/math.cos(beta))
    # Rotation axis: cross product of z-hat with the normal.
    _axis1 = np.cross([0,0,1],vert)
    axis1 = [0]*3
    for i in range(len(axis1)):
        axis1[i] = _axis1[i]/math.sqrt(_axis1[0]*_axis1[0]+_axis1[1]*_axis1[1]+_axis1[2]*_axis1[2])
    sina = math.sin(-alpha)
    cosa = math.cos(-alpha)
    matrixA = np.array([[cosa,-sina,0],[sina,cosa,0],[0,0,1]])
    inv_matrixA = np.linalg.inv(matrixA)
    sinb = math.sin(beta)
    cosb = math.cos(beta)
    x,y,z = axis1[0],axis1[1],axis1[2]
    # Rodrigues rotation matrix about axis1 by angle beta.
    matrixB = np.array([[(cosb+x*x*(1-cosb)),(x*y*(1-cosb)-z*sinb),(z*x*(1-cosb)+y*sinb)],[(x*y*(1-cosb)+z*sinb),(cosb+y*y*(1-cosb)),(y*z*(1-cosb)-x*sinb)],[(z*x*(1-cosb)-y*sinb),(y*z*(1-cosb)+x*sinb),(cosb+z*z*(1-cosb))]])
    inv_matrixB = np.linalg.inv(matrixB)
    _temp = np.dot(matrixB,vert)
    temp = np.dot(matrixA,_temp)
    print (temp)
    return matrixA, matrixB
def rotate_crd():
    """Apply matrixB then matrixA to every atom in the global `all_crd`
    list, translate back by `center_crd`, and return the flat rotated
    list (also accumulated in the global `rotated_crd`).

    NOTE(review): relies on the module globals matrixA/matrixB being
    rebound by the __main__ block before this runs — TODO confirm.
    """
    crd_mod = [0]*3
    center = np.array(center_crd)
    print center
    for i in range(len(all_crd)/3):
        crd = np.array([float(all_crd[i*3]),float(all_crd[i*3+1]),float(all_crd[i*3+2])])
        _temp = np.dot(matrixB,crd)
        temp = np.dot(matrixA,_temp)
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # outer index (harmless to iteration order, but confusing).
        for i in range(len(center)):
            crd_mod[i] = float(center[i]) + temp[i]
        #crd_mod = center + temp
        rotated_crd.extend(crd_mod)
    # Debug spot-check: recompute atom 53 and compare with the stored value.
    crd = np.array([float(all_crd[53*3-3]),float(all_crd[53*3-2]),float(all_crd[53*3-1])])
    print (crd)
    _temp = np.dot(matrixB,crd)
    temp = np.dot(matrixA,_temp)
    print (temp)
    print (float(center[0]) + temp[0])
    print (rotated_crd[53*3-3])
    return rotated_crd
def output_crd():
    """Write the rotated coordinates to a fixed-name .crd file, keeping
    the original header (`prefix`) and trailer (`appendix`) lines.

    NOTE(review): formats 6 values per line; `len(all_crd)/6` relies on
    Python 2 integer division and an even coordinate count — confirm.
    """
    with open ("rmout_00210cycle_mod.crd","w") as fout:
        for i in range(len(prefix)):
            fout.write(prefix[i])
        for i in range(len(all_crd)/6):
            for j in range(6):
                # Fixed-width Amber format: 7 decimals, 11-char field.
                rotated_crd[i*6+j] = "{0:.7f}".format(rotated_crd[i*6+j])
                rotated_crd[i*6+j] = rotated_crd[i*6+j].rjust(11)
                fout.write(" "+str(rotated_crd[i*6+j]))
            fout.write("\n")
        for i in range(len(appendix)):
            fout.write(appendix[i])
if __name__ == '__main__':
    # Centre the structure on atom 53, derive rotation matrices from the
    # plane through atoms 53/23/24, rotate every coordinate and write
    # the modified .crd file.
    all_crd, center_crd = import_crd("rmout_00210cycle.crd",53)
    print all_crd[53*3-3]
    matrixA, matrixB = determine_rotation(53,23,24)
    rotated_crd = rotate_crd()
    output_crd()
|
16,036 | 4c8572da10545466ca3b1b3157cc41a6810b8706 | from unittest.mock import Mock
import pytest
from shopping.repeatingitems.shopping_repeating_items_worksheet import RepeatingItemsWorksheet, \
RepeatingItemsAlreadyPresent
ALL_VALUES = ["item-1", "item-2", "item-3"]
@pytest.fixture
def generate_repeating_items_worksheet():
    """Return (worksheet mock, RepeatingItemsWorksheet under test).

    The mock's get_all_values() yields one single-cell row per entry of
    ALL_VALUES, mimicking the spreadsheet worksheet API.
    """
    worksheet_mock = Mock()
    worksheet_mock.get_all_values.return_value = list(map(lambda x: [x], ALL_VALUES))
    repeating_items_worksheet = RepeatingItemsWorksheet(worksheet_mock)
    return worksheet_mock, repeating_items_worksheet
def test_get_repeating_items(generate_repeating_items_worksheet):
    """The worksheet rows collapse to the flat list of item names."""
    _, repeating = generate_repeating_items_worksheet
    assert repeating.get_repeating_items() == ALL_VALUES
def test_add_repeating_item(generate_repeating_items_worksheet):
    """Adding a new item inserts a single row at the top of the sheet."""
    mock_sheet, repeating = generate_repeating_items_worksheet
    repeating.add_repeating_item("item-4")
    mock_sheet.insert_row.assert_called_once_with(["item-4"], 1)
def test_add_repeating_item_already_present_raises_exception(generate_repeating_items_worksheet):
    """Adding a duplicate item raises and writes nothing to the sheet."""
    mock_sheet, repeating = generate_repeating_items_worksheet
    with pytest.raises(RepeatingItemsAlreadyPresent):
        repeating.add_repeating_item("item-3")
    mock_sheet.insert_row.assert_not_called()
|
16,037 | 0bf8ecc1859e4fb02594fa6267770a2cc266a3ef | # http://codecombat.com/play/level/serpent-savings
# todo this logic need to be improved
# You cannot collect coins.
# Summon peasants to collect coins for you.
# Collecting coins spawns a growing 'tail' behind the peasants.
# When a peasant touches a tail, they die.
# Collect 500 coins to pass the level.
# The following APIs are available on your team's peasants: "snakeBackward"
# The following APIs are available on neutral peasants: "snakeBackward", "snakeHead", "snakeForward"
def moveTo(position, fast=True):
    # Prefer a jump (longer stride) when the jump skill is off cooldown;
    # `self` is the CodeCombat hero object, bound by the game runtime.
    if (self.isReady("jump") and fast):
        self.jumpTo(position)
    else:
        self.move(position)
# pickup coin
def pickUpNearestItem(items):
    # Walk/jump the hero to the closest of `items`, if any exists.
    nearestItem = self.findNearest(items)
    if nearestItem:
        moveTo(nearestItem.pos)
def commandPeasant(peasant):
    """Steer one peasant: drift toward the nearest coin while pushing
    away from any enemy (snake tail) closer than 5 units."""
    item = peasant.findNearestItem()
    goalf = peasant.pos
    if item:
        # Aim 10 units along the direction of the nearest coin.
        toward = Vector.subtract(item.pos, goalf)
        toward = Vector.normalize(toward)
        toward = Vector.multiply(toward, 10)
        goalf = Vector.add(goalf, toward)
    enemies = peasant.findEnemies()
    for enemy in enemies:
        if peasant.distanceTo(enemy) < 5:
            # BUG FIX: the repulsion used the undefined name `friend`;
            # it must push away from the enemy relative to this peasant.
            away = Vector.subtract(peasant.pos, enemy.pos)
            away = Vector.normalize(away)
            away = Vector.multiply(away, 5)
            goalf = Vector.add(away, goalf)
    self.command(peasant, 'move', goalf)
def CommandArcher(soldier):
    """Order an archer to attack the closest enemy, if one exists."""
    nearest_enemy = self.findNearest(self.findEnemies())
    if nearest_enemy:
        self.command(soldier, "attack", nearest_enemy)
summonTypes = ['peasant']
def summonTroops():
    # Cycle through summonTypes based on how many units have been built,
    # summoning whenever the hero can afford the next type.
    type = summonTypes[len(self.built) % len(summonTypes)]
    if self.gold > self.costOf(type):
        self.summon(type)
while True:
    # Main game loop: summon peasants when affordable, then issue orders
    # to every friendly unit each tick.
    friends = self.findFriends()
    summonTroops()
    # NOTE(review): tails and coins are computed but currently unused.
    tails = self.findEnemies()
    coins = self.findItems()
    # pickUpNearestItem(coins)
    for friend in friends:
        if friend.type == 'archer':
            CommandArcher(friend)
        elif friend.type == 'peasant':
            commandPeasant(friend)
|
16,038 | 10098591081162b8083fda0f96632512ba91ff22 | # dependencies
from config import database, connect_string
# relational database class with our data retrieval functions
from BellyButtonData import BellyButtonData
# mongodb database class with the same function signitures ( same functions)
from BellyButtonMongo import BellyButtonMongo
from flask import Flask, jsonify, render_template
#################################################
# Database Setup
#################################################
if database == "mongo":
    # Select the backing store from the config flag; both classes expose
    # the same data-retrieval interface.
    data = BellyButtonMongo()
else:
    data = BellyButtonData()
#################################################
# Flask Setup
#################################################
application = Flask(__name__)
#################################################
# Flask Routes
#################################################
@application.route("/")
def welcome():
    """Render the landing page with the list of subject ids."""
    subject_ids = data.get_subject_ids()
    return render_template("index.html", user_ids=subject_ids)
@application.route("/api/v1.0")
def show_apis():
    """List all available api routes."""
    lines = (
        "<h4>Available Routes:</h4>",
        '<a href="/api/v1.0/ids">/api/v1.0/ids</a><br/>',
        '<a href="/api/v1.0/info/1286">/api/v1.0/info/subject_id</a><br/>',
        '<a href="/api/v1.0/subjects">/api/v1.0/subjects</a><br/>',
        '<a href="/api/v1.0/subjects/1286">/api/v1.0/subjects/subject_id</a><br/>',
        '<a href="/"><h4>Back</h4></a><br/>',
    )
    return "".join(lines)
@application.route("/api/v1.0/ids")
def get_all_ids():
    """Return every subject id as JSON."""
    subject_ids = data.get_subject_ids()
    return jsonify(subject_ids)
@application.route("/api/v1.0/info")
def get_all_results():
    """Return sample data for every subject as JSON."""
    everything = data.get_data_for_all()
    return jsonify(everything)
@application.route("/api/v1.0/info/<subject_id>")
def get_one_user_results(subject_id):
    """Return sample data for one subject as JSON."""
    result = data.get_data_by_user(subject_id)
    return jsonify(result)
@application.route("/api/v1.0/subjects")
def get_all_subjects():
    """Return metadata for every subject as JSON."""
    subjects = data.get_subjects()
    return jsonify(subjects)
@application.route("/api/v1.0/subjects/<subject_id>")
def get_one_subject(subject_id):
    """Return metadata for one subject as JSON."""
    subject = data.get_subjects(subject_id)
    return jsonify(subject)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger and
    # auto-reload — disable before deploying to production.
    application.run(debug=True)
16,039 | 4192269b478dafff0c986ea7ebbc504327c0e42f | import requests
import csv
import getpass
# Set the request parameters
# Change the URL according to what information is desired.
subdomain = input("Enter your Zendesk Subdomain (not full URL, but something such as your company name): ")
url = 'https://' + subdomain +'.zendesk.com/api/v2/help_center/en-us/articles.json?sort_by=title&sort_order=asc'
# Use Your Zendesk Support Sign-On Credentials
user = input("Enter your the Email Address tied to your Zendesk Account: ")
pwd = getpass.getpass("Enter your Zendesk Password: ")
# Path of the outputted csv file
csvfile = f'{subdomain}_articles.csv'
# Comment out or remove the unnecessary attributes:
# Maps article JSON field -> CSV column heading.
attributes = {
    'id': 'Article ID',
    'title': 'Article Title',
    'html_url': 'URL',
    'vote_sum': 'Vote Sum',
    'vote_count': 'Vote Count',
    'author_id': 'Author ID',
    'section_id': 'Section ID',
    'draft': 'Draft (True if Draft, False if not)',
    'updated_at': 'Updated At',
    'label_names': 'Label Names'
}
# One column list per attribute, seeded with its heading; label names
# are kept aside as tuples so they can be appended per row at the end.
list_of_lists = []
label_names_tuples_list = []
for key in attributes.keys():
    list_of_lists.append([attributes[key]])
# This loop cycles through all pages of articles
# (the API supplies `next_page` until the last page, where it is null).
while url:
    response = requests.get(url, auth = (user, pwd))
    data = response.json()
    for article in data['articles']:
        label_names_tuples_list.append(tuple(article['label_names']))
        list_id = 0
        for key in attributes.keys():
            if key == 'label_names':
                # Placeholder column; the labels are appended separately.
                list_of_lists[list_id].append('')
            else:
                list_of_lists[list_id].append(str(article[key]))
            list_id += 1
    print(data['next_page'])
    url = data['next_page']
print("Number of articles:")
print (len(list_of_lists[0]))
# Data Transposition
transposed_data = zip(*list_of_lists)
# Write to a csv file
with open(csvfile, 'w', newline='') as fp:
    writer = csv.writer(fp, dialect = 'excel')
    article_no = 0
    for article_attr in transposed_data:
        if article_no != 0:
            # Skip the header row; append the article's label names.
            article_attr += label_names_tuples_list[article_no - 1]
        writer.writerows([article_attr])
        article_no += 1
|
16,040 | d9cf562a37c2c5f12cf2f85f29e67df8a212e6e1 | import unittest
from chenyao.CLASS.members import MemberHelper
class TestMemberHelperLast(unittest.TestCase):
    """Tests MemberHelper.member_get with various phone-number tails."""
    @classmethod
    def setUpClass(cls):
        print('1.setup test class')
    @classmethod
    def tearDownClass(cls):
        print('2.teardown test class')
    def setUp(self):
        print('3.set up test case')
    def tearDown(self):
        print('4.tear down test case')
    def test_case01_Last_msg(self):
        # Known member tail: lookup is expected to return 1.
        tel_last=5672
        act_msg=MemberHelper.member_get(tel_last)
        exp_msg=1
        self.assertEqual(exp_msg, act_msg)
    # def test_case02_Last_msg(self):
    #     tel_last=567a
    #     act_msg=MemberHelper.member_get(tel_last)
    #     exp_msg=1
    #     self.assertEqual(exp_msg, act_msg)
    def test_case03_Last_msg(self):
        # Unknown 4-digit tail: expect False.
        tel_last=1111
        act_msg=MemberHelper.member_get(tel_last)
        exp_msg=False
        self.assertEqual(exp_msg, act_msg)
    def test_case04_Last_msg(self):
        # Too few digits: expect False.
        tel_last=672
        act_msg=MemberHelper.member_get(tel_last)
        exp_msg=False
        self.assertEqual(exp_msg, act_msg)
    def test_case05_Last_msg(self):
        # Negative input: expect False.
        tel_last=-5672
        act_msg=MemberHelper.member_get(tel_last)
        exp_msg=False
        self.assertEqual(exp_msg, act_msg)
if __name__ == '__main__':
    # Discover and run all test cases in this module.
    unittest.main()
|
16,041 | efa21870f583e66115ceb635a48bc13efc727b5b | #!/usr/bin/python3
""" module containts unittests for class Review """
import unittest
import json
from models.base_model import BaseModel
from models.review import Review
class testReview(unittest.TestCase):
    """ unittests for Review """
    def setUp(self):
        """ Sets up the class """
        # Fresh Review instance before each test.
        self.review = Review()
    def tearDown(self):
        """ Test for tear down """
        del self.review
    def test_attribute(self):
        """ Test if attributes are being saved """
        # A new Review starts with an empty place_id string.
        r1 = Review()
        self.assertEqual(r1.place_id, "")
if __name__ == "__main__":
    # BUG FIX: the guard previously just instantiated testReview(),
    # which constructs a TestCase but never runs any tests; dispatch to
    # the unittest runner instead.
    unittest.main()
|
16,042 | f5def2c87945663fa80b2f5f29795f2be8fb46de | # aList=[1,2,3,4,5,6,3,8,9]
# sign=False #初始值为没找到
# x=int(input("请输入要查找的整数:"))
# for i in range(len(aList)):
# if aList[i]==x:
# print("整数%d在列表中,在第%d个数"%(x,i+1))
# sign=True
# if sign==False:
# print("整数%d不在列表中"%x)
#
#
#
#
# def binary_chop2(alist, data):
# """
# 递归解决二分查找
# """
# n = len(alist)
# if n < 1:
# return False
# mid = n // 2
# if alist[mid] > data: #0(n)
# return binary_chop2(alist[0:mid], data) #O(K)
# elif alist[mid] < data:
# return binary_chop2(alist[mid+1:], data)
# else:
# return True
#HasH查找
class HashTable:
    """Fixed-size (10-slot) hash table using open addressing with
    linear probing.  Keys are integers; values are arbitrary objects.
    Supports h[key] = value and h[key] via the mapping dunders.
    """

    def __init__(self):
        self.size = 10
        self.slots = [None] * self.size  # stored keys
        self.data = [None] * self.size   # stored values

    def hash(self, key, size):
        """Primary hash: simple modulo.
        BUG FIX: the first parameter was misspelled ``selfself``."""
        return key % size

    def rehash(self, old_hash, size):
        """Linear-probe step: advance one slot, wrapping at the end."""
        return (old_hash + 1) % size

    def put(self, key, data):
        """Insert or update the value stored for `key`.

        BUG FIX: removed a leftover debug print of the hash value.
        NOTE(review): with all slots full and a new key, the probe loop
        never terminates — capacity management is the caller's problem.
        """
        hash_value = self.hash(key, len(self.slots))
        if self.slots[hash_value] is None:
            # Empty home slot: store directly.
            self.slots[hash_value] = key
            self.data[hash_value] = data
        else:
            if self.slots[hash_value] == key:
                # Same key in the home slot: overwrite the value.
                self.data[hash_value] = data
            else:
                # Collision: probe until this key or a free slot is found.
                next_slot = self.rehash(hash_value, len(self.slots))
                while self.slots[next_slot] is not None and self.slots[next_slot] != key:
                    next_slot = self.rehash(next_slot, len(self.slots))
                if self.slots[next_slot] is None:
                    self.slots[next_slot] = key
                    self.data[next_slot] = data
                else:
                    # Existing key found along the probe chain: update.
                    self.data[next_slot] = data

    def get(self, key):
        """Return the value stored for `key`, or None if absent."""
        start_slot = self.hash(key, len(self.slots))
        data = None
        stop = False
        found = False
        pos = start_slot
        while self.slots[pos] is not None and not found and not stop:
            if self.slots[pos] == key:
                found = True
                data = self.data[pos]
            else:
                pos = self.rehash(pos, len(self.slots))
                if pos == start_slot:
                    # Probed the whole table without finding the key.
                    stop = True
        return data

    def __getitem__(self, item):
        return self.get(item)

    def __setitem__(self, key, value):
        self.put(key, value)
# Quick demonstration of the hash table.
h = HashTable()
for demo_key, demo_value in ((54, 'cat'), (26, 'dog'), (93, 'lion')):
    h[demo_key] = demo_value
print(h.slots)
print(h.data)
print(h.get(54))
|
16,043 | 9499ec765a7b24681b102f860cd83ddb1cabad78 | import asyncio
from moex.models import Price
from scripts.main import get_history_price
def pull_price(security, start, end):
    """Fetch closing prices for `security` between `start` and `end`
    from MOEX and bulk-insert them as Price rows."""
    history = asyncio.run(get_history_price(security.code, start, end))
    rows = [
        Price(date=entry['TRADEDATE'], price=entry['CLOSE'], security=security)
        for entry in history
    ]
    Price.objects.bulk_create(rows)
|
16,044 | 69e398c891b5ceacbc31b2cbe3d880032428acc9 | class SomeClass(object):
    @property
    def x(self):
        """Attribute-style accessor; always returns the constant 5."""
        return 5
    def y(self):
        """Plain method returning 6.

        NOTE(review): unlike `x` this is not a property, so `obj.y`
        yields a bound method — confirm the missing @property is
        intentional.
        """
        return 6
var = SomeClass()
print(var.x)  # property access: prints 5
print(var.y)  # prints the bound method object, not 6 — y is not a property
|
16,045 | 1137435538552accb0053f02bf3c141489561e58 | import pandas
if __name__ == '__main__':
    # Load the spreadsheet and report per-column record counts before
    # and after dropping exact duplicate rows.
    df=pandas.read_excel("you_chongfu_data.xlsx")
    print("去重之前总数:",df.count())
    df=df.drop_duplicates()
    print("去重之后的总数",df.count())
|
16,046 | 53d3d83d892e8330ecd5edbdca384fb0edb8b25e | from upthor.views import FileUploadView
from django.conf.urls import url
urlpatterns = [
    # for Testing
    # NOTE(review): '^thor-upload/' is a regex pattern — consider a raw
    # string literal; left byte-identical here.
    url('^thor-upload/', FileUploadView.as_view(), name='thor-file-upload'),
]
|
16,047 | 962419ab6b62b0ba358aaff100d8e6cf36fc17c5 | ls = [10,20,20,40]
# Iterating a list (printing intentionally disabled).
for element in ls:
    pass

# Iterating the characters of a string.
for char in "techcamp":
    pass

student = {
    "name": "Emma",
    "class": 9,
    "marks": 75
}

# Iterating dictionary keys (values looked up on demand).
for each in student:
    pass

# Unpacking the dictionary into key/value pairs.
for k, v in student.items():
    pass

# Iterating a numeric range.
for number in range(1, 11):
    pass

# Sum of the even numbers from 10 to 20 (inclusive).
evensum = sum(number for number in range(10, 21) if number % 2 == 0)
print(evensum)

# Sum of the odd numbers from 10 to 20 (inclusive).
oddsum = sum(number for number in range(10, 21) if number % 2 == 1)
print(oddsum)
16,048 | eaf15eafcd58d3b458ca1d3c402b421ff1e69085 | from .basefuncs import *
# `compose` supplies right-to-left function composition; it is required by
# FunctionalFunction.__add__ below.  Abort with an install hint if missing.
try:
    from functional import compose
except ImportError:
    raiseErrN("You need functional!\nInstall it from http://pypi.python.org/pypi/functional\nor run pip install functional.")
def ParserError(message, token):
    # Report a parse failure via raiseErr, tagged with the offending token's
    # value and source location args.  Despite the name this is a function,
    # not an exception class.
    # print(token)
    raiseErr("ParserError: %s '%s'" % (message, token.value), token.args)
class FunctionalFunction(object):
    """Callable wrapper whose ``+`` operator composes functions.

    ``(f + g)(x)`` evaluates ``g(f(x))`` via ``functional.compose``.
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)
    def __add__(self, other):
        # compose(other, self): apply self first, then other.
        return FunctionalFunction(compose(other, self))
class ParseRunner:
    """Decorator factory: sweeps a window parser across a token list.

    The wrapped parser is invoked as ``func(tokens, i)``.  A non-None result
    replaces ``numArgs`` tokens starting at ``i`` (and the same position is
    retried); a None result — or an IndexError from the window running off
    the end — advances the position by one.  The list is mutated in place.
    """

    def __init__(self, numArgs):
        self.numArgs = numArgs

    def __call__(self, func):
        @FunctionalFunction
        def wrapped(tokens):
            idx = 0
            while idx < len(tokens):
                try:
                    node = func(tokens, idx)
                except IndexError:
                    # Window extends past the end of the list: no match here.
                    node = None
                if node is None:
                    idx += 1
                else:
                    # Collapse the matched window into the single new token.
                    del tokens[idx:idx + self.numArgs]
                    tokens.insert(idx, node)
            return tokens
        return wrapped
@FunctionalFunction
def condParser(ts):
    # Scans the token list from the END (p counts backwards from the last
    # token), matching each CONDSTATE opener with its nearest ENDCON closer
    # and collapsing the enclosed span into one CONDSTATES token.
    p = 1
    stops = []  # offsets (from the end) of pending ENDCON tokens
    toks = []   # the ENDCON tokens themselves, kept for error reporting
    while p <= len(ts):
        tok = ts[-p]
        if tok.tag == ENDCON:
            stops.append(p)
            toks.append(tok)
        elif tok.tag == CONDSTATE:
            try:
                end = -stops.pop()  # most recent unmatched closer
                toks.pop()
            except IndexError:
                ParserError("Unmatched Conditional", ts[-p])
            states = ts[1 - p:end]  # tokens between opener and closer
            # First enclosed token must be the condition expression; the
            # remainder must already have been reduced to statements.
            if all(t.tag in STATEMENT for t in states[1:]) and states[0].tag in EXPRESSION:
                op = ts[-p].value
                tok = Token(value=op, tag='CONDSTATES', args=states)
                del ts[-p:end + 1]
                ts.insert(end + 1, tok)
                p = -end  # resume scanning just past the collapsed span
            else:
                ParserError("Non-statement in the conditional starting", tok)
        p += 1
    if stops != []:
        ParserError("Unmatched Token", toks.pop())
    return ts
@ParseRunner(3)
def biopParser(tkens, pos):
    """Fold ``<expr> <binary-op> <expr>`` into a single BIOPEXP token, else None."""
    if tkens[pos].tag not in EXPRESSION:
        return None
    if tkens[pos + 1].tag == BIOP and tkens[pos + 2].tag in EXPRESSION:
        return Token(value=tkens[pos + 1].value, tag='BIOPEXP',
                     args=[tkens[pos], tkens[pos + 2]])
    return None
@ParseRunner(3)
def parenParser(tkens, pos):
    """Strip a ``( <expr> )`` triple down to the inner expression, else None."""
    if tkens[pos].tag != LPAREN:
        return None
    if tkens[pos + 1].tag in EXPRESSION and tkens[pos + 2].tag == RPAREN:
        return tkens[pos + 1]
    return None
@ParseRunner(3)
def asopParser(tkens, pos):
    """Fold ``<id> <assign-op> <expr>`` into an ASOPS token keyed by the id."""
    if tkens[pos].tag != ID:
        return None
    if tkens[pos + 1].tag == ASOP and tkens[pos + 2].tag in EXPRESSION:
        return Token(value=tkens[pos].value, tag='ASOPS', args=tkens[pos + 2])
    return None
@ParseRunner(2)
def uniopParser(tkens, pos):
    """Fold ``<unary-op> <expr>`` into a UNIOPEXP token, else None."""
    if tkens[pos].tag == UNIOP and tkens[pos + 1].tag in EXPRESSION:
        return Token(value=tkens[pos].value, tag='UNIOPEXP', args=tkens[pos + 1])
    return None
@ParseRunner(2)
def iStateParser(tkens, pos):
    """Fold an input statement (IO op ending in '>' followed by an id) into IOSTATES."""
    head = tkens[pos]
    if head.tag == IOSTATE and head.value[-1] == '>' and tkens[pos + 1].tag == ID:
        return Token(value=head.value, tag='IOSTATES', args=tkens[pos + 1])
    return None
@ParseRunner(2)
def oStateParser(tkens, pos):
    """Fold an output statement (IO op ending in '<' followed by an expr) into IOSTATES."""
    head = tkens[pos]
    if head.tag == IOSTATE and head.value[-1] == '<' and tkens[pos + 1].tag in EXPRESSION:
        return Token(value=head.value, tag='IOSTATES', args=tkens[pos + 1])
    return None
# Pass pipelines (FunctionalFunction.__add__ chains them left to right):
# expression-level reductions first, then statement-level ones.
runParser = uniopParser + iStateParser + biopParser + parenParser
stateParse = oStateParser + asopParser + condParser
def Parse(tokenlist):
    """Run the parser pipelines to a fixed point, then validate the result.

    Raises (via ParserError/raiseErrN) on unmatched parentheses or tokens
    that were never reduced to statements.
    """
    tokenlist.append(Token('', ''))  # sentinel so window parsers can look ahead
    previous = ''
    while previous != repr(tokenlist):
        previous = repr(tokenlist)
        tokenlist = runParser(tokenlist)
        tokenlist = stateParse(tokenlist)
    tokenlist.pop()  # drop the sentinel
    parens = [tok for tok in tokenlist if tok.tag in (LPAREN, RPAREN)]
    if parens:
        ParserError("Unmatched Parenthesis", parens[0])
    leftovers = [tok for tok in tokenlist
                 if tok.tag not in STATEMENT + ["CONDSTATE", "ENDCON"]]
    if leftovers:
        bad = leftovers[0]
        if bad.tag.endswith("EXP"):
            raiseErrN(
                "ParserError: Unused Expression with Operation '%s'" % (bad.value))
        else:
            ParserError("Unused Token", bad)
    return tokenlist
|
16,049 | 858192871464e7a3d8750427088dcbd0f72959db | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# Autotest driver string: timestamps (t) at which screenshots (s) are taken
# before quitting (q).
testinfo = "t 0.1, s, t 1.1, s, t 2.1, s, t 3.1, s, t 4.1, s, t 5.1, s, t 6.1, s, q"
tags = "CallFunc, tiles"
import pyglet
# Make resources (the tile-map XML) loadable relative to this script.
pyglet.resource.path.append(pyglet.resource.get_script_home())
pyglet.resource.reindex()
import cocos
from cocos import tiles, layer
from cocos.actions import CallFunc, ScaleTo, Delay
from cocos.director import director
class TestScene(cocos.scene.Scene):
    # Scene that scrolls a tiled map through a scripted sequence of focus
    # points, zooms out, enables the tile debug overlay and finally resizes
    # the window — exercising ScrollingManager under each condition.
    def __init__(self):
        super(TestScene, self).__init__()
        scroller = layer.ScrollingManager()
        scrollable = tiles.load('road-map.xml')['map0']
        scroller.add(scrollable)
        self.add(scroller)
        # One step per second: pan to several map positions, scale to 75%,
        # switch on debug drawing, then shrink the window.
        template_action = ( CallFunc(scroller.set_focus, 0, 0) + Delay(1) +
                            CallFunc(scroller.set_focus, 768, 0) + Delay(1) +
                            CallFunc(scroller.set_focus, 768, 768) +Delay(1) +
                            CallFunc(scroller.set_focus, 1500, 768) +Delay(1) +
                            ScaleTo(0.75, 1) +
                            CallFunc(scrollable.set_debug, True) + Delay(1) +
                            CallFunc(director.window.set_size, 800, 600)
                            )
        scroller.do(template_action)
def main():
    """Initialise the director window and run the scrolling-tiles test scene."""
    director.init(width=600, height=300, autoscale=False, resizable=True)
    scene = TestScene()
    director.run(scene)

if __name__ == '__main__':
    main()
|
16,050 | 67951f9e69587eb49ed4ce2e047f21185474c821 | # permitted by applicable law. You may use it, redistribute it and/or modify
# it, in whole or in part, provided that you do so at your own risk and do not
# hold the developers or copyright holders liable for any claim, damages, or
# other liabilities arising in connection with the software.
#
# Developed by Mario Van Raemdonck, 2013;
# (c) Ghent University, 2013
#!/usr/bin/env python
import numpy as np
import pylab as pl
import os,sys,shutil
import re
import matplotlib
import math
import datareader as dr
#matplotlib.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) #adjust fonts
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#matplotlib.rc('text', usetex=True)
def makemovie(name = None):
    """Stitch every .png in the current directory into an .avi via mencoder.

    :param name: basename for the output movie; defaults to the cwd path.
    Blocks until mencoder (which must be on the PATH) finishes.
    """
    # Parenthesised single-argument print works identically on Python 2 and 3.
    print('Starting to create a movie, with all the .png files in directory: %s ' % str(os.getcwd()))
    if name is not None:  # idiom fix: compare to None with `is`
        dirname = name
    else:
        dirname = str(os.getcwd())
    command = ('mencoder',
               'mf://*.png',
               '-mf',
               'type=png:w=800:h=600:fps=5',
               '-ovc',
               'lavc',
               '-lavcopts',
               'vcodec=mpeg4',
               '-oac',
               'copy',
               '-o',
               dirname + '.avi')
    os.spawnvp(os.P_WAIT, 'mencoder', command)
class File_Collector(object):
    """Recursively collects files under *rootdir* whose path matches *search*.

    Only '.dat' files are kept; paths matching *notsearch* and directories
    matching *notdir* are skipped.  *sortfunction* optionally orders the
    result (reversed when *rev* is true).
    """
    def __init__(self, rootdir , search , notsearch = '.png' , notdir = 'xyvwa' , filelist = None , sortfunction = None , rev = False):
        if filelist != None:
            self.plotfiles = filelist
        else:
            self.plotfiles = []
        self.sortfunction = sortfunction
        self.readfiles(rootdir , search , notsearch = notsearch , notdir = notdir)
        self.sortplotfiles(rev)
        print self.plotfiles
    def addfiles(self , *args):
        # Manually append extra file paths.
        for i in args:
            self.plotfiles.append(i)
    def sortplotfiles(self, rev = False):
        # Order matters for combined figures; without a key the order is
        # whatever os.listdir produced.
        if self.sortfunction != None:
            self.plotfiles = sorted(self.plotfiles ,key = self.sortfunction , reverse = rev )
        else:
            print 'No sort function given so the order of the files doesn\'t matter for the figure'
    def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):
        """
        If you want to plot data from a single file use readdata instead, this is a wrapper for readdata if you want to plot data from multiple files
        """
        print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))
        dirlist = os.listdir(dirname)
        for filep in dirlist:
            filep = os.path.join(dirname,filep)
            if os.path.islink(filep):
                pass
            elif os.path.isdir(filep):
                m = re.search(notdir , filep)
                if m is None:
                    self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )
            elif os.path.isfile(filep) and '.dat' in filep:
                nm = re.search(notsearch, filep)
                m = re.search(search , filep)
                #print m , nm
                if m is not None and nm is None:
                    self.plotfiles.append(filep)
            else:
                pass
class Plot_RG_Files(object):
    def __init__(self):
        """
        initialisation of the plotting class of the Richardson-Gaudin solver, this boils
        down to the creation of a figure to which we add one axes
        """
        # self.fig is reused (and cleared) by every plotting method below.
        self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')
        self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]
        self.separated = True #if we have a list and need to plot the plots separated
    def add_axes(self,pos = [0.5 , 0.2 , 0.4 , 0.3], axisbg = None , projection = 'rectilinear'):
        # Add an extra axes at [left, bottom, width, height] (figure coords).
        # NOTE(review): the mutable default list is shared between calls —
        # harmless as long as it is never mutated; confirm.
        self.fig.add_axes(pos , axisbg = axisbg, projection = projection)
def readdata(self, reflist , comment = '#' , regexp = None , substr = None, filename = True):
"""
read data from files in reflist is really important this function has to be called every time because it gathers the plotting data.
If you put a regular expression the function reads the data in line by line and extracts information it finds in between the data: if the regular expression is definedRemark ,the line by line reading is also used when the data is not compatible with np.loadtxt
possible regexp are : r'^#\s+((-|\d)\d+\.*\d*)\s+kritisch' to find the critical points
r'seniority\s\[(.+?)\]' to find the seniority's in an allstates file
"""
self.kpunten = []
datalist = []
prefixlist = []
if os.path.isfile(str(reflist)):
reflist = [reflist] #if we work with only one file this wraps it automatically in right format
for ref in reflist:
print('start with the collection of data from file %s' %ref)
plotf = open(ref, 'r')
if not filename:
prefixlist.append( os.path.dirname(ref) + '/')
else:
prefixlist.append(re.sub('\.dat$' , '' , ref))
try:
if regexp != None:
raise ValueError
dataf = np.loadtxt(plotf,comments = comment)
print 'we readed data in with np.loadtxt'
except:
print('reading in data with numpy loadtxt failed or use reg exp to extract information')
dataf = np.array([])
kpuntenf = []
plotf.seek(0) #go back to beginning of file
for line in plotf:
if regexp is not None:
analyse = re.search(regexp,line)
if analyse:
kpuntenf.append((analyse.group(1), len(dataf)-1 ))
print 'we found the following matches: %s' % analyse.group(0)
if substr != None:
line = re.sub(substr, '' , line)
if line[0] != comment:
#print line
pline = np.array(map(float,line.split()))
if len(dataf) <= 1:
dataf = pline
else:
try:
dataf = np.vstack((dataf,pline))
except:
continue
self.kpunten.append(kpuntenf)
datalist.append(dataf)
plotf.close()
self.datarg = datalist
self.prefix = prefixlist
self.reader = dr.ReaderOutput(reflist[0]) #Some plotting functions need a bit more information this info is extracted from the header of the files
self.reader.depvar['depvar'] += ' (a.u.)'
def procesfiles(self, dirname , search , notsearch = r'\.sw*|\.png', notdir = 'awfwfr', sortfunction = None , rev = False , regexp = None , substr = None , filelist = None , filename = True):
filecol =File_Collector(dirname , search , notsearch = notsearch ,filelist = filelist , sortfunction = sortfunction , rev =rev )
self.readdata(filecol.plotfiles, regexp = regexp , substr = substr, filename = filename)
    def generate_plot(self, xlimg = None , ylimg =None , exname = '' , prefix = True , save = True):
        """
        some nice plots to visualize the data with matplotlib, plotg = true if you plot the energylevels of the sp levels of a geometry file
        """
        print ('start with the generation of plots')
        # Column 2 holds the energy, column 1 the condensation energy.
        #plot of condensation energy
        self.plotwrap(0,2, 'energy (a.u.)' , name = 'ge'+ exname, titel = 'the energy (a.u.)', xlim = xlimg , ylim = ylimg , prefix = prefix ,save = save )
        self.plotwrap(0,1, 'condensation energy (a.u.)' , name = 'ce' + exname ,titel = 'the condensation energy (a.u.)',xlim = xlimg , ylim = ylimg , prefix = prefix,save = save )
    def plotwrap(self, xindex, yindex, yas, name = None, titel = None ,color = 'r' , sort = '' , label = None , xlim = None , ylim = None , prefix = False,save = True):
        # Plot column yindex against column xindex for every dataset; save
        # one figure per dataset when self.separated, otherwise one combined
        # figure after the loop.
        for i in range(len(self.datarg)):
            self.fig.axes[0].plot(self.datarg[i][:,xindex],self.datarg[i][:,yindex], color+sort , label = label)
            if self.separated == True and save:
                self.layout(self.reader.depvar['depvar'] , yas , tit = titel, xlim = xlim , ylim = ylim)
                self.savefig(name, filenum = i , prefix = prefix)
        if self.separated == False and save:
            self.layout(self.reader.depvar['depvar'] , yas , tit = titel, xlim = xlim , ylim = ylim)
            self.savefig(name + 'together' , prefix = prefix)
def plotrgvarscplane(self, interval = (-20 , 0), label = None):
for k in xrange(len(self.datarg) ):
for j in xrange(len(self.kpunten[filenum])):
self.layout('real part rgvars (a.u.)' , 'imaginary part rgvgrs (a.u.) ', tit = 'Richardson-Gaudin variables')
for i in xrange(self.rgindex,2*self.reader.npair+self.rgindex,2):
self.fig.axes[0].plot(self.datarg[k][self.kpunten[filenum][j][1] + interval[0]:self.kpunten[k][j][1] + interval[1],i],self.datarg[k][self.kpunten[k][j][1]+interval[0]:self.kpunten[k][j][1] + interval[1],i+1] , 'b' , label = label)
self.savefig('%f' % (float(self.kpunten[k][j][0])), filenum = k) # you never want this together
    def plotrgvars(self,cplane = False , begin = 0 , stop = None, name = '' , save = True , axnum = 0, xlim = None , ylim = None , prefix = True):
        # Wrapper: plot the real parts, then the imaginary parts, of all RG
        # variables against the dependent variable; with cplane also plot
        # them in the complex plane.
        print('starting to plot the Richardson-Gaudin variables')
        self.plotrgwrap(self.rgindex, 2*self.reader.npair+self.rgindex , self.reader.depvar['depvar'] , 'real part rgvars (a.u.)',axnum = axnum ,tit = 'Richardson-Gaudin variables', name = 're'+ name , begin = begin , stop =stop , save = save, xlim = xlim , ylim = ylim, prefix = prefix)
        self.plotrgwrap(self.rgindex+1, 2*self.reader.npair+self.rgindex+1 , self.reader.depvar['depvar'] ,'imaginary part rgvars (a.u.)',axnum = axnum , tit = 'Richardson-Gaudin variables', name = 'im'+ name, begin = begin , stop = stop , save = save, xlim = xlim , ylim = ylim, prefix = prefix)
        if cplane:
            self.plotrgwrap(self.rgindex, 2*self.reader.npair+self.rgindex ,'real part rgvars (a.u.)' ,'imaginary part rgvars (a.u.)',axnum = axnum ,tit = 'Richardson-Gaudin variables', name = 'cp' + name, begin= begin , stop = stop , save = save, xlim = xlim , ylim = ylim, prefix = prefix)
    def plotrgwrap(self, columnstart ,columnend ,xas , yas , axnum = 0 ,tit = None , begin = 0 , stop = None, name = '' , color = 'b' , sort = '-' ,label = None , save = True , xlim = None , ylim = None, prefix = True):
        # Workhorse for plotrgvars: RG variables live in column pairs
        # (real, imaginary) between columnstart and columnend.  A 'cp' name
        # selects complex-plane mode (x = real part, y = imaginary part);
        # otherwise column 0 (the dependent variable) is the x axis.
        for j in xrange(len(self.datarg)):
            #self.plotstar( number = 6 , length = 1 , sort = ['dashed', 'dashdot' ] , color = ['b','r']) #used to create the nice star plot in my factorisable interaction paper
            for i in xrange(columnstart,columnend,2):
                if 'cp' in name:
                    sort = ':'
                    """
                    if j % 2 == 1:
                        color = 'r'
                        if i == columnstart:
                            label = r'$g > \frac{-1}{7}$'
                        else:
                            label = None
                    else:
                        color = 'b'
                        if i == columnstart:
                            label = r'$g < \frac{-1}{7}$' #to create the legend uncomment the automatical legend line in the layout
                        else:
                            label = None
                    """
                    self.fig.axes[axnum].plot(self.datarg[j][begin:stop,i],self.datarg[j][begin:stop,i+1], color+sort , label = label , markersize = 3)#, mfc = 'None')
                else:
                    self.fig.axes[axnum].plot(self.datarg[j][begin:stop,0],self.datarg[j][begin:stop,i] , color, label = label)
            if self.separated == True and save:
                self.layout(xas , yas , tit = tit, xlim = xlim , ylim = ylim)
                self.savefig(name , filenum = j, prefix = prefix)
        if self.separated == False and save:
            self.layout(xas , yas , tit = tit, xlim = xlim , ylim = ylim)
            self.savefig(name + 'together', prefix = prefix)
def plotstar(self, number = 6 , length = 2 , sort =[ 'dashed', 'dashdot'], color = ['b','r']):
colorv = color[0] ; sortv = sort[0]
for shift in [0, math.pi/number]:
for angle in [math.pi *2/6. * i + shift for i in range(number)]:
x = [0, math.cos(angle) * length]
y = [0, math.sin(angle) * length]
self.fig.axes[0].plot(x,y, colorv , linestyle = sortv)
colorv = color[1] ; sortv = sort[1]
print 'plotted star'
    def plotintofmotion(self,name = 'iom',stop =None,begin = 0 , xlim = None , ylim = None , samedir = False , colormap = None, axbg = None):
        # Plot the integrals of motion (one column per sp level, starting
        # after the RG-variable columns) against the dependent variable.
        # With a colormap, each dataset is coloured by its energy at row
        # *begin*, normalised over all datasets.
        columns = self.rgindex + 2 * self.reader.npair
        if colormap != None:
            cm = pl.cm.get_cmap(colormap)
            normf = self.normalizefunction([ dat[begin,2] for dat in self.datarg ])
        for j in xrange(len(self.datarg)):
            for i in xrange(columns,self.reader.nlevel+ columns):
                lines = self.fig.axes[0].plot(self.datarg[j][begin:stop,0],self.datarg[j][begin:stop,i] , c = 'b')
                if colormap != None:
                    pl.setp(lines, color = cm(normf(self.datarg[j][begin,2])))
            if self.separated == True:
                self.layout(self.reader.depvar['depvar'] , 'integrals of motion (a.u.)', tit = 'integrals of motion of the Richardson-Gaudin model' , xlim = xlim , ylim = ylim)
                self.savefig(name , filenum = j)
        if self.separated == False:
            self.layout(self.reader.depvar['depvar'] , 'integrals of motion (a.u.)', tit = 'integrals of motion of the Richardson-Gaudin model' , xlim = xlim , ylim = ylim , axbg = axbg)
            if colormap != None:
                # NOTE(review): pl.normalize is long-deprecated matplotlib
                # API (matplotlib.colors.Normalize) — confirm the pinned
                # matplotlib version still provides it.
                sm = pl.cm.ScalarMappable(cmap= 'hot', norm=pl.normalize(vmin=0, vmax=1))
                # fake up the array of the scalar mappable. Urgh...
                sm._A = []
                pl.colorbar(sm)
            self.savefig(name + 'together' , samedir = samedir)
    def perezlattice(self, xlim = None , ylim = None , name = 'perezl'):
        """
        If the datafiles of all the states are read in with the right regexp, kpunten contains all the list indices of self.datarg of the interaction constants of interest
        use the following if you want the perezlattice at particular g (if used regexp to extract info in self.kpunten): to determine the index number in self.datarg: self.kpunten[k][j][1] remark: if this is done the problem exists that the particular g value searched for doens't exist because of change in stepwidht because of circumvention of critical points
        REMARK: at the moment the integrals are colorcoded: full green is integral of motion corresponding to lowest sp level, full red is integral of motion corresponding to the highest sp level, in between is the transition
        """
        # nplots evenly spaced rows (offset by 7) are sampled from dataset 0;
        # each sample produces one lattice figure.
        colstart = self.rgindex + 2 * self.reader.npair ; nplots = 300
        for j in range(nplots):
            rowindex = int(len(self.datarg[0])/float(nplots))*j + 7 ; intc = self.datarg[0][rowindex,0]
            for i in xrange(colstart,self.reader.nlevel+ colstart):
                for k in range(len(self.datarg)):
                    try:
                        lines = self.fig.axes[0].plot(self.datarg[k][rowindex,2],self.datarg[k][rowindex,i] , c = ((i-colstart)/float(self.reader.nlevel),1- (i-colstart)/float(self.reader.nlevel),0), marker = '.')
                    except IndexError:
                        # shorter datasets simply miss this row
                        pass
            self.layout( 'Energy (a.u.)', 'integrals of motion (a.u.)', tit = 'integrals of motion of the Richardson-Gaudin model at g = %s' % intc , xlim = xlim , ylim = ylim)
            # NOTE(review): str.translate(None, '.-') is Python 2 API — this
            # module is py2 throughout; confirm before porting.
            namek = str(intc).translate(None,'.-') + name
            self.savefig(namek , filenum = j , prefix = False)
            #print 'we plotted a perez lattice around %s' %self.kpunten[0][j][0]
def normalizefunction(self , values):
"""
normalizes the values between 0 and 1
"""
maxv = np.max(values)
minv = np.min(values)
def f(x):
return (x - minv)/(maxv-minv)
return f
    def scatterplot(self , xvars , yvars , colorvars , colormap = 'hot' ):
        # Scatter xvars vs yvars with points coloured by colorvars, plus a colorbar.
        cm = pl.cm.get_cmap(colormap)
        sc = self.fig.axes[0].scatter(xvars ,yvars, c=colorvars, cmap = cm )
        pl.colorbar(sc)
def normalize_to_groundstate(self):
print('Warning we normalize all the excited states to the groundstate energy')
gronddat = self.datarg[0]
for i in range(1,len(self.datarg)):
dif = np.shape(gronddat )[0] - np.shape(self.datarg[i])[0]
print dif
if dif < 0 :
self.datarg[i] = self.datarg[i][0:dif ,:]
elif dif > 0:
gronddat = gronddat[: -1.*dif , :]
print np.shape(gronddat) , np.shape(self.datarg[i])
self.datarg[i][:,1:3] = self.datarg[i][:,1:3] - gronddat[:,1:3] #we made sure that the data of the groundstateenergy is first in the rgdata list
del(self.datarg[0], self.prefix[0])
    def slow_butsure_normalization(self):
        # Row-matching normalisation: subtract the ground-state energy from
        # each excited state, aligning rows by dependent-variable value
        # (column 0) instead of by position; unmatched rows are deleted.
        print('Warning we normalize all the excited states to the groundstate energy')
        gronddat = self.datarg[0][:,2]
        depvals = list(self.datarg[0][:,0] )
        for i in range(1,len(self.datarg)):
            j = 0 ; gj = 0 ; end = np.shape(self.datarg[i])[0]
            while j < end :
                if depvals[gj] != self.datarg[i][j,0]:
                    try:
                        # resynchronise on the ground-state row with the
                        # same dependent-variable value
                        gj = depvals.index(self.datarg[i][j,0])
                    except ValueError:
                        self.datarg[i] = np.delete(self.datarg[i],j, axis=0)
                        end -= 1
                        print 'skipped some non-matching values for the normalization with the ground-state'
                        continue
                self.datarg[i][j,2] = self.datarg[i][j,2] - gronddat[gj] #we made sure that the data of the groundstate-energy is first in the rgdata list
                j += 1 ; gj += 1
        del(self.datarg[0], self.prefix[0])
    def layout(self , xlab , ylab , xlim = None , ylim = None , tit = None , axnum = 0 , legendhand = None , legendlab = None , legendpos = 'best' , finetuning = False , axbg = None , fs = 22, ticksize = 10):
        """
        In this function we finetune some aspects of the axes for all the tuning possibilitys see: http://matplotlib.org/api/axes_api.html
        especially the set functions ;)
        """
        print('We are starting with the layout')
        self.fig.axes[axnum].set_xlabel(xlab, fontsize = fs)
        self.fig.axes[axnum].set_ylabel(ylab , fontsize = fs)
        if xlim != None:
            self.fig.axes[axnum].set_xlim(xlim) #good value for xlim in the case of a xi path is : (2*self.rgeq.energiel[0]-5*(self.rgeq.energiel[1]-self.rgeq.energiel[0]),2*self.rgeq.energiel[-1]+0.5)
        if ylim != None:
            self.fig.axes[axnum].set_ylim(ylim)
        if tit != None:
            self.fig.axes[axnum].set_title(tit , fontsize = fs)
        if legendlab != None:
            # explicit handles/labels (e.g. proxy artists) take this path
            self.fig.axes[axnum].legend(legendhand , legendlab, loc = legendpos) #if you want to add extra info
        #self.fig.axes[axnum].ticklabel_format(style='sci', axis='y') #force scientifique notation for y axis
        #self.fig.axes[axnum].yaxis.major.formatter.set_powerlimits((0,0))
        for tick in self.fig.axes[axnum].xaxis.get_major_ticks():
            tick.label.set_fontsize(ticksize)
        for tick in self.fig.axes[axnum].yaxis.get_major_ticks():
            tick.label.set_fontsize(ticksize)
        leg = self.fig.axes[axnum].legend(loc = legendpos) #draws the legend on axes[axnum] all the plots that you labeled are now depicted in legend
        if axbg != None:
            self.fig.axes[axnum].set_axis_bgcolor(axbg)
        """
        if you forgot to add a label to a line with linenumber: lnum you can do: self.fig.axes[axnum].lines[lnum].set_label('this is my new label')
        the underneath is the same as : h , l = self.fig.axes[axnum].get_legend_handles_labels()
        self.fig.axes[axnum].legend(h,l)
        """
        if finetuning == True:
            # the matplotlib.patches.Rectangle instance surrounding the legend
            frame = leg.get_frame()
            frame.set_facecolor('0.80') # set the frame face color to light gray
            # matplotlib.text.Text instances you can change all properties of labels
            for t in leg.get_texts():
                t.set_fontsize('small') # the legend text fontsize
            # matplotlib.lines.Line2D instances
            for l in leg.get_lines():
                l.set_linewidth(1.5) # the legend line width
    def savefig(self , name , filenum = 0 , samedir = False , prefix = True):
        """
        After we are satisfied with our figure we save it with this function: dpi = pixels per inch, under a name determined by the savestring function().
        """
        #REMARK watch out with the translation of the dot to nothing when you gave as arguments the current working directory '.' because
        #if you do this it is not possible to save the file in the appropriate place because the folder doesn't exist anymore
        #because the first . dissapeared you can only remove . from floats or extensions not from current dir (maybe build in check that if the first letter of the filename is a dot then that dot is not removed)
        figname = self.savestring(name , filenum , samedir = samedir , prefix = prefix )
        self.fig.savefig(figname , dpi = 80 , facecolor = 'w' , edgecolor = 'w')
        # Reset the figure so the next plotting call starts from clean axes.
        self.fig.clf()
        self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear')
    def savestring(self , name , filenum , samedir = False , prefix = True):
        """
        This function generates the name whereunder the figure is going to be saved
        """
        # NOTE(review): str.translate(None, '/.') below is Python 2 string
        # API — this module is py2 throughout; revisit when porting.
        if prefix == True:
            if samedir:
                """
                Making use of some implementation detail of savefig, if we read in files from all different directory's, the prefixes contain the path of those files relative to the rootdirectory. So if you save the file we save it with first the prefix and then the name , so the figures end up in the same directory as the files. If you don't want this behaviour we need to remove the / in the prefixs so fig.savefig will not recognize it as a path so all the figures end up in the current working directory. Remark we only remove the / because if all the figures end up in same dir we need the path information to distinguish them.
                """
                self.prefix = [pre.translate(None , '/.') for pre in self.prefix]
            return '%s%s%d.png' %(self.prefix[filenum], name, filenum )
            #return '%s%s.png' %(self.prefix[filenum], name)
        else:
            return '%s%d.png' %(name, filenum )
    def writetext(self ,text , pos , axnum = 0, hor = None ,ver = None , rot = None ,fs =14 , transform = None):
        # Place *text* at axes position *pos* with the given alignment/rotation.
        self.fig.axes[axnum].text(pos[0] ,pos[1] ,text , rotation = rot ,horizontalalignment = hor, verticalalignment = ver , fontsize = fs, transform = transform) #, color = 'black', style = 'italic')
def least_sqr_fit(self,x, y):
"""
Calculates the least square fit of a list of independend variables x and dependend variables y.
It returns a list of function values of the best fitted straight line, with the given x values as independend variables and also a list with the parameters
that define the line. It's also possible to fit at the same time multiple datasets with the same xvalues just give y the form [(v1 , v2 , v3) , (v1 , v2 , v3), ... ]
Where the first tuple consists of the function values of x1 the second of x2 .... , So you get immediately three fitted lines, with the coefficients in a[0][0] , a[0][1]
, a[0][2] for the first, second and third rico for the three lines same for the bisection point with y axis
"""
A = np.array([ x, np.ones(len(x))])
# linearly generated sequence
a,f,g,h = np.linalg.lstsq(A.T,y) # obtaining the parameters
print 'de gevonden rechte = %.10f x + %.10f' %(a[0], a[1])
lined = map(lambda g: a[0]*g +a[1],x) # regression line
return lined , a
    def standard_plot(self , rgw = True , intm = True):
        # Convenience: energy plots, then optionally the RG variables and
        # the integrals of motion.
        self.generate_plot()
        if rgw:
            self.plotrgvars(cplane = False , begin = 0 , stop = None)
        if intm:
            self.plotintofmotion()
class Plot_Geo_File(Plot_RG_Files):
    """
    remark before the Richardson-Gaudin variables start this file has 6 columns, extra: nig , meandistance , number of levels
    """
    def __init__(self , name = 'x', searchstr = 'plotenergy'):
        self.rgindex = 6
        if os.path.isdir(name):
            self.procesfiles(name , searchstr)
        elif os.path.isfile(name):
            self.readdata([name])
        super(Plot_Geo_File,self).__init__()
    def generate_plot(self):
        # Standard energy plots plus the geometry-specific columns:
        # 3 = non-interacting ground state (nig), 4 = mean distance d.
        super(Plot_Geo_File,self).generate_plot()
        print('plot non-interacting groundstate')
        self.plotwrap(0,3, 'energy of the non-interacting groundstate (a.u.)','nig', titel = 'aantal paren = %f' %(self.reader.npair))
        try:
            self.plotwrap(0,4,"d (a.u.)" ,'meandistance', titel = "number of sp levels = %f" %self.reader.nlevel)
        except Exception:
            # FIX: narrowed from a bare except so Ctrl-C still propagates.
            print('the plot of d failed')
class Plot_Data_File(Plot_RG_Files ):
    def __init__(self, name = 'x', searchstr = 'plotenergy' , notsearch =r'\.swp|\.png' , regexp = None, sortfunction = None):
        # Standard data files: 3 leading columns before the RG variables.
        self.rgindex = 3
        if os.path.isdir(name):
            self.procesfiles(name , searchstr, notsearch = notsearch , regexp = regexp , sortfunction = sortfunction)
        elif os.path.isfile(name):
            self.readdata([name], regexp = regexp)
        super(Plot_Data_File,self).__init__()
    def addlevel(self , g ):
        # Least-squares fit of the ground-state energy versus the number of
        # added continuum sp levels (one point per file, from self.kpunten).
        genergy = [k[0][0] for k in self.kpunten]
        x = range(0,len(genergy ))
        y , coef = self.least_sqr_fit(x,genergy )
        self.fig.axes[0].plot(x, y ,'r-',label = '%f*x %f' %(coef[0],coef[1]))
        self.fig.axes[0].plot(x, genergy, 'bo',label= 'datapoints')
        print genergy
        self.layout('number of added continuum sp levels', 'groundstate energy (MeV)', tit = 'the groundstate energy of Sn120 with i.c.: %.3f' %g )
        # NOTE(review): savestring() appends '.png' itself, so this name may
        # yield a double extension ('...al.png0.png') — confirm intended.
        self.savefig('g=%fal.png' % g)
    def plotrgcloud(self ,begin = 0, step = 1 , colormap = 'hot'):
        # For every *step*-th row, scatter all RG variables of all states in
        # the complex plane (coloured by energy), save a frame, then stitch
        # the frames into a movie with makemovie().
        while begin <= np.shape(self.datarg[0])[0]:
            revars = [rerg for dat in self.datarg for rerg in dat[begin,self.rgindex:self.rgindex+2*self.reader.npair:2]]
            imvars = [imrg for dat in self.datarg for imrg in dat[begin,self.rgindex+1:self.rgindex+2*self.reader.npair + 1:2]]
            energy = [[dat[begin,2]] *self.reader.npair for dat in self.datarg ]
            self.scatterplot(revars , imvars , energy , colormap = colormap)
            self.layout( 'real part of rgvars (a.u)' , 'imaginary part of rgvars (a.u.)', xlim = None , ylim = None , tit = 'RG vars g = %f all states'%(self.datarg[0][begin , 0]) , axnum = 0 , legendhand = None , legendlab = None , legendpos = 'best' , finetuning = False)
            self.savefig('allstates%f' % (self.datarg[0][begin,0]) , samedir = True)
            begin += step
        makemovie(name = 'allstatesrgcloud')
    def plot_spectrum(self,xlim = None , ylim = None, search = 'plotenergy', rgw = True, intm = True, name = 'spectrum', readgreen = False, standard = True, save = True):
        # Gather all state files under the cwd (the ground state is sorted
        # first so the normalisation below can subtract it), then draw the
        # combined excitation spectrum in one figure.
        self.procesfiles(os.getcwd(), search, notsearch = r'\.swp|\.png', sortfunction = lambda x : -1. if '/0/' in x or 'ground' in x else 0.) #sortfunction makes sure the groundstate is first this is important for the normalization
        if standard: self.standard_plot(rgw , intm)
        #self.normalize_to_groundstate()
        self.slow_butsure_normalization()
        self.separated = False
        if readgreen: mine , pre = self.readgreen()
        self.generate_plot(xlimg = xlim , ylimg = ylim, prefix = False , exname = name, save = save)
        if readgreen: print mine , ' prefix is :', pre
    def plot_gapsurvey(self,xlim = None , ylim = None, search = 'plotenergy', rgw = True, intm = True, name = 'spectrum', readgreen = True, standard = True, save = False, dir = '.'):
        # Walk every subdirectory, draw its spectrum into one shared figure,
        # annotate the pair counts by hand, then save the master figure.
        for i in os.listdir(dir):
            if os.path.isdir(i):
                os.chdir(i)
                self.plot_spectrum(xlim = xlim, ylim = ylim, search = search, rgw = rgw, intm = intm, name = name , readgreen = readgreen, standard = standard , save = save )
                os.chdir('..')
        # Hand-picked label positions: number of pairs per sub-spectrum.
        for pos,text in [((-0.28,300),'6p' ) ,((-0.28,490),'7p'),((-0.28,790),'8p'),((-0.22,900),'9p'),((-0.17,700),'10p'),((-0.1,900),'15p'),((-0.062,800),'20p'),((-0.040,860),'25p'),((-0.0215,950),'30p')]:
            self.writetext(text , pos ,axnum = 0, hor = 'left', ver = 'bottom', rot = 0,fs =14)
        self.layout(self.reader.depvar['depvar'] , 'energy (a.u.)' , tit = 'exploring the gap', xlim = xlim , ylim = ylim)
        self.savefig( 'master' , filenum = 0 , samedir = False , prefix = False)
def readgreen(self):
readgreenpoint = self.reader.eta/(2.*(self.reader.npair-1) -2.*np.sum(np.array(self.reader.degeneracies)/4. -np.array(self.reader.seniorities)))
step = abs(self.datarg[0][0,0] -self.datarg[0][1,0])
self.fig.axes[0].axvline(x = readgreenpoint,ymin = 0 , ymax = 1, c = 'b', linewidth = 1)
datareadgreen = [row[2] for data in self.datarg for row in data if ((row[0] - step/2. <= readgreenpoint) and (row[0] + step/2. >= readgreenpoint ) )]
#datareadgreen = []
#for data in self.datarg:
# f = False
# for row in data:
# if ((row[0] - step/2.< readgreenpoint) and (row[0] + step/2. > readgreenpoint ) ):
# datareadgreen.append(row[2])
# f = True
# break
# if f == False:
# datareadgreen.append(1e9)
#assert(len(datareadgreen) == len(self.prefix))
lowest = min(datareadgreen)
file = open('readgreenfile.dat' , 'w')
file.write('#the read-green point is at: %f \n#the value of the lowest excited state is: %f\n#the filename of this state is: %s \n' %(readgreenpoint ,lowest, self.prefix[datareadgreen.index(lowest)] ))
file.close()
return lowest, self.prefix[datareadgreen.index(lowest)]
class Plot_Xi_File(Plot_RG_Files):
    def __init__(self, name , search , regexp =r'constant:\s*([\-0-9.]+)'):
        # Xi-path files: 2 leading columns before the RG variables; the
        # interaction constant is harvested from each file header via *regexp*.
        self.rgindex = 2
        if os.path.isdir(name):
            self.procesfiles(name ,search, notsearch =r'\.swp|\.png', regexp = regexp)
        elif os.path.isfile(name):
            self.readdata([name], regexp = regexp)
        super(Plot_Xi_File,self).__init__()
def plot_spectrumxichange(self):
"""
Plot the entire spectrum at a particular g in function of xi
args = directory with all the data
"""
countgood = 0 ; countbad = 0
for idata in self.datarg:
if idata[-1, 0] == 1.:
self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'b')
countgood += 1
print countgood , 'good solution'
else:
self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'r')
print countbad, 'bad solution'
countbad += 1
print 'We found %g good solutions and %g tda startdistributions that broke down before xi = 1, we hope that\'s what you expected' %(countgood,countbad)
#Create custom artistsr[goodline,badline],['solution','breakdown']
goodline = pl.Line2D((0,1),(0,0), color='b')
badline = pl.Line2D((0,1),(0,0), color='r')
self.layout(self.reader.depvar['depvar'] , r'energy spectrum (a.u.)' , tit = r'All tda start distributions $\xi$' , legendhand = [goodline , badline] , legendlab = ['solution', 'breakdown'] )
self.savefig('xispec')
def plotrgvarsxi(self, name = 'rgvxi' ,xlim = None , ylim = None):
for j in xrange(len(self.datarg)):
for i in np.arange(self.rgindex,2*self.reader.npair+self.rgindex,2):
self.fig.axes[0].plot(self.datarg[j][0,i],self.datarg[j][0,i+1],'b.', markersize = 23) #Richardson-Gaudin solutions (xi = 1)
self.fig.axes[0].plot(self.datarg[j][len(self.datarg[j][:,0])-1,i],self.datarg[j][len(self.datarg[j][:,0])-1,i+1],'b.',mfc = 'None', markersize = 23) # Corresponding tda solutions (xi = 0 )
self.fig.axes[0].plot(self.datarg[j][:,i],self.datarg[j][:,i+1],'b-' , lw =2) # intermediate values of xi
if self.reader.eta == None:
sing = np.array(self.reader.elevels)* 2
else:
sing = self.reader.eta * np.array(self.reader.elevels) * np.array(self.reader.elevels)
for i in range(2):#self.reader.nlevel):
self.fig.axes[0].axvline(x = sing[i] ,c= 'k',linestyle = '--')
if self.separated == True:
self.layout('real part of rgvars (a.u)', 'imaginary part of rgvars (a.u.)', xlim =xlim, ylim = ylim, tit = 'g = %s (a.u.)' %(self.kpunten[j][0][0]) , fs = 20)
self.savefig(name , filenum = j, prefix = False)
if self.separated == False:
self.layout('real part of rgvars (a.u)', 'imaginary part of rgvars (a.u.)', xlim =xlim, ylim = ylim, tit = 'g = %s (a.u.)' %(self.kpunten[j][0][0]) , fs = 20)
self.savefig(name + 'together' , prefix = False )
class Plot_All_File(Plot_RG_Files):
    # Plotter for a single data file containing all states (grouped by
    # seniority) at one fixed interaction constant g.
    def __init__(self,name, g , regexp = r'seniority\s\[(.+?)\]',substr = r'\{.*\}'):
        # The fixed g value; only used for figure titles below.
        self.chardata = g
        # Column index where the (re, im) pairs of the RG variables start.
        self.rgindex = 2
        super(Plot_All_File,self).__init__()
        self.readdata(name, regexp = regexp ,substr = substr)
    def plotrgcloud(self):
        """
        Plot the RG-variable cloud per seniority block.

        Uses the (seniority label, row index) pairs collected in
        self.kpunten[0] to slice the data into per-seniority ranges and
        delegates each range to plotrgwrap.
        """
        print self.kpunten
        for i in range(len(self.kpunten[0])):
            self.writetext('sen ='+ self.kpunten[0][i][0], (0.65,0.85), axnum = 0, hor = None ,ver = None , rot = None ,fs =14 , transform = self.fig.axes[0].transAxes)
            # The block ends where the next seniority block starts; the
            # last block runs to the end of the data (stop = None).
            if i == len(self.kpunten[0]) -1 :
                end = None
            else:
                end = self.kpunten[0][i+1][1] + 1
            print end
            self.plotrgwrap( self.rgindex,2*self.reader.npair+self.rgindex,'real part of rgvars (a.u)' , 'imaginary part of rgvars (a.u.)', tit ='RG vars g = %f all states'%(self.chardata) , begin = self.kpunten[0][i][1] , stop = end , name = 'cpcloud'+ self.kpunten[0][i][0] , filenum = 0)
def main(option, args):
    """Dispatch one of the predefined plotting jobs.

    Parameters:
        option -- job name: 'pexcited', 'gapsurvey', 'wpairing', 'inset',
                  'rgclouddata', 'addlevel', 'rgvar', 'rgcloud',
                  'cprgvar', 'intmotion', or a xi job ('xipath',
                  'specxichange').
        args -- option-dependent tuple of extra arguments (paths, flags,
                regexps); see defineoptions() for the expected layout.

    BUGFIX: the original compared option strings with `is` (identity),
    which only appeared to work because CPython interns short literals;
    all comparisons now use `==` so options from any source match.
    """
    plotter = Plot_Data_File()
    plottergeo = Plot_Geo_File()
    if option == 'pexcited':
        plotter.plot_spectrum(xlim = (-0.3,0), ylim = (0,1000), search = 'plotenergy', rgw = True, intm = True, name = 'spectrum' , readgreen = True, standard = False, save = False)
    if option == 'gapsurvey':
        plotter.plot_gapsurvey(xlim = (-0.3,0), ylim = (0,1000), search = 'plotenergy', rgw = True, intm = True, name = 'spectrum' , readgreen = True, standard = False, save = False,dir = '.')
    if option == 'wpairing':
        # args[1] chooses geometry plotting over plain data plotting.
        # NOTE(review): `== True` only matches an exact boolean/1, not
        # general truthiness -- kept as-is to preserve behavior.
        if args[1] == True:
            plottergeo.procesfiles(args[0], 'plotenergy')
            plottergeo.generate_plot()
        else:
            plotter.procesfiles(args[0],'plotenergy')
            plotter.generate_plot(xlimg = None, ylimg = None)
    if option == 'inset':
        """
        Example of how you need to draw a small inset in a larger plot of a particular
        area of interest with matplotlib
        """
        plotter.readdata([args[0]])
        plotter.reader.depvar['depvar'] = r'$\eta$ (a.u.)' #change the future x-axis label to latex
        begin =0
        stop = None
        plotter.plotrgvars(begin = begin , stop = stop , name = 'etanul2', save = False)
        # Second pass: zoom on the tail of the data inside the inset axes.
        begin = 9880
        stop = None
        plotter.rgindex = 5
        plotter.reader.npair = 9
        plotter.add_axes([0.5,0.2,0.3,0.3])
        plotter.fig.axes[1].xaxis.set_major_locator(matplotlib.ticker.LinearLocator(5))
        #see also: http://scipy-lectures.github.io/intro/matplotlib/matplotlib.html#ticks
        plotter.plotrgvars(begin = begin , stop =stop, axnum = 1)
    if option == 'rgclouddata':
        plotter.procesfiles(args[0] , 'plotenergy' , notdir = 'movie')
        plotter.plotrgcloud(step = 10)
    if option == 'addlevel':
        # Sort output files by the integer embedded in their names.
        plotter.procesfiles( '.' , 'plotenergy' , sortfunction = lambda s : int(re.search(r'\d+' , s).group()), rev = True , regexp = r'^%f\s+[\-+\.\d]+\s+([\-+\.\d]+)\s' % args[0])
        plotter.addlevel(args[0])
    if option == 'rgvar':
        ref = args[0]
        begin =0
        stop = None
        cp = args[1]
        plotter.procesfiles(args[0],'plotenergy',filename = False)
        plotter.reader.depvar['depvar'] = 'g (a.u.)' #change the future x-axis label to latex
        plotter.separated = False
        plotter.plotrgvars(cplane = cp , begin = begin , stop = stop , name = '', xlim = (-1,1.), ylim = (-1,1.), prefix = True)
    if option == 'rgcloud':  # was `option is 'rgcloud'`
        name = 'newstyleDang120neutronwin5_5sen2.dat'
        plottera = Plot_All_File(name, -0.137 , regexp = r'seniority\s\[(.+?)\]',substr = r'\{.*\}')
        plottera.plotrgcloud()
    if option == 'cprgvar':  # was `option is 'cprgvar'`
        ref = args[0]
        plotter.readdata([ref], regexp = r'^#\s+((-|\d)\d+\.*\d*)\s+kritisch', begin = 1)
        plotter.plotrgvarscplane(interval = (-20,0))
    if option == 'intmotion':  # was `option is 'intmotion'`
        #plotter.readdata([args])
        plotter.separated = True
        if args[3] != 'perez':
            plotter = Plot_Data_File(args[0] , args[1] , args[2])
            plotter.plotintofmotion(name = 'intmotion',xlim = (-1.5,0.), ylim = (-2 , 2) , samedir =True , colormap ='hot' , axbg = 'g')
        else:
            plotter = Plot_Data_File(args[0] , args[1] , args[2])# , regexp = r'^(-0.003100|-0.063100|-0.123100|-0.213100|-0.363100|-0.993100)')
            plotter.perezlattice()
    if 'xi' in option:
        plotterxi = Plot_Xi_File(args[0], args[1], regexp = r'constant:\s*([\-0-9.]+)')
        if option == 'xipath':  # was `option is 'xipath'`
            #plotterxi.procesfiles(args[0],args[1] , regexp = r'constant:\s*([\-0-9.]+)')
            plotterxi.separated = True
            plotterxi.plotrgvarsxi(ylim = None , xlim = None)
        if option == 'specxichange':  # was `option is 'specxichange'`
            #to plot entire spectra with broken down in red and states who went from xi = 0 to xi =1 in blue
            plotterxi.plot_spectrumxichange()
def defineoptions():
    """Hard-coded driver: choose a plotting job and forward it to main().

    Available jobs include 'pexcited' (all excited states relative to the
    ground state), 'wpairing' (results of a writepairing call in
    writepairing.py), and 'addlevel' (ground-state energies at constant g
    from generating_datak output files with added empty sp levels, plus a
    linear regression). The most commonly used jobs are 'wpairing',
    'rgvar' and 'intmotion'.
    """
    selected = 'rgvar'
    #args = -0.137 , None
    extra = ('.', True, 'xipath', True, 'xipath', False, '.',
             r'constant:\s*([\-0-9.]+)', r'xi[0-9\.a-zA-Z\-]+.dat$',
             'g', False)
    main(selected, extra)
if __name__ == '__main__':
    # Script entry point: run the hard-coded job chosen in defineoptions().
    defineoptions()
    #makemovie()
###Marie Hemmen, 05.09.16###
import sys
from shmooclass import shmoo
from spline_interpolation import Spline_Interpolation
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import scipy
from scipy import misc
from scipy import ndimage
import math
import pdb
import Image
from random import randint
import re
from scipy import pi,sin,cos
from numpy import linspace
from numpy import (array, dot, arccos)
from numpy.linalg import norm
import pylab as pl
import os, sys
np.set_printoptions(threshold=np.nan)
def makeEllipse1(x0, y0, a, b, an, points=1000):
    """Sample a rotated ellipse and return its outline coordinates.

    Parameters:
        x0, y0 -- center of the ellipse
        a, b   -- semi-axes along the (rotated) major/minor directions
        an     -- rotation angle of the ellipse, in degrees
        points -- number of samples along the perimeter (default 1000,
                  the value previously hard-coded)

    Returns a (2, points) array: row 0 holds the x values, row 1 the
    y values.
    """
    cos_a = cos(an * pi / 180.)
    sin_a = sin(an * pi / 180.)
    the = linspace(0, 2 * pi, points)
    # Standard parametric form of an ellipse rotated by `an` degrees.
    X = a * cos(the) * cos_a - sin_a * b * sin(the) + x0
    Y = a * cos(the) * sin_a + cos_a * b * sin(the) + y0
    # Stack x on top of y (equivalent to the old append along axis 0).
    return np.vstack((X, Y))
def makeArray(array, mx, my):
    """Rasterize outline coordinates into a binary my-by-mx image.

    `array` is a (2, N) coordinate array (row 0 = x, row 1 = y, 1-based);
    each point is shifted to 0-based indices, clamped at zero, and marked
    with 1 in the returned zero-initialized image.
    """
    canvas = np.zeros((my, mx))
    for px, py in zip(array[0], array[1]):
        col = px - 1
        row = py - 1
        # Clamp coordinates that fall left of / above the canvas.
        if col < 0:
            col = 0
        if row < 0:
            row = 0
        canvas[row][col] = 1
    return canvas
def angle(pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
inner_product = x1*x2 + y1*y2
len1 = math.hypot(x1, y1)
len2 = math.hypot(x2, y2)
if len1 >=1 and len2 >= 1:
return math.acos(inner_product/(len1*len2))
else:
return 0
def calculate(pt, pt2):
    """Return the angle between the two vectors, converted to degrees."""
    return math.degrees(angle(pt, pt2))
def rotate(image, rot):
    """Rotate a (2, N) coordinate array by `rot` degrees about the origin.

    Parameters:
        image -- array-like with row 0 = x values, row 1 = y values
                 (e.g. the output of makeEllipse1)
        rot   -- counter-clockwise rotation angle in degrees

    Returns a plain ndarray with the rotated coordinates.
    """
    rad = np.radians(rot)
    c = np.cos(rad)
    s = np.sin(rad)
    # 2x2 rotation matrix. np.mat/np.matrix is deprecated in NumPy, so
    # use a plain array with an explicit matrix product (same values).
    R = np.array([[c, -s], [s, c]])
    return R.dot(image)
def MakeImage(array,titel):
    # Render `array` as a heat map with a colorbar and the given title
    # on the current matplotlib figure (does not show/save).
    plt.imshow(array,cmap = "YlGnBu") #YlGnBu
    plt.colorbar()
    plt.title(titel)
def MakeImagetr(array,title,x0t,y0t):
    # Heat map of `array` (pixel units) with the data extent shifted so
    # that (x0t, y0t) -- presumably the average cell center -- sits at the
    # origin, and with the axis spines drawn through zero so the plot
    # reads like a coordinate system centered on the cell.
    print "x0av: ", x0t, "y0av: ", y0t
    h, w = array.shape
    ax = plt.gca()
    plt.imshow(array,cmap = "YlGnBu",
        extent=[-x0t, w-x0t, h-y0t, -y0t]) #YlGnBu
    #plt.colorbar()
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('#Number of Cells')
    # Place the axis labels next to the zero-centered spines.
    ax.xaxis.set_label_coords(0.89, 0.45)
    ax.yaxis.set_label_coords(0.33, 0.93)
    plt.xlabel('Width [Pixel]', fontsize = 13)
    plt.ylabel('Length [Pixel]', fontsize = 13).set_rotation(0)
    # set the x-spine (see below for more info on `set_position`)
    ax.spines['left'].set_position('zero')
    # turn off the right spine/ticks
    ax.spines['right'].set_color('none')
    ax.yaxis.tick_left()
    # set the y-spine
    ax.spines['bottom'].set_position('zero')
    # turn off the top spine/ticks
    ax.spines['top'].set_color('none')
    ax.xaxis.tick_bottom()
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(12)
    plt.title(title, fontsize = 16)
def MakeImagetrMicro(array,title,x0t2,y0t2):
    # Same centered heat map as MakeImagetr, but with the extent scaled
    # by 0.13 -- per the axis labels this converts pixels to micrometers
    # (0.13 um/px) -- and the view fixed to a -5..7 x -5..5 um window.
    print "x0av: ", x0t2, "y0av: ", y0t2
    h, w = array.shape
    ax = plt.gca()
    plt.imshow(array,cmap = "YlGnBu",
        extent=[-x0t2*0.13, (w-x0t2)*0.13, (h-y0t2)*0.13, -y0t2*0.13]) #YlGnBu
    #plt.colorbar()
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('#Number of Cells')
    ax.xaxis.set_label_coords(0.89, 0.45)
    ax.yaxis.set_label_coords(0.3, 0.93)
    plt.xlabel('Width [$\mu$m]', fontsize = 13)
    plt.ylabel('Length [$\mu$m]', fontsize = 13).set_rotation(0)
    # set the x-spine (see below for more info on `set_position`)
    ax.spines['left'].set_position('zero')
    # turn off the right spine/ticks
    ax.spines['right'].set_color('none')
    ax.yaxis.tick_left()
    # set the y-spine
    ax.spines['bottom'].set_position('zero')
    # turn off the top spine/ticks
    ax.spines['top'].set_color('none')
    ax.xaxis.tick_bottom()
    plt.axis((-5,7 ,-5,5))
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(12)
    plt.title(title, fontsize = 16)
def MakeNormedImage(array,title):
    # Heat map with a fixed color scale [0, 0.4] so different images are
    # directly comparable.  NOTE(review): `title` is currently unused.
    plt.imshow(array,cmap = "YlGnBu",vmin=0, vmax = 0.4)
    plt.colorbar()
def MakeNormalizedImage(array,title,x0t3,y0t3):
    # Normalized-frequency heat map, centered at (x0t3, y0t3) and scaled
    # to micrometers (0.13 um/px per the axis labels), with zero-centered
    # spines and a fixed -5..7 x -5..5 um view.
    # NOTE(review): here ylabel reads 'width' and xlabel 'length',
    # opposite to MakeImagetr -- verify which orientation is intended.
    h, w = array.shape
    ax = plt.gca()
    plt.imshow(array,cmap = "YlGnBu",
        extent=[-x0t3*0.13, (w-x0t3)*0.13, (h-y0t3)*0.13, -y0t3*0.13])
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('normalized frequency of cell outlines', fontsize = 16)
    ax.xaxis.set_label_coords(0.86, 0.43)
    ax.yaxis.set_label_coords(0.28, 0.93)
    plt.ylabel('width [$\mu$m]',fontsize = 30).set_rotation(0)
    plt.xlabel('length [$\mu$m]',fontsize = 30)
    # set the x-spine (see below for more info on `set_position`)
    ax.spines['left'].set_position('zero')
    # turn off the right spine/ticks
    ax.spines['right'].set_color('none')
    ax.yaxis.tick_left()
    # set the y-spine
    ax.spines['bottom'].set_position('zero')
    # turn off the top spine/ticks
    ax.spines['top'].set_color('none')
    ax.xaxis.tick_bottom()
    plt.axis((-5,7 ,-5,5))
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(25)
    plt.title(title)
def MakeImage2(array1,array2, array3):
    # Overlay three gray-scale images at 50% alpha each on the current
    # axes (no colorbar or title).
    plt.imshow(array1,cmap = "gray", alpha = 0.5)
    plt.imshow(array2,cmap = "gray", alpha = 0.5)
    plt.imshow(array3,cmap = "gray", alpha = 0.5)
def ShowImage(path):
    # Save the current figure to "<path>.png", then display it on screen
    # (plt.show() blocks until the window is closed).
    plt.draw()
    plt.savefig(path + ".png")
    plt.show()
# --- Command-line arguments (example paths kept from the original) ----------
parameterfileIn = sys.argv[1] #/home/marie/Master/EllipseparametersNew/EKY360
imagefileIn = sys.argv[2] #/home/marie/Master/Outlinecoordinates/Positives_new
ImageLocOut = sys.argv[3] #/home/marie/Master/Average_Images_New/EKY360Sorted
Matlabfile = sys.argv[4] #/home/marie/Master/Average_Images_New/MatlabFiles/EKY360
valuesfile = sys.argv[5] #/home/marie/Master/Values/EKY360/160
Strain = sys.argv[6] #EKY360
MatlabfileNorm = sys.argv[7] #/home/marie/Master/Average_Images_New/MatlabFilesNorm/EKY360
wf = open(valuesfile,'w')
files = os.listdir(parameterfileIn)
# First pass over all parameter files: collect the length (column 8) of
# every cell whose area (column 5) is at least 500, to find the observed
# length range.  NOTE(review): the per-file handles are never closed
# (the close call below is commented out).
alllengths = []
#max_x = 110
#max_y = 90
for fi in files:
    pfile = open(parameterfileIn+"/"+fi, 'r')
    pfile.readline()
    #print pfile
    for fline in pfile:
        fline = fline.split("\t")
        areaf = float(fline[5])
        # Skip small detections (noise / partial cells).
        if areaf<500:
            continue
        length = float(fline[8])
        alllengths.append(length)
    #pfile.close()
minlength = np.amin(alllengths)
maxlength = np.amax(alllengths)
print "minlength: ", minlength
print "maxlength: ", maxlength
# Canvas size (pixels) for every accumulated image below.
max_x = 120 #100
max_y = 100 #80
# NOTE(review): the measured minlength above is immediately overridden by
# this hard-coded value; the measured one is only printed.
minlength = 20.0
# Bin width in pixels; l1..l12 are the upper edges of 12 length classes.
dif = 4.42
l1 = minlength + dif
l2 = l1+dif
l3 = l2+dif
l4 = l3+dif
l5 = l4+dif
l6 = l5+dif
l7 = l6+dif
l8 = l7+dif
l9 = l8+dif
l10 = l9+dif
l11 = l10+dif
l12 = l11+dif
# Same class boundaries converted with the 0.13 scale factor (per the
# plot labels, micrometers at 0.13 um per pixel).
minlengthMicro = minlength * 0.13
l2Micro = l2*0.13
l3Micro = l3*0.13
l4Micro = l4*0.13
l5Micro = l5*0.13
l6Micro = l6*0.13
l7Micro = l7*0.13
l8Micro = l8*0.13
l9Micro = l9*0.13
l10Micro = l10*0.13
l11Micro = l11*0.13
l12Micro = l12*0.13
l1Micro = l1*0.13
print minlength, maxlength, l1,l2,l3,l4,l5,l6,l7,l8,l9,l10,l11,l12
# ---------------------------------------------------------------------------
# Allocation of the per-length-class accumulators (classes l1..l12).
#
# The original script spelled out all ~490 of these assignments by hand;
# they are generated programmatically here instead.  Every name is bound in
# the module namespace with exactly the same initial value as before (an
# empty list, or a fresh max_y-by-max_x zero image), so the processing loop
# below can keep addressing them individually (a_averagelistl1,
# allcellsl5FHSpl, ...).
# ---------------------------------------------------------------------------
_g = globals()

def _zeros():
    # One independent accumulator image per name.
    return np.zeros((max_y, max_x))

# Per-class statistic lists: pixel units first, then the micrometer
# variants (x02/y02 exist only in pixel units, as in the original).
_PIXEL_STATS = ('a', 'b', 'a2', 'b2', 'area', 'perimeter', 'length',
                'x0', 'y0', 'x02', 'y02')
_MICRO_STATS = ('a', 'b', 'a2', 'b2', 'area', 'perimeter', 'length',
                'x0', 'y0')
for _i in range(1, 13):
    for _s in _PIXEL_STATS:
        _g['%s_averagelistl%d' % (_s, _i)] = []
    for _s in _MICRO_STATS:
        _g['%sMikro_averagelistl%d' % (_s, _i)] = []

# Whole-class accumulator images.
for _i in range(1, 13):
    _g['allarrayreducedl%d' % _i] = _zeros()
for _i in range(1, 13):
    _g['allarrayl%d' % _i] = _zeros()
for _i in range(1, 13):
    for _p in ('allcellsl%d', 'allcellsPosl%d', 'allcellsFHSpll%d',
               'allcellsSHSpll%d'):
        _g[_p % _i] = _zeros()
allcellsl5Spline = _zeros()
for _i in range(1, 13):
    _g['allcellsFHl%d' % _i] = _zeros()
    _g['allcellsSHl%d' % _i] = _zeros()

# Per-class split images (pos/neg halves; FH/SH presumably first/second
# half, with *Spl spline variants -- verify against the processing loop).
for _i in range(1, 13):
    for _sfx in ('pos', 'posT', 'posSpline', 'neg', 'negT', 'negSpline',
                 'FH', 'RH', 'FHSpl', 'RHSpl'):
        _g['allcellsl%d%s' % (_i, _sfx)] = _zeros()

# Scratch images the original interleaved with the per-class blocks;
# they only exist for some classes.
BothSplines = _zeros()
BothSplinesRed = _zeros()
BothSplines2 = _zeros()
BothSplines7 = _zeros()
BothSplinesRed7 = _zeros()
BothSplines8 = _zeros()
BothSplinesRed8 = _zeros()
BothSplines9 = _zeros()
BothSplinesRed9 = _zeros()
BothSplines10 = _zeros()
BothSplinesRed10 = _zeros()
BothFin = _zeros()

# Spline / normalized accumulators exist only for classes 1..11.
for _i in range(1, 12):
    _g['allcellsPosl%dSpline' % _i] = _zeros()
    _g['allcellsNormalizedl%d' % _i] = _zeros()
for fi in files:
#print "new round"
pfile = open(parameterfileIn +"/" + fi,'r')
imfile = open(imagefileIn+ "/" +fi,'r')
#print "pfile: ", pfile
#print "imfile: ", imfile
#pdb.set_trace()
All_Ellipses=[]
ellipseDict = {}
count = 0
icount = 0
pfile.readline()
imfile.readline()
sline = imfile.readline()
arrafilledboth = np.zeros((max_y,max_x))
allcellsPosNormed = np.zeros((max_y,max_x))
allcellsPosSpline = np.zeros((max_y,max_x))
allcellsSplBothPos = np.zeros((max_y,max_x))
allcellsSplinenoch = np.zeros((max_y,max_x))
allcellsNormed = np.zeros((max_y,max_x))
allcellsReduced = np.zeros((max_y,max_x))
allarraydoubleReduced = np.zeros((max_y,max_x))
for line in pfile:
if line.startswith('[') or line.startswith('0.') or line.startswith('1.') or line.startswith(' '):
continue
#print "line: ", line
#print "sline1: ",sline
fline = line.split("\t")
ratio = float(fline[7])
a = float(fline[1])
b = float(fline[2])
a2 = float(fline[3])
b2 = float(fline[4])
area = float(fline[5])
perimeter = float(fline[6])
length = float(fline[8])
x0 = float(fline[9])
y0 = float(fline[10])
x02 = float(fline[11])
y02 = float(fline[12])
theta1 = float(fline[13])
theta2 = float(fline[14])
#print area
if area<500:
#print "imfile: ", imfile, "pfile: ", pfile
imfile.readline()
sline = imfile.readline()
continue
#print "not continued"
#print icount
aMikro = a*0.13
bMikro = b*0.13
a2Mikro = a2*0.13
b2Mikro = b2*0.13
areaMikro = area * 0.13
perimeterMikro = perimeter * 0.13
lengthMikro = length * 0.13
x0Mikro = x0 * 0.13
y0Mikro = y0 * 0.13
        # Bin this cell's statistics (pixel and micrometer variants) into one
        # of twelve length classes bounded by minlength < l1 < ... < l12.
        if minlength < length <= l1:
            #print "l1", l1, pfile, fline[0]
            a_averagelistl1.append(a)
            a2_averagelistl1.append(a2)
            b_averagelistl1.append(b)
            b2_averagelistl1.append(b2)
            length_averagelistl1.append(length)
            area_averagelistl1.append(area)
            perimeter_averagelistl1.append(perimeter)
            aMikro_averagelistl1.append(aMikro)
            bMikro_averagelistl1.append(bMikro)
            a2Mikro_averagelistl1.append(a2Mikro)
            b2Mikro_averagelistl1.append(b2Mikro)
            areaMikro_averagelistl1.append(areaMikro)
            perimeterMikro_averagelistl1.append(perimeterMikro)
            lengthMikro_averagelistl1.append(lengthMikro)
            x0_averagelistl1.append(x0)
            y0_averagelistl1.append(y0)
            x0Mikro_averagelistl1.append(x0Mikro)
            y0Mikro_averagelistl1.append(y0Mikro)
        elif l1 < length <= l2:
            a_averagelistl2.append(a)
            a2_averagelistl2.append(a2)
            b_averagelistl2.append(b)
            b2_averagelistl2.append(b2)
            length_averagelistl2.append(length)
            area_averagelistl2.append(area)
            perimeter_averagelistl2.append(perimeter)
            aMikro_averagelistl2.append(aMikro)
            bMikro_averagelistl2.append(bMikro)
            a2Mikro_averagelistl2.append(a2Mikro)
            b2Mikro_averagelistl2.append(b2Mikro)
            areaMikro_averagelistl2.append(areaMikro)
            perimeterMikro_averagelistl2.append(perimeterMikro)
            lengthMikro_averagelistl2.append(lengthMikro)
            x0_averagelistl2.append(x0)
            y0_averagelistl2.append(y0)
            x0Mikro_averagelistl2.append(x0Mikro)
            y0Mikro_averagelistl2.append(y0Mikro)
        elif l2 < length <= l3:
            a_averagelistl3.append(a)
            a2_averagelistl3.append(a2)
            b_averagelistl3.append(b)
            b2_averagelistl3.append(b2)
            length_averagelistl3.append(length)
            area_averagelistl3.append(area)
            perimeter_averagelistl3.append(perimeter)
            aMikro_averagelistl3.append(aMikro)
            bMikro_averagelistl3.append(bMikro)
            a2Mikro_averagelistl3.append(a2Mikro)
            b2Mikro_averagelistl3.append(b2Mikro)
            areaMikro_averagelistl3.append(areaMikro)
            perimeterMikro_averagelistl3.append(perimeterMikro)
            lengthMikro_averagelistl3.append(lengthMikro)
            x0_averagelistl3.append(x0)
            y0_averagelistl3.append(y0)
            x0Mikro_averagelistl3.append(x0Mikro)
            y0Mikro_averagelistl3.append(y0Mikro)
        elif l3 < length <= l4:
            a_averagelistl4.append(a)
            a2_averagelistl4.append(a2)
            b_averagelistl4.append(b)
            b2_averagelistl4.append(b2)
            length_averagelistl4.append(length)
            area_averagelistl4.append(area)
            perimeter_averagelistl4.append(perimeter)
            aMikro_averagelistl4.append(aMikro)
            bMikro_averagelistl4.append(bMikro)
            a2Mikro_averagelistl4.append(a2Mikro)
            b2Mikro_averagelistl4.append(b2Mikro)
            areaMikro_averagelistl4.append(areaMikro)
            perimeterMikro_averagelistl4.append(perimeterMikro)
            lengthMikro_averagelistl4.append(lengthMikro)
            x0_averagelistl4.append(x0)
            y0_averagelistl4.append(y0)
            x0Mikro_averagelistl4.append(x0Mikro)
            y0Mikro_averagelistl4.append(y0Mikro)
        elif l4 < length <= l5:
            a_averagelistl5.append(a)
            a2_averagelistl5.append(a2)
            b_averagelistl5.append(b)
            b2_averagelistl5.append(b2)
            length_averagelistl5.append(length)
            area_averagelistl5.append(area)
            perimeter_averagelistl5.append(perimeter)
            aMikro_averagelistl5.append(aMikro)
            bMikro_averagelistl5.append(bMikro)
            a2Mikro_averagelistl5.append(a2Mikro)
            b2Mikro_averagelistl5.append(b2Mikro)
            areaMikro_averagelistl5.append(areaMikro)
            perimeterMikro_averagelistl5.append(perimeterMikro)
            lengthMikro_averagelistl5.append(lengthMikro)
            x0_averagelistl5.append(x0)
            y0_averagelistl5.append(y0)
            x0Mikro_averagelistl5.append(x0Mikro)
            y0Mikro_averagelistl5.append(y0Mikro)
        elif l5 < length <= l6:
            a_averagelistl6.append(a)
            a2_averagelistl6.append(a2)
            b_averagelistl6.append(b)
            b2_averagelistl6.append(b2)
            length_averagelistl6.append(length)
            area_averagelistl6.append(area)
            perimeter_averagelistl6.append(perimeter)
            aMikro_averagelistl6.append(aMikro)
            bMikro_averagelistl6.append(bMikro)
            a2Mikro_averagelistl6.append(a2Mikro)
            b2Mikro_averagelistl6.append(b2Mikro)
            areaMikro_averagelistl6.append(areaMikro)
            perimeterMikro_averagelistl6.append(perimeterMikro)
            lengthMikro_averagelistl6.append(lengthMikro)
            x0_averagelistl6.append(x0)
            y0_averagelistl6.append(y0)
            x0Mikro_averagelistl6.append(x0Mikro)
            y0Mikro_averagelistl6.append(y0Mikro)
        elif l6 < length <= l7:
            a_averagelistl7.append(a)
            a2_averagelistl7.append(a2)
            b_averagelistl7.append(b)
            b2_averagelistl7.append(b2)
            length_averagelistl7.append(length)
            area_averagelistl7.append(area)
            perimeter_averagelistl7.append(perimeter)
            aMikro_averagelistl7.append(aMikro)
            bMikro_averagelistl7.append(bMikro)
            a2Mikro_averagelistl7.append(a2Mikro)
            b2Mikro_averagelistl7.append(b2Mikro)
            areaMikro_averagelistl7.append(areaMikro)
            perimeterMikro_averagelistl7.append(perimeterMikro)
            lengthMikro_averagelistl7.append(lengthMikro)
            x0_averagelistl7.append(x0)
            y0_averagelistl7.append(y0)
            x0Mikro_averagelistl7.append(x0Mikro)
            y0Mikro_averagelistl7.append(y0Mikro)
        elif l7 < length <= l8:
            a_averagelistl8.append(a)
            a2_averagelistl8.append(a2)
            b_averagelistl8.append(b)
            b2_averagelistl8.append(b2)
            length_averagelistl8.append(length)
            area_averagelistl8.append(area)
            perimeter_averagelistl8.append(perimeter)
            aMikro_averagelistl8.append(aMikro)
            bMikro_averagelistl8.append(bMikro)
            a2Mikro_averagelistl8.append(a2Mikro)
            b2Mikro_averagelistl8.append(b2Mikro)
            areaMikro_averagelistl8.append(areaMikro)
            perimeterMikro_averagelistl8.append(perimeterMikro)
            lengthMikro_averagelistl8.append(lengthMikro)
            x0_averagelistl8.append(x0)
            y0_averagelistl8.append(y0)
            x0Mikro_averagelistl8.append(x0Mikro)
            y0Mikro_averagelistl8.append(y0Mikro)
        elif l8 < length <= l9:
            a_averagelistl9.append(a)
            a2_averagelistl9.append(a2)
            b_averagelistl9.append(b)
            b2_averagelistl9.append(b2)
            length_averagelistl9.append(length)
            area_averagelistl9.append(area)
            perimeter_averagelistl9.append(perimeter)
            aMikro_averagelistl9.append(aMikro)
            bMikro_averagelistl9.append(bMikro)
            a2Mikro_averagelistl9.append(a2Mikro)
            b2Mikro_averagelistl9.append(b2Mikro)
            areaMikro_averagelistl9.append(areaMikro)
            perimeterMikro_averagelistl9.append(perimeterMikro)
            lengthMikro_averagelistl9.append(lengthMikro)
            x0_averagelistl9.append(x0)
            y0_averagelistl9.append(y0)
            x0Mikro_averagelistl9.append(x0Mikro)
            y0Mikro_averagelistl9.append(y0Mikro)
        elif l9 < length <= l10:
            a_averagelistl10.append(a)
            a2_averagelistl10.append(a2)
            b_averagelistl10.append(b)
            b2_averagelistl10.append(b2)
            length_averagelistl10.append(length)
            area_averagelistl10.append(area)
            perimeter_averagelistl10.append(perimeter)
            aMikro_averagelistl10.append(aMikro)
            bMikro_averagelistl10.append(bMikro)
            a2Mikro_averagelistl10.append(a2Mikro)
            b2Mikro_averagelistl10.append(b2Mikro)
            areaMikro_averagelistl10.append(areaMikro)
            perimeterMikro_averagelistl10.append(perimeterMikro)
            lengthMikro_averagelistl10.append(lengthMikro)
            x0_averagelistl10.append(x0)
            y0_averagelistl10.append(y0)
            x0Mikro_averagelistl10.append(x0Mikro)
            y0Mikro_averagelistl10.append(y0Mikro)
        elif l10 < length <= l11:
            a_averagelistl11.append(a)
            a2_averagelistl11.append(a2)
            b_averagelistl11.append(b)
            b2_averagelistl11.append(b2)
            length_averagelistl11.append(length)
            area_averagelistl11.append(area)
            perimeter_averagelistl11.append(perimeter)
            aMikro_averagelistl11.append(aMikro)
            bMikro_averagelistl11.append(bMikro)
            a2Mikro_averagelistl11.append(a2Mikro)
            b2Mikro_averagelistl11.append(b2Mikro)
            areaMikro_averagelistl11.append(areaMikro)
            perimeterMikro_averagelistl11.append(perimeterMikro)
            lengthMikro_averagelistl11.append(lengthMikro)
            x0_averagelistl11.append(x0)
            y0_averagelistl11.append(y0)
            x0Mikro_averagelistl11.append(x0Mikro)
            y0Mikro_averagelistl11.append(y0Mikro)
        elif l11 < length <= l12:
            a_averagelistl12.append(a)
            a2_averagelistl12.append(a2)
            b_averagelistl12.append(b)
            b2_averagelistl12.append(b2)
            length_averagelistl12.append(length)
            area_averagelistl12.append(area)
            perimeter_averagelistl12.append(perimeter)
            aMikro_averagelistl12.append(aMikro)
            bMikro_averagelistl12.append(bMikro)
            a2Mikro_averagelistl12.append(a2Mikro)
            b2Mikro_averagelistl12.append(b2Mikro)
            areaMikro_averagelistl12.append(areaMikro)
            perimeterMikro_averagelistl12.append(perimeterMikro)
            lengthMikro_averagelistl12.append(lengthMikro)
            x0_averagelistl12.append(x0)
            y0_averagelistl12.append(y0)
            x0Mikro_averagelistl12.append(x0Mikro)
            y0Mikro_averagelistl12.append(y0Mikro)
        # Recenter: place the first ellipse center at the image center and
        # keep the second center's relative offset.
        #x0_c = max_x/3
        x0_c = max_x/2
        if x02>x0:
            x02_c = max_x/2+(x02-x0)
        else:
            x02_c = max_x/2-(x0-x02)
        #y0_c = max_y/3
        y0_c = max_y/2
        if y02>y0:
            y02_c = max_y/2+(y02-y0)
        else:
            y02_c = max_y/2-(y0-y02)
        el1=makeEllipse1(x0_c,y0_c,a,b,theta1)
        el2=makeEllipse1(x02_c,y02_c,a2,b2,theta2)
        Ar1 = makeArray(el1,max_x,max_y)
        Ar2 = makeArray(el2,max_x,max_y)
        both = Ar1+Ar2
        #MakeImage(both, "")
        #ShowImage()
        #line through ellipse centers
        ###############################
        # Horizontal reference line at y0_c across the full image width.
        xli = np.linspace(0,max_x-1,max_x)
        yli = np.linspace(y0_c,y0_c, max_x)
        x_ar=np.array([xli])
        y_ar=np.array([yli])
        horizontal_line = np.append(x_ar,y_ar, axis = 0)
        if x02_c == x0_c:
            #print "len5if"
            # Centers share an x -- fall back to a horizontal segment.
            xline = np.linspace(0,max_x/3-1,max_x/3)
            yline = np.linspace(y0_c,y0_c, max_x/3)
            x_array=np.array([xline])
            y_array=np.array([yline])
            array_line = np.append(x_array,y_array, axis = 0)
        else:
            # Line through both centers, y clamped into the image rows.
            m = (y02_c-y0_c)/(x02_c-x0_c)
            xline=np.linspace(0,max_x/3-1,max_x/3)
            yline = np.around(m*(xline-x0_c)+y0_c)
            for yl in range(len(yline)):
                if yline[yl] >= max_y:
                    yline[yl]=max_y-1
                elif yline[yl] < 0:
                    yline[yl] = 0
            x_array=np.array([xline])
            y_array=np.array([yline])
            array_line = np.append(x_array,y_array, axis = 0)
        ua = makeArray(horizontal_line,max_x,max_y)
        va = makeArray(array_line,max_x,max_y)
        both = Ar1+Ar2+ua+va
        # Angle between the horizontal and the center-connecting vector;
        # sign flipped for the lower half-plane cases.
        u = (horizontal_line[0][5]-horizontal_line[0][0],horizontal_line[1][5]-horizontal_line[1][0])
        #v = (array_line[0][5]-array_line[0][0],array_line[1][5]-array_line[1][0])
        v = (x02_c-x0_c,y02_c-y0_c)
        theta = calculate(u,v)
        oldtheta = theta
        if x02>x0 and y02>y0:
            theta =-theta
        elif x02<x0 and y02>y0:
            theta=-theta
        #print "new angle: ", theta
        #print "old theta: ", oldtheta
        # Rebuild both ellipses on a horizontal axis, second one at distance l.
        eltheta1 =theta+theta1
        eltheta2 = theta+theta2
        l = np.sqrt((y02_c-y0_c)**2+(x02_c-x0_c)**2)
        El1 = makeEllipse1(x0_c,y0_c,a,b,eltheta1)
        El2 = makeEllipse1(x0_c+l,y0_c,a2,b2,eltheta2)
        arra1=makeArray(El1,max_x,max_y)
        arra2 = makeArray(El2,max_x,max_y)
        barray = arra1+arra2
        arra1filled = ndimage.binary_fill_holes(arra1).astype(int)
        arra2filled = ndimage.binary_fill_holes(arra2).astype(int)
        arrafilledboth = arra1filled + arra2filled
        # Blank outline pixels where the two filled ellipses overlap.
        # NOTE(review): loop variable u shadows the direction vector u above
        # (the vector is no longer needed at this point).
        for h in range(np.size(arrafilledboth,0)):
            for u in range(np.size(arrafilledboth,1)):
                if arrafilledboth[h][u] > 1:
                    barray[h][u] = 0
        # Accumulate the aligned ellipse-outline image into its length class.
        if minlength < length <= l1:
            allarrayl1 = allarrayl1 + barray
            allarrayreducedl1 = allarrayreducedl1 + barray
        elif l1 < length <= l2:
            allarrayl2 = allarrayl2 + barray
            allarrayreducedl2 = allarrayreducedl2 + barray
        elif l2 < length <= l3:
            allarrayl3 = allarrayl3 + barray
            allarrayreducedl3 = allarrayreducedl3 + barray
        elif l3 < length <= l4:
            allarrayl4 = allarrayl4 + barray
            allarrayreducedl4 = allarrayreducedl4 + barray
        elif l4 < length <= l5:
            allarrayl5 = allarrayl5 + barray
            allarrayreducedl5 = allarrayreducedl5 + barray
        elif l5 < length <= l6:
            allarrayl6 = allarrayl6 + barray
            allarrayreducedl6 = allarrayreducedl6 + barray
            #MakeImagetrMicro(allarrayl6, '', x0_c, y0_c)
            #plt.show()
        elif l6 < length <= l7:
            allarrayl7 = allarrayl7 + barray
            allarrayreducedl7 = allarrayreducedl7 + barray
        elif l7 < length <= l8:
            allarrayl8 = allarrayl8 + barray
            allarrayreducedl8 = allarrayreducedl8 + barray
        elif l8 < length <= l9:
            allarrayl9 = allarrayl9 + barray
            allarrayreducedl9 = allarrayreducedl9 + barray
        elif l9 < length <= l10:
            allarrayl10 = allarrayl10 + barray
            allarrayreducedl10 = allarrayreducedl10 + barray
            #MakeImagetr(allarrayl10,'',x0_c,y0_c)
            #plt.show()
            #MakeImagetr(allarrayl10,'',x0_c,y0_c)
            #plt.show()
        elif l10 < length <= l11:
            allarrayl11 = allarrayl11 + barray
            allarrayreducedl11 = allarrayreducedl11 + barray
        elif l11 < length <= l12:
            allarrayl12 = allarrayl12 + barray
            allarrayreducedl12 = allarrayreducedl12 + barray
        # Parse the matching outline record: "(x, y)" pixel tuples -> 2xN array.
        A = []
        matrix =sline
        tuple_rx = re.compile("\(\s*(\d+),\s*(\d+)\)")
        for match in tuple_rx.finditer(matrix):
            A.append((int(match.group(1)),int(match.group(2))))
        c = []
        d = []
        for i in A:
            c.append(i[0])
            d.append(i[1])
        A=(c,d)
        #print "len: ", len(A[0])
        #print A
        # NOTE(review): this continue skips the two imfile.readline() calls at
        # the bottom of the loop, so sline is NOT advanced for the next
        # parameter line -- possible pfile/imfile desync; confirm intended.
        if len(A[0]) == 0:
            #print "EMPTY"
            continue
        #print "NOT EMPTY"
        #print "sline: ", sline
        #print"c,d: ", c, d
        #A=sorted(A)
        A=np.array(A)
        imAr = makeArray(A,max_x,max_y)
        #MakeImage(imAr, "")
        #ShowImage()
        #a=40
        #b=10
        #a2 = 20
        #b2 = 5
        #x0=40
        #y0=10
        #x02=70
        #y02=10
        el1=makeEllipse1(x0,y0,a,b,0)
        el2=makeEllipse1(x02,y02,a2,b2,0)
        #pl.axis([-100,100,-100,100])
        #pl.plot(A[0,:],A[1,:],'ro')
        #pl.show()
        # Shift outline and ellipses so rotation happens about the first
        # ellipse center.
        for i in range(np.size(A[0])): #so that rotation is about the center
            A[0][i] = A[0][i]-x0
            A[1][i] = A[1][i]-y0
        for i in range(np.size(el1[0])):
            el1[0][i] = el1[0][i]-x0
            el1[1][i] = el1[1][i]-y0
            el2[0][i] = el2[0][i]-x0
            el2[1][i] = el2[1][i]-y0
        #pl.axis([-100,100,-100,100])
        #pl.plot(el1[0,:],el1[1,:])
        #pl.plot(el2[0,:],el2[1,:])
        #pl.show()
        #pl.axis([-100,100,-100,100])
        #pl.plot(A[0,:],A[1,:])
        #pl.show()
        midp = [[x0_c],[y0_c]]
        midp = np.array(midp)
        #pl.axis([-100,100,-100,100])
        #pl.plot(A[0,:],A[1,:],el1[0,:],el1[1,:],midp[0,:],midp[1,:],'ro')
        #pl.show()
        # Rotate the outline by theta so the cell axis is horizontal; keep
        # independent copies for the mirrored (Pos) and half (FH/SH) variants.
        RotEl = rotate(el1,theta)
        RotEl2 = rotate(el2,theta)
        RotA = rotate(A,theta)
        RotAPos = rotate(A,theta)
        RotAmaxx = np.amax(RotA[0])
        RotAmaxy = np.amax(RotA[1])
        ###########################################here!!!!!!!!#########################
        RotAFH = rotate(A,theta)
        RotASH = rotate(A,theta)
        #print "xo: ", x0, "xo_c: ", x0_c, "y0: ", y0, "y0_c: ", y0_c
        # Move the (still origin-centered) raw ellipses back to image coords.
        for i in range(np.size(el1[0])):
            el1[0][i] = el1[0][i]+x0_c
            el1[1][i] = el1[1][i]+y0_c
            el2[0][i] = el2[0][i]+x0_c
            el2[1][i] = el2[1][i]+y0_c
        # RotASH = RotA
        # print np.size(A[0])
        # print np.size(RotA[0])
        # print RotA
        # print range(np.size(A[0]))
        # print RotA[0][0]
        # Split the rotated outline into first half (x > 0) and second half
        # (x <= 0); the unused half of each copy is zeroed out.
        for ir in range(np.size(A[0])):
        #    print a
            if RotA[0][ir]>0:
                RotAFH[0][ir] = RotA[0][ir]
                RotAFH[1][ir] = RotA[1][ir]
                RotASH[0][ir] = 0
                RotASH[1][ir] = 0
            else:
                RotASH[0][ir] = RotA[0][ir]
                RotASH[1][ir] = RotA[1][ir]
                RotAFH[0][ir] = 0
                RotAFH[1][ir] = 0
        #pl.axis([-100,100,-100,100])
        #pl.plot(RotA[0,:],RotA[1,:])
        #pl.show()
        #pl.axis([-100,100,-100,100])
        #pl.plot(RotAFH[0,:],RotAFH[1,:])
        #pl.show()
        #pl.axis([-100,100,-100,100])
        #pl.plot(RotA[0,:],RotA[1,:],midp[0,:],midp[1,:],'ro')
        #pl.show()
        zarray = makeArray(RotA,max_x,max_y)
        #MakeImage(zarray, '')
        #plt.show()
        # Mirror the upper-half points in the Pos copy, then translate all
        # variants back to image coordinates.
        for i in range(np.size(A[0])):
            if RotA[1][i] > 0:
                RotAPos[1][i] = -RotAPos[1][i]
            RotA[0][i] = RotA[0][i]+x0_c
            RotA[1][i] = RotA[1][i]+y0_c
            RotAPos[0][i] = RotAPos[0][i]+x0_c
            RotAPos[1][i] = RotAPos[1][i]+y0_c
            RotAFH[0][i] = RotAFH[0][i]+x0_c
            RotAFH[1][i] = RotAFH[1][i] + y0_c
            RotASH[0][i] = RotASH[0][i]+x0_c
            RotASH[1][i] = RotASH[1][i] + y0_c
        RotAminy = np.amin(RotA[1])
        RotAminx = np.amin(RotA[0])
        # NOTE(review): maxx/maxy below read rows [1]/[0], the reverse of the
        # assignments above; neither value is used afterwards -- confirm.
        RotAmaxx = np.amax(RotA[1])
        RotAmaxy = np.amax(RotA[0])
        #print "area: ", area, "icount: ", icount
        #print "maxx: ", RotAmaxx, "maxy: ", RotAmaxy
        # Clamp points back into the image if the shift pushed them negative.
        if RotAminx < 0:
            #print "RotAminx kleiner 0"
            RotA[0] = RotA[0]-RotAminx
            RotAPos[0] = RotAPos[0]-RotAminx
        if RotAminy < 0:
            #print "RotAminy kleiner 0"
            RotA[1] = RotA[1]-RotAminy
            RotAPos[1] = RotAPos[1]-RotAminy
        ellipse1Array = makeArray(el1, max_x, max_y)
        #print "RotA: ", RotA, length,area
        # Rasterize each outline variant into a max_y x max_x image.
        imageArray = makeArray(RotA,max_x,max_y)
        imageArrayPos = makeArray(RotAPos,max_x,max_y)
        imageArrayFH = makeArray(RotAFH,max_x,max_y)
        imageArraySH = makeArray(RotASH,max_x,max_y)
        #print x0,y0,x0_c,y0_c
        MidArray = makeArray(midp,max_x,max_y)
        #pl.axis([-100,100,-100,100])
        #pl.plot(RotA[0,:],RotA[1,:],RotEl[0,:],RotEl[1,:],midp[0,:],midp[1,:],'ro')
        #pl.show()
        #MakeImage(MidArray,'')
        #plt.show()
        #MakeImage(imageArray,'')
        #plt.show()
        #MakeImage2(imageArray,MidArray)
        #plt.show()
        # Accumulate the rasterized outline variants into their length class.
        if minlength < length <= l1:
            #print "l1: ",length
            allcellsl1 = allcellsl1 + imageArray
            allcellsPosl1 = allcellsPosl1 + imageArrayPos
            allcellsFHl1 = allcellsFHl1 + imageArrayFH
            allcellsSHl1 = allcellsSHl1 + imageArraySH
            #MakeImage(imageArray, "test")
            #plt.show()
        elif l1 < length <= l2:
            allcellsl2 = allcellsl2 + imageArray
            allcellsPosl2 = allcellsPosl2 + imageArrayPos
            allcellsFHl2 = allcellsFHl2 + imageArrayFH
            allcellsSHl2 = allcellsSHl2 + imageArraySH
        elif l2 < length <= l3:
            allcellsl3 = allcellsl3 + imageArray
            allcellsPosl3 = allcellsPosl3 + imageArrayPos
            allcellsFHl3 = allcellsFHl3 + imageArrayFH
            allcellsSHl3 = allcellsSHl3 + imageArraySH
        elif l3 < length <= l4:
            allcellsl4 = allcellsl4 + imageArray
            allcellsPosl4 = allcellsPosl4 + imageArrayPos
            allcellsFHl4 = allcellsFHl4 + imageArrayFH
            allcellsSHl4 = allcellsSHl4 + imageArraySH
        elif l4 < length <= l5:
            allcellsl5 = allcellsl5 + imageArray
            allcellsPosl5 = allcellsPosl5 + imageArrayPos
            allcellsFHl5 = allcellsFHl5 + imageArrayFH
            allcellsSHl5 = allcellsSHl5 + imageArraySH
        elif l5 < length <= l6:
            allcellsl6 = allcellsl6 + imageArray
            allcellsPosl6 = allcellsPosl6 + imageArrayPos
            allcellsFHl6 = allcellsFHl6 + imageArrayFH
            allcellsSHl6 = allcellsSHl6 + imageArraySH
        elif l6 < length <= l7:
            allcellsl7 = allcellsl7 + imageArray
            allcellsPosl7 = allcellsPosl7 + imageArrayPos
            allcellsFHl7 = allcellsFHl7 + imageArrayFH
            allcellsSHl7 = allcellsSHl7 + imageArraySH
        elif l7 < length <= l8:
            allcellsl8 = allcellsl8 + imageArray
            allcellsPosl8 = allcellsPosl8 + imageArrayPos
            allcellsFHl8 = allcellsFHl8 + imageArrayFH
            allcellsSHl8 = allcellsSHl8 + imageArraySH
        elif l8 < length <= l9:
            allcellsl9 = allcellsl9 + imageArray
            allcellsPosl9 = allcellsPosl9 + imageArrayPos
            allcellsFHl9 = allcellsFHl9 + imageArrayFH
            allcellsSHl9 = allcellsSHl9 + imageArraySH
            #MakeImage(imageArray,'')
            #plt.show()
            #MakeImage2(imageArray,MidArray,ellipse1Array)
            #plt.show()
            #MakeImagetr(allcellsl9, MidArray,'', x0_c, y0_c)
            #plt.show()
        elif l9 < length <= l10:
            allcellsl10 = allcellsl10 + imageArray
            allcellsPosl10 = allcellsPosl10 + imageArrayPos
            allcellsFHl10 = allcellsFHl10 + imageArrayFH
            allcellsSHl10 = allcellsSHl10 + imageArraySH
        elif l10 < length <= l11:
            allcellsl11 = allcellsl11 + imageArray
            allcellsPosl11 = allcellsPosl11 + imageArrayPos
            allcellsFHl11 = allcellsFHl11 + imageArrayFH
            allcellsSHl11 = allcellsSHl11 + imageArraySH
        elif l11 < length <= l12:
            allcellsl12 = allcellsl12 + imageArray
            allcellsPosl12 = allcellsPosl12 + imageArrayPos
            allcellsFHl12 = allcellsFHl12 + imageArrayFH
            allcellsSHl12 = allcellsSHl12 + imageArraySH
        # Advance to the next outline record to stay in step with pfile.
        icount += 1
        imfile.readline()
        sline = imfile.readline()
# Mean ellipse-center coordinates (pixels) for each length class 1-12.
x0av1, y0av1 = np.mean(x0_averagelistl1), np.mean(y0_averagelistl1)
x0av2, y0av2 = np.mean(x0_averagelistl2), np.mean(y0_averagelistl2)
x0av3, y0av3 = np.mean(x0_averagelistl3), np.mean(y0_averagelistl3)
x0av4, y0av4 = np.mean(x0_averagelistl4), np.mean(y0_averagelistl4)
x0av5, y0av5 = np.mean(x0_averagelistl5), np.mean(y0_averagelistl5)
x0av6, y0av6 = np.mean(x0_averagelistl6), np.mean(y0_averagelistl6)
x0av7, y0av7 = np.mean(x0_averagelistl7), np.mean(y0_averagelistl7)
x0av8, y0av8 = np.mean(x0_averagelistl8), np.mean(y0_averagelistl8)
x0av9, y0av9 = np.mean(x0_averagelistl9), np.mean(y0_averagelistl9)
x0av10, y0av10 = np.mean(x0_averagelistl10), np.mean(y0_averagelistl10)
x0av11, y0av11 = np.mean(x0_averagelistl11), np.mean(y0_averagelistl11)
x0av12, y0av12 = np.mean(x0_averagelistl12), np.mean(y0_averagelistl12)
minlength = format(minlength,'.1f')
l1 = format(l1,'.1f')
l2 =format(l2,'.1f')
l3 = format(l3,'.1f')
l4 = format(l4,'.1f')
l5 =format(l5,'.1f')
l6 = format(l6,'.1f')
l7 =format(l7,'.1f')
l8 = format(l8,'.1f')
l9 = format(l9,'.1f')
l10 =format(l10,'.1f')
l11 = format(l11,'.1f')
l12 =format(l12,'.1f')
minlengthMicro = format(minlengthMicro,'.1f')
l1Micro = format(l1Micro,'.1f')
l2Micro =format(l2Micro,'.1f')
l3Micro = format(l3Micro,'.1f')
l4Micro = format(l4Micro,'.1f')
l5Micro =format(l5Micro,'.1f')
l6Micro = format(l6Micro,'.1f')
l7Micro =format(l7Micro,'.1f')
l8Micro = format(l8Micro,'.1f')
l9Micro = format(l9Micro,'.1f')
l10Micro =format(l10Micro,'.1f')
l11Micro = format(l11Micro,'.1f')
l12Micro =format(l12Micro,'.1f')
# Peak pixel count of each accumulated class image; used below to scale the
# images into [0, 1]. (Class 12 is never normalized in this script.)
(maxvl1, maxvl2, maxvl3, maxvl4, maxvl5, maxvl6,
 maxvl7, maxvl8, maxvl9, maxvl10, maxvl11) = (
    np.amax(img) for img in (
        allcellsl1, allcellsl2, allcellsl3, allcellsl4, allcellsl5,
        allcellsl6, allcellsl7, allcellsl8, allcellsl9, allcellsl10,
        allcellsl11))
# Scale each accumulated class image by its own peak so values lie in [0, 1].
# Vectorized replacement for the original element-wise Python double loops:
# identical element-wise division, written in place into the pre-allocated
# allcellsNormalized* arrays.
# NOTE(review): an empty class has peak 0 and produces inf/nan with a numpy
# RuntimeWarning -- the same outcome as the original per-element division.
np.divide(allcellsl1, maxvl1, out=allcellsNormalizedl1)
np.divide(allcellsl2, maxvl2, out=allcellsNormalizedl2)
np.divide(allcellsl3, maxvl3, out=allcellsNormalizedl3)
np.divide(allcellsl4, maxvl4, out=allcellsNormalizedl4)
np.divide(allcellsl5, maxvl5, out=allcellsNormalizedl5)
np.divide(allcellsl6, maxvl6, out=allcellsNormalizedl6)
np.divide(allcellsl7, maxvl7, out=allcellsNormalizedl7)
np.divide(allcellsl8, maxvl8, out=allcellsNormalizedl8)
np.divide(allcellsl9, maxvl9, out=allcellsNormalizedl9)
np.divide(allcellsl10, maxvl10, out=allcellsNormalizedl10)
np.divide(allcellsl11, maxvl11, out=allcellsNormalizedl11)
# Write the normalized average image for each length class, named by its
# micrometer boundaries.
# NOTE(review): the first band's filename originally used the pixel
# boundaries (minlength/l1) while all other bands use the micrometer
# boundaries; unified here to micrometers for consistency.
_norm_out_dir = "/home/marie/Master/Thesis/Average_Images/NormalizedImages/LengthSeries/" + Strain + "/"
_norm_bands = [
    (allcellsNormalizedl1, minlengthMicro, l1Micro),
    (allcellsNormalizedl2, l1Micro, l2Micro),
    (allcellsNormalizedl3, l2Micro, l3Micro),
    (allcellsNormalizedl4, l3Micro, l4Micro),
    (allcellsNormalizedl5, l4Micro, l5Micro),
    (allcellsNormalizedl6, l5Micro, l6Micro),
    (allcellsNormalizedl7, l6Micro, l7Micro),
    (allcellsNormalizedl8, l7Micro, l8Micro),
    (allcellsNormalizedl9, l8Micro, l9Micro),
    (allcellsNormalizedl10, l9Micro, l10Micro),
    (allcellsNormalizedl11, l10Micro, l11Micro),
]
for _img, _lo, _hi in _norm_bands:
    MakeNormalizedImage(_img, '', x0_c-1, y0_c-1)
    ShowImage(_norm_out_dir + str(_lo) + " to " + str(_hi))
#####commented out from here
# Cellfile1Norm = open(MatlabfileNorm + "/" + str(minlength) + "_" + str(l1Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl1:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile1Norm.write(cell2+"\n")
# Cellfile2Norm = open(MatlabfileNorm + "/" + str(l1Micro) + "_" + str(l2Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl2:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile2Norm.write(cell2+"\n")
# Cellfile3Norm = open(MatlabfileNorm + "/" + str(l2Micro) + "_" + str(l3Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl3:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile3Norm.write(cell2+"\n")
# Cellfile4Norm = open(MatlabfileNorm + "/" + str(l3Micro) + "_" + str(l4Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl4:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile4Norm.write(cell2+"\n")
# Cellfile5Norm = open(MatlabfileNorm + "/" + str(l4Micro) + "_" + str(l5Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl5:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile5Norm.write(cell2+"\n")
# Cellfile6Norm = open(MatlabfileNorm + "/" + str(l5Micro) + "_" + str(l6Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl6:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile6Norm.write(cell2+"\n")
# Cellfile7Norm = open(MatlabfileNorm + "/" + str(l6Micro) + "_" + str(l7Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl7:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile7Norm.write(cell2+"\n")
# Cellfile8Norm = open(MatlabfileNorm + "/" + str(l7Micro) + "_" + str(l8Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl8:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile8Norm.write(cell2+"\n")
# Cellfile9Norm = open(MatlabfileNorm + "/" + str(l8Micro) + "_" + str(l9Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl9:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile9Norm.write(cell2+"\n")
# Cellfile10Norm = open(MatlabfileNorm + "/" + str(l9Micro) + "_" + str(l10Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl10:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile10Norm.write(cell2+"\n")
# Cellfile11Norm = open(MatlabfileNorm + "/" + str(l10Micro) + "_" + str(l11Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl11:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile11Norm.write(cell2+"\n")
# Cellfile12Norm = open(MatlabfileNorm + "/" + str(l11Micro) + "_" + str(l12Micro) + "_Cells.txt","w")
# for cell in allcellsNormalizedl12:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile12Norm.write(cell2+"\n")
###commented out up to here
# MakeImagetr(allcellsl1, str(minlength) + " pixel to " + str(l1) + " pixel",x0_c-1,y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+str(minlength) + " to " + str(l1) + "_Cells")
# MakeImagetr(allcellsl2,str(l1) + " pixel to " + str(l2) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l1) + " to " + str(l2) + "_Cells")
# MakeImagetr(allcellsl3,str(l2) + " pixel to "+ str(l3) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l2) + " to " + str(l3) + "_Cells")
# MakeImagetr(allcellsl4,str(l3) + " pixel to " + str(l4) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l3) + " to " + str(l4) + "_Cells")
# MakeImagetr(allcellsl5,str(l4) + " pixel to " + str(l5) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l4) + " to " + str(l5) + "_Cells")
# MakeImagetr(allcellsl6,str(l5) + " pixel to " + str(l6) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l5) + " to " + str(l6)+ "_Cells")
# MakeImagetr(allcellsl7,str(l6) +" pixel to "+ str(l7) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l6) + " to " + str(l7) + "_Cells")
# MakeImagetr(allcellsl8,str(l7) +" pixel to " + str(l8) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l7) + " to " + str(l8) + "_Cells")
# MakeImagetr(allcellsl9,str(l8) + " pixel to " + str(l9) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+ str(l8) + " to " + str(l9)+ "_Cells")
# MakeImagetr(allcellsl10,str(l9) +" pixel to " + str(l10) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+ str(l9) + " to " + str(l10) + "_Cells")
# MakeImagetr(allcellsl11,str(l10) + " pixel to " + str(l11) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+str(l10) + " to " + str(l11)+ "_Cells")
# MakeImagetr(allcellsl12,str(l11) + " pixel to "+ str(l12) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l11) + " to " + str(l12) + "_Cells")
# MakeImagetrMicro(allcellsl1,str(minlengthMicro) + " $\mu$m to " + str(l1Micro)+ " $\mu$m",x0_c-1,y0_c-1)
# ShowImage(ImageLocOut +"Micrometer"+ "/" + "_Cells"+str(minlengthMicro) + "_" + str(l1Micro))
# MakeImagetrMicro(allcellsl2,str(l1Micro) + " $\mu$m to "+ str(l2Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer"+ "/" + "_Cells"+str(l1Micro) + "_" + str(l2Micro))
# MakeImagetrMicro(allcellsl3,str(l2Micro) + " $\mu$m to " + str(l3Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer"+ "/" + "_Cells"+str(l2Micro) + "_" + str(l3Micro))
# MakeImagetrMicro(allcellsl4,str(l3Micro) + " $\mu$m to " + str(l4Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer"+ "/" +"_Cells"+str(l3Micro) + "_" + str(l4Micro))
# MakeImagetrMicro(allcellsl5,str(l4Micro) + " $\mu$m to "+ str(l5Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells"+str(l4Micro) + "_" + str(l5Micro))
# MakeImagetrMicro(allcellsl6,str(l5Micro) + " $\mu$m to " + str(l6Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells"+str(l5Micro) + "_" + str(l6Micro))
# MakeImagetrMicro(allcellsl7,str(l6Micro) + " $\mu$m to " + str(l7Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" + "/" +"_Cells"+str(l6Micro) + "_" + str(l7Micro))
# MakeImagetrMicro(allcellsl8,str(l7Micro) + " $\mu$m to " + str(l8Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells"+str(l7Micro) + "_" + str(l8Micro))
# MakeImagetrMicro(allcellsl9,str(l8Micro) + " $\mu$m to " + str(l9Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Micrometer" + "/" +"_Cells"+str(l8Micro) + "_" + str(l9Micro))
# MakeImagetrMicro(allcellsl10,str(l9Micro) + " $\mu$m to " + str(l10Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Micrometer" +"/" + "_Cells"+str(l9Micro) + "_" + str(l10Micro))
# MakeImagetrMicro(allcellsl11,str(l10Micro) + " $\mu$m to " + str(l11Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Micrometer" +"/" + "_Cells"+str(l10Micro) + "_" + str(l11Micro))
# MakeImagetrMicro(allcellsl12,str(l11Micro) + " $\mu$m to " + str(l12Micro)+ " $\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells"+str(l11Micro) + "_" + str(l12Micro))
# ###################gespiegelt
# MakeImagetr(allcellsPosl1, str(minlength) + " pixel to "+ str(l1) + " pixel",x0_c-1,y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+str(minlength) + " to " + str(l1) + "_PosCells")
# MakeImagetr(allcellsPosl2,str(l1) + " pixel to "+ str(l2) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l1) + " to " + str(l2) + "_PosCells")
# MakeImagetr(allcellsPosl3,str(l2) + " pixel to " + str(l3) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l2) + " to " + str(l3) + "_PosCells")
# MakeImagetr(allcellsPosl4,str(l3) + " pixel to "+ str(l4) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l3) + " to " + str(l4) + "_PosCells")
# MakeImagetr(allcellsPosl5,str(l4) +" pixel to "+ str(l5) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l4) + " to " + str(l5) + "_PosCells")
# MakeImagetr(allcellsPosl6,str(l5) + " pixel to "+ str(l6) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l5) + " to " + str(l6)+ "_PosCells")
# MakeImagetr(allcellsPosl7,str(l6) + " pixel to " + str(l7) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l6) + " to " + str(l7) + "_PosCells")
# MakeImagetr(allcellsPosl8,str(l7) + " pixel to " + str(l8) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l7) + " to " + str(l8) + "_PosCells")
# MakeImagetr(allcellsPosl9,str(l8) + " pixel to " + str(l9) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+ str(l8) + " to " + str(l9)+ "_PosCells")
# MakeImagetr(allcellsPosl10,str(l9) + " pixel to " + str(l10) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+ str(l9) + " to " + str(l10) + "_PosCells")
# MakeImagetr(allcellsPosl11, str(l10) +" pixel to " + str(l11) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Pixel"+ "/"+str(l10) + " to " + str(l11)+ "_PosCells")
# MakeImagetr(allcellsPosl12, str(l11) + " pixel to "+ str(l12) + " pixel",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Pixel"+ "/"+str(l11) + " to " + str(l12) + "_PosCells")
# MakeImagetrMicro(allcellsPosl1,str(minlengthMicro) + "$\mu$m to " + str(l1Micro)+ "$\mu$m",x0_c-1,y0_c-1)
# ShowImage(ImageLocOut +"Micrometer"+ "/" + "_PosCells"+str(minlengthMicro) + "_" + str(l1Micro))
# MakeImagetrMicro(allcellsPosl2,str(l1Micro) + "$\mu$m to "+ str(l2Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer"+ "/" + "_PosCells"+str(l1Micro) + "_" + str(l2Micro))
# MakeImagetrMicro(allcellsPosl3,str(l2Micro) +"$\mu$m to "+ str(l3Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer"+ "/" + "_PosCells"+str(l2Micro) + "_" + str(l3Micro))
# MakeImagetrMicro(allcellsPosl4,str(l3Micro) + "$\mu$m to " + str(l4Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer"+ "/" +"_PosCells"+str(l3Micro) + "_" + str(l4Micro))
# MakeImagetrMicro(allcellsPosl5,str(l4Micro) +"$\mu$m to " + str(l5Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_PosCells"+str(l4Micro) + "_" + str(l5Micro))
# MakeImagetrMicro(allcellsPosl6,str(l5Micro) + "$\mu$m to " + str(l6Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_PosCells"+str(l5Micro) + "_" + str(l6Micro))
# MakeImagetrMicro(allcellsPosl7,str(l6Micro) + "$\mu$m to " + str(l7Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" + "/" +"_PosCells"+str(l6Micro) + "_" + str(l7Micro))
# MakeImagetrMicro(allcellsPosl8, str(l7Micro) +"$\mu$m to " + str(l8Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_PosCells"+str(l7Micro) + "_" + str(l8Micro))
# MakeImagetrMicro(allcellsPosl9, str(l8Micro) +"$\mu$m to " + str(l9Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Micrometer" + "/" +"_PosCells"+str(l8Micro) + "_" + str(l9Micro))
# MakeImagetrMicro(allcellsPosl10,str(l9Micro) +"$\mu$m to " + str(l10Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Micrometer" +"/" + "_PosCells"+str(l9Micro) + "_" + str(l10Micro))
# MakeImagetrMicro(allcellsPosl11,str(l10Micro) + "$\mu$m to " + str(l11Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut +"Micrometer" +"/" + "_PosCells"+str(l10Micro) + "_" + str(l11Micro))
# MakeImagetrMicro(allcellsPosl12, str(l11Micro) + "$\mu$m to " + str(l12Micro)+ "$\mu$m",x0_c-1, y0_c-1)
# ShowImage(ImageLocOut + "Micrometer" +"/" + "_PosCells"+str(l11Micro) + "_" + str(l12Micro))
# Cellfile1 = open(Matlabfile + "/" + str(minlength) + "_" + str(l1Micro) + "_Cells.txt","w")
# for cell in allcellsl1:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile1.write(cell2+"\n")
# Cellfile2 = open(Matlabfile + "/" + str(l1Micro) + "_" + str(l2Micro) + "_Cells.txt","w")
# for cell in allcellsl2:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile2.write(cell2+"\n")
# Cellfile3 = open(Matlabfile + "/" + str(l2Micro) + "_" + str(l3Micro) + "_Cells.txt","w")
# for cell in allcellsl3:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile3.write(cell2+"\n")
# Cellfile4 = open(Matlabfile + "/" + str(l3Micro) + "_" + str(l4Micro) + "_Cells.txt","w")
# for cell in allcellsl4:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile4.write(cell2+"\n")
# Cellfile5 = open(Matlabfile + "/" + str(l4Micro) + "_" + str(l5Micro) + "_Cells.txt","w")
# for cell in allcellsl5:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile5.write(cell2+"\n")
# Cellfile6 = open(Matlabfile + "/" + str(l5Micro) + "_" + str(l6Micro) + "_Cells.txt","w")
# for cell in allcellsl6:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile6.write(cell2+"\n")
# Cellfile7 = open(Matlabfile + "/" + str(l6Micro) + "_" + str(l7Micro) + "_Cells.txt","w")
# for cell in allcellsl7:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile7.write(cell2+"\n")
# Cellfile8 = open(Matlabfile + "/" + str(l7Micro) + "_" + str(l8Micro) + "_Cells.txt","w")
# for cell in allcellsl8:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile8.write(cell2+"\n")
# Cellfile9 = open(Matlabfile + "/" + str(l8Micro) + "_" + str(l9Micro) + "_Cells.txt","w")
# for cell in allcellsl9:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile9.write(cell2+"\n")
# Cellfile10 = open(Matlabfile + "/" + str(l9Micro) + "_" + str(l10Micro) + "_Cells.txt","w")
# for cell in allcellsl10:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile10.write(cell2+"\n")
# Cellfile11 = open(Matlabfile + "/" + str(l10Micro) + "_" + str(l11Micro) + "_Cells.txt","w")
# for cell in allcellsl11:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile11.write(cell2+"\n")
# Cellfile12 = open(Matlabfile + "/" + str(l11Micro) + "_" + str(l12Micro) + "_Cells.txt","w")
# for cell in allcellsl12:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile12.write(cell2+"\n")
icountl1 = len(aMikro_averagelistl1)
print "len amikro", len(aMikro_averagelistl1), len(bMikro_averagelistl1), len(lengthMikro_averagelistl1)
icountl2 = len(aMikro_averagelistl2)
icountl3 = len(aMikro_averagelistl3)
icountl4 = len(aMikro_averagelistl4)
icountl5 = len(aMikro_averagelistl5)
icountl6 = len(aMikro_averagelistl6)
icountl7 = len(aMikro_averagelistl7)
icountl8 = len(aMikro_averagelistl8)
icountl9 = len(aMikro_averagelistl9)
icountl10 = len(aMikro_averagelistl10)
icountl11 = len(aMikro_averagelistl11)
icountl12 = len(aMikro_averagelistl12)
def _stats(values, count):
    """Return (mean, variance, sigma, standard error) for *values*.

    sigma is the population standard deviation (sqrt of np.var with the
    default ddof=0) and the standard error of the mean is
    sigma / sqrt(count) -- exactly what the previous hand-unrolled
    per-bin computations did.
    """
    mean = np.mean(values)
    variance = np.var(values)
    sigma = np.sqrt(variance)
    return mean, variance, sigma, sigma / np.sqrt(count)

# Per length bin l1..l12: mean / variance / sigma / standard error for each
# measured quantity (a, a2, b, b2, area, perimeter, length), in micrometres.
# The individual module-level names are kept unchanged because the report
# written below (and possibly later code) refers to them directly.
#######l1
aMikro_averagel1, aMikro_variancel1, aMikro_sigmal1, aMikro_standardfehlerl1 = _stats(aMikro_averagelistl1, icountl1)
a2Mikro_averagel1, a2Mikro_variancel1, a2Mikro_sigmal1, a2Mikro_standardfehlerl1 = _stats(a2Mikro_averagelistl1, icountl1)
bMikro_averagel1, bMikro_variancel1, bMikro_sigmal1, bMikro_standardfehlerl1 = _stats(bMikro_averagelistl1, icountl1)
b2Mikro_averagel1, b2Mikro_variancel1, b2Mikro_sigmal1, b2Mikro_standardfehlerl1 = _stats(b2Mikro_averagelistl1, icountl1)
areaMikro_averagel1, areaMikro_variancel1, areaMikro_sigmal1, areaMikro_standardfehlerl1 = _stats(areaMikro_averagelistl1, icountl1)
perimeterMikro_averagel1, perimeterMikro_variancel1, perimeterMikro_sigmal1, perimeterMikro_standardfehlerl1 = _stats(perimeterMikro_averagelistl1, icountl1)
lengthMikro_averagel1, lengthMikro_variancel1, lengthMikro_sigmal1, lengthMikro_standardfehlerl1 = _stats(lengthMikro_averagelistl1, icountl1)
#######l2
aMikro_averagel2, aMikro_variancel2, aMikro_sigmal2, aMikro_standardfehlerl2 = _stats(aMikro_averagelistl2, icountl2)
a2Mikro_averagel2, a2Mikro_variancel2, a2Mikro_sigmal2, a2Mikro_standardfehlerl2 = _stats(a2Mikro_averagelistl2, icountl2)
bMikro_averagel2, bMikro_variancel2, bMikro_sigmal2, bMikro_standardfehlerl2 = _stats(bMikro_averagelistl2, icountl2)
b2Mikro_averagel2, b2Mikro_variancel2, b2Mikro_sigmal2, b2Mikro_standardfehlerl2 = _stats(b2Mikro_averagelistl2, icountl2)
areaMikro_averagel2, areaMikro_variancel2, areaMikro_sigmal2, areaMikro_standardfehlerl2 = _stats(areaMikro_averagelistl2, icountl2)
perimeterMikro_averagel2, perimeterMikro_variancel2, perimeterMikro_sigmal2, perimeterMikro_standardfehlerl2 = _stats(perimeterMikro_averagelistl2, icountl2)
lengthMikro_averagel2, lengthMikro_variancel2, lengthMikro_sigmal2, lengthMikro_standardfehlerl2 = _stats(lengthMikro_averagelistl2, icountl2)
#######l3
aMikro_averagel3, aMikro_variancel3, aMikro_sigmal3, aMikro_standardfehlerl3 = _stats(aMikro_averagelistl3, icountl3)
a2Mikro_averagel3, a2Mikro_variancel3, a2Mikro_sigmal3, a2Mikro_standardfehlerl3 = _stats(a2Mikro_averagelistl3, icountl3)
bMikro_averagel3, bMikro_variancel3, bMikro_sigmal3, bMikro_standardfehlerl3 = _stats(bMikro_averagelistl3, icountl3)
b2Mikro_averagel3, b2Mikro_variancel3, b2Mikro_sigmal3, b2Mikro_standardfehlerl3 = _stats(b2Mikro_averagelistl3, icountl3)
areaMikro_averagel3, areaMikro_variancel3, areaMikro_sigmal3, areaMikro_standardfehlerl3 = _stats(areaMikro_averagelistl3, icountl3)
perimeterMikro_averagel3, perimeterMikro_variancel3, perimeterMikro_sigmal3, perimeterMikro_standardfehlerl3 = _stats(perimeterMikro_averagelistl3, icountl3)
lengthMikro_averagel3, lengthMikro_variancel3, lengthMikro_sigmal3, lengthMikro_standardfehlerl3 = _stats(lengthMikro_averagelistl3, icountl3)
#######l4
aMikro_averagel4, aMikro_variancel4, aMikro_sigmal4, aMikro_standardfehlerl4 = _stats(aMikro_averagelistl4, icountl4)
a2Mikro_averagel4, a2Mikro_variancel4, a2Mikro_sigmal4, a2Mikro_standardfehlerl4 = _stats(a2Mikro_averagelistl4, icountl4)
bMikro_averagel4, bMikro_variancel4, bMikro_sigmal4, bMikro_standardfehlerl4 = _stats(bMikro_averagelistl4, icountl4)
b2Mikro_averagel4, b2Mikro_variancel4, b2Mikro_sigmal4, b2Mikro_standardfehlerl4 = _stats(b2Mikro_averagelistl4, icountl4)
areaMikro_averagel4, areaMikro_variancel4, areaMikro_sigmal4, areaMikro_standardfehlerl4 = _stats(areaMikro_averagelistl4, icountl4)
perimeterMikro_averagel4, perimeterMikro_variancel4, perimeterMikro_sigmal4, perimeterMikro_standardfehlerl4 = _stats(perimeterMikro_averagelistl4, icountl4)
lengthMikro_averagel4, lengthMikro_variancel4, lengthMikro_sigmal4, lengthMikro_standardfehlerl4 = _stats(lengthMikro_averagelistl4, icountl4)
#######l5
aMikro_averagel5, aMikro_variancel5, aMikro_sigmal5, aMikro_standardfehlerl5 = _stats(aMikro_averagelistl5, icountl5)
a2Mikro_averagel5, a2Mikro_variancel5, a2Mikro_sigmal5, a2Mikro_standardfehlerl5 = _stats(a2Mikro_averagelistl5, icountl5)
bMikro_averagel5, bMikro_variancel5, bMikro_sigmal5, bMikro_standardfehlerl5 = _stats(bMikro_averagelistl5, icountl5)
b2Mikro_averagel5, b2Mikro_variancel5, b2Mikro_sigmal5, b2Mikro_standardfehlerl5 = _stats(b2Mikro_averagelistl5, icountl5)
areaMikro_averagel5, areaMikro_variancel5, areaMikro_sigmal5, areaMikro_standardfehlerl5 = _stats(areaMikro_averagelistl5, icountl5)
perimeterMikro_averagel5, perimeterMikro_variancel5, perimeterMikro_sigmal5, perimeterMikro_standardfehlerl5 = _stats(perimeterMikro_averagelistl5, icountl5)
lengthMikro_averagel5, lengthMikro_variancel5, lengthMikro_sigmal5, lengthMikro_standardfehlerl5 = _stats(lengthMikro_averagelistl5, icountl5)
#######l6
aMikro_averagel6, aMikro_variancel6, aMikro_sigmal6, aMikro_standardfehlerl6 = _stats(aMikro_averagelistl6, icountl6)
a2Mikro_averagel6, a2Mikro_variancel6, a2Mikro_sigmal6, a2Mikro_standardfehlerl6 = _stats(a2Mikro_averagelistl6, icountl6)
bMikro_averagel6, bMikro_variancel6, bMikro_sigmal6, bMikro_standardfehlerl6 = _stats(bMikro_averagelistl6, icountl6)
b2Mikro_averagel6, b2Mikro_variancel6, b2Mikro_sigmal6, b2Mikro_standardfehlerl6 = _stats(b2Mikro_averagelistl6, icountl6)
areaMikro_averagel6, areaMikro_variancel6, areaMikro_sigmal6, areaMikro_standardfehlerl6 = _stats(areaMikro_averagelistl6, icountl6)
perimeterMikro_averagel6, perimeterMikro_variancel6, perimeterMikro_sigmal6, perimeterMikro_standardfehlerl6 = _stats(perimeterMikro_averagelistl6, icountl6)
lengthMikro_averagel6, lengthMikro_variancel6, lengthMikro_sigmal6, lengthMikro_standardfehlerl6 = _stats(lengthMikro_averagelistl6, icountl6)
#######l7
aMikro_averagel7, aMikro_variancel7, aMikro_sigmal7, aMikro_standardfehlerl7 = _stats(aMikro_averagelistl7, icountl7)
a2Mikro_averagel7, a2Mikro_variancel7, a2Mikro_sigmal7, a2Mikro_standardfehlerl7 = _stats(a2Mikro_averagelistl7, icountl7)
bMikro_averagel7, bMikro_variancel7, bMikro_sigmal7, bMikro_standardfehlerl7 = _stats(bMikro_averagelistl7, icountl7)
b2Mikro_averagel7, b2Mikro_variancel7, b2Mikro_sigmal7, b2Mikro_standardfehlerl7 = _stats(b2Mikro_averagelistl7, icountl7)
areaMikro_averagel7, areaMikro_variancel7, areaMikro_sigmal7, areaMikro_standardfehlerl7 = _stats(areaMikro_averagelistl7, icountl7)
perimeterMikro_averagel7, perimeterMikro_variancel7, perimeterMikro_sigmal7, perimeterMikro_standardfehlerl7 = _stats(perimeterMikro_averagelistl7, icountl7)
lengthMikro_averagel7, lengthMikro_variancel7, lengthMikro_sigmal7, lengthMikro_standardfehlerl7 = _stats(lengthMikro_averagelistl7, icountl7)
#######l8
aMikro_averagel8, aMikro_variancel8, aMikro_sigmal8, aMikro_standardfehlerl8 = _stats(aMikro_averagelistl8, icountl8)
a2Mikro_averagel8, a2Mikro_variancel8, a2Mikro_sigmal8, a2Mikro_standardfehlerl8 = _stats(a2Mikro_averagelistl8, icountl8)
bMikro_averagel8, bMikro_variancel8, bMikro_sigmal8, bMikro_standardfehlerl8 = _stats(bMikro_averagelistl8, icountl8)
b2Mikro_averagel8, b2Mikro_variancel8, b2Mikro_sigmal8, b2Mikro_standardfehlerl8 = _stats(b2Mikro_averagelistl8, icountl8)
areaMikro_averagel8, areaMikro_variancel8, areaMikro_sigmal8, areaMikro_standardfehlerl8 = _stats(areaMikro_averagelistl8, icountl8)
perimeterMikro_averagel8, perimeterMikro_variancel8, perimeterMikro_sigmal8, perimeterMikro_standardfehlerl8 = _stats(perimeterMikro_averagelistl8, icountl8)
lengthMikro_averagel8, lengthMikro_variancel8, lengthMikro_sigmal8, lengthMikro_standardfehlerl8 = _stats(lengthMikro_averagelistl8, icountl8)
#######l9
aMikro_averagel9, aMikro_variancel9, aMikro_sigmal9, aMikro_standardfehlerl9 = _stats(aMikro_averagelistl9, icountl9)
a2Mikro_averagel9, a2Mikro_variancel9, a2Mikro_sigmal9, a2Mikro_standardfehlerl9 = _stats(a2Mikro_averagelistl9, icountl9)
bMikro_averagel9, bMikro_variancel9, bMikro_sigmal9, bMikro_standardfehlerl9 = _stats(bMikro_averagelistl9, icountl9)
b2Mikro_averagel9, b2Mikro_variancel9, b2Mikro_sigmal9, b2Mikro_standardfehlerl9 = _stats(b2Mikro_averagelistl9, icountl9)
areaMikro_averagel9, areaMikro_variancel9, areaMikro_sigmal9, areaMikro_standardfehlerl9 = _stats(areaMikro_averagelistl9, icountl9)
perimeterMikro_averagel9, perimeterMikro_variancel9, perimeterMikro_sigmal9, perimeterMikro_standardfehlerl9 = _stats(perimeterMikro_averagelistl9, icountl9)
lengthMikro_averagel9, lengthMikro_variancel9, lengthMikro_sigmal9, lengthMikro_standardfehlerl9 = _stats(lengthMikro_averagelistl9, icountl9)
#######l10
aMikro_averagel10, aMikro_variancel10, aMikro_sigmal10, aMikro_standardfehlerl10 = _stats(aMikro_averagelistl10, icountl10)
a2Mikro_averagel10, a2Mikro_variancel10, a2Mikro_sigmal10, a2Mikro_standardfehlerl10 = _stats(a2Mikro_averagelistl10, icountl10)
bMikro_averagel10, bMikro_variancel10, bMikro_sigmal10, bMikro_standardfehlerl10 = _stats(bMikro_averagelistl10, icountl10)
b2Mikro_averagel10, b2Mikro_variancel10, b2Mikro_sigmal10, b2Mikro_standardfehlerl10 = _stats(b2Mikro_averagelistl10, icountl10)
areaMikro_averagel10, areaMikro_variancel10, areaMikro_sigmal10, areaMikro_standardfehlerl10 = _stats(areaMikro_averagelistl10, icountl10)
perimeterMikro_averagel10, perimeterMikro_variancel10, perimeterMikro_sigmal10, perimeterMikro_standardfehlerl10 = _stats(perimeterMikro_averagelistl10, icountl10)
lengthMikro_averagel10, lengthMikro_variancel10, lengthMikro_sigmal10, lengthMikro_standardfehlerl10 = _stats(lengthMikro_averagelistl10, icountl10)
#######l11
aMikro_averagel11, aMikro_variancel11, aMikro_sigmal11, aMikro_standardfehlerl11 = _stats(aMikro_averagelistl11, icountl11)
a2Mikro_averagel11, a2Mikro_variancel11, a2Mikro_sigmal11, a2Mikro_standardfehlerl11 = _stats(a2Mikro_averagelistl11, icountl11)
bMikro_averagel11, bMikro_variancel11, bMikro_sigmal11, bMikro_standardfehlerl11 = _stats(bMikro_averagelistl11, icountl11)
b2Mikro_averagel11, b2Mikro_variancel11, b2Mikro_sigmal11, b2Mikro_standardfehlerl11 = _stats(b2Mikro_averagelistl11, icountl11)
areaMikro_averagel11, areaMikro_variancel11, areaMikro_sigmal11, areaMikro_standardfehlerl11 = _stats(areaMikro_averagelistl11, icountl11)
perimeterMikro_averagel11, perimeterMikro_variancel11, perimeterMikro_sigmal11, perimeterMikro_standardfehlerl11 = _stats(perimeterMikro_averagelistl11, icountl11)
lengthMikro_averagel11, lengthMikro_variancel11, lengthMikro_sigmal11, lengthMikro_standardfehlerl11 = _stats(lengthMikro_averagelistl11, icountl11)
#######l12
aMikro_averagel12, aMikro_variancel12, aMikro_sigmal12, aMikro_standardfehlerl12 = _stats(aMikro_averagelistl12, icountl12)
a2Mikro_averagel12, a2Mikro_variancel12, a2Mikro_sigmal12, a2Mikro_standardfehlerl12 = _stats(a2Mikro_averagelistl12, icountl12)
bMikro_averagel12, bMikro_variancel12, bMikro_sigmal12, bMikro_standardfehlerl12 = _stats(bMikro_averagelistl12, icountl12)
b2Mikro_averagel12, b2Mikro_variancel12, b2Mikro_sigmal12, b2Mikro_standardfehlerl12 = _stats(b2Mikro_averagelistl12, icountl12)
areaMikro_averagel12, areaMikro_variancel12, areaMikro_sigmal12, areaMikro_standardfehlerl12 = _stats(areaMikro_averagelistl12, icountl12)
perimeterMikro_averagel12, perimeterMikro_variancel12, perimeterMikro_sigmal12, perimeterMikro_standardfehlerl12 = _stats(perimeterMikro_averagelistl12, icountl12)
lengthMikro_averagel12, lengthMikro_variancel12, lengthMikro_sigmal12, lengthMikro_standardfehlerl12 = _stats(lengthMikro_averagelistl12, icountl12)
# Summary table, first length bin (minlengthMicro .. l1Micro): one row per
# quantity with mean, variance, sigma and standard error, rounded to 2 dp.
# FIX: the header previously concatenated its column names with no
# separators ("Parameter: ValueVarianceSigmaStandard error"); the commas
# now match the comma-separated data rows.
wf.write("Parameter: Value, Variance, Sigma, Standard error" + "\n")
wf.write(str(minlengthMicro) + " to " + str(l1Micro) + "\n")
for _label, _avg, _var, _sig, _err in (
        ("Area in micrometer: ", areaMikro_averagel1, areaMikro_variancel1, areaMikro_sigmal1, areaMikro_standardfehlerl1),
        ("Perimeter in micrometer: ", perimeterMikro_averagel1, perimeterMikro_variancel1, perimeterMikro_sigmal1, perimeterMikro_standardfehlerl1),
        ("Length in micrometer: ", lengthMikro_averagel1, lengthMikro_variancel1, lengthMikro_sigmal1, lengthMikro_standardfehlerl1),
        ("a in micrometer: ", aMikro_averagel1, aMikro_variancel1, aMikro_sigmal1, aMikro_standardfehlerl1),
        ("b in micrometer: ", bMikro_averagel1, bMikro_variancel1, bMikro_sigmal1, bMikro_standardfehlerl1),
        ("a2 in micrometer: ", a2Mikro_averagel1, a2Mikro_variancel1, a2Mikro_sigmal1, a2Mikro_standardfehlerl1),
        ("b2 in micrometer: ", b2Mikro_averagel1, b2Mikro_variancel1, b2Mikro_sigmal1, b2Mikro_standardfehlerl1)):
    wf.write(_label + str(_avg.round(2)) + ", " + str(_var.round(2)) + ", " + str(_sig.round(2)) + ", " + str(_err.round(2)) + "\n")
wf.write("Amount: " + str(icountl1) + "\n" + "\n")
# Summary table, bin l1Micro .. l2Micro.
# FIX: the b2 row previously contained a duplicated separator
# (+", "+", "+), which emitted an extra empty field between the mean and
# the variance; rows now all use the single ", " separator.
wf.write(str(l1Micro) + " to " + str(l2Micro) + "\n")
for _label, _avg, _var, _sig, _err in (
        ("Area in micrometer: ", areaMikro_averagel2, areaMikro_variancel2, areaMikro_sigmal2, areaMikro_standardfehlerl2),
        ("Perimeter in micrometer: ", perimeterMikro_averagel2, perimeterMikro_variancel2, perimeterMikro_sigmal2, perimeterMikro_standardfehlerl2),
        ("Length in micrometer: ", lengthMikro_averagel2, lengthMikro_variancel2, lengthMikro_sigmal2, lengthMikro_standardfehlerl2),
        ("a in micrometer: ", aMikro_averagel2, aMikro_variancel2, aMikro_sigmal2, aMikro_standardfehlerl2),
        ("b in micrometer: ", bMikro_averagel2, bMikro_variancel2, bMikro_sigmal2, bMikro_standardfehlerl2),
        ("a2 in micrometer: ", a2Mikro_averagel2, a2Mikro_variancel2, a2Mikro_sigmal2, a2Mikro_standardfehlerl2),
        ("b2 in micrometer: ", b2Mikro_averagel2, b2Mikro_variancel2, b2Mikro_sigmal2, b2Mikro_standardfehlerl2)):
    wf.write(_label + str(_avg.round(2)) + ", " + str(_var.round(2)) + ", " + str(_sig.round(2)) + ", " + str(_err.round(2)) + "\n")
wf.write("Amount: " + str(icountl2) + "\n" + "\n")
# Summary table, bin l2Micro .. l3Micro: same row layout as the other bins
# (mean, variance, sigma, standard error -- each rounded to 2 dp).
wf.write(str(l2Micro) + " to " + str(l3Micro) + "\n")
for _label, _avg, _var, _sig, _err in (
        ("Area in micrometer: ", areaMikro_averagel3, areaMikro_variancel3, areaMikro_sigmal3, areaMikro_standardfehlerl3),
        ("Perimeter in micrometer: ", perimeterMikro_averagel3, perimeterMikro_variancel3, perimeterMikro_sigmal3, perimeterMikro_standardfehlerl3),
        ("Length in micrometer: ", lengthMikro_averagel3, lengthMikro_variancel3, lengthMikro_sigmal3, lengthMikro_standardfehlerl3),
        ("a in micrometer: ", aMikro_averagel3, aMikro_variancel3, aMikro_sigmal3, aMikro_standardfehlerl3),
        ("b in micrometer: ", bMikro_averagel3, bMikro_variancel3, bMikro_sigmal3, bMikro_standardfehlerl3),
        ("a2 in micrometer: ", a2Mikro_averagel3, a2Mikro_variancel3, a2Mikro_sigmal3, a2Mikro_standardfehlerl3),
        ("b2 in micrometer: ", b2Mikro_averagel3, b2Mikro_variancel3, b2Mikro_sigmal3, b2Mikro_standardfehlerl3)):
    wf.write(_label + str(_avg.round(2)) + ", " + str(_var.round(2)) + ", " + str(_sig.round(2)) + ", " + str(_err.round(2)) + "\n")
wf.write("Amount: " + str(icountl3) + "\n" + "\n")
# Summary table, bin l3Micro .. l4Micro: same row layout as the other bins.
wf.write(str(l3Micro) + " to " + str(l4Micro) + "\n")
for _label, _avg, _var, _sig, _err in (
        ("Area in micrometer: ", areaMikro_averagel4, areaMikro_variancel4, areaMikro_sigmal4, areaMikro_standardfehlerl4),
        ("Perimeter in micrometer: ", perimeterMikro_averagel4, perimeterMikro_variancel4, perimeterMikro_sigmal4, perimeterMikro_standardfehlerl4),
        ("Length in micrometer: ", lengthMikro_averagel4, lengthMikro_variancel4, lengthMikro_sigmal4, lengthMikro_standardfehlerl4),
        ("a in micrometer: ", aMikro_averagel4, aMikro_variancel4, aMikro_sigmal4, aMikro_standardfehlerl4),
        ("b in micrometer: ", bMikro_averagel4, bMikro_variancel4, bMikro_sigmal4, bMikro_standardfehlerl4),
        ("a2 in micrometer: ", a2Mikro_averagel4, a2Mikro_variancel4, a2Mikro_sigmal4, a2Mikro_standardfehlerl4),
        ("b2 in micrometer: ", b2Mikro_averagel4, b2Mikro_variancel4, b2Mikro_sigmal4, b2Mikro_standardfehlerl4)):
    wf.write(_label + str(_avg.round(2)) + ", " + str(_var.round(2)) + ", " + str(_sig.round(2)) + ", " + str(_err.round(2)) + "\n")
wf.write("Amount: " + str(icountl4) + "\n" + "\n")
# Summary table, bin l4Micro .. l5Micro: same row layout as the other bins.
wf.write(str(l4Micro) + " to " + str(l5Micro) + "\n")
for _label, _avg, _var, _sig, _err in (
        ("Area in micrometer: ", areaMikro_averagel5, areaMikro_variancel5, areaMikro_sigmal5, areaMikro_standardfehlerl5),
        ("Perimeter in micrometer: ", perimeterMikro_averagel5, perimeterMikro_variancel5, perimeterMikro_sigmal5, perimeterMikro_standardfehlerl5),
        ("Length in micrometer: ", lengthMikro_averagel5, lengthMikro_variancel5, lengthMikro_sigmal5, lengthMikro_standardfehlerl5),
        ("a in micrometer: ", aMikro_averagel5, aMikro_variancel5, aMikro_sigmal5, aMikro_standardfehlerl5),
        ("b in micrometer: ", bMikro_averagel5, bMikro_variancel5, bMikro_sigmal5, bMikro_standardfehlerl5),
        ("a2 in micrometer: ", a2Mikro_averagel5, a2Mikro_variancel5, a2Mikro_sigmal5, a2Mikro_standardfehlerl5),
        ("b2 in micrometer: ", b2Mikro_averagel5, b2Mikro_variancel5, b2Mikro_sigmal5, b2Mikro_standardfehlerl5)):
    wf.write(_label + str(_avg.round(2)) + ", " + str(_var.round(2)) + ", " + str(_sig.round(2)) + ", " + str(_err.round(2)) + "\n")
wf.write("Amount: " + str(icountl5) + "\n" + "\n")
wf.write(str(l5Micro)+ " to " + str(l6Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel6.round(2))+", " + str(areaMikro_variancel6.round(2))+ ", "+str(areaMikro_sigmal6.round(2))+ ", "+ str(areaMikro_standardfehlerl6.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel6.round(2))+", " +str(perimeterMikro_variancel6.round(2))+ ", "+str(perimeterMikro_sigmal6.round(2))+ ", "+ str(perimeterMikro_standardfehlerl6.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel6.round(2))+", " + str(lengthMikro_variancel6.round(2))+ ", "+str(lengthMikro_sigmal6.round(2))+ ", "+ str(lengthMikro_standardfehlerl6.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel6.round(2)) +", "+ str(aMikro_variancel6.round(2))+ ", "+str(aMikro_sigmal6.round(2))+ ", "+ str(aMikro_standardfehlerl6.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel6.round(2))+", " +str(bMikro_variancel6.round(2))+ ", "+str(bMikro_sigmal6.round(2))+ ", "+ str(bMikro_standardfehlerl6.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel6.round(2))+", " + str(a2Mikro_variancel6.round(2))+ ", "+str(a2Mikro_sigmal6.round(2))+ ", "+ str(a2Mikro_standardfehlerl6.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel6.round(2)) +", "+ str(b2Mikro_variancel6.round(2))+ ", "+str(b2Mikro_sigmal6.round(2))+ ", "+ str(b2Mikro_standardfehlerl6.round(2))+"\n")
wf.write("Amount: " + str(icountl6) + "\n"+ "\n")
wf.write(str(l6Micro)+ " to " + str(l7Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel7.round(2))+", " + str(areaMikro_variancel7.round(2))+ ", "+str(areaMikro_sigmal7.round(2))+ ", "+ str(areaMikro_standardfehlerl7.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel7.round(2)) +", "+str(perimeterMikro_variancel7.round(2))+ ", "+str(perimeterMikro_sigmal7.round(2))+ ", "+ str(perimeterMikro_standardfehlerl7.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel7.round(2))+", " + str(lengthMikro_variancel7.round(2))+ ", "+str(lengthMikro_sigmal7.round(2))+ ", "+ str(lengthMikro_standardfehlerl7.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel7.round(2)) +", "+ str(aMikro_variancel7.round(2))+ ", "+str(aMikro_sigmal7.round(2))+ ", "+ str(aMikro_standardfehlerl7.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel7.round(2))+", " +str(bMikro_variancel7.round(2))+ ", "+str(bMikro_sigmal7.round(2))+ ", "+ str(bMikro_standardfehlerl7.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel7.round(2))+", " + str(a2Mikro_variancel7.round(2))+ ", "+str(a2Mikro_sigmal7.round(2))+ ", "+ str(a2Mikro_standardfehlerl7.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel7.round(2)) +", "+ str(b2Mikro_variancel7.round(2))+ ", "+str(b2Mikro_sigmal7.round(2))+ ", "+ str(b2Mikro_standardfehlerl7.round(2))+"\n")
wf.write("Amount: " + str(icountl7) + "\n"+ "\n")
wf.write(str(l7Micro)+ " to " + str(l8Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel8.round(2)) +", "+ str(areaMikro_variancel8.round(2))+ ", "+str(areaMikro_sigmal8.round(2))+ ", "+ str(areaMikro_standardfehlerl8.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel8.round(2))+", " +str(perimeterMikro_variancel8.round(2))+ ", "+str(perimeterMikro_sigmal8.round(2))+ ", "+ str(perimeterMikro_standardfehlerl8.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel8.round(2)) +", "+ str(lengthMikro_variancel8.round(2))+ ", "+str(lengthMikro_sigmal8.round(2))+ ", "+ str(lengthMikro_standardfehlerl8.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel8.round(2))+", " + str(aMikro_variancel8.round(2))+ ", "+str(aMikro_sigmal8.round(2))+ ", "+ str(aMikro_standardfehlerl8.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel8.round(2)) +", "+str(bMikro_variancel8.round(2))+ ", "+str(bMikro_sigmal8.round(2))+ ", "+ str(bMikro_standardfehlerl8.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel8.round(2)) +", "+ str(a2Mikro_variancel8.round(2))+ ", "+str(a2Mikro_sigmal8.round(2))+ ", "+ str(a2Mikro_standardfehlerl8.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel8.round(2)) +", "+ str(b2Mikro_variancel8.round(2))+ ", "+str(b2Mikro_sigmal8.round(2))+ ", "+ str(b2Mikro_standardfehlerl8.round(2))+"\n")
wf.write("Amount: " + str(icountl8) + "\n"+ "\n")
wf.write(str(l8Micro)+ " to " + str(l9Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel9.round(2))+", " + str(areaMikro_variancel9.round(2))+ ", "+str(areaMikro_sigmal9.round(2))+ ", "+ str(areaMikro_standardfehlerl9.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel9.round(2))+", " +str(perimeterMikro_variancel9.round(2))+ ", "+str(perimeterMikro_sigmal9.round(2))+ ", "+ str(perimeterMikro_standardfehlerl9.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel9.round(2)) +", "+ str(lengthMikro_variancel9.round(2))+ ", "+str(lengthMikro_sigmal9.round(2))+ ", "+ str(lengthMikro_standardfehlerl9.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel9.round(2))+", " + str(aMikro_variancel9.round(2))+ ", "+str(aMikro_sigmal9.round(2))+ ", "+ str(aMikro_standardfehlerl9.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel9.round(2))+", " +str(bMikro_variancel9.round(2))+ ", "+str(bMikro_sigmal9.round(2))+ ", "+ str(bMikro_standardfehlerl9.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel9.round(2))+", " + str(a2Mikro_variancel9.round(2))+ ", "+str(a2Mikro_sigmal9.round(2))+ ", "+ str(a2Mikro_standardfehlerl9.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel9.round(2))+", " + str(b2Mikro_variancel9.round(2))+ ", "+str(b2Mikro_sigmal9.round(2))+ ", "+ str(b2Mikro_standardfehlerl9.round(2))+"\n")
wf.write("Amount: " + str(icountl9) + "\n"+ "\n")
wf.write(str(l9Micro)+ " to " + str(l10Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel10.round(2))+", " + str(areaMikro_variancel10.round(2))+ ", "+str(areaMikro_sigmal10.round(2))+ ", "+ str(areaMikro_standardfehlerl10.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel10.round(2))+", " +str(perimeterMikro_variancel10.round(2))+ ", "+str(perimeterMikro_sigmal10.round(2))+ ", "+ str(perimeterMikro_standardfehlerl10.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel10.round(2))+", " + str(lengthMikro_variancel10.round(2))+ ", "+str(lengthMikro_sigmal10.round(2))+ ", "+ str(lengthMikro_standardfehlerl10.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel10.round(2))+", " + str(aMikro_variancel10.round(2))+ ", "+str(aMikro_sigmal10.round(2))+ ", "+ str(aMikro_standardfehlerl10.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel10.round(2))+", " +str(bMikro_variancel10.round(2))+ ", "+str(bMikro_sigmal10.round(2))+ ", "+ str(bMikro_standardfehlerl10.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel10.round(2))+", "+ str(a2Mikro_variancel10.round(2))+ ", "+str(a2Mikro_sigmal10.round(2))+ ", "+ str(a2Mikro_standardfehlerl10.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel10.round(2)) +", "+ str(b2Mikro_variancel10.round(2))+ ", "+str(b2Mikro_sigmal10.round(2))+ ", "+ str(b2Mikro_standardfehlerl10.round(2))+"\n")
wf.write("Amount: " + str(icountl10) + "\n"+ "\n")
wf.write(str(l10Micro)+ " to " + str(l11Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel11.round(2))+", " + str(areaMikro_variancel11.round(2))+ ", "+str(areaMikro_sigmal11.round(2))+ ", "+ str(areaMikro_standardfehlerl11.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel11.round(2))+", " +str(perimeterMikro_variancel11.round(2))+ ", "+str(perimeterMikro_sigmal11.round(2))+ ", "+ str(perimeterMikro_standardfehlerl11.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel11.round(2))+", " + str(lengthMikro_variancel11.round(2))+ ", "+str(lengthMikro_sigmal11.round(2))+ ", "+ str(lengthMikro_standardfehlerl11.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel11.round(2))+", " + str(aMikro_variancel11.round(2))+ ", "+str(aMikro_sigmal11.round(2))+ ", "+ str(aMikro_standardfehlerl11.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel11.round(2))+", " +str(bMikro_variancel11.round(2))+ ", "+str(bMikro_sigmal11.round(2))+ ", "+ str(bMikro_standardfehlerl11.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel11.round(2)) +", "+ str(a2Mikro_variancel11.round(2))+ ", "+str(a2Mikro_sigmal11.round(2))+ ", "+ str(a2Mikro_standardfehlerl11.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel11.round(2)) +", "+ str(b2Mikro_variancel11.round(2))+ ", "+str(b2Mikro_sigmal11.round(2))+ ", "+ str(b2Mikro_standardfehlerl11.round(2))+"\n")
wf.write("Amount: " + str(icountl11) + "\n"+ "\n")
wf.write(str(l11Micro)+ " to " + str(l12Micro)+"\n")
wf.write("Area in micrometer: " + str(areaMikro_averagel12.round(2))+", " + str(areaMikro_variancel12.round(2))+ ", "+str(areaMikro_sigmal12.round(2))+ ", "+ str(areaMikro_standardfehlerl12.round(2))+"\n")
wf.write("Perimeter in micrometer: " + str(perimeterMikro_averagel12.round(2))+", " +str(perimeterMikro_variancel12.round(2))+ ", "+str(perimeterMikro_sigmal12.round(2))+ ", "+ str(perimeterMikro_standardfehlerl12.round(2))+ "\n")
wf.write("Length in micrometer: " + str(lengthMikro_averagel12.round(2))+", " + str(lengthMikro_variancel12.round(2))+ ", "+str(lengthMikro_sigmal12.round(2))+ ", "+ str(lengthMikro_standardfehlerl12.round(2))+"\n")
wf.write("a in micrometer: " + str(aMikro_averagel12.round(2))+", " + str(aMikro_variancel12.round(2))+ ", "+str(aMikro_sigmal12.round(2))+ ", "+ str(aMikro_standardfehlerl12.round(2))+"\n")
wf.write("b in micrometer: " + str( bMikro_averagel12.round(2)) +", "+str(bMikro_variancel12.round(2))+ ", "+str(bMikro_sigmal12.round(2))+ ", "+ str(bMikro_standardfehlerl12.round(2))+ "\n")
wf.write("a2 in micrometer: " + str(a2Mikro_averagel12.round(2)) +", "+ str(a2Mikro_variancel12.round(2))+ ", "+str(a2Mikro_sigmal12.round(2))+ ", "+ str(a2Mikro_standardfehlerl12.round(2))+"\n")
wf.write("b2 in micrometer: " + str(b2Mikro_averagel12.round(2)) +", "+ str(b2Mikro_variancel12.round(2))+ ", "+str(b2Mikro_sigmal12.round(2))+ ", "+ str(b2Mikro_standardfehlerl12.round(2))+"\n")
wf.write("Amount: " + str(icountl12) + "\n"+ "\n")
wf.write("Amount: " + str(icount))
#####################Splines########################
###############l1
# --- Spline preparation for band l1 ---
# Column-wise pass: transpose so each row of the transposed array is a
# column of the original image; keep only entries equal to that column's
# maximum (written back into the preallocated allcellsPosl1Spline).
allcellsPosl1T = allcellsPosl1.transpose()
for row2 in range(np.size(allcellsPosl1T,0)):
    maxval = np.amax(allcellsPosl1T[row2])
    for value in range(np.size(allcellsPosl1T,1)):
        if allcellsPosl1T[row2][value]>=maxval:
            allcellsPosl1Spline[value][row2]=allcellsPosl1T[row2][value]
# Split the band image into front half (FH) and rear half (RH) along x.
# NOTE(review): max_x/2 relies on Python 2 integer division (the bare
# print statements elsewhere in this script confirm Python 2) — verify
# before any Python 3 migration.
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl1FH[ent][ent2] = allcellsPosl1[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl1RH[ent][ent2] = allcellsPosl1[ent][ent2]
# Row-wise pass on each half: keep only entries equal to the row maximum.
for row2 in range(np.size(allcellsl1FH,0)):
    maxval = np.amax(allcellsl1FH[row2])
    for value in range(np.size(allcellsl1FH,1)):
        if allcellsl1FH[row2][value]>=maxval:
            allcellsl1FHSpl[row2][value]=allcellsl1FH[row2][value]
for row2 in range(np.size(allcellsl1RH,0)):
    maxval = np.amax(allcellsl1RH[row2])
    for value in range(np.size(allcellsl1RH,1)):
        if allcellsl1RH[row2][value]>=maxval:
            allcellsl1RHSpl[row2][value]=allcellsl1RH[row2][value]
# Combine row-wise and column-wise maxima into one candidate image.
BothSplines1 = allcellsl1RHSpl + allcellsl1FHSpl
ispl = max_y/2  # NOTE(review): ispl/mspl are set here but not used in this section
mspl = 0
BothFinl1 = allcellsPosl1Spline + BothSplines1
MaxInt = np.amax(BothFinl1)
#MakeImagetrMicro(BothFinl1, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l1Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l1Micro))
# Suppress pixels below 30% of the maximum intensity.
for row2 in range(np.size(BothFinl1,0)):
    for value in range(np.size(BothFinl1,1)):
        if BothFinl1[row2][value]<MaxInt*0.3:
            BothFinl1[row2][value] = 0
#MakeImagetrMicro(BothFinl1, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l1Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l1Micro))
Splinepointsl1 = []
for val in range(np.size(BothFinl1,1)):
for row in range(np.size(BothFinl1,0)):
if BothFinl1[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl1.append([row,val])
xlistl1 = []
ylistl1 = []
print Splinepointsl1
for i in Splinepointsl1:
xlistl1.append(i[0])
ylistl1.append(i[1])
testp = []
testp = [xlistl1,ylistl1]
#plt.plot(ylistl1,xlistl1,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl1,ImageLocOut + "Micrometer" + "/_Splinel1")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l2
# --- Spline preparation for band l2 (same pipeline as l1) ---
# Column-wise maxima via the transposed image.
allcellsPosl2T = allcellsPosl2.transpose()
for row2 in range(np.size(allcellsPosl2T,0)):
    maxval = np.amax(allcellsPosl2T[row2])
    for value in range(np.size(allcellsPosl2T,1)):
        if allcellsPosl2T[row2][value]>=maxval:
            allcellsPosl2Spline[value][row2]=allcellsPosl2T[row2][value]
# Split into front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl2FH[ent][ent2] = allcellsPosl2[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl2RH[ent][ent2] = allcellsPosl2[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl2FH,0)):
    maxval = np.amax(allcellsl2FH[row2])
    for value in range(np.size(allcellsl2FH,1)):
        if allcellsl2FH[row2][value]>=maxval:
            allcellsl2FHSpl[row2][value]=allcellsl2FH[row2][value]
for row2 in range(np.size(allcellsl2RH,0)):
    maxval = np.amax(allcellsl2RH[row2])
    for value in range(np.size(allcellsl2RH,1)):
        if allcellsl2RH[row2][value]>=maxval:
            allcellsl2RHSpl[row2][value]=allcellsl2RH[row2][value]
# Combine both passes; threshold at 30% of the maximum intensity.
BothSplines2 = allcellsl2RHSpl + allcellsl2FHSpl
ispl = max_y/2
mspl = 0
BothFinl2 = allcellsPosl2Spline + BothSplines2
MaxInt = np.amax(BothFinl2)
#MakeImagetrMicro(BothFinl2, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l2Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l2Micro))
for row2 in range(np.size(BothFinl2,0)):
    for value in range(np.size(BothFinl2,1)):
        if BothFinl2[row2][value]<MaxInt*0.3:
            BothFinl2[row2][value] = 0
#MakeImagetrMicro(BothFinl2, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l2Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l2Micro))
Splinepointsl2 = []
for val in range(np.size(BothFinl2,1)):
for row in range(np.size(BothFinl2,0)):
if BothFinl2[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl2.append([row,val])
xlistl2 = []
ylistl2 = []
print Splinepointsl2
for i in Splinepointsl2:
xlistl2.append(i[0])
ylistl2.append(i[1])
testp = []
testp = [xlistl2,ylistl2]
#plt.plot(ylistl2,xlistl2,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl2,ImageLocOut + "Micrometer" + "/_Splinel2")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l3
# --- Spline preparation for band l3 (same pipeline as l1) ---
allcellsPosl3T = allcellsPosl3.transpose()
for row2 in range(np.size(allcellsPosl3T,0)):
    maxval = np.amax(allcellsPosl3T[row2])
    for value in range(np.size(allcellsPosl3T,1)):
        if allcellsPosl3T[row2][value]>=maxval:
            allcellsPosl3Spline[value][row2]=allcellsPosl3T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl3FH[ent][ent2] = allcellsPosl3[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl3RH[ent][ent2] = allcellsPosl3[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl3FH,0)):
    maxval = np.amax(allcellsl3FH[row2])
    for value in range(np.size(allcellsl3FH,1)):
        if allcellsl3FH[row2][value]>=maxval:
            allcellsl3FHSpl[row2][value]=allcellsl3FH[row2][value]
for row2 in range(np.size(allcellsl3RH,0)):
    maxval = np.amax(allcellsl3RH[row2])
    for value in range(np.size(allcellsl3RH,1)):
        if allcellsl3RH[row2][value]>=maxval:
            allcellsl3RHSpl[row2][value]=allcellsl3RH[row2][value]
# Combine both passes; threshold at 30% of the maximum intensity.
BothSplines3 = allcellsl3RHSpl + allcellsl3FHSpl
ispl = max_y/2
mspl = 0
BothFinl3 = allcellsPosl3Spline + BothSplines3
MaxInt = np.amax(BothFinl3)
#MakeImagetrMicro(BothFinl3, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l3Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l3Micro))
for row2 in range(np.size(BothFinl3,0)):
    for value in range(np.size(BothFinl3,1)):
        if BothFinl3[row2][value]<MaxInt*0.3:
            BothFinl3[row2][value] = 0
#MakeImagetrMicro(BothFinl3, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l3Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l3Micro))
Splinepointsl3 = []
for val in range(np.size(BothFinl3,1)):
for row in range(np.size(BothFinl3,0)):
if BothFinl3[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl3.append([row,val])
xlistl3 = []
ylistl3 = []
print Splinepointsl3
for i in Splinepointsl3:
xlistl3.append(i[0])
ylistl3.append(i[1])
testp = []
testp = [xlistl3,ylistl3]
#plt.plot(ylistl3,xlistl3,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl3,ImageLocOut + "Micrometer" + "/_Splinel3")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l4
# --- Spline preparation for band l4 (same pipeline as l1) ---
allcellsPosl4T = allcellsPosl4.transpose()
for row2 in range(np.size(allcellsPosl4T,0)):
    maxval = np.amax(allcellsPosl4T[row2])
    for value in range(np.size(allcellsPosl4T,1)):
        if allcellsPosl4T[row2][value]>=maxval:
            allcellsPosl4Spline[value][row2]=allcellsPosl4T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl4FH[ent][ent2] = allcellsPosl4[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl4RH[ent][ent2] = allcellsPosl4[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl4FH,0)):
    maxval = np.amax(allcellsl4FH[row2])
    for value in range(np.size(allcellsl4FH,1)):
        if allcellsl4FH[row2][value]>=maxval:
            allcellsl4FHSpl[row2][value]=allcellsl4FH[row2][value]
for row2 in range(np.size(allcellsl4RH,0)):
    maxval = np.amax(allcellsl4RH[row2])
    for value in range(np.size(allcellsl4RH,1)):
        if allcellsl4RH[row2][value]>=maxval:
            allcellsl4RHSpl[row2][value]=allcellsl4RH[row2][value]
# Combine both passes; threshold at 30% of the maximum intensity.
BothSplines4 = allcellsl4RHSpl + allcellsl4FHSpl
ispl = max_y/2
mspl = 0
BothFinl4 = allcellsPosl4Spline + BothSplines4
MaxInt = np.amax(BothFinl4)
#MakeImagetrMicro(BothFinl4, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l4Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l4Micro))
for row2 in range(np.size(BothFinl4,0)):
    for value in range(np.size(BothFinl4,1)):
        if BothFinl4[row2][value]<MaxInt*0.3:
            BothFinl4[row2][value] = 0
#MakeImagetrMicro(BothFinl4, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l4Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l4Micro))
Splinepointsl4 = []
for val in range(np.size(BothFinl4,1)):
for row in range(np.size(BothFinl4,0)):
if BothFinl4[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl4.append([row,val])
xlistl4 = []
ylistl4 = []
print Splinepointsl4
for i in Splinepointsl4:
xlistl4.append(i[0])
ylistl4.append(i[1])
testp = []
testp = [xlistl4,ylistl4]
#plt.plot(ylistl4,xlistl4,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl4,ImageLocOut + "Micrometer" + "/_Splinel4")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
#################l5
# --- Spline preparation for band l5 (same pipeline as l1, but the
# intensity threshold here is 60% instead of 30%) ---
allcellsPosl5T = allcellsPosl5.transpose()
for row2 in range(np.size(allcellsPosl5T,0)):
    maxval = np.amax(allcellsPosl5T[row2])
    for value in range(np.size(allcellsPosl5T,1)):
        if allcellsPosl5T[row2][value]>=maxval:
            allcellsPosl5Spline[value][row2]=allcellsPosl5T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl5FH[ent][ent2] = allcellsPosl5[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl5RH[ent][ent2] = allcellsPosl5[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl5FH,0)):
    maxval = np.amax(allcellsl5FH[row2])
    for value in range(np.size(allcellsl5FH,1)):
        if allcellsl5FH[row2][value]>=maxval:
            allcellsl5FHSpl[row2][value]=allcellsl5FH[row2][value]
for row2 in range(np.size(allcellsl5RH,0)):
    maxval = np.amax(allcellsl5RH[row2])
    for value in range(np.size(allcellsl5RH,1)):
        if allcellsl5RH[row2][value]>=maxval:
            allcellsl5RHSpl[row2][value]=allcellsl5RH[row2][value]
# Combine both passes; threshold at 60% of the maximum intensity.
BothSplines5 = allcellsl5RHSpl + allcellsl5FHSpl
ispl = max_y/2
mspl = 0
BothFinl5 = allcellsPosl5Spline + BothSplines5
MaxInt = np.amax(BothFinl5)
#MakeImagetrMicro(BothFinl5, "Cells_SplinePrep "+str(l4Micro) + " to " + str(l5Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l4Micro) + "_" + str(l5Micro))
for row2 in range(np.size(BothFinl5,0)):
    for value in range(np.size(BothFinl5,1)):
        if BothFinl5[row2][value]<MaxInt*0.6:
            BothFinl5[row2][value] = 0
#MakeImagetrMicro(BothFinl5, "Cells_SplinePrepRed_ "+str(l4Micro) + " to " + str(l5Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l4Micro) + "_" + str(l5Micro))
Splinepointsl5 = []
for val in range(np.size(BothFinl5,1)):
for row in range(np.size(BothFinl5,0)):
if BothFinl5[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl5.append([row,val])
xlistl5 = []
ylistl5 = []
print Splinepointsl5
for i in Splinepointsl5:
xlistl5.append(i[0])
ylistl5.append(i[1])
testp = []
testp = [xlistl5,ylistl5]
print "xlist: ", xlistl5
print "ylist: ", ylistl5
#plt.plot(ylistl5,xlistl5,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl5,ImageLocOut + "Micrometer" +"/" + "_Splinel5")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l6
# --- Spline preparation for band l6 (threshold 50%; the "Red" debug
# image for this band is rendered, not commented out) ---
allcellsPosl6T = allcellsPosl6.transpose()
for row2 in range(np.size(allcellsPosl6T,0)):
    maxval = np.amax(allcellsPosl6T[row2])
    for value in range(np.size(allcellsPosl6T,1)):
        if allcellsPosl6T[row2][value]>=maxval:
            allcellsPosl6Spline[value][row2]=allcellsPosl6T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl6FH[ent][ent2] = allcellsPosl6[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl6RH[ent][ent2] = allcellsPosl6[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl6FH,0)):
    maxval = np.amax(allcellsl6FH[row2])
    for value in range(np.size(allcellsl6FH,1)):
        if allcellsl6FH[row2][value]>=maxval:
            allcellsl6FHSpl[row2][value]=allcellsl6FH[row2][value]
for row2 in range(np.size(allcellsl6RH,0)):
    maxval = np.amax(allcellsl6RH[row2])
    for value in range(np.size(allcellsl6RH,1)):
        if allcellsl6RH[row2][value]>=maxval:
            allcellsl6RHSpl[row2][value]=allcellsl6RH[row2][value]
# Combine both passes; threshold at 50% of the maximum intensity.
BothSplines6 = allcellsl6RHSpl + allcellsl6FHSpl
ispl = max_y/2
mspl = 0
BothFinl6 = allcellsPosl6Spline + BothSplines6
MaxInt = np.amax(BothFinl6)
#MakeImagetrMicro(BothFinl6, "Cells_SplinePrep "+str(l5Micro) + " to " + str(l6Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l5Micro) + "_" + str(l6Micro))
for row2 in range(np.size(BothFinl6,0)):
    for value in range(np.size(BothFinl6,1)):
        if BothFinl6[row2][value]<MaxInt*0.5:
            BothFinl6[row2][value] = 0
MakeImagetrMicro(BothFinl6, "Cells_SplinePrepRed_ "+str(l5Micro) + " to " + str(l6Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l5Micro) + "_" + str(l6Micro))
Splinepointsl6 = []
for val in range(np.size(BothFinl6,1)):
for row in range(np.size(BothFinl6,0)):
if BothFinl6[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl6.append([row,val])
xlistl6 = []
ylistl6 = []
print Splinepointsl6
for i in Splinepointsl6:
xlistl6.append(i[0])
ylistl6.append(i[1])
testp = []
testp = [xlistl6,ylistl6]
print "xlist: ", xlistl6
print "ylist: ", ylistl6
#plt.plot(ylistl6,xlistl6,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl6,ImageLocOut + "Micrometer" + "/_Splinel6")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l7
# --- Spline preparation for band l7 (same pipeline as l1) ---
allcellsPosl7T = allcellsPosl7.transpose()
for row2 in range(np.size(allcellsPosl7T,0)):
    maxval = np.amax(allcellsPosl7T[row2])
    for value in range(np.size(allcellsPosl7T,1)):
        if allcellsPosl7T[row2][value]>=maxval:
            allcellsPosl7Spline[value][row2]=allcellsPosl7T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl7FH[ent][ent2] = allcellsPosl7[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl7RH[ent][ent2] = allcellsPosl7[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl7FH,0)):
    maxval = np.amax(allcellsl7FH[row2])
    for value in range(np.size(allcellsl7FH,1)):
        if allcellsl7FH[row2][value]>=maxval:
            allcellsl7FHSpl[row2][value]=allcellsl7FH[row2][value]
for row2 in range(np.size(allcellsl7RH,0)):
    maxval = np.amax(allcellsl7RH[row2])
    for value in range(np.size(allcellsl7RH,1)):
        if allcellsl7RH[row2][value]>=maxval:
            allcellsl7RHSpl[row2][value]=allcellsl7RH[row2][value]
# Combine both passes; threshold at 30% of the maximum intensity.
BothSplines7 = allcellsl7RHSpl + allcellsl7FHSpl
ispl = max_y/2
mspl = 0
BothFinl7 = allcellsPosl7Spline + BothSplines7
MaxInt = np.amax(BothFinl7)
#MakeImagetrMicro(BothFinl7, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l7Micro))
for row2 in range(np.size(BothFinl7,0)):
    for value in range(np.size(BothFinl7,1)):
        if BothFinl7[row2][value]<MaxInt*0.3:
            BothFinl7[row2][value] = 0
#MakeImagetrMicro(BothFinl7, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l7Micro))
Splinepointsl7 = []
for val in range(np.size(BothFinl7,1)):
for row in range(np.size(BothFinl7,0)):
if BothFinl7[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl7.append([row,val])
xlistl7 = []
ylistl7 = []
print Splinepointsl7
for i in Splinepointsl7:
xlistl7.append(i[0])
ylistl7.append(i[1])
testp = []
testp = [xlistl7,ylistl7]
#plt.plot(ylistl7,xlistl7,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl7,ImageLocOut + "Micrometer" + "/_Splinel7")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l8
# --- Spline preparation for band l8 (threshold 45%; debug images are
# rendered before and after thresholding) ---
allcellsPosl8T = allcellsPosl8.transpose()
for row2 in range(np.size(allcellsPosl8T,0)):
    maxval = np.amax(allcellsPosl8T[row2])
    for value in range(np.size(allcellsPosl8T,1)):
        if allcellsPosl8T[row2][value]>=maxval:
            allcellsPosl8Spline[value][row2]=allcellsPosl8T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl8FH[ent][ent2] = allcellsPosl8[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl8RH[ent][ent2] = allcellsPosl8[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl8FH,0)):
    maxval = np.amax(allcellsl8FH[row2])
    for value in range(np.size(allcellsl8FH,1)):
        if allcellsl8FH[row2][value]>=maxval:
            allcellsl8FHSpl[row2][value]=allcellsl8FH[row2][value]
for row2 in range(np.size(allcellsl8RH,0)):
    maxval = np.amax(allcellsl8RH[row2])
    for value in range(np.size(allcellsl8RH,1)):
        if allcellsl8RH[row2][value]>=maxval:
            allcellsl8RHSpl[row2][value]=allcellsl8RH[row2][value]
# Combine both passes; threshold at 45% of the maximum intensity.
BothSplines8 = allcellsl8RHSpl + allcellsl8FHSpl
ispl = max_y/2
mspl = 0
BothFinl8 = allcellsPosl8Spline + BothSplines8
MaxInt = np.amax(BothFinl8)
MakeImagetrMicro(BothFinl8, "Cells_SplinePrep "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l7Micro) + "_" + str(l8Micro))
for row2 in range(np.size(BothFinl8,0)):
    for value in range(np.size(BothFinl8,1)):
        if BothFinl8[row2][value]<MaxInt*0.45: #0.4 0.5
            BothFinl8[row2][value] = 0
MakeImagetrMicro(BothFinl8, "Cells_SplinePrepRed_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l7Micro) + "_" + str(l8Micro))
# Point extraction for band l8: unlike the earlier bands this uses fresh
# names (row2/val2) — no loop-variable shadowing — shifts by (x0_c-1,
# y0_c-1), and builds per-point weights for the spline fit.
Splinepointsl8 = []
WeightArray = []
WeightArray2 = []
print "BothFinl8: ", BothFinl8
print "Maxint: ", MaxInt
for val in range(np.size(BothFinl8,1)):
    for row in range(np.size(BothFinl8,0)):
        if BothFinl8[row][val] > 0:
            row2 = row-(y0_c-1)
            val2 = val - (x0_c-1)
            Splinepointsl8.append([row2,val2])
            # Earlier three-level weighting scheme, kept for reference:
            #if BothFinl8[row][val] > MaxInt*0.8:
            #    WeightArray2.append(3)
            #elif BothFinl8[row][val] > MaxInt*0.6:
            #    WeightArray2.append(2)
            #else:
            #    WeightArray2.append(1)
            # Two-level weighting: bright pixels (>70% of max) count double.
            if BothFinl8[row][val] > MaxInt*0.7:
                WeightArray2.append(2)
            else:
                WeightArray2.append(1)
            WeightArray.append(BothFinl8[row][val])
xlistl8 = []
ylistl8 = []
print "WArray: ", WeightArray
print "WArray2: ", WeightArray2
#print Splinepointsl8
for i in Splinepointsl8:
    xlistl8.append(i[0])
    ylistl8.append(i[1])
testp = []
testp = [xlistl8,ylistl8]
plt.plot(ylistl8,xlistl8,'ro')
plt.show()
spline = Spline_Interpolation(Splinepointsl8,WeightArray2,ImageLocOut + "Micrometer" + "/_Splinel8_Defense")
spline.show_naturalshape()
spline.show_naturalshapeMicro()
###############################
# NOTE(review): this block repeats the entire l8 pipeline above verbatim;
# only the final spline output name differs ("_Splinel8" vs
# "_Splinel8_Defense").  Candidate for extraction into a function.
allcellsPosl8T = allcellsPosl8.transpose()
for row2 in range(np.size(allcellsPosl8T,0)):
    maxval = np.amax(allcellsPosl8T[row2])
    for value in range(np.size(allcellsPosl8T,1)):
        if allcellsPosl8T[row2][value]>=maxval:
            allcellsPosl8Spline[value][row2]=allcellsPosl8T[row2][value]
# Front/rear halves along x (Python 2 integer division).
for ent in range(max_y):
    for ent2 in range(max_x/2):
        allcellsl8FH[ent][ent2] = allcellsPosl8[ent][ent2]
for ent in range(max_y):
    for ent2 in range(max_x/2,max_x):
        allcellsl8RH[ent][ent2] = allcellsPosl8[ent][ent2]
# Row-wise maxima on each half.
for row2 in range(np.size(allcellsl8FH,0)):
    maxval = np.amax(allcellsl8FH[row2])
    for value in range(np.size(allcellsl8FH,1)):
        if allcellsl8FH[row2][value]>=maxval:
            allcellsl8FHSpl[row2][value]=allcellsl8FH[row2][value]
for row2 in range(np.size(allcellsl8RH,0)):
    maxval = np.amax(allcellsl8RH[row2])
    for value in range(np.size(allcellsl8RH,1)):
        if allcellsl8RH[row2][value]>=maxval:
            allcellsl8RHSpl[row2][value]=allcellsl8RH[row2][value]
# Combine both passes; threshold at 45% of the maximum intensity.
BothSplines8 = allcellsl8RHSpl + allcellsl8FHSpl
ispl = max_y/2
mspl = 0
BothFinl8 = allcellsPosl8Spline + BothSplines8
MaxInt = np.amax(BothFinl8)
MakeImagetrMicro(BothFinl8, "Cells_SplinePrep "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l7Micro) + "_" + str(l8Micro))
for row2 in range(np.size(BothFinl8,0)):
    for value in range(np.size(BothFinl8,1)):
        if BothFinl8[row2][value]<MaxInt*0.45: #0.4 0.5
            BothFinl8[row2][value] = 0
MakeImagetrMicro(BothFinl8, "Cells_SplinePrepRed_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l7Micro) + "_" + str(l8Micro))
# Point extraction for band l8, second run (output "_Splinel8").
# Uses fresh names (row2/val2) and per-point weights, as above.
Splinepointsl8 = []
WeightArray = []
WeightArray2 = []
print "BothFinl8: ", BothFinl8
print "Maxint: ", MaxInt
for val in range(np.size(BothFinl8,1)):
    for row in range(np.size(BothFinl8,0)):
        if BothFinl8[row][val] > 0:
            row2 = row-(y0_c-1)
            val2 = val - (x0_c-1)
            Splinepointsl8.append([row2,val2])
            # Earlier three-level weighting scheme, kept for reference:
            #if BothFinl8[row][val] > MaxInt*0.8:
            #    WeightArray2.append(3)
            #elif BothFinl8[row][val] > MaxInt*0.6:
            #    WeightArray2.append(2)
            #else:
            #    WeightArray2.append(1)
            # Two-level weighting: bright pixels (>70% of max) count double.
            if BothFinl8[row][val] > MaxInt*0.7:
                WeightArray2.append(2)
            else:
                WeightArray2.append(1)
            WeightArray.append(BothFinl8[row][val])
xlistl8 = []
ylistl8 = []
print "WArray: ", WeightArray
print "WArray2: ", WeightArray2
#print Splinepointsl8
for i in Splinepointsl8:
    xlistl8.append(i[0])
    ylistl8.append(i[1])
testp = []
testp = [xlistl8,ylistl8]
plt.plot(ylistl8,xlistl8,'ro')
plt.show()
spline = Spline_Interpolation(Splinepointsl8,WeightArray2,ImageLocOut + "Micrometer" + "/_Splinel8")
spline.show_naturalshape()
spline.show_naturalshapeMicro()
###############l9
allcellsPosl9T = allcellsPosl9.transpose()
for row2 in range(np.size(allcellsPosl9T,0)):
maxval = np.amax(allcellsPosl9T[row2])
for value in range(np.size(allcellsPosl9T,1)):
if allcellsPosl9T[row2][value]>=maxval:
allcellsPosl9Spline[value][row2]=allcellsPosl9T[row2][value]
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl9FH[ent][ent2] = allcellsPosl9[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl9RH[ent][ent2] = allcellsPosl9[ent][ent2]
for row2 in range(np.size(allcellsl9FH,0)):
maxval = np.amax(allcellsl9FH[row2])
for value in range(np.size(allcellsl9FH,1)):
if allcellsl9FH[row2][value]>=maxval:
allcellsl9FHSpl[row2][value]=allcellsl9FH[row2][value]
for row2 in range(np.size(allcellsl9RH,0)):
maxval = np.amax(allcellsl9RH[row2])
for value in range(np.size(allcellsl9RH,1)):
if allcellsl9RH[row2][value]>=maxval:
allcellsl9RHSpl[row2][value]=allcellsl9RH[row2][value]
BothSplines9 = allcellsl9RHSpl + allcellsl9FHSpl
ispl = max_y/2
mspl = 0
BothFinl9 = allcellsPosl9Spline + BothSplines9
MaxInt = np.amax(BothFinl9)
MakeImagetrMicro(BothFinl9, "Cells_SplinePrep "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l8Micro) + "_" + str(l9Micro))
# Zero out low-intensity pixels (below 30% of the section maximum) so only the
# ridge points survive for the spline preparation image.
for row2 in range(np.size(BothFinl9,0)):
    for value in range(np.size(BothFinl9,1)):
        if BothFinl9[row2][value]<MaxInt*0.3:
            BothFinl9[row2][value] = 0
# BUG FIX: the title previously read "l9Micro to l9Micro"; the l9 section spans
# l8Micro -> l9Micro, matching the ShowImage filename below and every sibling section.
MakeImagetrMicro(BothFinl9, "Cells_SplinePrepRed_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l8Micro) + "_" + str(l9Micro))
Splinepointsl9 = []
for val in range(np.size(BothFinl9,1)):
for row in range(np.size(BothFinl9,0)):
if BothFinl9[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl9.append([row,val])
xlistl9 = []
ylistl9 = []
print Splinepointsl9
for i in Splinepointsl9:
xlistl9.append(i[0])
ylistl9.append(i[1])
testp = []
testp = [xlistl9,ylistl9]
#plt.plot(ylistl9,xlistl9,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl9,ImageLocOut + "Micrometer" + "/_Splinel9")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l10
allcellsPosl10T = allcellsPosl10.transpose()
for row2 in range(np.size(allcellsPosl10T,0)):
maxval = np.amax(allcellsPosl10T[row2])
for value in range(np.size(allcellsPosl10T,1)):
if allcellsPosl10T[row2][value]>=maxval:
allcellsPosl10Spline[value][row2]=allcellsPosl10T[row2][value]
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl10FH[ent][ent2] = allcellsPosl10[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl10RH[ent][ent2] = allcellsPosl10[ent][ent2]
for row2 in range(np.size(allcellsl10FH,0)):
maxval = np.amax(allcellsl10FH[row2])
for value in range(np.size(allcellsl10FH,1)):
if allcellsl10FH[row2][value]>=maxval:
allcellsl10FHSpl[row2][value]=allcellsl10FH[row2][value]
for row2 in range(np.size(allcellsl10RH,0)):
maxval = np.amax(allcellsl10RH[row2])
for value in range(np.size(allcellsl7RH,1)):
if allcellsl10RH[row2][value]>=maxval:
allcellsl10RHSpl[row2][value]=allcellsl10RH[row2][value]
BothSplines10 = allcellsl10RHSpl + allcellsl10FHSpl
ispl = max_y/2
mspl = 0
BothFinl10 = allcellsPosl10Spline + BothSplines10
MaxInt = np.amax(BothFinl10)
MakeImagetrMicro(BothFinl10, "Cells_SplinePrep "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l9Micro) + "_" + str(l10Micro))
for row2 in range(np.size(BothFinl10,0)):
for value in range(np.size(BothFinl10,1)):
if BothFinl10[row2][value]<MaxInt*0.3:
BothFinl10[row2][value] = 0
MakeImagetrMicro(BothFinl10, "Cells_SplinePrepRed_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l9Micro) + "_" + str(l10Micro))
Splinepointsl10 = []
#for val in range(np.size(BothFinl10,1)):
# for row in range(np.size(BothFinl10,0)):
# if BothFinl10[row][val] > 0:
# row = row-y0_c
# val = val - x0_c
# Splinepointsl10.append([row,val])
#xlistl10 = []
#ylistl10 = []
#print Splinepointsl10
#for i in Splinepointsl10:
# xlistl10.append(i[0])
# ylistl10.append(i[1])
#testp = []
#testp = [xlistl10,ylistl10]
#plt.plot(ylistl10,xlistl10,'ro')
#plt.show()
#spline = Spline_Interpolation(Splinepointsl10,ImageLocOut + "Micrometer" + "/_Splinel10")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l11
allcellsPosl11T = allcellsPosl11.transpose()
for row2 in range(np.size(allcellsPosl11T,0)):
maxval = np.amax(allcellsPosl11T[row2])
for value in range(np.size(allcellsPosl11T,1)):
if allcellsPosl11T[row2][value]>=maxval:
allcellsPosl11Spline[value][row2]=allcellsPosl11T[row2][value]
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl11FH[ent][ent2] = allcellsPosl11[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl11RH[ent][ent2] = allcellsPosl11[ent][ent2]
for row2 in range(np.size(allcellsl11FH,0)):
maxval = np.amax(allcellsl11FH[row2])
for value in range(np.size(allcellsl11FH,1)):
if allcellsl11FH[row2][value]>=maxval:
allcellsl11FHSpl[row2][value]=allcellsl11FH[row2][value]
for row2 in range(np.size(allcellsl11RH,0)):
maxval = np.amax(allcellsl11RH[row2])
for value in range(np.size(allcellsl11RH,1)):
if allcellsl11RH[row2][value]>=maxval:
allcellsl11RHSpl[row2][value]=allcellsl11RH[row2][value]
BothSplines11 = allcellsl11RHSpl + allcellsl11FHSpl
ispl = max_y/2
mspl = 0
BothFinl11 = allcellsPosl11Spline + BothSplines11
MaxInt = np.amax(BothFinl11)
#MakeImagetrMicro(BothFinl11, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l11Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l11Micro))
for row2 in range(np.size(BothFinl11,0)):
for value in range(np.size(BothFinl11,1)):
if BothFinl11[row2][value]<MaxInt*0.3:
BothFinl11[row2][value] = 0
#MakeImagetrMicro(BothFinl11, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l11Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l11Micro))
Splinepointsl11 = []
for val in range(np.size(BothFinl11,1)):
for row in range(np.size(BothFinl11,0)):
if BothFinl11[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl11.append([row,val])
xlistl11 = []
ylistl11 = []
print Splinepointsl11
for i in Splinepointsl11:
xlistl11.append(i[0])
ylistl11.append(i[1])
testp = []
testp = [xlistl11,ylistl11]
plt.plot(ylistl11,xlistl11,'ro')
plt.show()
#spline = Spline_Interpolation(Splinepointsl11,ImageLocOut + "Micrometer" + "/_Splinel11")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
###############l12
allcellsPosl12T = allcellsPosl12.transpose()
for row2 in range(np.size(allcellsPosl12T,0)):
maxval = np.amax(allcellsPosl12T[row2])
for value in range(np.size(allcellsPosl12T,1)):
if allcellsPosl12T[row2][value]>=maxval:
allcellsPosl12Spline[value][row2]=allcellsPosl12T[row2][value]
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl12FH[ent][ent2] = allcellsPosl12[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl12RH[ent][ent2] = allcellsPosl12[ent][ent2]
for row2 in range(np.size(allcellsl12FH,0)):
maxval = np.amax(allcellsl12FH[row2])
for value in range(np.size(allcellsl12FH,1)):
if allcellsl12FH[row2][value]>=maxval:
allcellsl12FHSpl[row2][value]=allcellsl12FH[row2][value]
for row2 in range(np.size(allcellsl12RH,0)):
maxval = np.amax(allcellsl12RH[row2])
for value in range(np.size(allcellsl12RH,1)):
if allcellsl12RH[row2][value]>=maxval:
allcellsl12RHSpl[row2][value]=allcellsl12RH[row2][value]
BothSplines12 = allcellsl12RHSpl + allcellsl12FHSpl
ispl = max_y/2
mspl = 0
BothFinl12 = allcellsPosl12Spline + BothSplines12
MaxInt = np.amax(BothFinl12)
#MakeImagetrMicro(BothFinl12, "Cells_SplinePrep "+str(l6Micro) + " to " + str(l12Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" + "/" + "_Cells_SplinePrep_"+str(l6Micro) + "_" + str(l12Micro))
for row2 in range(np.size(BothFinl12,0)):
for value in range(np.size(BothFinl12,1)):
if BothFinl12[row2][value]<MaxInt*0.3:
BothFinl12[row2][value] = 0
#MakeImagetrMicro(BothFinl12, "Cells_SplinePrepRed_ "+str(l6Micro) + " to " + str(l12Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut + "Micrometer" +"/" + "_Cells_SplinePrepRed_"+str(l6Micro) + "_" + str(l12Micro))
Splinepointsl12 = []
for val in range(np.size(BothFinl12,1)):
for row in range(np.size(BothFinl12,0)):
if BothFinl12[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepointsl12.append([row,val])
xlistl12 = []
ylistl12 = []
print Splinepointsl12
for i in Splinepointsl12:
xlistl12.append(i[0])
ylistl12.append(i[1])
testp = []
testp = [xlistl12,ylistl12]
plt.plot(ylistl12,xlistl12,'ro')
plt.show()
#spline = Spline_Interpolation(Splinepointsl12,ImageLocOut + "Micrometer" + "/_Splinel12")
#spline.show_naturalshape()
#spline.show_naturalshapeMicro()
# while ispl >= 0:
# while mspl < max_x:
# if BothSplines[ispl][mspl] >= MaxInt/2:
# BothSplinesRed[ispl][mspl] = BothSplines[ispl][mspl]
# elif BothSplines5[ispl][mspl] >= MaxInt/2:
# BothSplinesRed[ispl][mspl] = BothSplines5[ispl][mspl]
# mspl = mspl+1
# mspl = 0
# ispl = ispl-1
# mspl = 0
# ispl = max_y/2
# while ispl< max_y:
# while mspl < max_x:
# if BothSplines[ispl][mspl] >= MaxInt/2:
# BothSplinesRed[ispl][mspl] = BothSplines[ispl][mspl]
# elif BothSplines5[ispl][mspl] >= MaxInt/2:
# BothSplinesRed[ispl][mspl] = BothSplines5[ispl][mspl]
# mspl = mspl+1
# mspl = 0
# ispl = ispl+1
for row2 in range(np.size(BothFin5,0)):
for value in range(np.size(BothFin5,1)):
if BothFin5[row2][value]<MaxInt*0.65:
BothFin5[row2][value] = 0
#MakeImagetrMicro(BothFin5, "Cells_SplineFin3_ "+str(l4Micro) + " to " + str(l5Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_SplineFin3_"+str(l4Micro) + "_" + str(l5Micro))
for row2 in range(np.size(BothFin5,0)):
for value in range(np.size(BothFin5,1)):
if BothFin5[row2][value]<MaxInt*0.7:
BothFin5[row2][value] = 0
#MakeImagetrMicro(BothFin5, "Cells_SplineFin4_ "+str(l4Micro) + " to " + str(l5Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_SplineFin4_"+str(l4Micro) + "_" + str(l5Micro))
for row2 in range(np.size(BothFin5,0)):
for value in range(np.size(BothFin5,1)):
if BothFin5[row2][value]<MaxInt*0.75:
BothFin5[row2][value] = 0
#MakeImagetrMicro(BothFin5, "Cells_SplineFin5_ "+str(l4Micro) + " to " + str(l5Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_SplineFin5_"+str(l4Micro) + "_" + str(l5Micro))
###################l7#############################################
for ent in range(max_y/2):
for ent2 in range(max_x):
allcellsl7pos[ent][ent2] = allcellsl7[ent][ent2]
allcellsl7posT = allcellsl7pos.transpose()
for row2 in range(np.size(allcellsl7posT,0)):
maxval = np.amax(allcellsl7posT[row2])
for value in range(np.size(allcellsl7posT,1)):
if allcellsl7posT[row2][value]>=maxval:
allcellsl7posSpline[value][row2]=allcellsl7posT[row2][value]
for ent in range(max_y/2,max_y):
for ent2 in range(max_x):
allcellsl7neg[ent][ent2] = allcellsl7[ent][ent2]
allcellsl7negT = allcellsl7neg.transpose()
for row2 in range(np.size(allcellsl7negT,0)):
maxval = np.amax(allcellsl7negT[row2])
# print "row2: ", row2
# print "maxval: ", maxval
for value in range(np.size(allcellsl7negT,1)):
if allcellsl7negT[row2][value]>=maxval:
# print "yes if"
allcellsl7negSpline[value][row2]=allcellsl7negT[row2][value]
BothSplines = allcellsl7posSpline + allcellsl7negSpline
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl7FH[ent][ent2] = allcellsl7[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl7RH[ent][ent2] = allcellsl7[ent][ent2]
for row2 in range(np.size(allcellsl7FH,0)):
print "hi"
#print "row: ", row2
print "allcells: ", allcellsl7FH[row2]
maxval = np.amax(allcellsl7FH[row2])
print "max: ", max
#pdb.set_trace()
for value in range(np.size(allcellsl7FH,1)):
print "hu"
#pdb.set_trace()
if allcellsl7FH[row2][value]>=maxval:
allcellsl7FHSpl[row2][value]=allcellsl7FH[row2][value]
#pdb.set_trace()
for row2 in range(np.size(allcellsl7RH,0)):
maxval = np.amax(allcellsl7RH[row2])
#pdb.set_trace()
for value in range(np.size(allcellsl7RH,1)):
if allcellsl7RH[row2][value]>=maxval:
allcellsl7RHSpl[row2][value]=allcellsl7RH[row2][value]
BothSplines7 = allcellsl7RHSpl + allcellsl7FHSpl
ispl = max_y/2
MaxInt = np.amax(BothSplines7)
mspl = 0
while ispl >= 0:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed7[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines7[ispl][mspl] >= MaxInt/2:
BothSplinesRed7[ispl][mspl] = BothSplines7[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl-1
mspl = 0
ispl = max_y/2
while ispl< max_y:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed7[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines7[ispl][mspl] >= MaxInt/2:
BothSplinesRed7[ispl][mspl] = BothSplines7[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl+1
BothFin7 = BothSplines + BothSplines7
#MakeImagetrMicro(BothFin7, "Cells_SplineFin0_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_SplineFin0_"+str(l6Micro) + "_" + str(l7Micro))
for row2 in range(np.size(BothFin7,0)):
for value in range(np.size(BothFin7,1)):
if BothFin7[row2][value]<MaxInt/2:
BothFin7[row2][value] = 0
print "Red"
#MakeImagetrMicro(BothSplinesRed7, "Cells_Spline_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_Spline_"+str(l6Micro) + "_" + str(l7Micro))
#MakeImagetrMicro(BothSplines, "Cells_Spline0_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_Spline0_"+str(l6Micro) + "_" + str(l7Micro))
#MakeImagetrMicro(BothSplines7, "Cells_Spline01_ "+str(l6Micro) + " to " + str(l7),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_Spline01_"+str(l6Micro) + "_" + str(l7))
#MakeImagetrMicro(BothFin7, "Cells_SplineFin1_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_SplineFin1_"+str(l6Micro) + "_" + str(l7Micro))
for row2 in range(np.size(BothFin7,0)):
for value in range(np.size(BothFin7,1)):
if BothFin7[row2][value]<MaxInt*0.6:
BothFin7[row2][value] = 0
MakeImagetrMicro(BothFin7, "Cells_SplineFin2_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin2_"+str(l6Micro) + "_" + str(l7Micro))
for row2 in range(np.size(BothFin7,0)):
for value in range(np.size(BothFin7,1)):
if BothFin7[row2][value]<MaxInt*0.65:
BothFin7[row2][value] = 0
MakeImagetrMicro(BothFin7, "Cells_SplineFin3_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin3_"+str(l6Micro) + "_" + str(l7Micro))
for row2 in range(np.size(BothFin7,0)):
for value in range(np.size(BothFin7,1)):
if BothFin7[row2][value]<MaxInt*0.7:
BothFin7[row2][value] = 0
MakeImagetrMicro(BothFin7, "Cells_SplineFin4_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin4_"+str(l6Micro) + "_" + str(l7Micro))
for row2 in range(np.size(BothFin7,0)):
for value in range(np.size(BothFin7,1)):
if BothFin7[row2][value]<MaxInt*0.85:
BothFin7[row2][value] = 0
MakeImagetrMicro(BothFin7, "Cells_SplineFin5_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin5_"+str(l6Micro) + "_" + str(l7Micro))
Splinepoints = []
for val in range(np.size(BothFin7,1)):
for row in range(y0_c):
if BothFin7[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepoints.append([row,val])
xlist = []
ylist = []
print Splinepoints
for i in Splinepoints:
xlist.append(i[0])
ylist.append(i[1])
testp = []
testp = [xlist,ylist]
print "xlist: ", xlist
print "ylist: ", ylist
plt.plot(ylist,xlist,'ro')
plt.show()
spline = Spline_Interpolation(Splinepoints,ImageLocOut + "/Spline75")
spline.show_naturalshape()
spline.show_naturalshapeMicro()
for row2 in range(np.size(BothFin7,0)):
for value in range(np.size(BothFin7,1)):
if BothFin7[row2][value]<MaxInt*0.5:
BothFin7[row2][value] = 0
MakeImagetrMicro(BothFin7, "Cells_SplineFin6_ "+str(l6Micro) + " to " + str(l7Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin6_"+str(l6Micro) + "_" + str(l7Micro))
Splinepoints2 = []
for val in range(np.size(BothFin7,1)):
for row in range(y0_c):
if BothFin7[row][val] > 0:
row = row-y0_c
val = val - x0_c
Splinepoints2.append([row,val])
xlist = []
ylist = []
print Splinepoints2
for i in Splinepoints2:
xlist.append(i[0])
ylist.append(i[1])
testp = []
testp = [xlist,ylist]
print "xlist: ", xlist
print "ylist: ", ylist
plt.plot(ylist,xlist,'ro')
plt.show()
spline = Spline_Interpolation(Splinepoints2,ImageLocOut + "/Spline76")
###################l8#############################################
for ent in range(max_y/2):
for ent2 in range(max_x):
allcellsl8pos[ent][ent2] = allcellsl8[ent][ent2]
allcellsl8posT = allcellsl8pos.transpose()
for row2 in range(np.size(allcellsl8posT,0)):
maxval = np.amax(allcellsl8posT[row2])
# print "row2: ", row2
# print "maxval: ", maxval
for value in range(np.size(allcellsl8posT,1)):
if allcellsl8posT[row2][value]>=maxval:
# print "yes if"
allcellsl8posSpline[value][row2]=allcellsl8posT[row2][value]
for ent in range(max_y/2,max_y):
for ent2 in range(max_x):
allcellsl8neg[ent][ent2] = allcellsl8[ent][ent2]
allcellsl8negT = allcellsl8neg.transpose()
for row2 in range(np.size(allcellsl8negT,0)):
maxval = np.amax(allcellsl8negT[row2])
for value in range(np.size(allcellsl8negT,1)):
if allcellsl8negT[row2][value]>=maxval:
allcellsl8negSpline[value][row2]=allcellsl8negT[row2][value]
BothSplines = allcellsl8posSpline + allcellsl8negSpline
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl8FH[ent][ent2] = allcellsl8[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl8RH[ent][ent2] = allcellsl8[ent][ent2]
for row2 in range(np.size(allcellsl8FH,0)):
maxval = np.amax(allcellsl8FH[row2])
for value in range(np.size(allcellsl8FH,1)):
if allcellsl8FH[row2][value]>=maxval:
allcellsl8FHSpl[row2][value]=allcellsl8FH[row2][value]
for row2 in range(np.size(allcellsl8RH,0)):
maxval = np.amax(allcellsl8RH[row2])
for value in range(np.size(allcellsl8RH,1)):
if allcellsl8RH[row2][value]>=maxval:
allcellsl8RHSpl[row2][value]=allcellsl8RH[row2][value]
BothSplines8 = allcellsl8RHSpl + allcellsl8FHSpl
ispl = max_y/2
MaxInt = np.amax(BothSplines8)
mspl = 0
while ispl >= 0:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed8[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines8[ispl][mspl] >= MaxInt/2:
BothSplinesRed8[ispl][mspl] = BothSplines8[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl-1
mspl = 0
ispl = max_y/2
while ispl< max_y:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed8[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines8[ispl][mspl] >= MaxInt/2:
BothSplinesRed8[ispl][mspl] = BothSplines8[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl+1
BothFin8 = BothSplines + BothSplines8
#MakeImagetrMicro(BothFin8, "Cells_SplineFin0_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
#ShowImage(ImageLocOut +"/" + "_Cells_SplineFin0_"+str(l7Micro) + "_" + str(l8Micro))
for row2 in range(np.size(BothFin8,0)):
for value in range(np.size(BothFin8,1)):
if BothFin8[row2][value]<MaxInt/2:
BothFin8[row2][value] = 0
print "Red"
MakeImagetrMicro(BothSplinesRed8, "Cells_Spline_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline_"+str(l7Micro) + "_" + str(l8Micro))
MakeImagetrMicro(BothSplines, "Cells_Spline0_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline0_"+str(l7Micro) + "_" + str(l8Micro))
MakeImagetrMicro(BothSplines8, "Cells_Spline01_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline01_"+str(l7Micro) + "_" + str(l8Micro))
MakeImagetrMicro(BothFin8, "Cells_SplineFin1_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin1_"+str(l7Micro) + "_" + str(l8Micro))
for row2 in range(np.size(BothFin8,0)):
for value in range(np.size(BothFin8,1)):
if BothFin8[row2][value]<MaxInt*0.6:
BothFin8[row2][value] = 0
MakeImagetrMicro(BothFin8, "Cells_SplineFin2_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin2_"+str(l7Micro) + "_" + str(l8Micro))
for row2 in range(np.size(BothFin8,0)):
for value in range(np.size(BothFin8,1)):
if BothFin8[row2][value]<MaxInt*0.65:
BothFin8[row2][value] = 0
MakeImagetrMicro(BothFin8, "Cells_SplineFin3_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin3_"+str(l7Micro) + "_" + str(l8Micro))
for row2 in range(np.size(BothFin8,0)):
for value in range(np.size(BothFin8,1)):
if BothFin8[row2][value]<MaxInt*0.7:
BothFin8[row2][value] = 0
MakeImagetrMicro(BothFin8, "Cells_SplineFin4_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin4_"+str(l7Micro) + "_" + str(l8Micro))
#Cellfile = open(Matlabfile + "/" + str(l7Micro) + "_" + str(l8Micro) + "_Cells.txt","w")
#for cell in BothFin8:
# cell2 = str(cell)[1:-1]
# #print cell
# #print cell2
# Cellfile.write(cell2+"\n")
for row2 in range(np.size(BothFin8,0)):
for value in range(np.size(BothFin8,1)):
if BothFin8[row2][value]<MaxInt*0.75:
BothFin8[row2][value] = 0
MakeImagetrMicro(BothFin8, "Cells_SplineFin5_ "+str(l7Micro) + " to " + str(l8Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin5_"+str(l7Micro) + "_" + str(l8Micro))
###################l9#############################################
for ent in range(max_y/2):
for ent2 in range(max_x):
allcellsl9pos[ent][ent2] = allcellsl9[ent][ent2]
allcellsl9posT = allcellsl9pos.transpose()
for row2 in range(np.size(allcellsl9posT,0)):
maxval = np.amax(allcellsl9posT[row2])
for value in range(np.size(allcellsl9posT,1)):
if allcellsl9posT[row2][value]>=maxval:
allcellsl9posSpline[value][row2]=allcellsl9posT[row2][value]
for ent in range(max_y/2,max_y):
for ent2 in range(max_x):
allcellsl9neg[ent][ent2] = allcellsl9[ent][ent2]
allcellsl9negT = allcellsl9neg.transpose()
for row2 in range(np.size(allcellsl9negT,0)):
maxval = np.amax(allcellsl9negT[row2])
for value in range(np.size(allcellsl9negT,1)):
if allcellsl9negT[row2][value]>=maxval:
allcellsl9negSpline[value][row2]=allcellsl9negT[row2][value]
BothSplines = allcellsl9posSpline + allcellsl9negSpline
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl9FH[ent][ent2] = allcellsl9[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl9RH[ent][ent2] = allcellsl9[ent][ent2]
for row2 in range(np.size(allcellsl9FH,0)):
maxval = np.amax(allcellsl9FH[row2])
for value in range(np.size(allcellsl9FH,1)):
if allcellsl9FH[row2][value]>=maxval:
allcellsl9FHSpl[row2][value]=allcellsl9FH[row2][value]
for row2 in range(np.size(allcellsl9RH,0)):
maxval = np.amax(allcellsl9RH[row2])
for value in range(np.size(allcellsl9RH,1)):
if allcellsl9RH[row2][value]>=maxval:
allcellsl9RHSpl[row2][value]=allcellsl9RH[row2][value]
BothSplines9 = allcellsl9RHSpl + allcellsl9FHSpl
ispl = max_y/2
MaxInt = np.amax(BothSplines9)
mspl = 0
while ispl >= 0:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed9[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines9[ispl][mspl] >= MaxInt/2:
BothSplinesRed9[ispl][mspl] = BothSplines9[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl-1
mspl = 0
ispl = max_y/2
while ispl< max_y:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed9[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines9[ispl][mspl] >= MaxInt/2:
BothSplinesRed9[ispl][mspl] = BothSplines9[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl+1
BothFin9 = BothSplines + BothSplines9
MakeImagetrMicro(BothFin9, "Cells_SplineFin0_ "+str(l8Micro) + " to " + str(l9Micro),x0_c, y0_c)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin0_"+str(l8Micro) + "_" + str(l9Micro))
for row2 in range(np.size(BothFin9,0)):
for value in range(np.size(BothFin9,1)):
if BothFin9[row2][value]<MaxInt/2:
BothFin9[row2][value] = 0
print "Red"
MakeImagetrMicro(BothSplinesRed9, "Cells_Spline_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline_"+str(l8Micro) + "_" + str(l9Micro))
MakeImagetrMicro(BothSplines, "Cells_Spline0_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline0_"+str(l8Micro) + "_" + str(l9Micro))
MakeImagetrMicro(BothSplines9, "Cells_Spline01_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline01_"+str(l8Micro) + "_" + str(l9Micro))
MakeImagetrMicro(BothFin9, "Cells_SplineFin1_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin1_"+str(l8Micro) + "_" + str(l9Micro))
for row2 in range(np.size(BothFin9,0)):
for value in range(np.size(BothFin9,1)):
if BothFin9[row2][value]<MaxInt*0.6:
BothFin9[row2][value] = 0
MakeImagetrMicro(BothFin9, "Cells_SplineFin2_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin2_"+str(l8Micro) + "_" + str(l9Micro))
for row2 in range(np.size(BothFin9,0)):
for value in range(np.size(BothFin9,1)):
if BothFin9[row2][value]<MaxInt*0.65:
BothFin9[row2][value] = 0
MakeImagetrMicro(BothFin9, "Cells_SplineFin3_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin3_"+str(l8Micro) + "_" + str(l9Micro))
for row2 in range(np.size(BothFin9,0)):
for value in range(np.size(BothFin9,1)):
if BothFin9[row2][value]<MaxInt*0.7:
BothFin9[row2][value] = 0
MakeImagetrMicro(BothFin9, "Cells_SplineFin4_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin4_"+str(l8Micro) + "_" + str(l9Micro))
for row2 in range(np.size(BothFin9,0)):
for value in range(np.size(BothFin9,1)):
if BothFin9[row2][value]<MaxInt*0.75:
BothFin9[row2][value] = 0
MakeImagetrMicro(BothFin9, "Cells_SplineFin5_ "+str(l8Micro) + " to " + str(l9Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin5_"+str(l8Micro) + "_" + str(l9Micro))
###################l10#############################################
for ent in range(max_y/2):
for ent2 in range(max_x):
allcellsl10pos[ent][ent2] = allcellsl10[ent][ent2]
allcellsl10posT = allcellsl10pos.transpose()
for row2 in range(np.size(allcellsl10posT,0)):
maxval = np.amax(allcellsl10posT[row2])
# print "row2: ", row2
# print "maxval: ", maxval
for value in range(np.size(allcellsl10posT,1)):
if allcellsl10posT[row2][value]>=maxval:
# print "yes if"
allcellsl10posSpline[value][row2]=allcellsl10posT[row2][value]
for ent in range(max_y/2,max_y):
for ent2 in range(max_x):
allcellsl10neg[ent][ent2] = allcellsl10[ent][ent2]
allcellsl10negT = allcellsl10neg.transpose()
for row2 in range(np.size(allcellsl10negT,0)):
maxval = np.amax(allcellsl10negT[row2])
for value in range(np.size(allcellsl10negT,1)):
if allcellsl10negT[row2][value]>=maxval:
allcellsl10negSpline[value][row2]=allcellsl10negT[row2][value]
BothSplines = allcellsl10posSpline + allcellsl10negSpline
for ent in range(max_y):
for ent2 in range(max_x/2):
allcellsl10FH[ent][ent2] = allcellsl10[ent][ent2]
for ent in range(max_y):
for ent2 in range(max_x/2,max_x):
allcellsl10RH[ent][ent2] = allcellsl10[ent][ent2]
for row2 in range(np.size(allcellsl10FH,0)):
maxval = np.amax(allcellsl10FH[row2])
for value in range(np.size(allcellsl10FH,1)):
if allcellsl10FH[row2][value]>=maxval:
allcellsl10FHSpl[row2][value]=allcellsl10FH[row2][value]
for row2 in range(np.size(allcellsl10RH,0)):
maxval = np.amax(allcellsl10RH[row2])
for value in range(np.size(allcellsl10RH,1)):
if allcellsl10RH[row2][value]>=maxval:
allcellsl10RHSpl[row2][value]=allcellsl10RH[row2][value]
BothSplines10 = allcellsl10RHSpl + allcellsl10FHSpl
ispl = max_y/2
MaxInt = np.amax(BothSplines10)
mspl = 0
while ispl >= 0:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed10[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines10[ispl][mspl] >= MaxInt/2:
BothSplinesRed10[ispl][mspl] = BothSplines10[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl-1
mspl = 0
ispl = max_y/2
while ispl< max_y:
while mspl < max_x:
if BothSplines[ispl][mspl] >= MaxInt/2:
BothSplinesRed10[ispl][mspl] = BothSplines[ispl][mspl]
elif BothSplines10[ispl][mspl] >= MaxInt/2:
BothSplinesRed10[ispl][mspl] = BothSplines10[ispl][mspl]
mspl = mspl+1
mspl = 0
ispl = ispl+1
BothFin10 = BothSplines + BothSplines10
MakeImagetrMicro(BothFin10, "Cells_SplineFin0_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin0_"+str(l9Micro) + "_" + str(l10Micro))
for row2 in range(np.size(BothFin10,0)):
for value in range(np.size(BothFin10,1)):
if BothFin10[row2][value]<MaxInt/2:
BothFin10[row2][value] = 0
print "Red"
MakeImagetrMicro(BothSplinesRed10, "Cells_Spline_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline_"+str(l9Micro) + "_" + str(l10Micro))
MakeImagetrMicro(BothSplines, "Cells_Spline0_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline0_"+str(l9Micro) + "_" + str(l10Micro))
MakeImagetrMicro(BothSplines10, "Cells_Spline01_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_Spline01_"+str(l9Micro) + "_" + str(l10Micro))
MakeImagetrMicro(BothFin10, "Cells_SplineFin1_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin1_"+str(l9Micro) + "_" + str(l10Micro))
for row2 in range(np.size(BothFin10,0)):
for value in range(np.size(BothFin10,1)):
if BothFin10[row2][value]<MaxInt*0.6:
BothFin10[row2][value] = 0
MakeImagetrMicro(BothFin10, "Cells_SplineFin2_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin2_"+str(l9Micro) + "_" + str(l10Micro))
for row2 in range(np.size(BothFin10,0)):
for value in range(np.size(BothFin10,1)):
if BothFin10[row2][value]<MaxInt*0.65:
BothFin10[row2][value] = 0
MakeImagetrMicro(BothFin10, "Cells_SplineFin3_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin3_"+str(l9Micro) + "_" + str(l10Micro))
# Progressive thresholding, step 4: cut everything below 70% of the maximum.
for row2 in range(np.size(BothFin10,0)):
    for value in range(np.size(BothFin10,1)):
        # BUG FIX: the threshold here was 0.6 — a repeat of the SplineFin2 cut already
        # applied above, making this pass a no-op. Every other lX section uses 0.7 for
        # its SplineFin4 image (cf. the l7/l8/l9 blocks), so the sequence 0.6, 0.65,
        # 0.7, 0.75 is restored.
        if BothFin10[row2][value]<MaxInt*0.7:
            BothFin10[row2][value] = 0
MakeImagetrMicro(BothFin10, "Cells_SplineFin4_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin4_"+str(l9Micro) + "_" + str(l10Micro))
for row2 in range(np.size(BothFin10,0)):
for value in range(np.size(BothFin10,1)):
if BothFin10[row2][value]<MaxInt*0.75:
BothFin10[row2][value] = 0
MakeImagetrMicro(BothFin10, "Cells_SplineFin5_ "+str(l9Micro) + " to " + str(l10Micro),x0_c-1, y0_c-1)
ShowImage(ImageLocOut +"/" + "_Cells_SplineFin5_"+str(l9Micro) + "_" + str(l10Micro))
#################
# Normalize the accumulated cell image by the number of summed frames (icount).
# NOTE: this loop was previously duplicated 11 times verbatim; the assignment is
# idempotent (same value written each pass), so a single pass is behaviorally identical.
for row in range(np.size(allcells1,0)):
    for color in range(np.size(allcells1,1)):
        allcells1Normed[row][color] = allcells1[row][color]/icount
# BUG FIX: previously rendered `allcellsNormed1`, a name never assigned in this
# section; the matrix computed above is `allcells1Normed`. TODO(review): confirm
# `allcellsNormed1` is not defined elsewhere in the file before relying on this.
MakeImage(allcells1Normed,'Normalized')
ShowImage(ImageLoc+ Timepoint+"_Cells_Normalized")
|
16,052 | ca2aab6bdb63625b96096dc17bee9659e15360d7 | from data_structures_and_algorithms.challenges.array_binary_search.array_binary_search import binary_search
"""
test empty array
test the odd number in sotred array
test the even number in sotred array
"""
def test_works_if_empty_arr():
    """Searching an empty list yields the sentinel -1."""
    assert binary_search([], 2) == -1
def test_finds_indexed_odd_num():
    """The odd target 5 lives at index 4 of [1, 2, 3, 4, 5, 6]."""
    assert binary_search([1, 2, 3, 4, 5, 6], 5) == 4
def test_finds_indexed_even_num_():
    """The even target 6 lives at index 4 of [1, 2, 4, 5, 6, 7]."""
    assert binary_search([1, 2, 4, 5, 6, 7], 6) == 4
|
16,053 | 74d5fafee5b48119f0d767d1be2ac2240030bcdd | N=int(input())
print(N*(N-1)//2 if N>2 else 1 if N==2 else 0) |
16,054 | f55fa7dfd5d75769fbdd1f12b48dc202f59db548 | # Generated by Django 3.1.7 on 2021-04-05 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the DefaultList model and changes
    # Schedule.iid to CharField(max_length=50).
    dependencies = [
        ('limbo', '0003_auto_20210329_2015'),
    ]
    operations = [
        migrations.CreateModel(
            name='DefaultList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sid', models.CharField(max_length=50)),
                ('amount', models.IntegerField()),
                ('remarks', models.CharField(max_length=200)),
                ('department', models.CharField(max_length=50)),
            ],
        ),
        migrations.AlterField(
            model_name='schedule',
            name='iid',
            field=models.CharField(max_length=50),
        ),
    ]
|
16,055 | e64fe5f94d5159b21f0bb4d846a0f28ff75dc9c4 |
'''
RULES:
1. All tetrominoes spawn horizontall and wholly above the playfield
2. I, O tetrominoes spawn centrally, while 3-cell wide tetrominoes spawn rounded to the left
3. J, L, T spawn flat-side first
'''
import pygame
import time
from time import sleep
import random
import math
from block_sh import sh_pick
pygame.init()
#pixels
BLOCK_DIM = 24
BOARD_W = 10
BOARD_H = 22
display_width = BLOCK_DIM * BOARD_W
display_height = BLOCK_DIM * BOARD_H
BLOCK_N = 7
# colors
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0 , 0)
gameDisplay = pygame.display.set_mode((display_width,display_height)) #set frame by giving tuple as param5
pygame.display.set_caption('Tetris') # window title
clock = pygame.time.Clock() # game clock
game_cond = False
#Color #. Reference
#white 0
blue_bl = pygame.image.load('img/blue_block.png') #blue 1 O
cyan_bl = pygame.image.load('img/cyan_block.png') #cyan 2 I
green_bl = pygame.image.load('img/green_block.png') #green 3 L
purple_bl = pygame.image.load('img/purple_block.png') #purple 4 Z
red_bl = pygame.image.load('img/red_block.png') #red 5 J
turq_bl = pygame.image.load('img/turq_block.png') #turq 6 S
yellow_bl = pygame.image.load('img/yellow_block.png') #yellow 7 T
#init 22x10, height x width matrix to 0
matrix_tetris = [[0] * BOARD_W for i in range(BOARD_H)]
def draw_obj(img_n, x, y):
    # Blit one block sprite onto the display surface at pixel position (x, y).
    gameDisplay.blit(img_n, (x, y))
def draw_Board(matrix):
    # Draw every occupied board cell.  Non-zero entries are always rendered
    # with the blue sprite, regardless of the stored color code.
    # NOTE(review): x iterates rows and y columns, yet the blit position is
    # (x*BLOCK_DIM, y*BLOCK_DIM) — row/column look transposed relative to
    # screen coordinates; confirm the intended orientation.
    for x in range(BOARD_H):
        for y in range(BOARD_W):
            if matrix[x][y] != 0:
                draw_obj(blue_bl, x*BLOCK_DIM, y*BLOCK_DIM)
#draws matrix into matrix_tetris onto x, y coordinates
def draw_sh_matrix(matrix, x, y):
    """Stamp the 1-cells of a shape matrix into matrix_tetris at offset (x, y).

    Fixes the column bound (flagged '#fix this' in the original), which used
    int(math.sqrt(len(matrix))) — the square root of the row count — instead
    of the actual row width, and removes the leftover debug print.
    """
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            if matrix[i][j] == 1:
                matrix_tetris[x + i][y + j] = 1
def free_matrix(matrix, x, y):
    """Clear (zero) the 1-cells of a square shape matrix from matrix_tetris
    at offset (x, y)."""
    size = len(matrix)
    for row in range(size):
        for col in range(size):
            if matrix[row][col] == 1:
                matrix_tetris[x + row][y + col] = 0
#0 left, 1 down, 2 right
#todo: implement different tetrominoes as input
def update_on_keypress(dir, x, y):
    # Redraw the current tetromino (shape sh_pick(1, 0)) shifted one cell in
    # the requested direction: 0 = left, 1 = down, 2 = right.
    # NOTE(review): the previous position is never cleared (the free_matrix
    # call is commented out), so moves leave a trail in matrix_tetris.
    #free_matrix(sh_pick(1, 0), x, y)
    if dir == 0:
        draw_sh_matrix(sh_pick(1, 0),x - 1, y)
    if dir == 1:
        draw_sh_matrix(sh_pick(1, 0),x, y + 1)
    if dir == 2:
        draw_sh_matrix(sh_pick(1, 0),x + 1, y)
# Current block position and state for the main loop.
x = 0
y = 0
free_block = True #block that is still moving
# Main game loop: poll events, move the active block on key presses, redraw.
while not game_cond:
    gameDisplay.fill(black)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_cond = True
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_q:
                game_cond = True
            #fix speed, different keypress and implement rotation
            if free_block == True:
                if event.key == pygame.K_DOWN:
                    # Move down one cell, redraw, and advance the tracked y.
                    update_on_keypress(1, x, y)
                    draw_Board(matrix_tetris)
                    y += 1
                # NOTE(review): left/right only redraw the board; the block
                # position is never updated for these keys — confirm intent.
                if event.key == pygame.K_RIGHT:
                    draw_Board(matrix_tetris)
                if event.key == pygame.K_LEFT:
                    draw_Board(matrix_tetris)
    pygame.display.update()
    clock.tick(1)
pygame.quit() # ends pygame
quit() # ends python
|
16,056 | 09c9e4aea40a3da12ca16385faa8dedd23e45098 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pylab import *
from matplotlib.colors import ListedColormap
from perceptron import Perceptron
def decision_region(X, y, classifier, resolution = 0.02):
    """Load the iris data, fit a perceptron, and plot its decision regions.

    NOTE(review): the X and y parameters are immediately shadowed by the CSV
    reload below, and a fresh Perceptron (ppn) is fitted while the passed-in
    `classifier` does the grid prediction — confirm the intended data flow.
    Requires network access to download the iris CSV.

    Bug fixed: the meshgrid range for the first axis previously ran from
    x1_min to x2_max; it now correctly uses x1_max.
    """
    df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header = None)
    print(df.tail)
    # Vars [1 : 100]
    y = df.iloc[0:100, 4].values
    y = np.where( y == 'Iris-setosa', -1, 1 )
    print(y)
    # [1:100], column(1:3)
    X = df.iloc[0:100, [0, 2]].values
    print(X)
    # Plot setosa
    plt_setosa = plt.scatter(X[:50, 0], X[:50, 1], color = 'red', marker = 'o', label = 'setosa')
    # Plot versicolor
    plt_versicolor = plt.scatter(X[50:100, 0], X[50:100, 1], color = 'blue', marker = 'o', label = 'versicolor')
    # Object generator && Fit training model
    ppn = Perceptron(eta = 0.1, n_iter = 10)
    ppn.fit(X, y)
    # Marker & Color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Plot decision region
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # Grid point (fixed: upper bound of the first axis is x1_max, not x2_max)
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    # Execute prediction of 1-dim array feature
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    # Exchange grid point with datasize
    Z = Z.reshape(xx1.shape)
    # Grid point
    plt.contourf(xx1, xx2, Z, alpha = 0.4, cmap = cmap)
    # Set axis
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Plot class sample
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x = X[y == cl, 0], y = X[y == cl, 1], \
            alpha = 0.8, c = cmap(idx), \
            marker = markers[idx], label = cl)
if __name__ == '__main__':
    ### set new window
    fig = plt.figure()
    # Add sub plot
    ax1 = fig.add_subplot(2,2,1)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc = 'upper left')
    ax2 = fig.add_subplot(2,2,2)
    # NOTE(review): `ppn` (and the X/y used below) are locals of
    # decision_region, not module-level names — this line raises NameError
    # at runtime; decision_region likely needs to return the fitted model.
    plt.plot(range(1, len(ppn.errors_) + 1 ), ppn.errors_, marker = 'o' )
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassfications')
    ax3 = fig.add_subplot(2,2,3)
    decision_region(X, y, classifier = ppn)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc = 'upper left')
    plt.show()
    # NOTE(review): savefig after show() usually writes an empty figure;
    # save before showing — confirm.
    plt.savefig("decision_region.png")
    # plt.savefig("/Users/usui/work/python/Machine_Learning/figures/decision_region.png")
|
16,057 | 915960b82cfea6ed7247cc36fdd8671610a9d9f5 | from api import main_api
base_url = "https://api.reliefweb.int/v1/reports?appname=apidoc"
def get_reports_for_country(iso_code):
    """Fetch the latest ReliefWeb report per OCHA product type for a country.

    Returns a list of dicts with 'title', 'thumbnail' and 'file' keys.
    """
    # https://api.reliefweb.int/v1/references/ocha-products
    ocha_products = (20471, 12347, 12348, 12354)
    query_template = (
        base_url + '&filter[operator]=AND'
                   '&filter[conditions][0][field]=primary_country.iso3'
                   '&filter[conditions][0][value]={0}'
                   '&filter[conditions][1][field]=ocha_product.id'
                   '&filter[conditions][1][value]={1}'
                   '&sort[]=score:desc'
                   '&sort[]=date:desc'
                   '&limit=1'
    )
    reports = []
    for product_id in ocha_products:
        matches = main_api.call_get(url=query_template.format(iso_code, product_id))['data']
        if not matches:
            continue
        fields = main_api.call_get(matches[0]['href'])['data'][0]['fields']
        reports.append({
            'title': fields['title'],
            'thumbnail': fields['file'][0]['preview']['url-large'],
            'file': fields['file'][0]['url'],
        })
    return reports
|
16,058 | a67b67e949ee14978148bab03d81f0b2ca1efaa9 | from django.shortcuts import render,redirect
from apps.Tienda.models import Categoria
from apps.Tienda.models import Producto
from apps.Tienda.models import Ventas
from django.views.generic import ListView
from apps.Tienda.forms import ProductoForm
from apps.Tienda.forms import CategoriaForm
from apps.Tienda.forms import Venta
from django.contrib import messages
# Create your views here.
def index(request):
    # Landing page of the shop.
    return render(request,'base/index.html')
# Ventas de Productos
def ventas(request):
    """Show the sales page listing every product."""
    productos = Producto.objects.all()
    return render(request, 'Tienda/ventas.html', {'productos': productos})
def ventasBsq(request):
    """Search products by exact name taken from the 'campo' query parameter."""
    nombre_buscado = request.GET.get('campo')
    coincidencias = Producto.objects.filter(nombre=nombre_buscado)
    return render(request, 'Tienda/ventasBsq.html', {'productos': coincidencias})
def resumenVentas(request,idProducto):
    """Handle the sale form for a single product.

    GET shows the form pre-filled with the product data; POST validates the
    requested quantity against stock, records the sale, decrements the
    inventory, and marks the product unavailable when stock hits zero.
    """
    producto = Producto.objects.get(id=idProducto)
    total = 0 * producto.costo  # initial total is always 0; kept for form init
    nombre = producto.nombre + " " + producto.descripcion
    costo = producto.costo
    if request.method == 'POST':
        form = Venta(request.POST, initial={'producto': nombre, 'cantidad': 0, 'total': total, 'precio': costo})
        if form.is_valid():
            resta = form.cleaned_data['cantidad']
            if resta > producto.numExistencias:
                # Fixed typo in the user-facing message: "sificiente" -> "suficiente".
                messages.error(request, " No hay suficiente producto en inventario para esa compra")
                return render(request, 'Tienda/resumenVenta.html', {'form': form})
            else:
                messages.success(request, "Venta exitosa")
                producto.numExistencias = producto.numExistencias - resta
                if producto.numExistencias == 0:
                    producto.disponible = False
                producto.save()
                form.save()
                return redirect('tienda:prd')
    else:
        form = Venta(initial={'producto': nombre, 'cantidad': 0, 'total': total, 'precio': costo})
    return render(request, 'Tienda/resumenVenta.html', {'form': form})
#Categorías
def categorias(request):
    """List every category."""
    return render(request, 'Tienda/cat.html', {'categorias': Categoria.objects.all()})
def nuevaCategoria(request):
    """Create a category: empty form on GET, persist on valid POST."""
    if request.method != 'POST':
        form = CategoriaForm()
    else:
        form = CategoriaForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('tienda:cat')
    return render(request, 'Tienda/FormCat.html', {'form': form})
def modificarCategoria(request,idCategoria):
    """Edit the category identified by idCategoria."""
    categoria = Categoria.objects.get(id=idCategoria)
    if request.method == "GET":
        form = CategoriaForm(instance=categoria)
    else:
        form = CategoriaForm(request.POST, instance=categoria)
        if form.is_valid():
            form.save()
            return redirect('tienda:cat')
    return render(request, 'Tienda/FormCat.html', {'form': form})
def eliminarCategoria(request,idCategoria):
    """Delete the category with the given id and return to the listing."""
    Categoria.objects.get(id=idCategoria).delete()
    return redirect('tienda:cat')
# Productos
def nuevoProducto(request):
    """Create a product: empty form on GET, persist on valid POST."""
    if request.method != 'POST':
        form = ProductoForm()
    else:
        form = ProductoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('tienda:prd')
    return render(request, 'Tienda/FormPrd.html', {'form': form})
def modificarProducto(request,idProducto):
    """Edit the product identified by idProducto."""
    producto = Producto.objects.get(id=idProducto)
    if request.method == "GET":
        form = ProductoForm(instance=producto)
    else:
        form = ProductoForm(request.POST, instance=producto)
        if form.is_valid():
            form.save()
            return redirect('tienda:prd')
    return render(request, 'Tienda/FormPrd.html', {'form': form})
def eliminarProducto(request,idProducto):
    """Delete the product with the given id and return to the listing."""
    Producto.objects.get(id=idProducto).delete()
    return redirect('tienda:prd')
class ProductoListView(ListView):
    # Class-based listing of all products.
    model = Producto
    # Redundant with `model` (same default queryset) — kept as-is.
    queryset=Producto.objects.all()
    template_name = "Tienda/productos.html"
|
16,059 | 30c48ffc95e10d5b50d667f185fff349c80de32d | /home/runner/.cache/pip/pool/0b/cd/ab/c0557d6742d41ea7191fdf6322937500d409fcabfe599c041b56d75c26 |
16,060 | cf5dcb4cbcdb7962f510ce4af9c459b761b4ef33 | import logging
import os
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import users
import jinja2
import webapp2
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
ROOT_PATH = os.path.dirname(__file__)
class Checkbook(db.Model):
    # Datastore model for a user's checkbook.
    author = db.UserProperty()    # owning account
    name = db.StringProperty()    # display name of the checkbook
    amount = db.FloatProperty()   # starting balance
    active = db.BooleanProperty() # whether the checkbook is in use
    time = db.DateProperty(auto_now_add = True)  # creation date
class Transaction(db.Model):
    # A single debit or credit entry recorded against a user's checkbook.
    author = db.UserProperty()                          # owning account
    date = db.DateTimeProperty(auto_now_add = True)     # precise timestamp
    dateDisplay = db.DateProperty(auto_now_add = True)  # date-only, for display
    debit_amount = db.FloatProperty()
    description = db.StringProperty()
    credit_amount = db.FloatProperty()
    total = db.FloatProperty()  # balance after this entry (set in post())
class Total(db.Model):
    # Snapshot of a checkbook's starting balance, written on every post().
    author = db.UserProperty()
    checkbook_total = db.FloatProperty()
def get_template(name):
    """Return the absolute path of template *name* under the app root."""
    return os.path.join(ROOT_PATH, name)
class BaseHandler(webapp2.RequestHandler):
    """Common handler: builds the login/logout template context and renders."""

    def set_user(self):
        """Populate self.user and return the base template context.

        Bug fixed: the context dict referenced an undefined name ``user``
        (NameError at runtime); it now uses ``self.user``.
        """
        self.user = users.get_current_user()
        if self.user:
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'
        else:
            # Not signed in: show a friendly placeholder and a login link.
            self.user = 'Friend'
            url = users.create_login_url(self.request.uri)
            url_linktext = 'Login'
        template_values = {
            'username': self.user,
            'url': url,
            'url_linktext': url_linktext,
        }
        return template_values

    def render_template(self, tempalte_name, ctx):
        # NOTE(review): the parameter name "tempalte_name" is misspelled but
        # kept for backward compatibility with any keyword callers.
        self.response.out.write(template.render(get_template(tempalte_name),
                                                ctx))
class MainPage(BaseHandler):
    # Renders the public landing page with the login/logout context.
    def get(self):
        ctx = self.set_user()
        self.render_template('checkbook_main.html', ctx)
class About(BaseHandler):
    # Renders the static about page with the login/logout context.
    def get(self):
        ctx = self.set_user()
        self.render_template('about.html', ctx)
class UserHandler(BaseHandler):
    """Shows a user's checkbook and records debit/credit transactions."""

    def _get_checkbook(self):
        # First checkbook owned by the current user, or None.
        if not self.user:
            return
        return Checkbook.all().filter('author', self.user).get()

    def get(self):
        """Render the user page with the checkbook and its running total.

        Bugs fixed: the original referenced undefined names ``book``,
        ``transaction`` and ``user`` (NameError at runtime); they now use
        ``checkbook``, ``transaction_query`` and ``self.user``.
        """
        ctx = self.set_user()
        active = False
        total = 0.00
        book_name = "No"
        transaction_query = []  # empty until a checkbook is found
        checkbook = self._get_checkbook()
        if checkbook:
            total = checkbook.amount
            transaction_query = Transaction.all().filter('author', self.user)
            # Running balance = starting amount + debits - credits.
            for tran in transaction_query:
                total = total + tran.debit_amount
                total = total - tran.credit_amount
            book_name = checkbook.name
            active = checkbook.active
        ctx.update({
            'active': active,
            'book_name': book_name,
            'total': total,
            'checkbook': checkbook,
            'transaction': transaction_query,
            'user': self.user,
        })
        self.render_template('Userpage.html', ctx)

    def post(self):
        """Create/refresh the user's checkbook and record one transaction."""
        submit_checkbook = self.request.get('checkbook')
        submit_debit = self.request.get('debit')
        submit_credit = self.request.get('credit')
        user = users.get_current_user()
        if not user or not submit_checkbook:
            self.redirect('/userpage')
            return
        to_save = []
        checkbook = Checkbook()
        checkbook.author = user
        checkbook.name = self.request.get('new_checkbook')
        # NOTE(review): an empty-string 'amount' submission makes float()
        # raise ValueError — confirm form-side validation covers this.
        checkbook.amount = float(self.request.get('amount', 0.0))
        checkbook.active = True
        total_value = checkbook.amount
        transaction = Transaction()
        transaction.author = user
        if submit_debit:
            transaction.debit_amount = float(
                self.request.get('debit_amount', 0.0))
            transaction.description = self.request.get('debit_tran_des')
            transaction.credit_amount = 0.0
            transaction.total = total_value + transaction.debit_amount
            to_save.append(transaction)
        elif submit_credit:
            transaction.credit_amount = float(
                self.request.get('credit_amount', 0.0))
            transaction.description = self.request.get('credit_tran_des', 0.0)
            transaction.debit_amount = 0.0
            transaction.total = total_value - transaction.credit_amount
            to_save.append(transaction)
        total = Total()
        total.author = user
        total.checkbook_total = total_value
        db.put([checkbook, total] + to_save)
        self.redirect('/userpage')
# WSGI routing table for the application.
app = webapp2.WSGIApplication([('/', MainPage),
                               ('/userpage', UserHandler),
                               ('/about', About)], debug = True)
|
16,061 | 26ddbe6177d35aac60169168ca81b1cc13ead72d | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-16 19:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Movie.title_sort to Movie.sorted_title.
    dependencies = [
        ('movies', '0008_movie_title_sort'),
    ]
    operations = [
        migrations.RenameField(
            model_name='movie',
            old_name='title_sort',
            new_name='sorted_title',
        ),
    ]
|
16,062 | 06e79743bff04ec5bf180bcfa8e77c063a62db71 | from itertools import combinations
N = input()
nth = len(N)-1
N = int(N)
for val in combinations() |
16,063 | 1ac038fdc3d48d498331cde0c840e88a70bf1860 | # -*- coding: utf-8 -*-
import telebot
import json
from os.path import exists
triggers = {}
tfile = "chatter_data.json"
separator = '||'
# Load persisted trigger->response pairs, creating the JSON store on first run.
if(exists(tfile)):
    with open(tfile) as f:
        # Bug fixed: the original re-opened tfile inside the with-block,
        # leaking a second, never-closed file handle.
        triggers = json.load(f)
        print("[Chatter] Loaded data.json file.")
else:
    print("[Chatter] Creating data.json file.")
    f = open(tfile, 'a')
    f.write("{}")
    f.close()
def trim(s):
    """Strip leading and trailing space characters from *s*.

    Fixes the original index-walking implementation, which raised IndexError
    for an empty or all-space string; behavior for every other input is
    unchanged (only the space character is stripped, as before).
    """
    return s.strip(' ')
def newTrigger(trigger, response):
    # Register a trigger/response pair (trigger keys are stored lowercased
    # and space-trimmed) and persist the whole mapping to the JSON store.
    trigger = trim(trigger)
    triggers[trigger.lower()] = trim(response)
    with open(tfile, "w") as f:
        json.dump(triggers, f)
@bot.message_handler(commands=['addreply'])
def add_reply_t(message):
    # Handle /addreply <trigger>||<response>: validate the input, store the
    # pair, and confirm to the chat.
    # NOTE(review): `bot`, `redisserver` and `language` are not defined in
    # this module — presumably this file is merged into a larger bot script;
    # confirm.  Also `triggers.has_key(tr)` below is Python 2 only
    # (`tr in triggers` for Python 3).
    userlang = redisserver.get("settings:user:language:" + str(message.from_user.id))
    if len(message.text.split()) < 2:
        bot.reply_to(message, language[userlang]["CHATTER_NEA_MSG"], parse_mode="Markdown")
        return
    cid = message.chat.id
    text = message.text.replace("/addreply ","",1)
    try:
        i = text.rindex(separator)
    except:
        bot.send_message(cid, language[userlang]["CHATTER_INCORRECT_MSG"])
        return
    tr = text.split(separator)[0]
    if len(tr) < 3:
        bot.reply_to(message, str(tr) + " Is too short!")
        return
    re = text.split(separator)[1]
    if triggers.has_key(tr):
        bot.reply_to(message, language[userlang]["CHATTER_ALREADYDEFINED_MSG"])
        return
    newTrigger(tr,re)
    bot.send_message(cid, language[userlang]["CHATTER_DONE_MSG"].format(tr, re), parse_mode="Markdown")
|
16,064 | 1ba0703aa45f7c58ec4831579fcf3ee4d11b3f8b | # This script copies over the native protein structure for each pdb id and
# randomly samples decoys for each native structure. These pdb files are saved
# in another folder called 3DRobot_subset.
# 0. Loading the relevant packages--------------------------------------------
import os
import numpy as np
import pandas as pd
import subprocess
import pathlib
import random
from random import sample
# 1. Setting different parameters----------------------------------------------
# Setting the file paths
data_3drobot_set = "data/3DRobot_set/"
data_3drobot_subset = "data/3DRobot_subset/"
# Setting the seed
random.seed(42)
# Setting the number of structures
num_structures = 10
# Setting the number of decoys
num_decoys = 40
# 2. Creating the subset data----------------------------------------------------
# Defining the subset of data
subdirs_subset = [
"data/3DRobot_set/1N8VA",
"data/3DRobot_set/1ZI8A",
"data/3DRobot_set/2HS1A",
"data/3DRobot_set/3CHBD",
"data/3DRobot_set/3NJNA",
"data/3DRobot_set/3WCQA",
"data/3DRobot_set/3WDCA",
"data/3DRobot_set/3LDCA",
"data/3DRobot_set/2XODA",
]
# Printing how many structures the script will run for
print(
"Sampling "
+ str(num_decoys)
+ " decoys for "
+ str(len(subdirs_subset))
+ " structures"
)
# Looping through all the sub directories
# NOTE(review): mkdir/cp via subprocess is Unix-only; os.makedirs and
# shutil.copy would be portable equivalents — confirm target platforms.
for subdir in subdirs_subset:
    # Extracting the name of the subdir
    subdir_name = os.path.basename(subdir)
    # Defining the native structure file path
    native_file_path = (
        data_3drobot_set + subdir_name + "/" + subdir_name + "_native.pdb"
    )
    # Creating the subfolder in the subset folder
    subprocess.run(["mkdir", data_3drobot_subset + subdir_name])
    # Copying this structure across to the subset folder
    subprocess.run(
        [
            "cp",
            native_file_path,
            data_3drobot_subset + subdir_name + "/" + subdir_name + "_native.pdb",
        ]
    )
    # Extracting all the file names of the decoy structures
    decoy_structures = [
        f
        for f in os.listdir(subdir)
        if os.path.isfile(os.path.join(subdir, f)) and "decoy" in f and subdir_name in f
    ]
    # Sampling decoy files (deterministic given the random.seed set above)
    sample_decoys = sample(decoy_structures, num_decoys)
    # Copying over these decoy files
    for decoy_structure in sample_decoys:
        # Copying this structure across to the subset folder
        subprocess.run(
            [
                "cp",
                data_3drobot_set + subdir_name + "/" + decoy_structure,
                data_3drobot_subset + subdir_name + "/" + decoy_structure,
            ]
        )
|
16,065 | f7648ce474bfaeb15b0825681721c9ab06d7c138 | # -*- coding: utf-8 -*-
from vablut.modules.ashton import *
from vablut.modules.tables import _indices
from random import randint
import pytest
def test_colrow():
    """The Ashton rules require a 9x9 board."""
    #ashton rules required 9x9 board
    assert col == 9,"Ashton rules: col must be 9 not %s"%col
    assert row == 9,"Ashton rules: row must be 9 not %s"%row
def test_camps():
    """There are 4 camps of 4 distinct cells, all valid flat board indices."""
    assert len(camps) == 4,"camps must contains 4 arrays not %s"%len(camps)
    for c in camps:
        assert len(c) == 4,"every camp must contins 4 elements[%s]"%c
        assert len(set(c)) == 4,"camps elements must be different from each other"
        assert len(np.bincount(c)) < col*row,"each element in every camps must be an index < %s"%(col*row)
def test_campsegments():
    """Every camp cell maps, via camp_segments, back to its own camp."""
    assert len(camp_segments) == 9*9,"camp_segments must be an index square with %s elements index"%(9*9)
    for c in camps:
        for ec in c:
            assert ec in camp_segments[ec],"camp:%s - element:%s must be in camp_segments[%s]:%s"%(c,ec,ec,camp_segments[ec])
            assert set(camp_segments[ec]) == set(c),"camp_segments[%s] should be %s instead of %s"%(ec,c,camp_segments[ec])
def test_throneel():
    """The throne sits at flat index 40 (the centre of a 9x9 board)."""
    assert throne_el == 40,"throne_el must be 40 not %s"%throne_el
def test_kingcapturesegments():
    """Validate king_capture_segments: empty at corners, 5-cell cross near the
    throne, 3-cell horizontal/vertical segments along edges and interior."""
    corners = {_indices[0][0]}
    corners.add(_indices[0][-1])
    corners.add(_indices[-1][0])
    corners.add(_indices[-1][-1])
    assert len(king_capture_segments) == 9*9,"king_capture_segments must be an index square with %s elements index"%(9*9)
    for c in corners:
        assert len(king_capture_segments[c]) == 0,"king_capture_segments[%s] must be empty because the King can not get to the corners"%c
    for i_kc in cross_center_segments[throne_el]:
        assert len(king_capture_segments[i_kc]) == 1,"the capture segments with king starteing index must be just one and not %s"%len(king_capture_segments[i_kc])
        assert len(set(king_capture_segments[i_kc][0])) == 5,"in the throne_el or neighborhood king_capture_segments[%s] must contains 5 elements"%i_kc
        assert set(cross_center_segments[i_kc]) == set(king_capture_segments[i_kc][0]),"king_capture_segments[%s] should be %s instead of %s"%(i_kc,cross_center_segments[i_kc],king_capture_segments[i_kc])
    horizontal_per = []
    horizontal_per.append(_indices[0][1:-1])
    horizontal_per.append(_indices[-1][1:-1])
    for hp in horizontal_per:
        for hpe in hp:
            assert (king_capture_segments[hpe] == np.asarray([hpe-1,hpe,hpe+1])).all(),"king_capture_segments[%s] should be %s instead of %s"%(hpe,np.asarray([hpe-1,hpe,hpe+1]),king_capture_segments[hpe])
    vertical_per = []
    vertical_per.append(_indices.transpose()[0][1:-1])
    vertical_per.append(_indices.transpose()[-1][1:-1])
    for vp in vertical_per:
        for vpe in vp:
            assert (king_capture_segments[vpe] == np.asarray([vpe-col,vpe,vpe+col])).all(),"king_capture_segments[%s] should be %s instead of %s"%(vpe,np.asarray([vpe-col,vpe,vpe+col]),king_capture_segments[vpe])
    for ins in _indices[1:-1].transpose()[1:-1].transpose().flatten():
        if ins not in cross_center_segments[throne_el]:
            assert [ins-1,ins,ins+1] in king_capture_segments[ins].tolist(),"king_capture_segments[%s]:%s should contain %s"%(ins,king_capture_segments[ins],np.asarray([ins-1,ins,ins+1]))
            assert [ins-col,ins,ins+col] in king_capture_segments[ins].tolist(),"king_capture_segments[%s]:%s should contain %s"%(ins,king_capture_segments[ins],np.asarray([ins-col,ins,ins+col]))
def test_winningel():
    """Winning (escape) cells are the edge cells nearest the corners: the
    first two and last two entries of each edge slice, 16 cells in total."""
    per = []
    per.append(_indices[0][1:-1])
    per.append(_indices[-1][1:-1])
    per.append(_indices.transpose()[0][1:-1])
    per.append(_indices.transpose()[-1][1:-1])
    assert (len(winning_el) == ((col-2)*2 + (row-2)*2)-12),"winning_el must contain %s elements instead of %s"%(((col-2)*2 + (row-2)*2)-12,len(winning_el))
    for p in per:
        assert p[0] in winning_el,"%s should be in winning_el:%s"%(p[0],winning_el)
        assert p[1] in winning_el,"%s should be in winning_el:%s"%(p[1],winning_el)
        assert p[-1] in winning_el,"%s should be in winning_el:%s"%(p[-1],winning_el)
        assert p[-2] in winning_el,"%s should be in winning_el:%s"%(p[-2],winning_el)
def test_prohibitedsegments():
    """Camp cells are prohibited destinations for Black from outside a camp,
    but not from within the camp itself."""
    #testing black prohibited elements
    for c in camps.flatten():
        assert c in prohibited_segments[PLAYER1][0],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,0)
        assert c in prohibited_segments[PLAYER1][2],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,2)
        assert c in prohibited_segments[PLAYER1][7],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,7)
        assert c in prohibited_segments[PLAYER1][11],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,11)
        assert c in prohibited_segments[PLAYER1][12],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,12)
        assert c in prohibited_segments[PLAYER1][16],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,16)
        assert c in prohibited_segments[PLAYER1][31],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,31)
        assert c in prohibited_segments[PLAYER1][39],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,39)
        assert c in prohibited_segments[PLAYER1][41],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,41)
        assert c in prohibited_segments[PLAYER1][49],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,49)
        assert c in prohibited_segments[PLAYER1][58],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,58)
        assert c in prohibited_segments[PLAYER1][68],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,68)
        assert c in prohibited_segments[PLAYER1][69],"the camp element %s should be prohibited for the Black Player moving FROM:%s"%(c,69)
    for i in _indices.flatten():
        for cs in camp_segments[i]:
            assert cs not in prohibited_segments[PLAYER1][i],"the camp element %s should not be prohibited for the Black Player moving FROM:%s"%(cs,i)
    # NOTE(review): the original trailing comment repeated "black prohibited
    # elements"; the loop above actually checks cells that are NOT
    # prohibited from within their own camp — confirm intended wording.
def test_capturingdic():
    """The throne and camp cells at positions 0, 2 and 3 of each camp must
    appear in every capturing_dic entry (they always count as capture walls)."""
    for i,cd in capturing_dic.items():
        assert throne_el in cd,"throne element:%s should count always in capturing. It must be in %s"%(throne_el,cd)
        for c in camps:
            assert c[0] in cd,"camp element:%s should count always in capturing. It must be in %s"%(c[0],cd)
            assert c[2] in cd,"camp element:%s should count always in capturing. It must be in %s"%(c[2],cd)
            assert c[3] in cd,"camp element:%s should count always in capturing. It must be in %s"%(c[3],cd)
# Removed stray module-level invocation of test_kingcapturesegments():
# pytest discovers and runs the test functions itself, and the bare call
# executed the test (and could raise) at import time.
16,066 | 1d949db0e3a278385661000273714abf339798bd | from .wordnet_mapper import WordNetMapper
from . import attribute_mapper
from . import relationship_mapper
|
16,067 | 2b1d9d040d0cd1ef5478ad23fdd5804a87b78007 | import numpy as np
import pandas as pd
def datapoints_f(path):
    """Load whitespace-delimited rows of (x, y, result) from *path*.

    Returns (datapoints, result_vector): a (n, 2) array of the x/y columns
    and a length-n array of the result column.

    Bug fixed: DataFrame.as_matrix() was removed in pandas 1.0; the modern
    equivalent to_numpy() is used instead.
    """
    df = pd.read_csv(path, delim_whitespace=True, names=['x', 'y', 'result'])
    data = df.to_numpy()
    datapoints = data[:, 0:2]
    result_vector = data[:, 2]
    return datapoints, result_vector
###########################################################
def input_matrix_f(datapoints):
    """Map 2-D points into the 8-dimensional nonlinear feature space.

    Per-point features: [1, x1, x2, x1^2, x2^2, x1*x2, |x1-x2|, |x1+x2|].
    Rows of the returned array are the transformed points.
    """
    x1 = datapoints.T[0]
    x2 = datapoints.T[1]
    features = np.column_stack((
        np.ones(len(x1)),
        x1,
        x2,
        x1 ** 2,
        x2 ** 2,
        x1 * x2,
        np.abs(x1 - x2),
        np.abs(x1 + x2),
    ))
    return features
#################################################################
def LinReg_weight_vector_f(input_matrix, result_vector):
    """Solve the least-squares normal equations: w = (X^T X)^-1 X^T y.

    Rows of *input_matrix* are samples, columns are features.
    """
    gram = np.dot(input_matrix.T, input_matrix)
    pseudo_inverse = np.dot(np.linalg.inv(gram), input_matrix.T)
    return np.dot(pseudo_inverse, result_vector)
################################################################
def squared_error_f(weight_vector, datapoints, result_vector):
    """Mean squared error of the linear predictions against result_vector."""
    residuals = np.subtract(np.dot(datapoints, weight_vector), result_vector)
    residual_norm = np.linalg.norm(residuals)
    return (residual_norm ** 2) / len(result_vector)
|
16,068 | 29e67fdc269c8c58f19021e4f429c549e473cf11 | # _*_ encoding: utf-8 _*_
'''
Created on 2016年3月16日
@author: carm
'''
from pylab import plot, show, figure, subplot, hist, xlim
class DataDrawing(object):
    """Plotting helpers for the iris dataset (scatter plot and histograms)."""

    def draw_two_dimension_spot(self,data,target):
        """
        Scatter-plot the 1st and 3rd features (sepal length vs petal length).

        150 points are shown, colored by class:
        blue  = Iris setosa,
        red   = Iris versicolor,
        green = Iris virginica.
        """
        plot(data[target=='setosa',0],data[target=='setosa',2],'bo')
        plot(data[target=='versicolor',0],data[target=='versicolor',2],'ro')
        plot(data[target=='virginica',0],data[target=='virginica',2],'go')
        show()

    def draw_histogram(self, data, target):
        """
        Plot histograms of the first feature (sepal length), one subplot per
        class plus a combined histogram, all sharing the same x-range.
        """
        xmin = min(data[:,0])
        xmax = max(data[:,0])
        figure()
        subplot(411) # distribution of the setosa class (1st, on the top)
        hist(data[target=='setosa',0],color='b',alpha=.7)
        xlim(xmin,xmax)
        subplot(412) # distribution of the versicolor class (2nd)
        hist(data[target=='versicolor',0],color='r',alpha=.7)
        xlim(xmin,xmax)
        subplot(413) # distribution of the virginica class (3rd)
        hist(data[target=='virginica',0],color='g',alpha=.7)
        xlim(xmin,xmax)
        subplot(414) # global histogram (4th, on the bottom)
        hist(data[:,0],color='y',alpha=.7)
        xlim(xmin,xmax)
        show()
16,069 | 6ad0d15444d9a7ac297b059598da33e8aada3cc0 | ## Abstract superclass for all formatters. Each formatter defines a list of
# relevant file extensions and a run() method for doing the actual work.
class Formatter:
    """Abstract superclass for all formatters.

    Each formatter declares the file extensions it is relevant for and
    implements run() to do the actual checking/formatting work.
    """

    def __init__(self):
        self._file_extensions = []
        self._config_file_name = None

    def add_args(self, argparser):
        """Hook for adding formatter-specific arguments to the given
        argparse.ArgumentParser; the default adds none."""
        pass

    @property
    def config_file_name(self):
        """Name of the config file used by this formatter, assumed to live
        at the root of the project."""
        return self._config_file_name

    def run(self, args, filepath, check=False, calc_diff=False):
        """Run the formatter on *filepath*.

        args      -- the arguments parsed by the ArgumentParser
        check     -- if True, run in checkstyle mode without modifying the file
        calc_diff -- if True, the second return value is the patch needed to
                     bring the file into compliance
        Returns a (noncompliant, patch) tuple.  Must be overridden.
        """
        raise NotImplementedError("Subclass of Formatter must override run()")

    def get_command(self):
        """Return the command this formatter should invoke, or None when the
        required tool is not available (default)."""
        return None

    @property
    def file_extensions(self):
        """File extensions (including the dot) this formatter applies to."""
        return self._file_extensions

    @file_extensions.setter
    def file_extensions(self, value):
        self._file_extensions = value
16,070 | 01d59de2b3b90eac0eabe882c4ffc8b63e2e7521 | def remove_smallest(numbers):
return [v for i, v in enumerate(numbers) if i != numbers.index(min(numbers))]
|
16,071 | b0536f628c48254d64e428401bd5a13ca1b07c30 | class KeyValueStorage:
def __init__(self, path):
with open(path, "r") as file:
data = file.read().splitlines()
for line in data:
key, value = line.split("=")
if not key.isidentifier():
raise ValueError("Wrong key!")
if value.isdigit():
value = int(value)
if key not in self.__dict__:
setattr(self, key, value)
def __getitem__(self, key):
return self.__dict__.get(key, None)
if __name__ == "__main__":
storage = KeyValueStorage("task1.txt")
print(storage["name"])
print(storage.song)
print(storage.power)
|
16,072 | 7469061842a49c2b06a0567a9de0c11c02e8b456 | #!/usr/bin/env python3
from sys import argv
a, b, c, = int(argv[1]), int(argv[2]), int(argv[3])
if (a >= b + c) or (b >= a + c) or (c >= b + a):
print("False")
else:
print("True")
|
16,073 | 63662220c3ac36cde6ddab964391e327a5be3ec8 | import json
from bson import json_util
from bson.json_util import dumps
from pymongo import MongoClient
connection = MongoClient('localhost', 27017)
database = connection['market']
collection = database['stocks']
def findDocument(query):
    # Count the documents matching *query* in the stocks collection and print
    # a framed report.
    # NOTE(review): cursor.count() is deprecated/removed in modern PyMongo;
    # collection.count_documents(query) is the replacement — confirm driver
    # version.
    try:
        line = "--" * 45
        result=collection.find(query).count()
        print(line +"\n")
        print("Value of Documents: "+str(result)+" Documents")
        print(line +"\n")
    except ValidationError as ve:
        # NOTE(review): ValidationError and abort are not defined/imported in
        # this module — reaching this handler raises NameError instead of
        # handling the error; confirm the intended exception type.
        abort(400, str(ve))
def main():
    """Prompt for two bounds and report matching stock documents.

    NOTE(review): ``raw_input`` is Python 2 only -- this script will raise
    ``NameError`` under Python 3.  Also the prompt labelled "Highest" feeds
    the ``$gt`` (lower) bound and "Lowest" feeds ``$lt`` -- confirm intent.
    """
    line = "--" * 45
    print("\t\t Enter Two Numerical Values Down Below \n");
    print(line+"\n")
    high = float(raw_input("Enter Highest Values# "))
    low = float(raw_input("Enter Lowest Value# "))
    # Range query on the 50-day simple moving average field.
    myDocument = { "50-Day Simple Moving Average" : {"$gt":high,"$lt":low}}
    findDocument(myDocument)
main()
16,074 | 4ca26eaf039f808879dc44ea500ca94051d4449e | import serial
import time
ser = serial.Serial('/dev/ttyACM0', 9600)
time.sleep(2)
if ser.isOpen():
print "Port Open"
print ser.write('s'.encode())
ser.open()
ser.close()
|
16,075 | 8b415429538cbcf3d4dba60ed4d03cdd29ba44bc | import math
#The following function converts a decimal number into a 'bit'long binary number
def bin(num, bit):
    """Return ``num`` as a ``bit``-wide binary string, most significant bit
    first (e.g. ``bin(5, 4) == "0101"``).

    Note: the name shadows the built-in ``bin``; kept unchanged because the
    rest of this script calls it.
    """
    digits = ["0"] * bit
    if num == 0:
        return "".join(digits)
    i = 0
    while num > 0:
        digits[i] = str(num % 2)
        # BUG FIX: the original used ``num = num / 2``, which under Python 3
        # produces floats and never reaches exactly 0 via integer steps;
        # floor division is correct (and identical under Python 2).
        num //= 2
        i = i + 1
    # Remainders were collected least-significant first; reverse for display.
    return ("".join(digits))[::-1]
#The following function starts the QM algorithm, and bifurcates the minterms
def start(minterm, minterm_number, dontcare, dontcare_number, n, bit):
    """Bucket binary terms by their number of 1-bits (stage 1 of QM).

    Returns ``bit + 1`` groups: group ``i`` contains every term with exactly
    ``i`` ones.  ``minterm`` is extended in place with the don't-care terms,
    as in the original.  The other parameters are unused but kept for the
    existing call site.
    """
    minterm.extend(dontcare)
    return [[term for term in minterm if term.count('1') == ones]
            for ones in range(bit + 1)]
#The following function, checks the form of 2 highly similar functions
def corelation(s1, s2, bit):
    """Return True iff ``s1`` and ``s2`` agree in exactly ``bit - 1`` of
    their first ``bit`` positions (i.e. differ in a single bit).
    """
    matches = sum(1 for i in range(bit) if s1[i] == s2[i])
    return matches == bit - 1
#The following function converts into the reduced form
def bind(s1, s2, bit):
    """Merge two single-bit-different terms: equal positions are kept,
    the differing position becomes 'x' (QM combination step).
    """
    return "".join(s1[i] if s1[i] == s2[i] else "x" for i in range(bit))
#The following function removes the redundant bit patterns
def redundant(stage2, bit):
    """Attempt to drop duplicate terms inside each group of ``stage2``.

    NOTE(review): this removes elements from ``entry`` while indexing it,
    which skips elements and can raise IndexError; also ``j`` is never reset
    for each new ``i``, so only a shrinking window is compared.  Left
    byte-identical because the caller prints whatever this produces.
    ``bit`` is unused.
    """
    for entry in stage2:
        i = 0
        j = 0
        while(i<len(entry)):
            while(j!=i):
                if(entry[i]==entry[j]):
                    entry.remove(entry[i])
                j = j + 1
            i = i + 1
    return stage2
#The following function gets the decimal value of the binary coded number
def dec(s1, bit):
    """Interpret the first ``bit`` characters of ``s1`` as an unsigned
    binary number (MSB first) and return its integer value.
    """
    total = 0
    for pos in range(bit):
        if s1[pos] == '1':
            total += 2 ** (bit - 1 - pos)
    return total
# The following function expands the required 'x' form into the minterms, for reverse analysis necessary
def simplify(s1,bit):
    """Recursively expand a term containing 'x' wildcards.

    NOTE(review): the recursion appends into ``listing`` but the early
    ``return s1`` means the expansion is largely discarded; the trailing
    loop (which also ignores ``entry`` and duplicates ``listing`` into
    ``doc``) appears unreachable for 'x'-free inputs.  Reconstructed from
    an indentation-stripped source -- verify the intended nesting before
    relying on this.
    """
    i = 0
    listing = []
    count = 0
    doc = []
    while(i<bit and (s1.count('x')!=0)):
        if(s1[i]=='x'):
            # print(s1[0:i-1]+"1"+s1[i:])
            # print(s1[0:i-1]+"0"+s1[i:])
            listing.append(simplify(s1[0:i]+"1"+s1[i+1:],bit))
            listing.append(simplify(s1[0:i]+"0"+s1[i+1:],bit))
        i = i + 1
    if (s1.count('x')==0):
        listing.append(s1)
        # print(listing)
        return s1
    # NOTE(review): ``subentry`` iterates ``listing`` (not ``entry``) -- this
    # squares the list; presumably a typo in the original.
    for entry in listing:
        for subentry in listing:
            doc.append(subentry)
    return doc[0:2]
def main():
print("Specify the number of variables in the map")
n = input()
minterms = []
dontcare = []
minterm = []
i = 1
k = 0
bit = 0
stage1 = []
print("Enter number of minterms")
minterm_number = input()
print("Enter number of don't care")
dontcare_number = input()
while(i<=minterm_number):
k = input()
if k>bit:
bit = k
minterms.append(k)
i=i+1
bit = int(math.log(bit*1.0,2)+1)
print("No. of bits")
print(bit)
i = 0
while(i<minterm_number):
minterm.append(bin(minterms[i], bit))
i = i + 1
print(minterm)
i = 0
while(i<dontcare_number):
k = input()
dontcare.append(bin(k,bit))
i=i+1
i = 1
print(dontcare)
stage1 = start(minterm, minterm_number, dontcare, dontcare_number, n, bit)
i = 0
j = 1
c_count = 0
overall = 0
stage2 = []
temp = []
unlisted = []
flag = 0
temp = []
temp1 = []
i = 0
j = 0
temp = []
while(flag==0):
i = 0
j = 1
overall = 0
temp = []
kad = 0
subentry_count = 0
temp1 = []
while(i<len(stage1)-1):
temp = []
c_count = 0
for entry in stage1[i]:
subentry_count = 0
for subentry in stage1[j]:
if corelation(entry,subentry, bit)==True:
temp.append(bind(entry, subentry, bit))
temp1.append(entry)
temp1.append(subentry)
overall = overall + 1
kad = 0
stage2.append(temp)
j = j + 1
i = i + 1
if overall==0:
flag = 1
disc = []
for entry in stage1:
for subentry in entry:
disc.append(subentry)
#print("disc is")
#print(disc)
unlisted = list(set(temp1) - set(disc))
print("Unlisted Set")
print(unlisted)
print("Here we end")
print(redundant(stage1, bit))
else:
disc = []
for entry in stage1:
for subentry in entry:
disc.append(subentry)
#print("disc is")
#print(disc)
unlisted = list(set(temp1) - set(disc))
print("Unlisted Set")
print(unlisted)
stage1 = []
stage1 = stage2
stage2 = []
print("Next Stage is")
print(stage1)
print(stage1)
temp = []
print("unlisted is")
print(unlisted)
jadu2 = list(set(unlisted)-set(disc))
print("new unlisted is")
print(jadu2)
prime = []
prep = []
for entry in stage1:
for subentry in entry:
temp = []
prep = []
#print("entry")
#print(subentry)
prep = simplify(subentry, bit)
#print("prep")
#print(prep)
for x in prep:
for y in x:
temp.append(y)
prime.append(temp)
if jadu2:
prime.append(unlisted)
print(prime)
i = 0
j = 0
prime1 = prime
while(i<len(prime)):
print(prime[i])
j = 0
while(j<len(prime[i])):
prime1[i][j] = dec(prime[i][j], bit)
print(prime1[i][j])
j = j + 1
i = i + 1
print(prime1)
i = 0
j = 0
k = 0
count = []
alpha = []
stage_alpha = []
for entry in stage1:
for subentry in entry:
stage_alpha.append(subentry)
#Stage_alpha, is the combination of the final prime implicants (non-redundant)
print("stage_alpha is")
print(stage_alpha)
dup = stage_alpha
flag = 0
essential = []
while(flag==0):
count = []
alpha = []
while(i<len(minterms)):
count.append(0)
alpha.append(0)
i = i +1
i = 0
print(minterms)
# Following is the count-alpha computation
while(i<len(minterms)):
j =0
while(j<len(prime1)):
k = 0
while(k<len(prime1[j])):
#print(prime1[j][k])
#print(minterm[i])
if(prime1[j][k]==minterms[i]):
count[i] = count[i] + 1
alpha[i] = j
k = k + 1
j = j + 1
i = i + 1
print(count)
#count is the number of occurences
print(alpha)
#alpha is the index of occurence
print("prime1")
#prime1 is the listed set of minterms supplied by every prime implicant
print(prime1)
if(count.count(1)==0):
flag = 1
break
i = 0
temp = []
while(i<len(count)):
if(count[i]==1):
print(i)
print("alpha[i]")
print(alpha[i])
print(prime1[alpha[i]])
temp.append(alpha[i])
count[i] = 0
i = i + 1
temp1 = list(set(temp))
#print("temp")
#print(temp1)
for entry in temp1:
#print("entry")
#print(entry)
essential.append(dup[entry])
prime1[entry] = -1
dup[entry] = -1
i = 0
while(i<prime1.count(-1)):
prime1.remove(-1)
dup.remove(-1)
# print("new counts")
# print(count)
# print("essential - presenting")
# print essential
# print("prime1")
# print(prime1)
# print("end essential")
# print(essential)
#Do the essential prime implicants part.
|
16,076 | 7fb9f3b635ca4ba187ae55be9447b3c129c448c0 | from urllib.parse import urlparse
from twisted.internet import reactor
from twisted.names.client import createResolver
from scrapy import Spider, Request
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from tests.mockserver import MockServer, MockDNSServer
class LocalhostSpider(Spider):
name = "localhost_spider"
def start_requests(self):
yield Request(self.url)
def parse(self, response):
netloc = urlparse(response.url).netloc
self.logger.info("Host: %s" % netloc.split(":")[0])
self.logger.info("Type: %s" % type(response.ip_address))
self.logger.info("IP address: %s" % response.ip_address)
if __name__ == "__main__":
with MockServer() as mock_http_server, MockDNSServer() as mock_dns_server:
port = urlparse(mock_http_server.http_address).port
url = "http://not.a.real.domain:{port}/echo".format(port=port)
servers = [(mock_dns_server.host, mock_dns_server.port)]
reactor.installResolver(createResolver(servers=servers))
configure_logging()
runner = CrawlerRunner()
d = runner.crawl(LocalhostSpider, url=url)
d.addBoth(lambda _: reactor.stop())
reactor.run()
|
16,077 | c207416c5b0708b1198faa1934baa29c2ea82a17 | import random
computer_wins = 0
player_wins = 0
while True:
print("")
user_choice = input("Choose Rock, Paper or Scissors : ")
user_choice = user_choice.lower()
moves = ["rock","paper","scissors"]
comp_choice = random.choice(moves)
print("")
if user_choice == "rock":
if comp_choice == "rock":
print("You chose rock. The computer chose rock. You tied.")
elif comp_choice == "paper":
print("You chose rock. The computer chose paper. You lose.")
computer_wins = computer_wins + 1
elif comp_choice == "scissors":
print("You chose rock. The computer chose scissors. You win.")
player_wins = player_wins + 1
elif user_choice == "paper":
if comp_choice == "rock":
print("You chose paper. The computer chose rock. You win.")
player_wins = player_wins + 1
elif comp_choice == "paper":
print("You chose paper. The computer chose paper. You tied.")
elif comp_choice == "scissors":
print("You chose paper. The computer chose scissors. You lose.")
computer_wins = computer_wins + 1
elif user_choice == "scissors":
if comp_choice == "rock":
print("You chose scissors. The computer chose rock. You lose.")
computer_wins = computer_wins + 1
elif comp_choice == "paper":
print("You chose scissors. The computer chose paper. You win.")
player_wins = player_wins + 1
elif comp_choice == "scissors":
print("You chose scissors. The computer chose scissors. You tied.")
print("")
print("Player wins: " + str(player_wins))
print("Computer wins: " + str(computer_wins))
print("")
user_choice = input("Do you want to play again? (y/n) : ")
if user_choice in ["Y", "y", "yes", "Yes"]:
pass
elif user_choice in ["N", "n", "no", "No"]:
break
else:
break
|
16,078 | 2148b883a656e280e7842465ee4df90fb0b7ef26 | from django.conf.urls import url, include
from django.urls import path
from rest_framework.routers import SimpleRouter
from chat import views
from chat.views import ChatRoomViewSet
from chat.views import ChatMessageViewSet
router = SimpleRouter()
router.register('chatroom', ChatRoomViewSet)
router.register('chat', ChatMessageViewSet)
urlpatterns = [
path('', include(router.urls))
# url(r'^chat/$', views.index, name='index'),
# url(r'^chat/(?P<room_name>[^/]+)/$', views.room, name='room'),
]
|
16,079 | 1f7d678a3b1003cb19ddf1b95fd159364fb847d4 | '''
字符串
'''
s="hello"
# print(len(s))
# print(s[1])
# for i in range(0,len(s)):
# print(s[i]);
# print(s[7])
# print(s[4])
# print(s[-1])
#字符串切片
# line="zhangsan,20"
# name=line[0:8]
# print(name)
# # age=line[9:]
# age=line[9:11]
# print(age)
s="abcde"
print(s[0::2])
|
16,080 | 58135123b7f23c946749e6c7369f5ca4d0c39dfa |
# if conditon for boolean
check = True
if check:
print("true block")
# if else
check = False
if check:
print("True block")
else:
print("False block")
# if condition
number = 5
if number == 5:
print("number is 5")
'''
When we need to check if its a number/string , no need to be check if its exactly equal
Truthy --> Any number except 0 and any string value
Falsy --> 0 and No string
'''
number = 0
if number:
print("Truthy block for number")
number = 0
if not number:
print("Falsy block for 0 ")
name = "Vinay"
if name:
print("Truthy block for string")
# And or OR
number = 5
name = "Vinay"
if number ==5 and name == "Vinay":
print("And Block")
'''
Ternary
condition_if_true if condition else condition_if_false
(if_test_is_false, if_test_is_true)[test]
'''
a=3
b=4
print("Bigger") if a>b else print("Smaller")
size = True
personality = ("Big", "Small")[size]
print("The cat is", personality)
|
16,081 | c6a47dc23e94afe8b8a7156d41018b786c75fbc8 | import os
import luigi
import pptx
from typing import List
from luigi.contrib.external_program import ExternalProgramTask
from datetime import datetime
from dateutil import tz
class PrintDate(luigi.Task):
pptx_filename: str = luigi.Parameter()
workdir: str = luigi.Parameter()
def requires(self):
return None
def output(self):
filename = self.pptx_filename.replace('.pptx', '_wdate.pptx')
return luigi.LocalTarget(os.path.join(self.workdir, filename))
def run(self):
from_zone = tz.gettz('UTC')
to_zone = tz.tzlocal()
# Convert utc time to local
build_time = datetime.utcnow()
build_time = build_time.replace(tzinfo=from_zone)
build_time = build_time.astimezone(to_zone)
build_timestamp: str = build_time.strftime('%d, %b %Y %H:%M:%S %p')
# Replace the placeholder with the timestamp
slide: int = int(self.pptx_filename.split('.pptx')[0].split('_')[-1])
pst: pptx.Presentation = pptx.Presentation(os.path.join(self.workdir, self.pptx_filename))
first_slide: pptx.slide.Slide = pst.slides[slide]
shapes: List[pptx.shapes.base] = first_slide.shapes
paragraphs = [shape.text_frame for shape in shapes if shape.has_text_frame]
for paragraph in paragraphs:
paragraph.text = paragraph.text.replace('[date]', build_timestamp)
paragraph.text = paragraph.text.replace('[title]', 'Pipelines with Luigi')
paragraph.text = paragraph.text.replace('[author]', 'Alejandro Rodríguez Díaz')
pst.save(self.output().path)
class ExtraProcessing(luigi.Task):
pptx_filename: str = luigi.Parameter()
workdir: str = luigi.Parameter()
def requires(self):
return PrintDate(self.pptx_filename, self.workdir)
def output(self):
filename = self.pptx_filename.replace('.pptx', '_processed.pptx')
return luigi.LocalTarget(os.path.join(self.workdir, filename))
def run(self):
pst: pptx.Presentation = pptx.Presentation(self.input().path)
pst.save(self.output().path)
class Pptx2Pdf(ExternalProgramTask):
    """Convert one processed ``.pptx`` slide file to PDF using LibreOffice
    (``soffice``) inside a Docker container.
    """

    pptx_filename: str = luigi.Parameter()
    workdir: str = luigi.Parameter()

    def requires(self):
        return ExtraProcessing(self.pptx_filename, self.workdir)

    def output(self):
        # The PDF sits next to the processed pptx in the work directory.
        filename = self.pptx_filename.replace('.pptx', '_processed.pptx')
        pdf_filename = filename.replace('.pptx', '.pdf')
        return luigi.LocalTarget(os.path.join(self.workdir, pdf_filename))

    def program_args(self):
        filename = self.pptx_filename.replace('.pptx', '_processed.pptx')
        pdf_filename = filename.replace('.pptx', '.pdf')
        # BUG FIX: the shell command had lost its input filename (a corrupted
        # "(unknown)" placeholder); soffice must be pointed at the processed
        # pptx mounted under /data.
        return [
            "docker",
            "run",
            "--rm",
            "-v",
            f"{self.workdir}:/data",
            "seguins/soffice",
            "bash",
            "-c",
            "soffice --headless --convert-to pdf:impress_pdf_Export /data/" + \
            f"{filename} && cp {pdf_filename} /data"
        ]
class MergeSlides(ExternalProgramTask):
pptx_filename: str = luigi.Parameter()
workdir: str = luigi.Parameter()
def requires(self):
target_from_index = lambda i: self.pptx_filename.replace('.pptx', f'_raw_{i}.pptx')
pst: pptx.Presentation = pptx.Presentation(os.path.join(self.workdir, self.pptx_filename))
for i in range(len(pst.slides)):
yield Pptx2Pdf(workdir=self.workdir, pptx_filename=target_from_index(i))
def output(self):
filename = self.pptx_filename.replace('.pptx', f'.pdf')
return luigi.LocalTarget(os.path.join(self.workdir, filename))
def program_args(self):
slides: List[str] = list(f.path for f in self.input())
args: List[str] = ['pdfunite']
args.extend(slides)
args.append(self.output().path)
return args
class ExtractSlides(luigi.Task):
pptx_filename: str = luigi.Parameter()
workdir: str = luigi.Parameter()
def requires(self):
return None
def output(self):
target_from_index = lambda i: luigi.LocalTarget( \
os.path.join( \
self.workdir,
self.pptx_filename.replace('.pptx', f'_raw_{i}.pptx')))
pst: pptx.Presentation = pptx.Presentation(os.path.join(self.workdir, self.pptx_filename))
return {i:target_from_index(i) for i in range(len(pst.slides))}
def run(self):
pst: pptx.Presentation = pptx.Presentation(os.path.join(self.workdir, self.pptx_filename))
for slide in pst.slides:
slide._element.set('show', '0')
for i in range(len(pst.slides)):
pst.slides[i]._element.set('show', '1')
filename = self.pptx_filename.replace('.pptx', f'_raw_{i}.pptx')
pst.save(os.path.join(self.workdir, filename))
pst.slides[i]._element.set('show', '0')
class Pipeline(luigi.Task):
pptx_filename: str = luigi.Parameter()
workdir: str = luigi.Parameter()
def requires(self):
return ExtractSlides(workdir=self.workdir, pptx_filename=self.pptx_filename)
def output(self):
filename = self.pptx_filename.replace('.pptx', f'.pdf')
return luigi.LocalTarget(os.path.join(self.workdir, filename))
def run(self):
yield MergeSlides(workdir=self.workdir, pptx_filename=self.pptx_filename)
if __name__ == '__main__':
luigi.build([Pipeline(workdir=os.path.abspath('./slides'), pptx_filename='base.pptx')], workers=6)
|
16,082 | 48e52fd64bf177d4431d12c26ba357047d918887 | import sys, random, os
class Monster(object):
    """A combatant in the battle loop: hit points, attack, armor, a special
    (poisoning) attack and per-turn regeneration.

    ``exp``, ``level`` and ``decay`` start as class attributes; the in-place
    updates below create per-instance copies on first use.
    """

    exp = 0      # accumulated experience points
    level = 1    # current level; every 4th level triggers evolve()
    decay = 0    # poison damage suffered each turn (see effects())

    def __init__(self, name, atk, arm, hp, spec, nextLevel, regen):
        """Create a monster.

        :param name: display name.
        :param atk: flat bonus added to normal attack damage.
        :param arm: armor divisor applied to incoming damage.
        :param hp: starting (and maximum) hit points.
        :param spec: special-attack bonus; also drives poison strength.
        :param nextLevel: experience required for the next level-up.
        :param regen: HP regenerated each turn.
        """
        self.name = name
        self.HP = hp
        self.atk = atk
        # BUG FIX: the original assigned the undefined name ``armor`` (which
        # silently resolved to the module-level global), discarding the
        # ``arm`` argument the caller passed.
        self.armor = arm
        self.spec = spec
        self.maxHP = hp
        self.nextLevel = nextLevel
        self.regen = regen

    def attack(self, opp):
        """Deal 10-18 (+atk) damage to ``opp``."""
        damage = random.randint(10, 18)
        damage += self.atk
        opp.defend(damage)

    def specAttack(self, opp):
        """Deal 3-25 (+spec) damage; extreme rolls additionally poison ``opp``."""
        damage = random.randint(3, 25)
        damage += self.spec
        if damage < 10 or damage > 25:
            # Poison strength scales with the special stat.
            opp.decay += self.spec / 2
        opp.defend(damage)

    def defend(self, damage):
        """Absorb ``damage`` (divided by armor) and report the HP lost."""
        damage /= self.armor
        hp = self.HP
        self.HP = max(0, self.HP - damage)
        print("%s lost %s HP!" % (self.name, round(hp - self.HP, 1)))

    def heal(self):
        """Recover 5-20 (+regen, at least +1) HP, capped at maxHP."""
        heal = random.randint(5, 20)
        heal += max(1, self.regen)
        hp = self.HP
        self.HP = min(self.maxHP, self.HP + heal)
        print("%s healed %s HP." % (self.name, round(self.HP - hp, 1)))

    def addExp(self, exp):
        """Add experience; level up once the threshold is reached."""
        self.exp += exp
        if self.exp >= self.nextLevel:
            self.levelUp()

    def levelUp(self):
        """Raise level and base stats; every 4th level offers an evolution."""
        self.level += 1
        self.maxHP += 5
        self.armor = round(self.armor + 0.1, 1)
        self.atk = round(self.atk + 0.1, 1)
        print("%s reached level %s" % (self.name, self.level))
        print("Stats: ")
        # NOTE(review): a level divisible by both 4 and 6 (e.g. 12) only
        # evolves -- the regen/spec branch is skipped.  Confirm intent.
        if self.level % 4 == 0:
            self.evolve()
        elif self.level % 6 == 0:
            self.regen += 0.5
            self.spec += 0.5
        self.nextLevel = round(self.nextLevel + self.nextLevel / 2)
        print(self.stats())

    def evolve(self):
        """Interactively pick a permanent power-up (reached every 4th level)."""
        invalidChoice = True
        while invalidChoice:
            print("1. Greater attack power")
            print("2. Stronger armor")
            print("3. Poison arrows")
            print("4. Health regeneration")
            print("5. More Health Points")
            # BUG FIX: ``raw_input`` is Python 2 only; the rest of the game
            # (see turn()) already uses ``input()``.
            powerup = int(input("Choose your powerup: "))
            if powerup in range(1, 6):
                invalidChoice = False
            if powerup == 1:
                # NOTE(review): plain assignment (not ``+=``) -- evolving
                # attack resets atk to a flat 3; kept as in the original.
                self.atk = 3
            elif powerup == 2:
                self.armor += 3
            elif powerup == 3:
                self.spec += 1
            elif powerup == 4:
                self.regen += 1
            elif powerup == 5:
                self.maxHP += 5
        print()
        print(self.stats())

    def effects(self):
        """Apply per-turn poison damage and regeneration, reporting both."""
        hp = self.HP
        self.HP = max(0, self.HP - self.decay)
        dif = self.HP - hp          # <= 0: HP lost to poison this turn
        hp = self.HP
        self.HP = min(self.maxHP, self.HP + self.regen)
        healdif = self.HP - hp      # HP recovered through regeneration
        if self.decay > 10:
            print("%s was greatly hurt by poison. (%sHP)" % (self.name, dif))
        elif self.decay > 5:
            print("%s was hurt by poison. (%sHP)" % (self.name, dif))
        elif self.decay > 0:
            print("%s is suffering from poison. (%sHP)" % (self.name, dif))
        if self.regen > 0 and self.HP != self.maxHP:
            print("%s ate some leftovers and was healed (%sHP)" % (self.name, healdif))

    def restore(self):
        """Clear poison and refill HP (called between battles)."""
        self.decay = 0
        self.HP = self.maxHP

    def stats(self):
        """Return a formatted multi-line stats summary."""
        # BUG FIX: the EXP separator was "\%" (a stray backslash that printed
        # literally and triggers an invalid-escape warning); experience is
        # shown as current/next-level.
        return "--------------------\n HP: %s\n ATK: %s\n DEF: %s\n SPEC: %s\n REGEN: %s\n EXP: %s/%s\n--------------------" % (self.maxHP, self.atk, self.armor, self.spec, self.regen, self.exp, self.nextLevel)
def clear_console():
print("\n\n")
print("%s: %s HP" % (opponent.name, round(opponent.HP, 1)))
print("%s: %s HP Lv. %s" % (player.name, round(player.HP, 1), player.level))
print("--------------------")
def turn():
clear_console()
player.effects()
print("1. Attack")
print("2. Heal")
print("3. Spec. Attack")
command = int(input("Pick a move: "))
if command not in range(1, 4):
turn()
else:
if command == 1:
player.attack(opponent)
elif command == 2:
player.heal()
elif command == 3:
player.specAttack(opponent)
def ai_turn():
opponent.effects()
heal = random.randint(1, 10) >= random.randint(1, 10)
if opponent.HP / opponent.maxHP < 0.3 and heal:
opponent.heal()
elif player.HP > 67 and opponent.spec > 0:
opponent.specAttack(player)
else:
opponent.attack(player)
hp = 100
atk = 1.0
armor = 1.0
spec = 0.0
regen = 0.0
monsters_slain = 0
opponent = Monster("Bobtimus Prime", 1, 1, 100, 0, 0, 0)
name = raw_input("Your name: ")
player = Monster(name, 1.0, 1.0, 125.0, 0.0, 20.0, 0)
while True:
turn()
if opponent.HP == 0:
player.addExp(10)
monsters_slain += 1
print("Opponent #%s slain! Exp. %s\%s" % (monsters_slain, player.exp, player.nextLevel))
if monsters_slain % 5 == 0:
atk += 0.2
armor += 0.2
if monsters_slain % 8 == 0:
hp += 5
if monsters_slain % 10 == 0:
spec += 0.5
regen += 0.5
opponent = Monster("B. Prime", atk, armor, hp, spec, 0, regen)
player.restore()
turn()
ai_turn()
if player.HP == 0:
print("Game Over!")
print("You reached level %s by slaying %s monsters!" % (player.level, monsters_slain))
break
|
16,083 | 579bc998ad5c8725d13c09f6bc240aea057d8788 | import sys, os
import shutil
from castepy import castepy
from castepy import constraint
from castepy import cell
from castepy import calc
from castepy.util import calc_from_path, path
relax_path = path("templates/spectral")
merge_cell = cell.Cell(open(os.path.join(relax_path, "spectral.cell")).read())
def make(source_dir, source_name, target_dir):
cal = calc.CastepCalc(source_dir, source_name)
c = cell.Cell(cal.cell_file)
c.other += merge_cell.other
target_cell = os.path.join(target_dir, "%s.cell" % source_name)
target_param = os.path.join(target_dir, "%s.param" % source_name)
target_sh = os.path.join(target_dir, "%s.sh" % source_name)
shutil.copyfile(os.path.join(relax_path, "spectral.param"), target_param)
shutil.copyfile(os.path.join(relax_path, "spectral.sh"), target_sh)
cell_out = open(target_cell, "w+")
print >>cell_out, str(c)
if __name__ == "__main__":
source_calc = str(sys.argv[1])
source_dir, source_name = calc_from_path(source_calc)
target_dir = str(sys.argv[2])
make(source_dir, source_name, target_dir)
|
16,084 | 936eff3263b72709bc11f873707b1711f2549e37 | ''' Design an algorithm that computes the successor of a node in a binary tree.
Assume that each node stores its parent.
Hint: Study the node's right subtree. What if the node does not have a right subtree?
'''
import unittest
from binary_tree import BinaryTree
def get_node_successor(n):
    """Return the in-order successor of ``n`` in a parent-linked binary tree.

    Nodes are dicts with 'left', 'right' and 'parent' keys.  If ``n`` has a
    right subtree the successor is that subtree's leftmost node; otherwise it
    is the nearest ancestor whose left subtree contains ``n`` (``None`` when
    ``n`` is the rightmost node).
    """
    if n['right']:
        cur = n['right']
        while cur['left']:
            cur = cur['left']
        return cur
    cur = n
    while cur['parent'] and cur != cur['parent']['left']:
        cur = cur['parent']
    return cur['parent']
class Test_node_successor(unittest.TestCase):
def test_basic_functionality(self):
in_order = [16, 6, 108, -1, -3, 42, 3, 4, -6, 12, 36, 8]
pre_order = [3, 6, 16, -3, -1, 108, 42, 12, 4, -6, 8, 36]
tr = BinaryTree.from_in_and_pre_order_traversal(in_order, pre_order)
tr.populate_parent()
self.assertEquals(get_node_successor(tr.root['left'])['data'], 108)
self.assertEquals(get_node_successor(tr.root['left']['left'])['data'], 6)
self.assertEquals(get_node_successor(tr.root['right']['left'])['data'], -6)
self.assertEquals(get_node_successor(tr.root['right']['right']), None)
self.assertEquals(get_node_successor(tr.root['right']['right']['left'])['data'], 8)
self.assertEquals(get_node_successor(tr.root['left']['right']['left'])['data'], -3)
self.assertEquals(get_node_successor(tr.root['left']['right']['right'])['data'], 3)
if __name__ == '__main__':
unittest.main()
|
16,085 | 6c1cc62baaf73268bd6688b967a6f2b51224991c | import numpy as np
import pandas as pd
# import seaborn as sb
import matplotlib.pyplot as plt
# from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
def analyze_team_stats_season(season):
tbl = pd.read_csv("Season {}_team_stats.csv".format(season))
tbl_standing = pd.read_csv("Season {}_standings.csv".format(season))
column_to_consider = [tbl.columns[0]]
column_to_consider.extend(columns_to_use)
# scaled_tbl = pd.DataFrame(StandardScaler().fit_transform(tbl[tbl.columns[1:-1]]),columns = tbl.columns[1:-1])
# scaled_tbl[tbl.columns[0]]=tbl[tbl.columns[0]]
tbl = tbl[column_to_consider]
joined_tbl = tbl.merge(tbl_standing[['team_id','team_name','points','wins']],on="team_id",how='inner')
return joined_tbl
# cols = [param for param in tbl.columns[2:]]
# corr = tbl[cols].corr()
# plt.matshow(corr)
# plt.xticks(range(len(cols)),cols,rotation=90)
# plt.yticks(range(len(cols)),cols)
# cb = plt.colorbar()
# cb.ax.tick_params(labelsize=14)
def classify_and_predict(training_tbl):
X_poly = poly.fit_transform(training_tbl[columns_to_use])
poly.fit(X_poly,training_tbl['points'])
model = linear_model.LinearRegression()
model.fit(X_poly,training_tbl['points'])
return model
if __name__ == "__main__":
train_data_frame = None
# columns_to_use = ['Team All-outs Conceded','Team DOD Raid Points','Team Successful Raids',\
# 'Team Successful Tackles','Team Super Raid','Team Super Tackles']
columns_to_use = ['Team Average Raid Points','Team Average Tackle Points','Team Avg Points Scored']
poly = PolynomialFeatures(degree=1)
for season in range(1,6):
tbl = analyze_team_stats_season(season)
if train_data_frame is None:
train_data_frame = tbl
else:
train_data_frame.append(tbl)
test_Val = analyze_team_stats_season(7)
# columns_to_use.append("wins")
model = classify_and_predict(train_data_frame)
y_vals = model.predict(poly.fit_transform(test_Val[columns_to_use]))
print([int(val) for val in y_vals])
print(test_Val['points'].tolist())
print(test_Val['team_name'].tolist())
# plt.show() |
16,086 | cf60eb9a0f33939d01cded5df52bf20815bb5fc3 | import numpy as np
np.random.seed(5)
b0 = 2
b1 = 1
N = 100
step = 0.2
mu = 0 # pas de biais
sigma = 10
x = np.random.randn(int(N/step))*5
# x = np.arange(0, N, step)
e = np.random.normal(mu, sigma, int(N/step))
y = b0 + b1*x + e
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(8, 4))
ax.scatter(x, y, alpha=0.5, color='orchid')
fig.suptitle('Example OSL')
fig.tight_layout(pad=2);
ax.grid(True)
fig.savefig('data_osl.png', dpi=125)
import statsmodels.api as sm
# converti en matrice des features
x = sm.add_constant(x) # constant intercept term
# Model: y ~ x + c
model = sm.OLS(y, x)
fitted = model.fit()
x_pred = np.linspace(x.min(), x.max(), 50)
x_pred2 = sm.add_constant(x_pred)
y_pred = fitted.predict(x_pred2)
ax.plot(x_pred, y_pred, '-', color='darkorchid', linewidth=2)
fig.savefig('data_osl_droite.png', dpi=125)
print(fitted.params) # the estimated parameters for the regression line
print(fitted.summary()) # summary statistics for the regression
# Calcul de l intervalle de confiance
y_hat = fitted.predict(x) # x is an array from line 12 above
y_err = y - y_hat
mean_x = x.T[1].mean()
n = len(x)
dof = n - fitted.df_model - 1
from scipy import stats
# cet IC est corrige en fonction de la distribution des donnee
# le plus petit IC se trouve ou la plus forte concentration de donnee
t = stats.t.ppf(1-0.025, df=dof)
s_err = np.sum(np.power(y_err, 2))
conf = t * np.sqrt((s_err/(n-2))*(1.0/n + (np.power((x_pred-mean_x),2) /
((np.sum(np.power(x_pred,2))) - n*(np.power(mean_x,2))))))
upper = y_pred + abs(conf)
lower = y_pred - abs(conf)
ax.fill_between(x_pred, lower, upper, color='#888888', alpha=0.4)
fig.savefig('data_osl_droite_ICt.png', dpi=125)
from statsmodels.sandbox.regression.predstd import wls_prediction_std
sdev, lower, upper = wls_prediction_std(fitted, exog=x_pred2, alpha=0.05)
ax.fill_between(x_pred, lower, upper, color='#888888', alpha=0.1)
fig.savefig('filename4.png', dpi=125)
|
class MemberStore:
    """In-memory member store backed by a class-level list, so every
    instance shares the same data.
    """

    members = []

    def get_all(self):
        """Return the shared member list."""
        return MemberStore.members

    def add(self, member):
        """Append ``member`` to the shared list."""
        self.members.append(member)
class PostStore:
    """In-memory post store backed by a class-level list, so every
    instance shares the same data.
    """

    posts = []

    def get_all(self):
        """Return the shared post list."""
        return PostStore.posts

    def add(self, post):
        """Append ``post`` to the shared list."""
        self.posts.append(post)
|
16,088 | b7fa0c4f24acdd4140bfff8f4b2afe2c4e09fcf2 | from django.http import HttpResponse
import json
class JSONResponseMixin(object):
    """View mixin that serialises the template context to a JSON response."""

    def render_to_response(self, context):
        """Return a JSON ``HttpResponse`` whose payload is ``context``."""
        payload = self.convert_context_to_json(context)
        return self.get_json_response(payload)

    def get_json_response(self, content, **httpresponse_kwargs):
        """Wrap ``content`` in an ``HttpResponse`` with a JSON content type."""
        return HttpResponse(
            content,
            content_type='application/json',
            **httpresponse_kwargs,
        )

    def convert_context_to_json(self, context):
        """Serialise ``context`` to a JSON string.

        Note: this is *extremely* naive -- it assumes ``context`` contains
        only JSON-serialisable values (no model instances or querysets).
        """
        return json.dumps(context)
class AllowCORSMixin(object):
    """View mixin that stamps permissive CORS headers onto a response."""

    def add_access_control_headers(self, response):
        """Set wildcard CORS headers on ``response`` and return it."""
        cors_headers = {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Max-Age": "1000",
            "Access-Control-Allow-Headers": "X-Requested-With, Content-Type",
        }
        for header, value in cors_headers.items():
            response[header] = value
        return response
16,089 | a378350a741aeae0928b39285306374a19fc8d44 | from selenium import webdriver
from utils import *
PATH = 'C:\Program Files (x86)\chromedriver.exe' # Path to chrome driver
USERNAME = 'adming' # wordpress username
PASSWORD = 'admin' # wordpress password
driver = webdriver.Chrome(PATH)
if __name__ == '__main__':
# Loading my localhost server that contains the neve theme
driver.get('http://localhost/dragos/wp-admin/customize.php?theme=neve&return=http%3A%2F%2Flocalhost%2Fdragos%2Fwp-admin%2Fthemes.php')
# Login to the theme
login_function(USERNAME, PASSWORD, driver)
# Open the container panel
open_container(driver)
# Test container width
test_container_width(driver)
|
def solution(answers):
    """Return the 1-based ids of the guessers who score the most correct
    answers, in ascending order (all top scorers on a tie).

    Each guesser cycles a fixed answer pattern over the exam.
    """
    patterns = [
        [1, 2, 3, 4, 5],
        [2, 1, 2, 3, 2, 4, 2, 5],
        [3, 3, 1, 1, 2, 2, 4, 4, 5, 5],
    ]
    scores = [
        sum(1 for idx, ans in enumerate(answers)
            if pattern[idx % len(pattern)] == ans)
        for pattern in patterns
    ]
    best = max(scores)
    return [i + 1 for i, score in enumerate(scores) if score == best]
answers=[1,2,3,4,5]
print(solution(answers)) |
16,091 | ac3abe2518f838e05c02ddca7fe38794db66ba0f | lst = [1,2,4,3,5]
for x in lst:
if x % 2 == 0:
lst.remove(x)
print(lst)
# create a shallow copy of the list
for x in lst[:]:
if x % 2 == 0:
lst.remove(x)
print(lst)
s = 'beautiful'
for ch in s:
if ch in "aeiou":
s = s.replace(ch, '')
print(s) |
16,092 | 26759fbe839a9c0d5a7436576820f261f8267d5d | from django.urls import path
from . import views
urlpatterns = [
path('get_profile/', views.get_profile, name="get_profile"),
path('logout_profile/', views.logout_profile, name="logout_profile"),
path('show_my_profile/', views.show_my_profile, name="show_my_profile"),
path('update_my_profile/', views.update_my_profile, name="update_my_profile"),
] |
16,093 | e29e3225baf54051c387c22fbcf059708ac116c9 | import os
__author__ = 'huanpc'
from influxdb import InfluxDBClient
import xml.etree.ElementTree as ET
def store_data(xml_data=None):
root = ET.fromstring(xml_data)
ipe_id = root.find('./*[@name="ipeId"]').attrib['val']
app_id = root.find('./*[@name="appId"]').attrib['val']
category = root.find('./*[@name="category"]').attrib['val']
data = int(root.find('./*[@name="data"]').attrib['val'])
unit = root.find('./*[@name="unit"]').attrib['val']
json_body = [
{
"measurement": "sensor_status",
"tags": {
"sensor_id": app_id,
"ipe_id": ipe_id,
"category": category
},
"fields": {
"data": data,
"unit": unit
}
}
]
influxdb_host = 'localhost'
if os.environ.get('INFLUXDB_HOST_NAME'):
influxdb_host = os.environ['INFLUXDB_HOST_NAME']
client = InfluxDBClient(influxdb_host, os.environ.get('INFLUXDB_PORT'), 'root', 'root', 'oneM2M')
client.write_points(json_body)
# result = client.query('select * from sensor_status;')
# print("Result: {0}".format(result))
# if __name__ == '__main__':
# xml_data = '''
# <obj>
# <str val="demo" name="ipeId"/>
# <str val="TEMPERATURE_SENSOR" name="appId"/>
# <str val="temperature" name="category"/>
# <int val="77" name="data"/>
# <str val="celsius" name="unit"/>
# </obj>
# '''
# store_data(xml_data)
|
16,094 | 187413036295bf7a131bfe9d1c159c5cb9b8e070 | import unittest
import ControllerElenco
class TestElenco(unittest.TestCase):
    """Tests for the ControllerElenco cast-management API.

    Fixes: ``assertEquals`` is a deprecated alias removed in Python 3.12 --
    replaced everywhere with ``assertEqual``.  The repeated four-field
    checks are factored into one helper.
    """

    def setUp(self):
        # Start every test from an empty cast store.
        ControllerElenco.RemoverTodosElenco()

    def _assert_default_entry(self, e):
        """Shared checks for the (1, 1, 1, "coadjuvante") fixture row."""
        self.assertEqual(1, e[0])
        self.assertEqual(1, e[1])
        self.assertEqual(1, e[2])
        self.assertEqual("coadjuvante", e[3])

    def test_sem_ator(self):
        """A cleared store reports no cast entries."""
        elencos = ControllerElenco.BuscarTodosElenco()
        print(elencos)
        self.assertEqual(0, len(elencos))

    def test_buscar_ator_filme(self):
        ControllerElenco.AdicionarAtor(1, 1, 1, "coadjuvante")
        self._assert_default_entry(ControllerElenco.BuscarElenco(1))

    def test_buscar_elenco(self):
        ControllerElenco.AdicionarAtor(1, 1, 1, "coadjuvante")
        self._assert_default_entry(ControllerElenco.BuscarElenco(1))

    def test_buscar_elenco_filme(self):
        ControllerElenco.AdicionarAtor(1, 1, 1, "coadjuvante")
        self._assert_default_entry(ControllerElenco.BuscarElencoFilme(1))

    def test_remover_elenco(self):
        """Removing an entry makes subsequent lookups return None."""
        ControllerElenco.AdicionarAtor(1, 1, 1, "Coadjuvante")
        ControllerElenco.RemoverElenco(1)
        self.assertIsNone(ControllerElenco.BuscarElenco(1))

    def test_remover_todos_elenco(self):
        ControllerElenco.AdicionarAtor(1, 1, 1, "Coadjuvante")
        ControllerElenco.AdicionarAtor(1, 1, 1, "Principal")
        self.assertEqual([], ControllerElenco.RemoverTodosElenco())

    def test_iniciar_elenco(self):
        """IniciarElenco seeds exactly two rows."""
        ControllerElenco.IniciarElenco()
        self.assertEqual(2, len(ControllerElenco.BuscarTodosElenco()))
if __name__ == '__main__':
unittest.main(exit=False)
|
16,095 | 24498da6c33aa995c22b22debc289136866b0939 | # Ref: https://leetcode.com/problems/stickers-to-spell-word/discuss/108318/C%2B%2BJavaPython-DP-%2B-Memoization-with-optimization-29-ms-(C%2B%2B)
class Solution(object):
    def minStickers(self, stickers, target):
        """Return the minimum number of stickers whose letters can spell
        ``target`` (letters may be drawn from multiple copies of any
        sticker), or -1 if ``target`` cannot be spelled at all.

        DP over the multiset of letters still missing, memoized on the
        sorted remaining string so equal multisets share one memo entry.
        """
        # BUGFIX: the file never imported ``collections``, so Counter below
        # raised NameError at runtime; import it locally.
        import collections

        num_sticker = len(stickers)
        s_cnt = [collections.Counter(s) for s in stickers]
        memo = {"": 0}  # no letters left -> zero stickers needed

        def helper(target):
            if target not in memo:
                t_cnt = collections.Counter(target)
                ans = float('inf')
                for i in range(num_sticker):
                    # Pruning: only try stickers that supply the first
                    # missing letter; some sticker must cover it anyway.
                    if s_cnt[i][target[0]] == 0:
                        continue
                    # Letters still missing after applying sticker i once.
                    # Counter preserves insertion order, so s stays sorted.
                    s = "".join([c * (n - s_cnt[i][c]) for c, n in t_cnt.items() if n > s_cnt[i][c]])
                    tmp = helper(s)
                    if tmp != -1:
                        ans = min(ans, 1 + tmp)
                # inf means no sticker sequence works for this remainder.
                memo[target] = ans if ans < float('inf') else -1
            return memo[target]

        return helper("".join(sorted(target)))
|
16,096 | 841b1962acc5208728c6d61e272b646adc1fbe17 | import tkinter as tk
# Mutable module-level state shared by the button callbacks below.
app_state = {"counter": 0}

window = tk.Tk()

# Greeting label: created with one text, then immediately reconfigured
# (demonstrates .config() before the widget is packed).
hello_label = tk.Label(master=window, text="Hello!")
hello_label.config(text="Hi!")
hello_label.pack()

# Starts empty; filled in by display_message() when the button is pressed.
message_label = tk.Label(master=window, text="")
message_label.pack()

def display_message():
    # Callback for hello_button: write a greeting into message_label.
    message_label.config(text="Hello!")

hello_button = tk.Button(master=window, text="Say Hello!", command=display_message)
hello_button.pack()

# Shows the current counter value; refreshed by increment_count().
counter_label = tk.Label(master=window, text=str(app_state["counter"]))
counter_label.pack()

def increment_count():
    # Callback for counter_button: bump the shared counter, refresh the label.
    app_state["counter"] = app_state["counter"] + 1
    counter_label.config(text=str(app_state["counter"]))

counter_button = tk.Button(master=window, text="increment", command=increment_count)
counter_button.pack()

# Blocks here running the Tk event loop until the window is closed.
window.mainloop()
|
16,097 | 43d0538b3b53bd4c9c86134b7d7d392684c8aff8 | # セミコロンは要らない
# Semicolons are not needed in Python
x = 1
y = 2
# Keep each line within 80 characters
x = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
# When the parameter list gets long, split it across two lines
def test_func(x, y, z,
              foperfjesoigntdrhoydjthktypgaejrpesotgmprsdmponbkf='test'):
    """
    :param x:
    :param y:
    :param z:
    :param foperfjesoigntdrhoydjthktypgaejrpesotgmprsdmponbkf:
    :return:
    A URL is better kept on a single line even when it is long.
    See details at:http://naoyaabe.com/fijorwafubesgittorujgipeagjjjjjjrrrrrrrrrrrrrrrepoagrkdot
    """
    # When there is a single condition, skip the redundant parentheses
    # if (x and y):
    if x and y:
        print('exists')

# Indent with 4 spaces!
x = {'test': 'sss'}
# Put spaces around = in assignments (*no spaces around = for keyword args)
x = y
# Readable ways to build strings
word = 'hello'
word2 = '!'
# bad
new_word = '{}{}'.format(word, word2)
# good
new_word = word + word2
# When building a long string inside a for loop
# bad
long_word = ""
for word in ['fmwaempr', 'fmaopgmeprs', 'fmafmeosp']:
    long_word += "{}fwejaifoera".format(word)
# good -- uses less memory
long_word = []
for word in ['fmwaempr', 'fmaopgmeprs', 'fmafmeosp']:
    long_word.append("{}fjeior".format(word))
new_long_word = ''.join(long_word)
# For '' vs "" string quoting, follow your team's convention
print('frmwaforp')
print("fwakofker")
# Write if/else on separate lines
if x:
    print('exit')
else:
    print('else')
|
16,098 | a7a5553f5fa371a0a30635d5a53dc4f8ba1aa4f8 | #Equal dict or not
# Two dicts holding identical key/value pairs compare equal with ==.
d = {1: 'abc', 2: 'def'}
d1 = {1: 'abc', 2: 'def'}
print(d==d1) |
16,099 | 3ffc9bab51adf22e9582b310da7384f501cd7f69 | #Solution Code to CS50 Ai course Tic tac toe's problem by Alberto Pascal Garza
#albertopascalgarza@gmail.com
"""
Tic Tac Toe Player
"""
import math
import copy
from random import randint
X = "X"
O = "O"
EMPTY = None
def initial_state():
    """
    Returns starting state of the board.
    """
    # Fresh inner lists each call so one board's cells never alias another's.
    return [[EMPTY for _ in range(3)] for _ in range(3)]
def player(board):
    """
    Returns player who has the next turn on a board.
    """
    # Tally every token already placed; turn order follows from the totals.
    x_total = 0
    o_total = 0
    for row in board:
        x_total += row.count("X")
        o_total += row.count("O")
    if x_total == o_total:
        # X always moves first, so equal counts mean X is up.
        return "X"
    if x_total > o_total:
        # X has moved once more than O, so O replies next.
        return "O"
    # Mirrors the original's implicit None for the (invalid) case O > X.
    return None
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.
    """
    # Every empty cell is a legal move for whoever plays next.
    open_cells = {
        (r, c)
        for r in range(len(board))
        for c in range(len(board[r]))
        if board[r][c] == EMPTY
    }
    # Preserve the original contract: EMPTY (not an empty set) when no
    # move remains.
    return open_cells if open_cells else EMPTY
def result(board, action):
    """
    Returns the board that results from making move (i, j) on the board.
    """
    row, col = action
    # Deep copy so the caller's board is never mutated.
    next_board = copy.deepcopy(board)
    if next_board[row][col] != EMPTY:
        # Guard clause: the target square is already occupied.
        raise Exception('Not a valid action')
    # Place the token of whichever player moves on the current board.
    next_board[row][col] = player(next_board)
    return next_board
def winner(board):
    """
    Returns the winner of the game, if there is one.
    """
    # utility() already encodes the outcome: 1 -> X won, -1 -> O won,
    # anything else (a tie) -> no winner, i.e. None.
    return {1: 'X', -1: 'O'}.get(utility(board))
def terminal(board):
    """
    Returns True if game is over, False otherwise.

    The game is over when someone has three in a row OR no empty
    square remains (a tie).
    """
    game_ended = False
    found_empty = False
    # First pass: is there still an open square anywhere?
    for row in board:
        if EMPTY in row:
            # At least one open square -- the game may still be in progress.
            found_empty = True
    # Second pass: scan row i and column i for three identical tokens.
    for i in range(0,3):
        if (board[i][0] == board[i][1] and board[i][0] == board[i][2] and (board[i][0] is not EMPTY)) or (board[0][i] == board[1][i] and board[0][i] == board[2][i] and (board[0][i] is not EMPTY)):
            game_ended = True
            # A winner exists; stop scanning.
            break
        else:
            # Only records "no win on line i"; a later i may still win
            # (the break above keeps a found win from being overwritten).
            game_ended = False
    # No horizontal/vertical winner yet: the two diagonals remain.
    if not game_ended:
        if (((board[0][0] == board[1][1] and board[2][2] == board[0][0]) or (board[0][2] == board[1][1] and board[2][0] == board[0][2]) ) and (board[1][1] is not EMPTY)):
            game_ended = True
    # Open squares and no winner -> play continues.
    if found_empty and not game_ended:
        return False
    else:
        # Either somebody won, or the board is full (tie).
        return True
def utility(board):
    """
    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.

    Assumes it receives a terminal board; a board with no three-in-a-row
    therefore scores as a tie (0).
    """
    game_winner = ""
    # Scan row i, then column i, for three identical non-empty tokens.
    for i in range(0,3):
        if (board[i][0] == board[i][1] and board[i][0] == board[i][2] and (board[i][0] is not EMPTY)):
            # Row i holds the winning token.
            game_winner = board[i][0]
            break
        elif (board[0][i] == board[1][i] and board[0][i] == board[2][i] and (board[0][i] is not EMPTY)):
            # Column i holds the winning token.
            game_winner = board[0][i]
            break
    # No straight-line winner: check both diagonals (they share the center,
    # so board[1][1] is the winning token either way).
    if ((board[0][0] == board[1][1] and board[2][2] == board[0][0]) or (board[0][2] == board[1][1] and board[2][0] == board[0][2])) and (board[1][1] is not EMPTY):
        game_winner = board[1][1]
    # Map the winning token (possibly none) to its score.
    if game_winner == "X":
        return 1
    elif game_winner == "O":
        return -1
    else:
        # Terminal board with no winner -> tie.
        return 0
def Max_Value(board):
    """Highest utility X can force from this board with optimal play."""
    if terminal(board):
        # A finished game's value is simply its utility.
        return utility(board)
    # O answers each candidate move optimally (Min_Value); keep the best
    # outcome among them. A non-terminal board always has actions, so
    # max() never sees an empty sequence.
    return max(Min_Value(result(board, move)) for move in actions(board))
def Min_Value(board):
    """Lowest utility O can force from this board with optimal play."""
    if terminal(board):
        # A finished game's value is simply its utility.
        return utility(board)
    # X answers each candidate move optimally (Max_Value); keep the
    # worst-for-X outcome among them.
    return min(Max_Value(result(board, move)) for move in actions(board))
def minimax(board):
    """
    Returns the optimal action for the current player on the board.
    """
    # Opening shortcut: on an empty board every first move is equally
    # good, so a random square saves a full game-tree search.
    if (board == initial_state()):
        coord1 = randint(0,2)
        coord2 = randint(0,2)
        return ((coord1,coord2))
    # Whose turn it is decides whether we maximize (X) or minimize (O).
    player_to_move = player(board)
    best_action = None
    if(player_to_move == "X"):
        current_max = float('-inf')
        # Score each move by assuming O then plays optimally (Min_Value).
        for action in actions(board):
            curr_score = Min_Value(result(board,action))
            # >= keeps the LAST action among equally-scored moves.
            if curr_score>= current_max:
                current_max = curr_score
                best_action = action
    else:
        # O's turn: mirror logic, minimizing against an optimal X.
        current_max = float('inf')
        for action in actions(board):
            curr_score = Max_Value(result(board,action))
            # <= keeps the LAST action among equally-scored moves.
            if curr_score<= current_max:
                current_max = curr_score
                best_action = action
    return best_action
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.