text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
from PIL import Image,ImageDraw,ImageFont,ImageFilter
from discord.ext import commands
from operator import itemgetter
from unidecode import unidecode
from random import randint
from .utils import utils
import traceback
import datetime
import aiohttp
import logging
import discord
import asyncio
import re
import io
log = logging.getLogger(__name__)
def is_cooldown(msg):
    """Command check for `rank`: allow the command only while the per-user
    rank-cooldown key is absent from Redis (falsy -> not on cooldown)."""
    redis_conn = utils.redis
    key = "{}:Level:{}:rank:check".format(msg.message.guild.id, msg.message.author.id)
    return not redis_conn.get(key)
class Level(commands.Cog):
    """
    A level plugin: members gain XP when talking.

    Per-guild XP, message counts and config live in Redis under
    "<guild_id>:Level:*" keys; cross-guild ("global") XP lives under
    "Info:Level:*". A background task periodically grants/revokes role
    rewards based on level.
    """
    def __init__(self,bot):
        self.bot = bot
        self.redis = bot.db.redis
        self.bot.say_edit = bot.say
        self.column = utils.secret["column"] # secret config value; purpose unclear from here — TODO confirm
        # Background runner that periodically invokes self.level_reward
        # (scheduling semantics of the 60/30 args live in utils.Background — confirm there).
        self.bg = utils.Background("level",60,30,self.level_reward,log)
        self.bot.background.update({"level_reward":self.bg})
        self.bg.start()
    def cog_unload(self):
        # Stop the role-reward background task when this cog is unloaded.
        self.bg.stop()
    def cog_check(self,ctx):
        # Gate every command in this cog on the "level" plugin being enabled.
        return utils.is_enable(ctx,"level")
    # A leaving member's level data expires after 2 weeks; rejoining within
    # that window cancels the expiry (see on_member_join).
    @commands.Cog.listener()
    async def on_member_remove(self,member):
        # Remove from the ranked set and start the 2-week countdown on their hash.
        await self.redis.srem("{}:Level:Player".format(member.guild.id),member.id)
        await self.redis.expire("{}:Level:Player:{}".format(member.guild.id,member.id),1209600)# 1209600s = 14 days; persist() on rejoin cancels it
    @commands.Cog.listener()
    async def on_member_join(self,member):
        # Cancel the pending expiry if a previously-ranked member rejoins.
        if await self.redis.exists("{}:Level:Player:{}".format(member.guild.id,member.id)):
            await self.redis.persist("{}:Level:Player:{}".format(member.guild.id,member.id))
    @commands.Cog.listener()
    async def on_member_update(self,before,after):
        # Keep the cached display name in sync (presumably consumed by the
        # nurevam.site web leaderboard — confirm).
        if before.display_name != after.display_name:
            await self.redis.hset("{}:Level:Player:{}".format(after.guild.id,after.id),"Name",after.display_name)
    async def is_ban(self,member):
        # True if the member, or any of their roles, is XP-banned in this guild.
        # Non-Member (e.g. plain User) always returns False.
        if isinstance(member,discord.Member):
            is_ban_member = await self.redis.smembers("{}:Level:banned_members".format(member.guild.id))
            is_ban_role = await self.redis.smembers("{}:Level:banned_roles".format(member.guild.id))
            if str(member.id) in is_ban_member:
                return True
            for role in member.roles:
                if str(role.id) in is_ban_role:
                    return True
        return False
    async def on_message_global(self,msg,xp):
        """
        Award cross-guild ("global") XP for a message, with a 60s per-user cooldown.
        """
        if await self.redis.get("Info:Level:Global_cooldown:{}".format(msg.author.id)): # still on cooldown
            return
        # Set the cooldown BEFORE awarding so a slow datastore can't double-award XP.
        await self.redis.set("Info:Level:Global_cooldown:{}".format(msg.author.id), 'cooldown', expire=60)
        # Cooldown expired: record the player (if new) and add XP.
        # NOTE(review): smembers likely yields str IDs while author.id is an int,
        # so this membership test may always be False — confirm client decoding.
        check_exist = msg.author.id in await self.redis.smembers("Info:Level:Player") # Call of name and ID to get boolean
        if check_exist is False: # register player who wasn't in the level record yet
            await self.redis.sadd("Info:Level:Player",msg.author.id)
        await self.redis.hincrby("Info:Level:Player_Total_XP",msg.author.id,increment = xp)
        return
    @commands.Cog.listener()
    async def on_message(self,msg): # main XP hook: grant XP per message
        if msg.author == self.bot.user or isinstance(msg.channel,discord.DMChannel) or msg.author.bot:
            return
        if int(msg.author.discriminator) == 0000: # webhooks have discriminator "0000" (literal is just 0)
            return #Webhook
        log.debug("Got message from user: {0} - {0.id}".format(msg.author))
        if await self.redis.hget("{}:Config:Cogs".format(msg.guild.id),"level") == "on":
            log.debug("Enable level")
            xp = randint(5,10)
            await self.on_message_global(msg,xp)
            # Blacklisted channels and banned members earn nothing.
            if str(msg.channel.id) in await self.redis.smembers("{}:Level:banned_channels".format(msg.guild.id)): #a banned channel
                return
            if await self.is_ban(msg.author) is True:
                return
            #Getting ID
            player = msg.author.id
            guild = msg.guild.id
            await self.redis.sadd("{}:Level:Player".format(guild),player)
            name = "{}:Level:Player:{}".format(guild,player) # player hash key, reused below
            check_id = await self.redis.hget(name,"ID")
            if check_id is None: # backfill the ID field if missing
                await self.redis.hset(name,"ID",player)
            # Total message count is incremented on EVERY message (no cooldown).
            await self.redis.hincrby(name,"Total Message Count",increment = 1)
            if await self.redis.get("{}:Level:{}:xp:check".format(guild,player)):# still on per-guild XP cooldown
                return
            # Set cooldown BEFORE awarding, so slow data can't double-award XP.
            await self.redis.set("{}:Level:{}:xp:check".format(guild,player),'cooldown',expire = 60)
            #If Cooldown expire, Add xp and stuff
            await self.redis.sadd("{}:Level:Player".format(guild),player)
            total_xp = await self.redis.hincrby(name,"Total_XP",increment = xp)
            await self.redis.hincrby(name,"Message Count",increment = 1)
            level,remain_xp,next_xp = self.next_Level(total_xp)
            log.debug("Level:{} next xp: {} total xp:{}".format(level,next_xp,total_xp))
            lvl_db = await self.redis.hget(name,"lvl")
            if lvl_db is None:
                # First sighting: initialise stored level and stop (no announce).
                return await self.redis.hset(name,"lvl",0)
            elif level != int(lvl_db):
                # Level changed: persist it and optionally announce.
                await self.redis.hset(name, "lvl", level)
                utils.prCyan("{} - {} - {} ({}) Level up!".format(msg.guild.name,guild,msg.author,player))
                announce = await self.redis.hgetall("{}:Level:Config".format(guild))
                if announce.get("announce") == "on":
                    try:
                        if announce.get("whisper") == "on":
                            await msg.author.send(announce["announce_message"].format(player = msg.author.display_name,level = level))
                        else:
                            await msg.channel.send(announce["announce_message"].format(player = msg.author.display_name,level = level))
                    except discord.Forbidden:
                        pass # can't send: missing perms or the user blocks DMs
    def next_Level(self,xp,lvl=0):
        """
        Convert total XP into (level, xp_into_current_level, xp_needed_for_next).

        XP needed to clear level `lvl` is 2*lvl**2 + 20*lvl + 100; recurses,
        subtracting each cleared level's cost from `xp`.
        """
        f = 2*(lvl**2)+20*(lvl)+100
        if xp >= f:
            return self.next_Level(xp-f,lvl+1)
        return lvl,xp,f
    async def level_reward(self):
        """Background task: grant/revoke reward roles per member based on level.

        NOTE(review): because of the remove/add `elif` below, a member with both
        pending removals and additions only gets removals this cycle; additions
        happen on a later run.
        """
        try:
            for guild in list(self.bot.guilds):
                log.debug(guild)
                if await self.redis.hget("{}:Config:Cogs".format(guild.id),"level") in (None,"off"):
                    log.debug("level reward disable")
                    continue
                if guild.me.top_role.permissions.manage_roles: #if got Manage roles permission, can grant roles
                    log.debug("Got manage roles permissions")
                    raw_data = await self.redis.hgetall("{}:Level:role_reward".format(guild.id))
                    raw_member = [int(x) for x in await self.redis.smembers("{}:Level:Player".format(guild.id))] #Rewrite, ID Str -> Int
                    guild_roles = guild.roles
                    for member in guild.members:
                        if member.id not in raw_member:
                            continue
                        member_role = [x.id for x in member.roles]
                        member_level = self.next_Level(int(await self.redis.hget("{}:Level:Player:{}".format(guild.id,member.id),"Total_XP")))[0] #return first index, which is level
                        remove_role = []
                        add_role = []
                        for role_id, role_level in raw_data.items():
                            role_level = int(role_level)
                            role_id = int(role_id)
                            if role_level == 0:
                                continue
                            if role_id in member_role:
                                if role_level > member_level:# member no longer qualifies: schedule removal
                                    log.debug("role_level is bigger than member_level")
                                    temp = [x for x in guild_roles if x.id == role_id]
                                    if temp:
                                        remove_role.append(temp[0])
                            elif role_id not in member_role:
                                if member_level >= role_level:
                                    log.debug("role_level is less than member_level, so adding it")
                                    temp = [x for x in guild_roles if x.id == role_id]
                                    if temp:
                                        add_role.append(temp[0])
                        if remove_role or add_role:
                            log.debug(member)
                            log.debug("checking if nure can add roles to member")
                            # Role edits only work when the bot's top role outranks the member's.
                            if guild.me.top_role > member.top_role:
                                if remove_role:
                                    log.debug("removing it")
                                    await member.remove_roles(*remove_role, reason = "Unable to meet Role level condition requirement , current level:{}".format(member_level))
                                    await asyncio.sleep(1)
                                elif add_role:
                                    log.debug("adding it")
                                    await member.add_roles(*add_role,reason = "Role reward for reaching level {}".format(member_level))
        except asyncio.CancelledError:
            return utils.prRed("Asyncio Cancelled Error")
        except Exception as e:
            # Never let the background task die silently: log and keep the loop alive.
            utils.prRed(e)
            utils.prRed(traceback.format_exc())
    #########################################################################
    #    _____                                                    _        #
    #   / ____|                                                  | |       #
    #  | |      ___   _ __ ___   _ __ ___    __ _  _ __    __| |           #
    #  | |     / _ \ | '_ ` _ \ | '_ ` _ \  / _` || '_ \  / _` |           #
    #  | |____| (_) || | | | | || | | | | || (_| || | | || (_| |           #
    #   \_____|\___/ |_| |_| |_||_| |_| |_| \__,_||_| |_| \__,_|           #
    #                                                                      #
    #########################################################################
    @commands.group(name="levels",aliases=["level","leaderboard"],brief="Prints a link of the guild's leaderboard",pass_context=True,invoke_without_command=True)
    async def level_link(self,ctx):
        # Link to the web leaderboard for this guild.
        await self.bot.say(ctx, content = "Check this out!\nhttp://nurevam.site/level/{}".format(ctx.message.guild.id))
    @level_link.command(name="guild",brief="Prints a link of the guild leaderboard",pass_context=True)
    async def guild_level_link(self,ctx):
        # Same output as the bare `levels` group command.
        await self.bot.say(ctx, content = "Check this out!\nhttp://nurevam.site/level/{}".format(ctx.message.guild.id))
    def rank_embed(self,player,level,remain_xp,next_xp,rank,total_rank,description=""):
        """Build the embed used by `rank` and `rank global`."""
        embed = discord.Embed(description=description)
        embed.set_author(name=str(player),icon_url=player.avatar_url)
        embed.add_field(name = "Level",value=str(level))
        embed.add_field(name = "EXP",value="{}/{}".format(remain_xp,next_xp))
        embed.add_field(name = "Rank",value="{}/{}".format(rank,total_rank))
        if player.colour.value:
            # Only colour the embed if the member has a non-default role colour.
            embed.colour = player.color
        return embed
    @commands.group(brief="Prints your rank",pass_context=True,invoke_without_command=True)
    @commands.check(is_cooldown)
    async def rank(self, ctx,member:discord.Member = None):
        """
        Prints out of your rank,
        <prefix> rank
        will print out of your rank
        unless you did <prefix> rank @mention
        which will show someone's rank.
        """
        guild = ctx.message.guild.id
        player = member or ctx.message.author #if member is None, then it mean it is player self
        if await self.is_ban(player): #checking if user are banned or not
            if player.id == ctx.message.author.id: #checking if it same ID then that person is banned
                return await self.bot.say(ctx,content = "I am sorry, but you are banned. In case this is a mistake, please informate the guild owner")
            else:
                return await self.bot.say(ctx,content = "I am sorry, but {0.display_name} is banned.".format(player))
        #getting data while checking if it exists or not.
        player_data = await self.redis.hgetall("{}:Level:Player:{}".format(ctx.message.guild.id, player.id))
        utils.prGreen("Player_data show {}".format(player_data))
        # NOTE(review): hgetall returns a dict (possibly empty), never False —
        # this "unranked" branch looks unreachable; `if not player_data` was
        # probably intended. Confirm before relying on it.
        if player_data is False: #checking if user are in database
            if player.id != ctx.message.author.id: #if it mention
                return await self.bot.say(ctx,content = "{} doesn't seem to be ranked yet. Tell that person to talk more!".format(player.display_name))
            else: #if it just player self
                return await self.bot.say(ctx,content = "I am sorry, but you don't seem to be ranked yet! Talk more!")
        #Getting rank places
        # SORT the player set by Total_XP, reverse to descending, then find the index.
        data = await self.redis.sort("{}:Level:Player".format(guild),by="{}:Level:Player:*->Total_XP".format(guild),offset = 0,count = -1)
        data = list(reversed(data))
        try:
            player_rank = data.index(str(player.id))+1
        except: # NOTE(review): bare except — ValueError from .index() is what's expected here
            return await self.bot.say(ctx,content = "There is problem with this, maybe you haven't got exp until now. Try chat for a few minutes then try again.")
        player_data = await self.redis.hgetall("{}:Level:Player:{}".format(ctx.message.guild.id, player.id))
        level,remain_xp,next_xp = self.next_Level(int(player_data["Total_XP"]))
        #then make embed of it.
        embed = self.rank_embed(player,level,remain_xp,next_xp,player_rank,len(data))
        await self.bot.say(ctx,embed=embed)
        cooldown = await self.redis.hget("{}:Level:Config".format(guild),"rank_cooldown")
        if cooldown is None or int(cooldown) == 0: # no guild cooldown configured (or set to 0): skip
            return
        # Arm the per-user cooldown that the is_cooldown check reads.
        await self.redis.set("{}:Level:{}:rank:check".format(guild, ctx.message.author.id), 'cooldown', expire=int(cooldown))
    @rank.command(name = "global",brief="Prints your global rank",pass_context=True)
    async def global_rank(self,ctx,member:discord.Member = None):
        """
        Print out global rank, meaning overall guild that share guild we were in.
        <prefix> global rank
        will print out your own rank, unless you did <prefix> global rank @mention
        which will show his/her global rank.
        Note:
        It is may not be accurate at this moment(rank positions),
        """
        player = member or ctx.message.author #if member is None, then it mean it is player self
        data = await self.redis.hgetall("Info:Level:Player_Total_XP")
        total_exp = data.get(str(player.id))
        if total_exp is None:
            return await ctx.send(content = "I am sorry, but there seem to be problem with this")
        # current_exp = await self.redis.hget("Info:Level:Player_Current_XP",player.id)
        level,remain_xp,next_xp = self.next_Level(int(total_exp))
        # Rank by position of this XP value among all values; ties share the
        # first matching index.
        rank_data = sorted(data.values(),key = int,reverse = True) #getting values of dict instead then sort it and make it from highest to lowest
        try:
            rank = rank_data.index(total_exp) + 1
        except: # NOTE(review): bare except; also the next line is missing `await` — the message is never sent. Confirm and fix.
            return self.bot.say(ctx,"There is problem with this, maybe you haven't got exp until now. Try chat for a few minutes then try again.")
        embed = self.rank_embed(player,level,remain_xp,next_xp,rank,len(rank_data),description="Global Rank")
        await self.bot.say(ctx,embed=embed)
    async def table(self,ctx,current_page,guild=None,description = ""):
        """Collect leaderboard rows (guild-local when `guild` is set, else
        global) and hand them to theme_table for rendering."""
        theme_setting = None
        # Fetch everything up front so page navigation doesn't re-query.
        # A truthy `guild` means guild-local; None means global.
        if guild:
            log.debug("Guild requests")
            # SORT emits [ID, Total_XP, ...] pairs ascending; reversing the flat
            # list yields [Total_XP, ID, ...] pairs descending.
            full_data = list(reversed(await self.redis.sort("{}:Level:Player".format(guild.id),
                                                            "{}:Level:Player:*->ID".format(guild.id),
                                                            "{}:Level:Player:*->Total_XP".format(guild.id),by = "{}:Level:Player:*->Total_XP".format(guild.id),offset = 0,count = -1)))
            log.debug(full_data)
            theme_setting = await self.redis.get("{}:Level:pic".format(guild.id))
            log.debug("The theme setting is {}".format(theme_setting))
        else: #global
            log.debug("Global requests")
            temp_id = await self.redis.smembers("Info:Level:Player")
            temp_total = await self.redis.hgetall("Info:Level:Player_Total_XP")
            data = sorted([(int(temp_total[x]),x) for x in temp_id],key = itemgetter(0),reverse=True)
            full_data = [str(main) for x in data for main in x]
            log.debug(full_data)
        # 1D -> 2D: [total_xp1, id1, total_xp2, id2, ...] -> [[total_xp1, id1], ...]
        full_data = [full_data[x:x+2] for x in range(0,len(full_data),2)]
        def proper_page(datax,n):
            # Slice out rows 10*(n-1)..10*n; fall back to earlier pages while empty.
            # NOTE(review): recurses without bound if datax is empty or n <= 0 — confirm callers.
            temp = datax[1*(10*(n-1)):10*n]
            temp = [x for x in temp if None not in x] #making sure there is no None.
            if not(temp):
                return proper_page(datax,n-1)
            return temp,n
        full_data,page = proper_page(full_data,current_page)
        #setting up picture data
        log.debug("Going to make picture.")
        pic_data = [["Rank", "User", "Level", "EXP","Total EXP"]]
        for index,(total_exp,user_id) in enumerate(full_data,start = 1*(10*(page-1)) + 1):
            temp = []
            if guild:
                name = guild.get_member(int(user_id))
                log.debug("under guild and member name is {} ||| {}".format(name, user_id))
                if name is None: # member left and we never saw the leave event: prune them
                    log.debug("Removing member's ID from data")
                    await self.redis.srem("{}:Level:Player".format(guild.id), user_id)
                    continue
                else:
                    name = name.display_name
            else:
                name = self.bot.get_user(int(user_id))
                name = name.name if name is not None else "???"
            level, remain_xp, next_exp = self.next_Level(int(total_exp))
            exp = "{} / {}".format(remain_xp, next_exp)
            # Row order must match the header: Rank, User, Level, EXP, Total EXP.
            temp.append(str(index))
            temp.append(unidecode(name[:19]))
            temp.append(str(level))
            temp.append(exp)
            temp.append(total_exp)
            pic_data.append(temp)
        # One-time notice for guilds that haven't acknowledged the picture theme.
        if theme_setting is None and guild:
            await ctx.send("I just want to info you that embed are replaced with pictures. "
                           "To remove this message, please go to <https://nurevam.site/level/theme/{}> (admin only) or access from Level plugin dashboard "
                           "once you visit there, select \"Enable picture theme\", change some setting if desire, then update it.".format(ctx.message.guild.id))
        return await self.theme_table(ctx,pic_data,is_global = not(guild))
    @commands.group(name = "table",brief = "Prints the top 10 of the leaderbord",pass_context = True,invoke_without_command = True)
    async def rank_table(self, ctx,page = 1 ):
        # Guild-local leaderboard page.
        return await self.table(ctx,page,guild = ctx.message.guild)
    @rank_table.command(name = "global",brief = "Prints the top 10 of the leaderboard global",pass_context = True)
    async def global_table(self,ctx,page = 1):
        # Global leaderboard page (guild=None selects the global branch).
        return await self.table(ctx,page,description="Global Rank Leaderboard")
    async def theme_table(self,ctx,raw_data,is_global = False):
        """
        Render the leaderboard rows as a PNG table and send it.
        (Most of the layout math is credited to XCang.)
        """
        img = Image.new("RGBA", (1000,1000), color=(0, 0, 0, 0))
        # NOTE(review): fonts loaded from the working directory; draw.textsize and
        # Image.ANTIALIAS below are removed in Pillow >= 10 — confirm pinned version.
        fnt = ImageFont.truetype('WhitneyBook.ttf', 12)
        fntb = ImageFont.truetype('WhitneySemiBold.ttf', 12)
        draw = ImageDraw.Draw(img)
        color_setting = await self.redis.hgetall("{}:Level:color".format(ctx.message.guild.id))
        # Parse "r,g,b[,a]" strings from guild config, with defaults.
        border = tuple((int(x) for x in color_setting.get("border",("255,255,255,96")).split(",")))
        row = tuple((int(x) for x in color_setting.get("row",("255,255,255,48")).split(",")))
        if is_global:
            text = (255,255,255)
            outlier = (0,0,0)
        else:
            text = tuple((int(x) for x in color_setting.get("text",("255,255,255")).split(",")))
            outlier = tuple((int(x) for x in color_setting.get("outlier",("0,0,0")).split(",")))
        # m[j] = widest rendered cell in column j, used for column layout.
        m = [0] * len(raw_data[0])
        for i, el in enumerate(raw_data):
            for j, e in enumerate(el):
                wdth, hght = draw.textsize(e, font=fnt)
                if wdth > m[j]: m[j] = wdth
        crop_width,crop_height = (10 + sum(m[:]) + 8 * len(m), 10 + 18 * len(raw_data) + 7)
        setting = await self.redis.hgetall("{}:Level:pic_setting".format(ctx.message.guild.id))
        if is_global:
            pic_data = "http://www.solidbackgrounds.com/images/2560x1440/2560x1440-black-solid-color-background.jpg"
        else:
            pic_data = await self.redis.hget("{}:Level:Config".format(ctx.message.guild.id), "pic")
        if pic_data:
            # Download the background, scale to table width, centre-crop to
            # table height, optionally blur, and paste under the text.
            async with aiohttp.ClientSession() as session:
                async with session.get(pic_data) as resp:
                    pic = Image.open(io.BytesIO(await resp.read())) # decode from memory
            aspectratio = pic.width / pic.height
            pic = pic.resize((crop_width,int(crop_width / aspectratio)),Image.ANTIALIAS)
            pic = pic.crop(box = (0,int((pic.height-crop_height)/2),crop_width,int(crop_height+(pic.height-crop_height)/2)))
            pic = pic.convert('RGB') #Just in case if pic is png or palette
            if setting.get("blur") == "on":
                pic = pic.filter(ImageFilter.BLUR)
            img.paste(pic)
        # Text pass over the 2D row data:
        # row 0 is the header (bold font); later rows use the regular font.
        # The arithmetic positions each cell centred within its column width;
        # four offset draws produce an outline, the fifth is the main text.
        for i, el in enumerate(raw_data):
            for j, txt in enumerate(el):
                wdth, hght = draw.textsize(txt, font=fntb)
                font = fntb
                if i == 0:
                    if j == 0:
                        w,h = (int(10 + (m[j] - wdth) / 2), 10)
                    else:
                        w,h= (int(10 + sum(m[:j]) + (m[j] - wdth) / 2 + 8 * j), 10)
                else:
                    if j == 0:
                        w,h = (int(10 + (m[j] - wdth) / 2), 10 + 18 * i + 5)
                    else:
                        font = fnt
                        wdth, hght = draw.textsize(txt, font=fnt)
                        w,h= (int(10 + sum(m[:j]) + (m[j] - wdth) / 2 + 8 * j), 10 + 18 * i + 5)
                if setting.get("outlier") == "on": # Text Outline
                    draw.text((w - 1, h), txt, font=font,fill=outlier)
                    draw.text((w + 1, h), txt, font=font,fill=outlier)
                    draw.text((w, h - 1), txt, font=font,fill=outlier)
                    draw.text((w, h + 1), txt, font=font,fill=outlier)
                draw.text((w, h), txt, font=font,fill = text) #The main text
        del draw
        # Crop the oversized canvas down to the actual table size.
        img = img.crop(box=(0, 0,crop_width,crop_height))
        utils.prCyan(setting)
        draw = ImageDraw.Draw(img)
        if setting.get("border") == "on":
            #border area
            draw.line((5, 5, 5, img.size[1] - 5), fill=border, width=2)
            draw.line((5, 5, img.size[0] - 5, 5), fill=border, width=2)
            draw.line((5, img.size[1] - 5, img.size[0] - 4, img.size[1] - 5), fill=border, width=2)
            draw.line((img.size[0] - 5, 5, img.size[0] - 5, img.size[1] - 5), fill=border, width=2)
        if setting.get("row") == "on":
            #row/column lines
            for i in range(1, len(m)):
                draw.line((int(5 + sum(m[:i]) + 8 * i), 7, int(5 + sum(m[:i]) + 8 * i), img.size[1] - 5),fill=row, width=1)
            for i in range(1, len(raw_data)):
                if i == 1:
                    draw.line((7, 7 + 18 * i + 2, img.size[0] - 5, 7 + 18 * i + 2), fill=row, width=2)
                else:
                    draw.line((7, 7 + 18 * i + 7, img.size[0] - 5, 7 + 18 * i + 7), fill=row, width=1)
        del draw
        # Serialise to PNG in memory and upload to the channel.
        fp = io.BytesIO()
        img.save(fp, format='PNG')
        fp.seek(0)
        fp = discord.File(fp,filename="top10.png")
        await ctx.send(file=fp)
def setup(bot):
    """discord.py extension entry point: register the Level cog on the bot."""
    cog = Level(bot)
    bot.add_cog(cog)
|
Maverun/Nurevam
|
Bot/cogs/level.py
|
Python
|
mit
| 26,750
|
[
"VisIt"
] |
eb1378146178cc3ae49243266f6e84ef6dfa581dffcd061f5de2d75d38c196b7
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects relating to stories."""
import copy
import re
from constants import constants
from core.domain import change_domain
from core.domain import html_cleaner
import feconf
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
STORY_PROPERTY_TITLE = 'title'
STORY_PROPERTY_DESCRIPTION = 'description'
STORY_PROPERTY_NOTES = 'notes'
STORY_PROPERTY_LANGUAGE_CODE = 'language_code'
STORY_NODE_PROPERTY_DESTINATION_NODE_IDS = 'destination_node_ids'
STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS = 'acquired_skill_ids'
STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS = 'prerequisite_skill_ids'
STORY_NODE_PROPERTY_OUTLINE = 'outline'
STORY_NODE_PROPERTY_TITLE = 'title'
STORY_NODE_PROPERTY_EXPLORATION_ID = 'exploration_id'
INITIAL_NODE_ID = 'initial_node_id'
CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_STORY_PROPERTY = 'update_story_property'
CMD_UPDATE_STORY_NODE_PROPERTY = 'update_story_node_property'
CMD_UPDATE_STORY_CONTENTS_PROPERTY = 'update_story_contents_property'
# These take node_id as parameter.
CMD_ADD_STORY_NODE = 'add_story_node'
CMD_DELETE_STORY_NODE = 'delete_story_node'
CMD_UPDATE_STORY_NODE_OUTLINE_STATUS = 'update_story_node_outline_status'
# This takes an additional 'title' parameter.
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_PUBLISH_STORY = 'publish_story'
CMD_UNPUBLISH_STORY = 'unpublish_story'
ROLE_MANAGER = 'manager'
ROLE_NONE = 'none'
# The prefix for all node ids of a story.
NODE_ID_PREFIX = 'node_'
class StoryChange(change_domain.BaseChange):
    """Domain object for changes made to story object.
    The allowed commands, together with the attributes:
        - 'add_story_node' (with node_id, title)
        - 'delete_story_node' (with node_id)
        - 'update_story_node_outline_status' (with node_id, old_value
            and new_value)
        - 'update_story_property' (with property_name, new_value
            and old_value)
        - 'update_story_node_property' (with property_name, new_value
            and old_value)
        - 'update_story_contents_property' (with property_name,
            new_value and old_value)
        - 'migrate_schema_to_latest_version' (with from_version and
            to_version)
        - 'create_new' (with title)
    """
    # The allowed list of story properties which can be used in
    # update_story_property command.
    STORY_PROPERTIES = (
        STORY_PROPERTY_TITLE, STORY_PROPERTY_DESCRIPTION,
        STORY_PROPERTY_NOTES, STORY_PROPERTY_LANGUAGE_CODE)
    # The allowed list of story node properties which can be used in
    # update_story_node_property command.
    STORY_NODE_PROPERTIES = (
        STORY_NODE_PROPERTY_DESTINATION_NODE_IDS,
        STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS,
        STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS, STORY_NODE_PROPERTY_OUTLINE,
        STORY_NODE_PROPERTY_EXPLORATION_ID, STORY_NODE_PROPERTY_TITLE)
    # The allowed list of story contents properties which can be used in
    # update_story_contents_property command.
    STORY_CONTENTS_PROPERTIES = (INITIAL_NODE_ID,)
    # Schema of every allowed command: required/optional attribute names plus
    # the allowed values for 'property_name' where applicable.
    ALLOWED_COMMANDS = [{
        'name': CMD_UPDATE_STORY_PROPERTY,
        'required_attribute_names': ['property_name', 'new_value', 'old_value'],
        'optional_attribute_names': [],
        'allowed_values': {'property_name': STORY_PROPERTIES}
    }, {
        'name': CMD_UPDATE_STORY_NODE_PROPERTY,
        'required_attribute_names': [
            'node_id', 'property_name', 'new_value', 'old_value'],
        'optional_attribute_names': [],
        'allowed_values': {'property_name': STORY_NODE_PROPERTIES}
    }, {
        'name': CMD_UPDATE_STORY_CONTENTS_PROPERTY,
        'required_attribute_names': ['property_name', 'new_value', 'old_value'],
        'optional_attribute_names': [],
        'allowed_values': {'property_name': STORY_CONTENTS_PROPERTIES}
    }, {
        'name': CMD_ADD_STORY_NODE,
        'required_attribute_names': ['node_id', 'title'],
        'optional_attribute_names': []
    }, {
        'name': CMD_DELETE_STORY_NODE,
        'required_attribute_names': ['node_id'],
        'optional_attribute_names': []
    }, {
        'name': CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
        'required_attribute_names': ['node_id', 'old_value', 'new_value'],
        'optional_attribute_names': []
    }, {
        'name': CMD_CREATE_NEW,
        'required_attribute_names': ['title'],
        'optional_attribute_names': []
    }, {
        'name': CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION,
        'required_attribute_names': ['from_version', 'to_version'],
        'optional_attribute_names': []
    }]
class StoryNode(object):
    """Domain object describing a node in the exploration graph of a
    story.
    """
    # NOTE(review): `basestring` below means this file targets Python 2.
    def __init__(
            self, node_id, title, destination_node_ids,
            acquired_skill_ids, prerequisite_skill_ids,
            outline, outline_is_finalized, exploration_id):
        """Initializes a StoryNode domain object.
        Args:
            node_id: str. The unique id for each node.
            title: str. The title of the story node.
            destination_node_ids: list(str). The list of destination node ids
                that this node points to in the story graph.
            acquired_skill_ids: list(str). The list of skill ids acquired by
                the user on completion of the node.
            prerequisite_skill_ids: list(str). The list of skill ids required
                before starting a node.
            outline: str. Free-form annotations that a lesson implementer
                can use to construct the exploration. It describes the basic
                theme or template of the story and is to be provided in html
                form.
            outline_is_finalized: bool. Whether the outline for the story
                node is finalized or not.
            exploration_id: str or None. The valid exploration id that fits the
                story node. It can be None initially, when the story creator
                has just created a story with the basic storyline (by providing
                outlines) without linking an exploration to any node.
        """
        self.id = node_id
        self.title = title
        self.destination_node_ids = destination_node_ids
        self.acquired_skill_ids = acquired_skill_ids
        self.prerequisite_skill_ids = prerequisite_skill_ids
        # The outline is sanitized on the way in; the raw value is not kept.
        self.outline = html_cleaner.clean(outline)
        self.outline_is_finalized = outline_is_finalized
        self.exploration_id = exploration_id
    @classmethod
    def get_number_from_node_id(cls, node_id):
        """Decodes the node_id to get the number at the end of the id.
        Args:
            node_id: str. The id of the node.
        Returns:
            int. The number at the end of the id.
        """
        return int(node_id.replace(NODE_ID_PREFIX, ''))
    @classmethod
    def get_incremented_node_id(cls, node_id):
        """Increments the next node id of the story.
        Args:
            node_id: str. The current node id, e.g. 'node_3'.
        Returns:
            str. The new next node id.
        """
        current_number = StoryNode.get_number_from_node_id(node_id)
        incremented_node_id = NODE_ID_PREFIX + str(current_number + 1)
        return incremented_node_id
    @classmethod
    def require_valid_node_id(cls, node_id):
        """Validates the node id for a StoryNode object.
        Args:
            node_id: str. The node id to be validated.
        Raises:
            ValidationError: The node id is not a string or does not have
                the 'node_<number>' form.
        """
        if not isinstance(node_id, basestring):
            raise utils.ValidationError(
                'Expected node ID to be a string, received %s' %
                node_id)
        # NOTE(review): re.match only anchors at the start, so ids like
        # 'node_1x' also pass — fullmatch (or a trailing '$') may be intended.
        pattern = re.compile('%s[0-9]+' % NODE_ID_PREFIX)
        if not pattern.match(node_id):
            raise utils.ValidationError(
                'Invalid node_id: %s' % node_id)
    def to_dict(self):
        """Returns a dict representing this StoryNode domain object.
        Returns:
            A dict, mapping all fields of StoryNode instance.
        """
        return {
            'id': self.id,
            'title': self.title,
            'destination_node_ids': self.destination_node_ids,
            'acquired_skill_ids': self.acquired_skill_ids,
            'prerequisite_skill_ids': self.prerequisite_skill_ids,
            'outline': self.outline,
            'outline_is_finalized': self.outline_is_finalized,
            'exploration_id': self.exploration_id
        }
    @classmethod
    def from_dict(cls, node_dict):
        """Return a StoryNode domain object from a dict.
        Args:
            node_dict: dict. The dict representation of StoryNode object.
        Returns:
            StoryNode. The corresponding StoryNode domain object.
        """
        node = cls(
            node_dict['id'], node_dict['title'],
            node_dict['destination_node_ids'],
            node_dict['acquired_skill_ids'],
            node_dict['prerequisite_skill_ids'], node_dict['outline'],
            node_dict['outline_is_finalized'], node_dict['exploration_id'])
        return node
    @classmethod
    def create_default_story_node(cls, node_id, title):
        """Returns a StoryNode domain object with default values.
        Args:
            node_id: str. The id of the node.
            title: str. The title of the node.
        Returns:
            StoryNode. The StoryNode domain object with default
            value.
        """
        return cls(node_id, title, [], [], [], '', False, None)
    def validate(self):
        """Validates various properties of the story node.
        Raises:
            ValidationError: One or more attributes of the story node are
                invalid.
        """
        # exploration_id may legitimately be None; only type-check when set.
        if self.exploration_id:
            if not isinstance(self.exploration_id, basestring):
                raise utils.ValidationError(
                    'Expected exploration ID to be a string, received %s' %
                    self.exploration_id)
        if not isinstance(self.outline, basestring):
            raise utils.ValidationError(
                'Expected outline to be a string, received %s' %
                self.outline)
        if not isinstance(self.title, basestring):
            raise utils.ValidationError(
                'Expected title to be a string, received %s' %
                self.title)
        if not isinstance(self.outline_is_finalized, bool):
            raise utils.ValidationError(
                'Expected outline_is_finalized to be a boolean, received %s' %
                self.outline_is_finalized)
        self.require_valid_node_id(self.id)
        if not isinstance(self.prerequisite_skill_ids, list):
            raise utils.ValidationError(
                'Expected prerequisite skill ids to be a list, received %s' %
                self.prerequisite_skill_ids)
        for skill_id in self.prerequisite_skill_ids:
            if not isinstance(skill_id, basestring):
                raise utils.ValidationError(
                    'Expected each prerequisite skill id to be a string, '
                    'received %s' % skill_id)
        # Duplicate detection via set-size comparison.
        if (
                len(self.prerequisite_skill_ids) >
                len(set(self.prerequisite_skill_ids))):
            raise utils.ValidationError(
                'Expected all prerequisite skills to be distinct.')
        if not isinstance(self.acquired_skill_ids, list):
            raise utils.ValidationError(
                'Expected acquired skill ids to be a list, received %s' %
                self.acquired_skill_ids)
        for skill_id in self.acquired_skill_ids:
            if not isinstance(skill_id, basestring):
                raise utils.ValidationError(
                    'Expected each acquired skill id to be a string, '
                    'received %s' % skill_id)
        if (
                len(self.acquired_skill_ids) >
                len(set(self.acquired_skill_ids))):
            raise utils.ValidationError(
                'Expected all acquired skills to be distinct.')
        # A skill cannot be both a prerequisite for and acquired from this node.
        for skill_id in self.prerequisite_skill_ids:
            if skill_id in self.acquired_skill_ids:
                raise utils.ValidationError(
                    'Expected prerequisite skill ids and acquired skill ids '
                    'to be mutually exclusive. The skill_id %s intersects '
                    % skill_id)
        if not isinstance(self.destination_node_ids, list):
            raise utils.ValidationError(
                'Expected destination node ids to be a list, received %s' %
                self.destination_node_ids)
        for node_id in self.destination_node_ids:
            self.require_valid_node_id(node_id)
            # Self-loops are not allowed in the story graph.
            if node_id == self.id:
                raise utils.ValidationError(
                    'The story node with ID %s points to itself.' % node_id)
class StoryContents(object):
    """Domain object representing the story_contents dict.

    Holds the graph of StoryNode objects, the id of the starting node, and
    the id that will be assigned to the next node added to the story.
    """

    def __init__(self, story_nodes, initial_node_id, next_node_id):
        """Constructs a StoryContents domain object.

        Args:
            story_nodes: list(StoryNode). The list of story nodes that are part
                of this story.
            initial_node_id: str. The id of the starting node of the story.
                May be None while the story has no nodes (see validate()).
            next_node_id: str. The id for the next node to be added to the
                story.
        """
        self.initial_node_id = initial_node_id
        self.nodes = story_nodes
        self.next_node_id = next_node_id

    def validate(self):
        """Validates various properties of the story contents object.

        Checks types, node-id bounds and uniqueness, and that the node graph
        is fully connected, acyclic, and respects skill prerequisites.

        Raises:
            ValidationError: One or more attributes of the story contents are
                invalid.
        """
        if not isinstance(self.nodes, list):
            raise utils.ValidationError(
                'Expected nodes field to be a list, received %s' % self.nodes)
        # initial_node_id is only required once the story has at least one
        # node; next_node_id must always be valid.
        if len(self.nodes) > 0:
            StoryNode.require_valid_node_id(self.initial_node_id)
        StoryNode.require_valid_node_id(self.next_node_id)
        initial_node_is_present = False
        node_id_list = []
        for node in self.nodes:
            if not isinstance(node, StoryNode):
                raise utils.ValidationError(
                    'Expected each node to be a StoryNode object, received %s' %
                    node)
            node.validate()
            for destination_node_id in node.destination_node_ids:
                # Every destination must refer to a node in this story.
                if next(
                        (node for node in self.nodes
                         if node.id == destination_node_id), None) is None:
                    raise utils.ValidationError(
                        'Expected all destination nodes to exist')
            if node.id == self.initial_node_id:
                initial_node_is_present = True
            # Checks whether the number in the id of any node is greater than
            # the value of next_node_id.
            if (StoryNode.get_number_from_node_id(node.id) >=
                    StoryNode.get_number_from_node_id(self.next_node_id)):
                raise utils.ValidationError(
                    'The node with id %s is out of bounds.' % node.id)
            node_id_list.append(node.id)
        if len(self.nodes) > 0:
            if not initial_node_is_present:
                raise utils.ValidationError('Expected starting node to exist.')
            if len(node_id_list) > len(set(node_id_list)):
                raise utils.ValidationError(
                    'Expected all node ids to be distinct.')
            # nodes_queue stores the pending unlocked nodes still to visit.
            # NOTE(review): list.pop() below removes from the END, so this is
            # actually a LIFO stack (depth-first traversal), not the FIFO
            # queue / breadth-first search the original comments described.
            # Confirm the intended traversal order before relying on it; the
            # order affects which skills are 'acquired' when a destination's
            # prerequisites are checked.
            nodes_queue = []
            is_node_visited = [False] * len(self.nodes)
            starting_node_index = self.get_node_index(self.initial_node_id)
            nodes_queue.append(self.nodes[starting_node_index].id)
            # The user is assumed to have all the prerequisite skills of the
            # starting node before starting the story. Also, this list models
            # the skill IDs acquired by a learner as they progress through the
            # story.
            simulated_skill_ids = copy.deepcopy(
                self.nodes[starting_node_index].prerequisite_skill_ids)
            # The following loop traverses the graph from the starting node
            # and makes sure the user has acquired all the prerequisite
            # skills required by the destination nodes 'unlocked' by visiting
            # a particular node by the time that node is finished.
            while len(nodes_queue) > 0:
                current_node_id = nodes_queue.pop()
                current_node_index = self.get_node_index(current_node_id)
                is_node_visited[current_node_index] = True
                current_node = self.nodes[current_node_index]
                for skill_id in current_node.acquired_skill_ids:
                    simulated_skill_ids.append(skill_id)
                for node_id in current_node.destination_node_ids:
                    node_index = self.get_node_index(node_id)
                    # The following condition checks whether the destination
                    # node for a particular node, has already been visited, in
                    # which case the story would have loops, which are not
                    # allowed. (Note this also rejects diamond-shaped DAGs,
                    # since any node reachable twice trips this check.)
                    if is_node_visited[node_index]:
                        raise utils.ValidationError(
                            'Loops are not allowed in stories.')
                    destination_node = self.nodes[node_index]
                    if not (
                            set(
                                destination_node.prerequisite_skill_ids
                            ).issubset(simulated_skill_ids)):
                        raise utils.ValidationError(
                            'The prerequisite skills ' +
                            ' '.join(
                                set(destination_node.prerequisite_skill_ids) -
                                set(simulated_skill_ids)) +
                            ' were not completed before the node with id %s'
                            ' was unlocked.' % node_id)
                    nodes_queue.append(node_id)
            # Any node never reached from the starting node is disconnected.
            for index, node_visited in enumerate(is_node_visited):
                if not node_visited:
                    raise utils.ValidationError(
                        'The node with id %s is disconnected from the '
                        'story graph.' % self.nodes[index].id)

    def get_node_index(self, node_id):
        """Returns the index of the story node with the given node
        id, or None if the node id is not in the story contents dict.

        Args:
            node_id: str. The id of the node.

        Returns:
            int or None. The index of the corresponding node, or None if there
            is no such node.
        """
        for ind, node in enumerate(self.nodes):
            if node.id == node_id:
                return ind
        return None

    def get_ordered_nodes(self):
        """Returns a list of nodes ordered by how they would appear
        sequentially to a learner.

        NOTE: Currently, this function assumes only a linear arrangement of
        nodes (it always follows destination_node_ids[0]); on a cyclic graph
        it would not terminate, but validate() rejects cycles.

        Returns:
            list(StoryNode). The ordered list of nodes.
        """
        initial_index = self.get_node_index(self.initial_node_id)
        current_node = self.nodes[initial_index]
        ordered_nodes_list = [current_node]
        while current_node.destination_node_ids:
            next_node_id = current_node.destination_node_ids[0]
            current_node = self.nodes[self.get_node_index(next_node_id)]
            ordered_nodes_list.append(current_node)
        return ordered_nodes_list

    def to_dict(self):
        """Returns a dict representing this StoryContents domain object.

        Returns:
            A dict, mapping all fields of StoryContents instance.
        """
        return {
            'nodes': [
                node.to_dict() for node in self.nodes
            ],
            'initial_node_id': self.initial_node_id,
            'next_node_id': self.next_node_id
        }

    @classmethod
    def from_dict(cls, story_contents_dict):
        """Return a StoryContents domain object from a dict.

        Args:
            story_contents_dict: dict. The dict representation of
                StoryContents object.

        Returns:
            StoryContents. The corresponding StoryContents domain object.
        """
        story_contents = cls(
            [
                StoryNode.from_dict(story_node_dict)
                for story_node_dict in story_contents_dict['nodes']
            ], story_contents_dict['initial_node_id'],
            story_contents_dict['next_node_id']
        )
        return story_contents
class Story(object):
    """Domain object for an Oppia Story."""

    def __init__(
            self, story_id, title, description, notes,
            story_contents, story_contents_schema_version, language_code,
            corresponding_topic_id, version, created_on=None,
            last_updated=None):
        """Constructs a Story domain object.

        Args:
            story_id: str. The unique ID of the story.
            title: str. The title of the story.
            description: str. The high level description of the story.
            notes: str. A set of notes, that describe the characters,
                main storyline, and setting. To be provided in html form.
            story_contents: StoryContents. The StoryContents instance
                representing the contents (like nodes) that are part of the
                story.
            story_contents_schema_version: int. The schema version for the
                story contents object.
            language_code: str. The ISO 639-1 code for the language this
                story is written in.
            corresponding_topic_id: str. The id of the topic to which the
                story belongs.
            version: int. The version of the story.
            created_on: datetime.datetime. Date and time when the story is
                created.
            last_updated: datetime.datetime. Date and time when the
                story was last updated.
        """
        self.id = story_id
        self.title = title
        self.description = description
        # Notes arrive as html, so they are sanitized on the way in.
        self.notes = html_cleaner.clean(notes)
        self.story_contents = story_contents
        self.story_contents_schema_version = story_contents_schema_version
        self.language_code = language_code
        self.corresponding_topic_id = corresponding_topic_id
        self.created_on = created_on
        self.last_updated = last_updated
        self.version = version

    def _require_node_index(self, node_id):
        """Returns the index of the node with the given id, or raises.

        Consolidates the lookup-or-raise pattern previously duplicated in
        every node-updating method. NOTE: the error message is normalized to
        end with a period; a few former call sites omitted it.

        Args:
            node_id: str. The id of the node.

        Returns:
            int. The index of the node in story_contents.nodes.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story.' % node_id)
        return node_index

    def validate(self):
        """Validates various properties of the story object.

        Raises:
            ValidationError: One or more attributes of story are invalid.
        """
        self.require_valid_title(self.title)
        if not isinstance(self.description, basestring):
            raise utils.ValidationError(
                'Expected description to be a string, received %s'
                % self.description)
        if not isinstance(self.notes, basestring):
            raise utils.ValidationError(
                'Expected notes to be a string, received %s' % self.notes)
        if not isinstance(self.story_contents_schema_version, int):
            raise utils.ValidationError(
                'Expected story contents schema version to be an integer, '
                'received %s' % self.story_contents_schema_version)
        if (self.story_contents_schema_version !=
                feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION):
            raise utils.ValidationError(
                'Expected story contents schema version to be %s, '
                'received %s' % (
                    feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION,
                    self.story_contents_schema_version))
        if not isinstance(self.language_code, basestring):
            raise utils.ValidationError(
                'Expected language code to be a string, received %s' %
                self.language_code)
        if not utils.is_valid_language_code(self.language_code):
            raise utils.ValidationError(
                'Invalid language code: %s' % self.language_code)
        if not isinstance(self.corresponding_topic_id, basestring):
            raise utils.ValidationError(
                'Expected corresponding_topic_id should be a string, received: '
                '%s' % self.corresponding_topic_id)
        self.story_contents.validate()

    @classmethod
    def require_valid_story_id(cls, story_id):
        """Checks whether the story id is a valid one.

        Args:
            story_id: str. The story id to validate.

        Raises:
            ValidationError: The id is not a string or not 12 characters long.
        """
        if not isinstance(story_id, basestring):
            raise utils.ValidationError(
                'Story id should be a string, received: %s' % story_id)
        if len(story_id) != 12:
            raise utils.ValidationError('Invalid story id.')

    @classmethod
    def require_valid_title(cls, title):
        """Checks whether the story title is a valid one.

        Args:
            title: str. The title to validate.

        Raises:
            ValidationError: The title is not a string or is empty.
        """
        if not isinstance(title, basestring):
            raise utils.ValidationError('Title should be a string.')
        if title == '':
            raise utils.ValidationError('Title field should not be empty')

    def get_acquired_skill_ids_for_node_ids(self, node_ids):
        """Returns the acquired skill ids of the nodes having the given
        node ids.

        Args:
            node_ids: list(str). The list of IDs of the nodes inside
                the story.

        Returns:
            list(str). The union of the acquired skill IDs corresponding to
            each of the node IDs, de-duplicated but in first-seen order.
        """
        acquired_skill_ids = []
        for node in self.story_contents.nodes:
            if node.id in node_ids:
                for skill_id in node.acquired_skill_ids:
                    if skill_id not in acquired_skill_ids:
                        acquired_skill_ids.append(skill_id)
        return acquired_skill_ids

    def get_prerequisite_skill_ids_for_exp_id(self, exp_id):
        """Returns the prerequisite skill ids of the node having the given
        exploration id.

        Args:
            exp_id: str. The ID of the exploration linked to the story.

        Returns:
            list(str)|None. The list of prerequisite skill ids for the
            exploration or None, if no node is linked to it.
        """
        for node in self.story_contents.nodes:
            if node.exploration_id == exp_id:
                return node.prerequisite_skill_ids
        return None

    def has_exploration(self, exp_id):
        """Checks whether an exploration is present in the story.

        Args:
            exp_id: str. The ID of the exploration linked to the story.

        Returns:
            bool. Whether the exploration is linked to the story.
        """
        return any(
            node.exploration_id == exp_id
            for node in self.story_contents.nodes)

    def to_dict(self):
        """Returns a dict representing this Story domain object.

        Returns:
            A dict, mapping all fields of Story instance.
        """
        return {
            'id': self.id,
            'title': self.title,
            'description': self.description,
            'notes': self.notes,
            'language_code': self.language_code,
            'story_contents_schema_version': self.story_contents_schema_version,
            'corresponding_topic_id': self.corresponding_topic_id,
            'version': self.version,
            'story_contents': self.story_contents.to_dict()
        }

    @classmethod
    def create_default_story(cls, story_id, title, corresponding_topic_id):
        """Returns a story domain object with default values. This is for
        the frontend where a default blank story would be shown to the user
        when the story is created for the first time.

        Args:
            story_id: str. The unique id of the story.
            title: str. The title for the newly created story.
            corresponding_topic_id: str. The id of the topic to which the
                story belongs.

        Returns:
            Story. The Story domain object with the default values.
        """
        # Initial node id for a new story.
        initial_node_id = '%s1' % NODE_ID_PREFIX
        story_contents = StoryContents([], None, initial_node_id)
        return cls(
            story_id, title,
            feconf.DEFAULT_STORY_DESCRIPTION, feconf.DEFAULT_STORY_NOTES,
            story_contents, feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION,
            constants.DEFAULT_LANGUAGE_CODE, corresponding_topic_id, 0)

    @classmethod
    def update_story_contents_from_model(
            cls, versioned_story_contents, current_version):
        """Converts the story_contents blob contained in the given
        versioned_story_contents dict from current_version to
        current_version + 1. Note that the versioned_story_contents being
        passed in is modified in-place.

        Args:
            versioned_story_contents: dict. A dict with two keys:
                - schema_version: str. The schema version for the
                    story_contents dict.
                - story_contents: dict. The dict comprising the story
                    contents.
            current_version: int. The current schema version of
                story_contents.
        """
        versioned_story_contents['schema_version'] = current_version + 1
        # Migration functions follow the naming convention
        # _convert_story_contents_v<N>_dict_to_v<N+1>_dict.
        conversion_fn = getattr(
            cls, '_convert_story_contents_v%s_dict_to_v%s_dict' % (
                current_version, current_version + 1))
        versioned_story_contents['story_contents'] = conversion_fn(
            versioned_story_contents['story_contents'])

    def update_title(self, title):
        """Updates the title of the story.

        Args:
            title: str. The new title of the story.
        """
        self.title = title

    def update_description(self, description):
        """Updates the description of the story.

        Args:
            description: str. The new description of the story.
        """
        self.description = description

    def update_notes(self, notes):
        """Updates the notes of the story.

        NOTE(review): unlike __init__, this does not pass notes through
        html_cleaner.clean() — confirm whether the caller sanitizes first
        before changing this.

        Args:
            notes: str. The new notes of the story.
        """
        self.notes = notes

    def update_language_code(self, language_code):
        """Updates the language code of the story.

        Args:
            language_code: str. The new language code of the story.
        """
        self.language_code = language_code

    def add_node(self, desired_node_id, node_title):
        """Adds a new default node with the id as story_contents.next_node_id.

        Args:
            desired_node_id: str. The node id to be given to the new node in
                the story.
            node_title: str. The title for the new story node.

        Raises:
            Exception: The desired_node_id differs from
                story_contents.next_node_id.
        """
        if self.story_contents.next_node_id != desired_node_id:
            raise Exception(
                'The node id %s does not match the expected '
                'next node id for the story.' % desired_node_id)
        self.story_contents.nodes.append(
            StoryNode.create_default_story_node(desired_node_id, node_title))
        self.story_contents.next_node_id = (
            StoryNode.get_incremented_node_id(self.story_contents.next_node_id))
        # The first node added becomes the starting node.
        if self.story_contents.initial_node_id is None:
            self.story_contents.initial_node_id = desired_node_id

    def _check_exploration_id_already_present(self, exploration_id):
        """Returns whether a node with the given exploration id is already
        present in story_contents.

        Args:
            exploration_id: str. The id of the exploration.

        Returns:
            bool. Whether a node with the given exploration ID is already
            present.
        """
        return any(
            node.exploration_id == exploration_id
            for node in self.story_contents.nodes)

    def delete_node(self, node_id):
        """Deletes a node with the given node_id.

        Args:
            node_id: str. The id of the node.

        Raises:
            ValueError: The node is not part of the story, or it is the
                starting node of a story with more than one node.
        """
        node_index = self._require_node_index(node_id)
        if node_id == self.story_contents.initial_node_id:
            # The starting node may only be deleted when it is the last node
            # left; otherwise the caller must pick a new starting node first.
            if len(self.story_contents.nodes) == 1:
                self.story_contents.initial_node_id = None
            else:
                raise ValueError(
                    'The node with id %s is the starting node for the story, '
                    'change the starting node before deleting it.' % node_id)
        # Remove dangling references to the deleted node.
        for node in self.story_contents.nodes:
            if node_id in node.destination_node_ids:
                node.destination_node_ids.remove(node_id)
        del self.story_contents.nodes[node_index]

    def update_node_outline(self, node_id, new_outline):
        """Updates the outline field of a given node.

        Args:
            node_id: str. The id of the node.
            new_outline: str. The new outline of the given node.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].outline = new_outline

    def update_node_title(self, node_id, new_title):
        """Updates the title field of a given node.

        Args:
            node_id: str. The id of the node.
            new_title: str. The new title of the given node.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].title = new_title

    def mark_node_outline_as_finalized(self, node_id):
        """Updates the outline_is_finalized field of the node with the given
        node_id as True.

        Args:
            node_id: str. The id of the node.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].outline_is_finalized = True

    def mark_node_outline_as_unfinalized(self, node_id):
        """Updates the outline_is_finalized field of the node with the given
        node_id as False.

        Args:
            node_id: str. The id of the node.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].outline_is_finalized = False

    def update_node_acquired_skill_ids(self, node_id, new_acquired_skill_ids):
        """Updates the acquired skill ids field of a given node.

        Args:
            node_id: str. The id of the node.
            new_acquired_skill_ids: list(str). The updated acquired skill id
                list.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].acquired_skill_ids = (
            new_acquired_skill_ids)

    def update_node_prerequisite_skill_ids(
            self, node_id, new_prerequisite_skill_ids):
        """Updates the prerequisite skill ids field of a given node.

        Args:
            node_id: str. The id of the node.
            new_prerequisite_skill_ids: list(str). The updated prerequisite
                skill id list.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].prerequisite_skill_ids = (
            new_prerequisite_skill_ids)

    def update_node_destination_node_ids(
            self, node_id, new_destination_node_ids):
        """Updates the destination_node_ids field of a given node.

        Args:
            node_id: str. The id of the node.
            new_destination_node_ids: list(str). The updated destination
                node id list.

        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self._require_node_index(node_id)
        self.story_contents.nodes[node_index].destination_node_ids = (
            new_destination_node_ids)

    def update_node_exploration_id(
            self, node_id, new_exploration_id):
        """Updates the exploration id field of a given node.

        Args:
            node_id: str. The id of the node.
            new_exploration_id: str. The updated exploration id for a node.

        Raises:
            ValueError: The node is not part of the story, or another node
                already links to the given exploration.
        """
        node_index = self._require_node_index(node_id)
        if self._check_exploration_id_already_present(new_exploration_id):
            raise ValueError(
                'A node with exploration id %s already exists.' %
                new_exploration_id)
        self.story_contents.nodes[node_index].exploration_id = (
            new_exploration_id)

    def update_initial_node(self, new_initial_node_id):
        """Updates the starting node of the story.

        Args:
            new_initial_node_id: str. The new starting node id.

        Raises:
            ValueError: The node is not part of the story.
        """
        self._require_node_index(new_initial_node_id)
        self.story_contents.initial_node_id = new_initial_node_id
class StorySummary(object):
    """Lightweight summary of a Story, suitable for list/overview pages."""

    def __init__(
            self, story_id, title, description, language_code, version,
            node_count, story_model_created_on,
            story_model_last_updated):
        """Builds a StorySummary domain object.

        Args:
            story_id: str. The unique id of the story.
            title: str. The title of the story.
            description: str. The description of the story.
            language_code: str. The language code of the story.
            version: int. The version of the story.
            node_count: int. The number of nodes present in the story.
            story_model_created_on: datetime.datetime. Date and time when
                the story model is created.
            story_model_last_updated: datetime.datetime. Date and time
                when the story model was last updated.
        """
        self.id = story_id
        self.title = title
        self.description = description
        self.language_code = language_code
        self.version = version
        self.node_count = node_count
        self.story_model_created_on = story_model_created_on
        self.story_model_last_updated = story_model_last_updated

    def validate(self):
        """Checks the summary fields for validity.

        Raises:
            ValidationError: One or more attributes of story summary are
                invalid.
        """
        title = self.title
        if not isinstance(title, basestring):
            raise utils.ValidationError(
                'Expected title to be a string, received %s' % title)
        if title == '':
            raise utils.ValidationError('Title field should not be empty')
        description = self.description
        if not isinstance(description, basestring):
            raise utils.ValidationError(
                'Expected description to be a string, received %s'
                % description)
        node_count = self.node_count
        if not isinstance(node_count, int):
            raise utils.ValidationError(
                'Expected node_count to be an int, received \'%s\'' % (
                    node_count))
        if node_count < 0:
            raise utils.ValidationError(
                'Expected node_count to be non-negative, received \'%s\'' % (
                    node_count))
        language_code = self.language_code
        if not isinstance(language_code, basestring):
            raise utils.ValidationError(
                'Expected language code to be a string, received %s' %
                language_code)
        if not utils.is_valid_language_code(language_code):
            raise utils.ValidationError(
                'Invalid language code: %s' % language_code)

    def to_dict(self):
        """Serializes this summary to a dict.

        Returns:
            dict. A dict representing this StorySummary object.
        """
        created_on_msec = utils.get_time_in_millisecs(
            self.story_model_created_on)
        last_updated_msec = utils.get_time_in_millisecs(
            self.story_model_last_updated)
        return {
            'id': self.id,
            'title': self.title,
            'description': self.description,
            'language_code': self.language_code,
            'version': self.version,
            'node_count': self.node_count,
            'story_model_created_on': created_on_msec,
            'story_model_last_updated': last_updated_msec
        }

    def to_human_readable_dict(self):
        """Serializes only the display-oriented fields of this summary.

        Returns:
            dict. A dict with the id, title and description of the story.
        """
        return {
            'id': self.id,
            'title': self.title,
            'description': self.description
        }
class StoryRights(object):
    """Domain object holding the access rights of a single story."""

    def __init__(self, story_id, manager_ids, story_is_published):
        """Builds a StoryRights domain object.

        Args:
            story_id: str. The id of the story.
            manager_ids: list(str). The id of the users who have been
                assigned as managers for the story.
            story_is_published: bool. Whether the story is viewable by a
                learner.
        """
        self.id = story_id
        self.manager_ids = manager_ids
        self.story_is_published = story_is_published

    def to_dict(self):
        """Serializes these rights for consumption by the frontend.

        Returns:
            dict. A dict version of StoryRights suitable for use by the
            frontend.
        """
        story_rights_dict = {
            'story_id': self.id,
            'manager_names': self.manager_ids,
            'story_is_published': self.story_is_published
        }
        return story_rights_dict

    def is_manager(self, user_id):
        """Reports whether the given user manages this story.

        Args:
            user_id: str or None. Id of the user.

        Returns:
            bool. Whether user is a manager of this story.
        """
        return user_id in self.manager_ids
class StoryRightsChange(change_domain.BaseChange):
    """Domain object for changes made to a story rights object.

    The allowed commands, together with the attributes:
        - 'change_role' (with assignee_id, new_role and old_role)
        - 'create_new'
        - 'publish_story'
        - 'unpublish_story'.
    """

    # The allowed list of roles which can be used in change_role command.
    ALLOWED_ROLES = [ROLE_NONE, ROLE_MANAGER]

    # One schema entry per command, listing its required and optional
    # attributes. Presumably consumed by change_domain.BaseChange for
    # validation — confirm against the BaseChange implementation.
    ALLOWED_COMMANDS = [{
        'name': CMD_CREATE_NEW,
        'required_attribute_names': [],
        'optional_attribute_names': []
    }, {
        'name': CMD_CHANGE_ROLE,
        'required_attribute_names': ['assignee_id', 'new_role', 'old_role'],
        'optional_attribute_names': [],
        # Restricts the role attributes to the roles declared above.
        'allowed_values': {'new_role': ALLOWED_ROLES, 'old_role': ALLOWED_ROLES}
    }, {
        'name': CMD_PUBLISH_STORY,
        'required_attribute_names': [],
        'optional_attribute_names': []
    }, {
        'name': CMD_UNPUBLISH_STORY,
        'required_attribute_names': [],
        'optional_attribute_names': []
    }]
|
souravbadami/oppia
|
core/domain/story_domain.py
|
Python
|
apache-2.0
| 46,403
|
[
"VisIt"
] |
6a5970ece334b5ae931817cdba33036b31d225f35528cf1ee4fd76c00d8badd8
|
"""
Window functions (:mod:`scipy.signal.windows`)
==============================================
The suite of window functions for filtering and spectral estimation.
.. currentmodule:: scipy.signal.windows
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
dpss -- Discrete prolate spheroidal sequences
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_cosine -- Generalized Cosine window
general_gaussian -- Generalized Gaussian window
general_hamming -- Generalized Hamming window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
triang -- Triangular window
tukey -- Tukey window
"""
from .windows import *
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine',
'general_hamming', 'chebwin', 'cosine', 'hann',
'exponential', 'tukey', 'get_window', 'dpss']
|
nmayorov/scipy
|
scipy/signal/windows/__init__.py
|
Python
|
bsd-3-clause
| 1,722
|
[
"Gaussian"
] |
3864dee43eb468a50f7a231c4fa4bd22a5f45933ca9c00413c647244dc688721
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""Deprecated alias for :mod:`pymatgen.io.vasp.outputs`.

Importing this module re-exports everything from the new location and emits
a warning pointing callers at it.
"""

from __future__ import division, unicode_literals

__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'

import warnings

# Fixed wording: the message previously read "...has been moved
# pymatgen.io.vasp.outputs This stub..." (missing 'to' and punctuation).
warnings.warn("pymatgen.io.vaspio.vasp_output has been moved to "
              "pymatgen.io.vasp.outputs. "
              "This stub will be removed in pymatgen 4.0.")

from pymatgen.io.vasp.outputs import *
|
migueldiascosta/pymatgen
|
pymatgen/io/vaspio/vasp_output.py
|
Python
|
mit
| 632
|
[
"VASP",
"pymatgen"
] |
bd562c8b853a64875969936f3ecf375282068d784d554a4e95967dc2f59c2d47
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a MolecularOrbital class to represent band character in
solids. Usefull for predicting PDOS character from structural information.
"""
from itertools import chain, combinations
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
class MolecularOrbitals:
    """
    Represents the character of bands in a solid, derived purely from a
    chemical formula (no structural information is used).

    The band character of a crystal emerges from the atomic orbitals of the
    constituent ions, hybridization/covalent bonds, and the spin-orbit
    interaction (e.g. Fe2O3). Right now the orbitals are only built from
    the uncharged atomic species. Functionality can be improved by:
    1) calculating charged ion orbital energies
    2) incorporating the coordination environment to account for covalent
       bonds

    The atomic orbital energies are stored in
    pymatgen.core.periodic_table.JSON.

    Example: MolecularOrbitals('SrTiO3').band_edges yields a dict such as
    {'HOMO': ['O', '2p', -0.338381], 'LUMO': ['Ti', '3d', -0.17001],
    'metal': False}
    """

    def __init__(self, formula):
        """
        Args:
            formula: chemical formula as a string; subscripts must be
                integers. Ex: 'SrTiO3'

        Attributes:
            composition: the composition as a dictionary.
                Ex: {'Sr': 1, 'Ti': 1, 'O': 3}
            elements: the dictionary keys of the composition
            elec_neg: the maximum pairwise electronegativity difference
            aos: the constituent atomic orbitals for each element, keyed by
                element symbol
            band_edges: dict holding the highest occupied molecular orbital
                (HOMO), the lowest unoccupied molecular orbital (LUMO), and
                whether the material is predicted to be a metal
        """
        self.composition = Composition(formula).as_dict()
        self.elements = self.composition.keys()
        if any(not float(amount).is_integer()
               for amount in self.composition.values()):
            raise ValueError("composition subscripts must be integers")
        self.elec_neg = self.max_electronegativity()
        orbital_table = {}
        for el in self.elements:
            symbol = str(el)
            orbital_table[symbol] = [
                [symbol, shell, energy]
                for shell, energy in Element(el).atomic_orbitals.items()]
        self.aos = orbital_table
        self.band_edges = self.obtain_band_edges()

    def max_electronegativity(self):
        """
        Returns:
            The maximum pairwise electronegativity difference between the
            constituent elements (0 if fewer than two elements).
        """
        pair_diffs = [
            abs(Element(a).X - Element(b).X)
            for a, b in combinations(self.elements, 2)]
        return max(pair_diffs) if pair_diffs else 0

    def aos_as_list(self):
        """
        Returns:
            All constituent atomic orbitals (one entry per atom of each
            element), sorted from lowest to highest energy, e.g.
            [['O', '1s', -18.758245], ['O', '2s', -0.871362],
            ['O', '2p', -0.338381]]

            Data is obtained from
            https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
        """
        all_orbitals = []
        for el in self.elements:
            all_orbitals.extend(self.aos[el] * int(self.composition[el]))
        return sorted(all_orbitals, key=lambda orbital: orbital[2])

    def obtain_band_edges(self):
        """
        Fills the atomic orbitals with the available electrons.

        Returns:
            dict with the HOMO, the LUMO, and whether the material is
            predicted to be a metal (HOMO == LUMO).
        """
        orbitals = self.aos_as_list()
        electrons = Composition(self.composition).total_electrons
        partial_filled = []
        # Fill the orbitals from the bottom up until the electrons run out.
        for orbital in orbitals:
            if electrons <= 0:
                break
            for subshell, capacity in (
                    ('s', 2), ('p', 6), ('d', 10), ('f', 14)):
                if subshell in orbital[1]:
                    electrons -= capacity
                    break
            partial_filled.append(orbital)

        if electrons != 0:
            # The topmost orbital is only partially filled, so the band is
            # half-occupied: HOMO and LUMO coincide (metallic character).
            homo = partial_filled[-1]
            lumo = partial_filled[-1]
        else:
            homo = partial_filled[-1]
            try:
                lumo = orbitals[len(partial_filled)]
            except Exception:
                lumo = None
        return {"HOMO": homo, "LUMO": lumo, "metal": homo == lumo}
|
davidwaroquiers/pymatgen
|
pymatgen/core/molecular_orbitals.py
|
Python
|
mit
| 4,608
|
[
"CRYSTAL",
"pymatgen"
] |
82e959c616040a69ba5bf87b031399787dceb7a0dda91eb3edf40f973668840f
|
""" :mod: DErrno
==========================
.. module: DErrno
:synopsis: Error list and utilities for handling errors in DIRAC
This module contains list of errors that can be encountered in DIRAC.
It complements the errno module of python.
It also contains utilities to manipulate these errors.
Finally, it contains a DErrno class that contains an error number
as well as a low level error message. It behaves like a string for
compatibility reasons
In order to add extension specific error, you need to create in your extension the file
Core/Utilities/DErrno.py, which will contain the following dictionary:
* extra_dErrName: keys are the error name, values the number of it
* extra_dErrorCode: same as dErrorCode. keys are the error code, values the name
(we don't simply revert the previous dict in case we do not
have a one to one mapping)
* extra_dStrError: same as dStrError, Keys are the error code, values the error description
* extra_compatErrorString: same as compatErrorString. The compatible error strings are
added to the existing one, and not replacing them.
Example of extension file :
extra_dErrName = { 'ELHCBSPE' : 3001 }
extra_dErrorCode = { 3001 : 'ELHCBSPE'}
extra_dStrError = { 3001 : "This is a description text of the specific LHCb error" }
extra_compatErrorString = { 3001 : ["living easy, living free"],
DErrno.ERRX : ['An error message for ERRX that is specific to LHCb']}
"""
import os
import traceback
import errno
import imp
import sys
# To avoid conflict, the error numbers should be greater than 1000
# We decided to group the by range of 100 per system
# 1000: Generic
# 1100: Core
# 1200: Framework
# 1300: Interfaces
# 1400: Config
# 1500: WMS / Workflow
# 1600: DMS/StorageManagement
# 1700: RMS
# 1800: Accounting
# 1900: TS
# 2000: RSS
# Generic
ERRX = 1001
ERRY = 1002
EIMPERR = 1003
ENOMETH = 1004
ECONF = 1006
# DMS/StorageManagement
EFILESIZE = 1601
EGFAL = 1602
EBADCKS = 1603
# This translates the integer number into the name of the variable
# NOTE: the three tables below must stay consistent with the constants above;
# extensions add their own entries at runtime via includeExtensionErrors().
dErrorCode = { 1001 : 'ERRX',
               1002 : 'ERRY',
               1003 : 'EIMPERR',
               1004 : 'ENOMETH',
               1006 : 'ECONF',
               # DMS/StorageManagement
               1601 : 'EFILESIZE',
               1602 : 'EGFAL',
               1603 : 'EBADCKS',
               }
# Human readable description of each error code, used by strerror().
dStrError = { ERRX : "A human readable error message for ERRX",
              ERRY : "A nice message for ERRY",
              EIMPERR : "Failed to import library",
              ENOMETH : "No such method or function",
              ECONF : "Configuration error",
              # DMS/StorageManagement
              EFILESIZE : "Bad file size",
              EGFAL : "Error with the gfal call",
              EBADCKS : "Bad checksum",
              }
# In case the error is returned as a string, and not as a DErrno object,
# these strings are used to test the error.
compatErrorString = {
                      # ERRX : ['not found', 'X'],
                      errno.ENOENT : ['File does not exist']
                    }
def strerror(code):
  """ Return a human readable message for the given error code.

      DIRAC specific codes are looked up first; anything else is delegated
      to os.strerror. Codes unknown to both yield a generic message.
  """
  if code in dStrError:
    return dStrError[code]
  # Not a DIRAC specific error: fall back to the OS error table.
  try:
    return os.strerror( code )
  except ValueError:
    # On some systems os.strerror raises for unknown codes,
    # on others it returns a message...
    return "Unknown error %s" % code
class DError( object ):
  """ This class is used to propagate errors through DIRAC.

      It contains an error code that should be one defined here or in the
      errno python module, as well as a low level technical (non human
      readable) error message.  It behaves like a string for compatibility
      with the old string-based error handling.

      CAUTION. The callstack attribute is used to print the sequence of events
      that lead to the error. It is set automatically in the __init__.
      It should be overwritten only for serializing.
  """

  def __init__( self, errno, errmsg = "" ):
    """ Initialize

        :param errno : error code
        :param errmsg : technical message
    """
    self.errno = errno
    self.errmsg = errmsg
    try:
      self._callStack = traceback.format_stack()
      self._callStack.pop()
    except Exception:
      # Recording the call stack is best effort only: it must never
      # prevent the error object itself from being created.
      # (was a bare "except:", which also swallowed SystemExit et al.)
      self._callStack = []

  def __repr__( self ):
    """ String representation: human readable message, code and technical
        message.  The recorded call stack is appended only when printing
        from the debug level of the DIRAC logger.
    """
    reprStr = "%s ( %s : %s)" % ( strerror( self.errno ), self.errno, self.errmsg )
    isVerbose = False
    stack = traceback.extract_stack()
    for filename, _linenb, function_name, _text in stack:
      if 'FrameworkSystem/private/logging/Logger.py' in filename:
        if function_name == 'debug':
          isVerbose = True
          break
    if isVerbose:
      reprStr += "\n" + "".join( self._callStack )
    return reprStr

  def __contains__( self, errorStr ):
    """ For compatibility reasons.
        Checks whether 'errorStr' is in the human readable form of the error
        msg or in one of the compatibility error messages.
        errorStr has to be a str.
    """
    # Check if the errorStr is in the standard message
    if errorStr in strerror( self.errno ):
      return True
    # If not, check whether the errorStr is in one of the compatibility
    # error messages.  any() replaces the py2-only "reduce" builtin, which
    # is a NameError under python 3.
    return any( errorStr in compatMsg
                for compatMsg in compatErrorString.get( self.errno, [] ) )

  def __cmp__( self, errorStr ):
    """ For compatibility reasons.
        Checks whether 'errorStr', which should be a string, is equal to the
        human readable form of the error msg.
    """
    # !!! Caution, if there is equality, we have to return 0 (rules of __cmp__)
    try:
      if errorStr == strerror( self.errno ):
        return 0
    except Exception:
      # strerror may fail on exotic codes; fall through to the
      # compatibility strings.  (was a bare "except:")
      pass
    if errorStr in compatErrorString.get( self.errno, [] ):
      return 0
    return 1

  def __getitem__( self, key ):
    """ Emulate the behavior of S_ERROR: only 'OK' (always False) and
        'Message' (the full representation) are valid keys.
    """
    if key == 'OK':
      return False
    elif key == 'Message':
      return "%s" % self
    raise KeyError( "{0} does not exist".format( key ) )

  def get(self, key, defaultValue = None):
    """ method like the "get" of a dictionary.
        Returns the value matching the key if it exists,
        otherwise the default value

        :param key: item to lookup for
        :param defaultValue: if the key does not exist, return this value
        :return: the value matching the key or the default value
    """
    try:
      return self.__getitem__( key )
    except KeyError:
      return defaultValue
def cmpError( inErr, candidate ):
  """ This function compares an error (in its old form (a string...) or new
      (DError instance)) with a candidate error code.

      :param inErr : a string, an integer, a dict (S_ERROR structure) or a DError instance
      :param candidate : error code to compare with
      :return : True or False

      If a DError instance is passed, we compare the code with DError.errno
      If it is an integer, we do a direct comparison
      If it is a string, we use compatErrorString and strerror to check the error string
      If it is a dict, we compare the 'Message' entry against the candidate
  """
  if isinstance( inErr, basestring ) : # old style
    # Create a DError object to represent the candidate
    derr = DError( candidate )
    return inErr == derr
  elif isinstance( inErr, dict ): # if the S_ERROR structure is given
    # Create a DError object to represent the candidate
    derr = DError( candidate )
    return inErr.get( 'Message' ) == derr
  elif isinstance( inErr, int ):
    return inErr == candidate
  elif isinstance( inErr, DError ):
    return inErr.errno == candidate
  # NOTE: a second isinstance(inErr, dict) branch used to live here; it was
  # unreachable dead code since the dict case is already handled above, and
  # has therefore been removed.
  else:
    raise TypeError( "Unknown input error type %s" % type( inErr ) )
def includeExtensionErrors():
  """ Merge all the errors of all the extensions into the errors of these modules
      Should be called only at the initialization of DIRAC, so by the parseCommandLine,
      dirac-agent.py, dirac-service.py, dirac-executor.py
  """

  def __recurseImport( modName, parentModule = None, fullName = False ):
    """ Internal function to load modules

        :param modName: module name, either a dotted string or an
                        already-split list of name components
        :param parentModule: previously imported parent module whose
                             __path__ restricts the search
        :param fullName: full dotted name, built on the first call and
                         carried through the recursion
        :return: the imported module, or None if it cannot be found
    """
    if isinstance( modName, basestring ):
      modName = modName.split( "." )
    if not fullName:
      fullName = ".".join( modName )
    try:
      if parentModule:
        impData = imp.find_module( modName[0], parentModule.__path__ )
      else:
        impData = imp.find_module( modName[0] )
      impModule = imp.load_module( modName[0], *impData )
      # imp.find_module may return an open file object; close it to avoid
      # leaking the file descriptor.
      if impData[0]:
        impData[0].close()
    except ImportError:
      return None
    if len( modName ) == 1:
      return impModule
    # Descend one package level per recursion step.
    return __recurseImport( modName[1:], impModule, fullName = fullName )

  # Imported here (not at module level) to avoid a circular import with
  # the DIRAC configuration system.
  from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
  allExtensions = CSGlobals.getCSExtensions()
  for extension in allExtensions:
    ext_derrno = None
    try:
      ext_derrno = __recurseImport( '%sDIRAC.Core.Utilities.DErrno' % extension )
      if ext_derrno:
        # The next 3 dictionary MUST be present for consistency
        # Global name of errors
        sys.modules[__name__].__dict__.update( ext_derrno.extra_dErrName )
        # Dictionary with the error codes
        sys.modules[__name__].dErrorCode.update( ext_derrno.extra_dErrorCode )
        # Error description string
        sys.modules[__name__].dStrError.update( ext_derrno.extra_dStrError )
        # extra_compatErrorString is optional
        for err in getattr( ext_derrno, 'extra_compatErrorString', [] ) :
          sys.modules[__name__].compatErrorString.setdefault( err, [] ).extend( ext_derrno.extra_compatErrorString[err] )
    except:
      # Best effort: a missing or broken extension DErrno module must not
      # prevent DIRAC from starting.  NOTE(review): this bare except also
      # hides genuine errors in extension files — consider logging here.
      pass
|
vmendez/DIRAC
|
Core/Utilities/DErrno.py
|
Python
|
gpl-3.0
| 10,189
|
[
"DIRAC"
] |
55a2acfa8e8c263693585275cb114bb1b87a0d185f315a482df7856f609c9e80
|
from periodic.crystal import Crystal

# Tetragonal lattice vectors (presumably in Angstrom — TODO confirm
# against the periodic.crystal API).
latticevecs = ((4.59373000, 0.00000000, 0.00000000),
               (0.00000000, 4.59373000, 0.00000000),
               (0.00000000, 0.00000000, 2.95812000))
# Atomic basis as (element symbol, cartesian position) pairs: 2 Ti + 4 O.
basis = (('Ti', (0.00000000, 0.00000000, 0.00000000)),
         ('Ti', (2.29686500, 2.29686500, 1.47906000)),
         ('O', (1.40246577, 1.40246577, 0.00000000)),
         ('O', (-1.40246577, -1.40246577, 0.00000000)),
         ('O', (3.69933077, 0.89439923, 1.47906000)),
         ('O', (0.89439923, 3.69933077, 1.47906000)))

if __name__ == '__main__':
    TiO2 = Crystal(latticevecs, basis)
    # Gather atoms via radial tiling of the cell up to the cutoff radius,
    # then dump them in XYZ format: atom count, comment line, then one
    # "symbol x y z" line per atom.
    rcutoff = 2e0
    atomlist = TiO2.tile_radially(rcutoff)
    filename = 'TiO2_rcut_{0:.2f}.xyz'.format(rcutoff)
    with open(filename, 'w') as f:
        f.write(str(len(atomlist))+'\n')
        f.write('crystal.py-generated xyz file'+'\n')
        for (symbol, position) in atomlist:
            # assumes str(position) renders as '(x, y, z)'; the [1:-1]
            # slice strips the surrounding parentheses — TODO confirm.
            f.write(symbol+' '+str(position)[1:-1]+'\n')
|
danielsjensen1/crystalpy
|
tests/TiO2.py
|
Python
|
bsd-3-clause
| 969
|
[
"CRYSTAL"
] |
e6b1efa21d22a620a55786d63b21fceaa98b6eafe9e9f78bda5832612ffeef5f
|
# General MIDI (GM) melodic instrument names mapped to their MIDI program
# numbers (0-127), listed in the standard GM program ordering.
instruments = {
    "Acoustic Grand Piano": 0,
    "Bright Acoustic Piano": 1,
    "Electric Grand Piano": 2,
    "Honky-tonk Piano": 3,
    "Electric Piano 1": 4,
    "Electric Piano 2": 5,
    "Harpsichord": 6,
    "Clavinet": 7,
    "Celesta": 8,
    "Glockenspiel": 9,
    "Music Box": 10,
    "Vibraphone": 11,
    "Marimba": 12,
    "Xylophone": 13,
    "Tubular Bells": 14,
    "Dulcimer": 15,
    "Drawbar Organ": 16,
    "Percussive Organ": 17,
    "Rock Organ": 18,
    "Church Organ": 19,
    "Reed Organ": 20,
    "Accordion": 21,
    "Harmonica": 22,
    "Tango Accordion": 23,
    "Acoustic Guitar (nylon)": 24,
    "Acoustic Guitar (steel)": 25,
    "Electric Guitar (jazz)": 26,
    "Electric Guitar (clean)": 27,
    "Electric Guitar (muted)": 28,
    "Overdriven Guitar": 29,
    "Distortion Guitar": 30,
    "Guitar harmonics": 31,
    "Acoustic Bass": 32,
    "Electric Bass (finger)": 33,
    "Electric Bass (pick)": 34,
    "Fretless Bass": 35,
    "Slap Bass 1": 36,
    "Slap Bass 2": 37,
    "Synth Bass 1": 38,
    "Synth Bass 2": 39,
    "Violin": 40,
    "Viola": 41,
    "Cello": 42,
    "Contrabass": 43,
    "Tremolo Strings": 44,
    "Pizzicato Strings": 45,
    "Orchestral Harp": 46,
    "Timpani": 47,
    "String Ensemble 1": 48,
    "String Ensemble 2": 49,
    "Synth Strings 1": 50,
    "Synth Strings 2": 51,
    "Choir Aahs": 52,
    "Voice Oohs": 53,
    "Synth Voice": 54,
    "Orchestra Hit": 55,
    "Trumpet": 56,
    "Trombone": 57,
    "Tuba": 58,
    "Muted Trumpet": 59,
    "French Horn": 60,
    "Brass Section": 61,
    "Synth Brass 1": 62,
    "Synth Brass 2": 63,
    "Soprano Sax": 64,
    "Alto Sax": 65,
    "Tenor Sax": 66,
    "Baritone Sax": 67,
    "Oboe": 68,
    "English Horn": 69,
    "Bassoon": 70,
    "Clarinet": 71,
    "Piccolo": 72,
    "Flute": 73,
    "Recorder": 74,
    "Pan Flute": 75,
    "Blown Bottle": 76,
    "Shakuhachi": 77,
    "Whistle": 78,
    "Ocarina": 79,
    "Lead 1 (square)": 80,
    "Lead 2 (sawtooth)": 81,
    "Lead 3 (calliope)": 82,
    "Lead 4 (chiff)": 83,
    "Lead 5 (charang)": 84,
    "Lead 6 (voice)": 85,
    "Lead 7 (fifths)": 86,
    "Lead 8 (bass + lead)": 87,
    "Pad 1 (new age)": 88,
    "Pad 2 (warm)": 89,
    "Pad 3 (polysynth)": 90,
    "Pad 4 (choir)": 91,
    "Pad 5 (bowed)": 92,
    "Pad 6 (metallic)": 93,
    "Pad 7 (halo)": 94,
    "Pad 8 (sweep)": 95,
    "FX 1 (rain)": 96,
    "FX 2 (soundtrack)": 97,
    "FX 3 (crystal)": 98,
    "FX 4 (atmosphere)": 99,
    "FX 5 (brightness)": 100,
    "FX 6 (goblins)": 101,
    "FX 7 (echoes)": 102,
    "FX 8 (sci-fi)": 103,
    "Sitar": 104,
    "Banjo": 105,
    "Shamisen": 106,
    "Koto": 107,
    "Kalimba": 108,
    "Bag pipe": 109,
    "Fiddle": 110,
    "Shanai": 111,
    "Tinkle Bell": 112,
    "Agogo": 113,
    "Steel Drums": 114,
    "Woodblock": 115,
    "Taiko Drum": 116,
    "Melodic Tom": 117,
    "Synth Drum": 118,
    "Reverse Cymbal": 119,
    "Guitar Fret Noise": 120,
    "Breath Noise": 121,
    "Seashore": 122,
    "Bird Tweet": 123,
    "Telephone Ring": 124,
    "Helicopter": 125,
    "Applause": 126,
    "Gunshot": 127
}
|
anshulgupta0803/txt2midi
|
txt2midi/instruments.py
|
Python
|
gpl-3.0
| 2,739
|
[
"CRYSTAL"
] |
e2d110de03fde525c146c52a0560ece0574349cf6c26fcb3718901c716809c87
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Finetune a pretrained ViT-L/32-SNGP on CIFAR-10/100 and ImageNet.
This config is used for models pretrained on either JFT-300M or ImageNet-21K.
"""
# pylint: enable=line-too-long
import ml_collections
import sweep_utils # local file import from baselines.jft.experiments
def get_config():
  """Config for finetuning.

  Returns an ml_collections.ConfigDict for ViT-L/32-SNGP finetuning.
  Fields marked "set in sweep" are left empty here and filled in by
  get_sweep().
  """
  config = ml_collections.ConfigDict()
  config.model_init = ''  # set in sweep
  config.dataset = ''  # set in sweep
  config.data_dir = None
  config.test_split = ''  # set in sweep
  config.val_split = ''  # set in sweep
  config.train_split = ''  # set in sweep
  config.num_classes = None  # set in sweep

  config.batch_size = 512
  config.total_steps = None  # set in sweep

  config.pp_train = ''  # set in sweep
  config.pp_eval = ''  # set in sweep
  config.shuffle_buffer_size = 50_000  # Per host, so small-ish is ok.

  # Logging / checkpointing cadence (in steps).
  config.log_training_steps = 100
  config.log_eval_steps = 1000
  config.checkpoint_steps = 5000
  config.checkpoint_timeout = 1

  config.prefetch_to_device = 2
  config.trial = 0

  # OOD evaluation. They're all set in the sweep.
  config.ood_datasets = []
  config.ood_num_classes = []
  config.ood_split = ''
  config.ood_methods = []
  config.pp_eval_ood = []
  config.eval_on_cifar_10h = False
  config.pp_eval_cifar_10h = ''
  config.eval_on_imagenet_real = False
  config.pp_eval_imagenet_real = ''

  # Model section: ViT-L/32 architecture hyperparameters.
  config.model = ml_collections.ConfigDict()
  config.model.patches = ml_collections.ConfigDict()
  config.model.patches.size = [32, 32]
  config.model.hidden_size = 1024
  config.model.transformer = ml_collections.ConfigDict()
  config.model.transformer.mlp_dim = 4096
  config.model.transformer.num_heads = 16
  config.model.transformer.num_layers = 24
  config.model.transformer.attention_dropout_rate = 0.
  config.model.transformer.dropout_rate = 0.
  config.model.classifier = 'token'
  # This is "no head" fine-tuning, which we use by default.
  config.model.representation_size = None

  # Gaussian process layer section
  config.gp_layer = ml_collections.ConfigDict()
  config.gp_layer.ridge_penalty = 1.
  # Disable momentum in order to use exact covariance update for finetuning.
  config.gp_layer.covmat_momentum = -1.  # Disable to allow exact cov update.
  config.gp_layer.mean_field_factor = 5.

  # Optimizer section
  config.optim_name = 'Momentum'
  config.optim = ml_collections.ConfigDict()
  config.grad_clip_norm = 1.
  config.weight_decay = None  # No explicit weight decay
  config.loss = 'softmax_xent'  # or 'sigmoid_xent'

  config.lr = ml_collections.ConfigDict()
  config.lr.base = 0.001
  config.lr.warmup_steps = 500
  config.lr.decay_type = 'cosine'
  return config
def get_sweep(hyper):
  """Sweeps over datasets.

  Builds one hyperparameter sweep per dataset (CIFAR-10/100, ImageNet and
  three ImageNet few-shot variants), chains them, and crosses the result
  with the pretrained checkpoint(s).

  Args:
    hyper: hyperparameter sweep builder module (provides product, chainit
      and sweep).

  Returns:
    The combined hyperparameter sweep.
  """
  # (The previous "checkpoints = [checkpoints[0]]" line was a no-op on a
  # single-element list and has been removed.)
  checkpoints = ['/path/to/pretrained_model_ckpt.npz']

  # Shared value lists, hoisted out of the per-dataset sweeps to keep the
  # full/few-shot regimes in sync.
  mean_field_factors = [-1., 0.1, 0.2, 0.3, 0.5, 1., 2., 3., 5., 10., 20]
  fewshot_mean_field_factors = [-1, 1e-4, 1e-3, 1e-2, 0.1, 0.5, 1, 2]
  fewshot_lrs = [0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
  fewshot_warmups = [1, 5, 10, 20, 30, 40, 50]

  cifar10_sweep = hyper.product([
      hyper.chainit([
          hyper.product(sweep_utils.cifar10(
              hyper, steps=int(10_000 * s), warmup=int(500 * s)))
          for s in [0.5, 1.0, 1.5, 2.0]
      ]),
      hyper.sweep('config.lr.base', [0.03, 0.01, 0.003, 0.001]),
      hyper.sweep('config.gp_layer.mean_field_factor', mean_field_factors),
  ])
  cifar100_sweep = hyper.product([
      hyper.chainit([
          hyper.product(sweep_utils.cifar100(
              hyper, steps=int(10_000 * s), warmup=int(500 * s)))
          for s in [0.5, 1.0, 1.5, 2.0]
      ]),
      hyper.sweep('config.lr.base', [0.03, 0.01, 0.003, 0.001]),
      hyper.sweep('config.gp_layer.mean_field_factor', mean_field_factors),
  ])
  imagenet_sweep = hyper.product([
      hyper.chainit([
          hyper.product(sweep_utils.imagenet(
              hyper, steps=int(20_000 * s), warmup=int(500 * s)))
          for s in [0.5, 1.0, 1.5, 2.0]
      ]),
      hyper.sweep('config.lr.base', [0.1, 0.06, 0.03, 0.01]),
      hyper.sweep('config.gp_layer.mean_field_factor', mean_field_factors),
  ])

  def _fewshot_sweep(fewshot, steps, log_eval_steps):
    """One ImageNet few-shot sweep (warmup x lr x mean-field factor)."""
    return hyper.product([
        hyper.chainit([
            hyper.product(sweep_utils.imagenet_fewshot(
                hyper, fewshot=fewshot, steps=steps, warmup=s,
                log_eval_steps=log_eval_steps)) for s in fewshot_warmups
        ]),
        hyper.sweep('config.lr.base', fewshot_lrs),
        hyper.sweep('config.gp_layer.mean_field_factor',
                    fewshot_mean_field_factors),
    ])

  return hyper.product([
      hyper.chainit([
          cifar10_sweep,
          cifar100_sweep,
          imagenet_sweep,
          _fewshot_sweep('1shot', steps=200, log_eval_steps=20),
          _fewshot_sweep('5shot', steps=1000, log_eval_steps=100),
          _fewshot_sweep('10shot', steps=2000, log_eval_steps=200),
      ]),
      hyper.sweep('config.model_init', checkpoints),
  ])
|
google/uncertainty-baselines
|
baselines/jft/experiments/vit_l32_sngp_finetune.py
|
Python
|
apache-2.0
| 6,367
|
[
"Gaussian"
] |
5de5a53ae4eb4b1ba5525ee181e7bfb0a6e49a5a73e250c81a552cd0debc52ce
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is used for analysis of materials with potential application as
intercalation batteries.
"""
__author__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "ajain@lbl.gov"
__date__ = "Jan 13, 2012"
__status__ = "Beta"
import itertools
from pymatgen.core.composition import Composition
from pymatgen.core.units import Charge, Time
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.phase_diagram import PDEntry
from pymatgen.apps.battery.battery_abc import AbstractElectrode, \
AbstractVoltagePair
from pymatgen.core.periodic_table import Element
from scipy.constants import N_A
class InsertionElectrode(AbstractElectrode):
    """
    A set of topotactically related compounds, with different amounts of a
    single element, e.g. TiO2 and LiTiO2, that can be used to define an
    insertion battery electrode.
    """

    def __init__(self, entries, working_ion_entry):
        """
        Create a new InsertionElectrode.

        Args:
            entries: A list of ComputedStructureEntries (or subclasses)
                representing the different topotactic states of the battery,
                e.g. TiO2 and LiTiO2.
            working_ion_entry: A single ComputedEntry or PDEntry
                representing the element that carries charge across the
                battery, e.g. Li.
        """
        self._entries = entries
        self._working_ion = working_ion_entry.composition.elements[0]
        self._working_ion_entry = working_ion_entry

        # Prepare to make phase diagram: determine elements and set their energy
        # to be very high
        elements = set()
        for entry in entries:
            elements.update(entry.composition.elements)

        # Set an artificial energy for each element for convex hull generation
        element_energy = max([entry.energy_per_atom for entry in entries]) + 10

        pdentries = []
        pdentries.extend(entries)
        pdentries.extend([PDEntry(Composition({el: 1}), element_energy)
                          for el in elements])

        # Make phase diagram to determine which entries are stable vs. unstable
        pd = PhaseDiagram(pdentries)

        def lifrac(e):
            # Sort key: atomic fraction of the working ion.
            return e.composition.get_atomic_fraction(self._working_ion)

        # stable entries ordered by amount of Li asc
        self._stable_entries = tuple(sorted([e for e in pd.stable_entries
                                             if e in entries], key=lifrac))

        # unstable entries ordered by amount of Li asc
        self._unstable_entries = tuple(sorted([e for e in pd.unstable_entries
                                               if e in entries], key=lifrac))

        # create voltage pairs between adjacent stable entries
        self._vpairs = tuple([InsertionVoltagePair(self._stable_entries[i],
                                                   self._stable_entries[i + 1],
                                                   working_ion_entry)
                              for i in range(len(self._stable_entries) - 1)])

    @property
    def working_ion(self):
        """
        The working ion as an Element object
        """
        return self._working_ion

    @property
    def working_ion_entry(self):
        """
        The entry (ComputedEntry or PDEntry) of the working ion.
        """
        return self._working_ion_entry

    @property
    def voltage_pairs(self):
        """
        Tuple of InsertionVoltagePair between adjacent stable entries.
        """
        return self._vpairs

    def get_stable_entries(self, charge_to_discharge=True):
        """
        Get the stable entries.

        Args:
            charge_to_discharge: order from most charge to most discharged
                state? Default to True.

        Returns:
            A list of stable entries in the electrode, ordered by amount of the
            working ion.
        """
        list_copy = list(self._stable_entries)
        # BUG FIX: list.reverse() mutates in place and returns None, so the
        # old "return list_copy if ... else list_copy.reverse()" returned
        # None whenever charge_to_discharge was False.
        if not charge_to_discharge:
            list_copy.reverse()
        return list_copy

    def get_unstable_entries(self, charge_to_discharge=True):
        """
        Returns the unstable entries for the electrode.

        Args:
            charge_to_discharge: Order from most charge to most discharged
                state? Defaults to True.

        Returns:
            A list of unstable entries in the electrode, ordered by amount of
            the working ion.
        """
        list_copy = list(self._unstable_entries)
        # BUG FIX: same None-return problem as get_stable_entries.
        if not charge_to_discharge:
            list_copy.reverse()
        return list_copy

    def get_all_entries(self, charge_to_discharge=True):
        """
        Return all entries input for the electrode.

        Args:
            charge_to_discharge:
                order from most charge to most discharged state? Defaults to
                True.

        Returns:
            A list of all entries in the electrode (both stable and unstable),
            ordered by amount of the working ion.
        """
        all_entries = list(self.get_stable_entries())
        all_entries.extend(self.get_unstable_entries())
        # sort all entries by amount of working ion ASC
        all_entries = sorted(all_entries,
                             key=lambda e: e.composition.get_atomic_fraction(self.working_ion))
        # BUG FIX: same None-return problem as get_stable_entries.
        if not charge_to_discharge:
            all_entries.reverse()
        return all_entries

    @property
    def fully_charged_entry(self):
        """
        The most charged entry along the topotactic path.
        """
        return self._stable_entries[0]

    @property
    def fully_discharged_entry(self):
        """
        The most discharged entry along the topotactic path.
        """
        return self._stable_entries[-1]

    def get_max_instability(self, min_voltage=None, max_voltage=None):
        """
        The maximum instability along a path for a specific voltage range.

        Args:
            min_voltage: The minimum allowable voltage.
            max_voltage: The maximum allowable voltage.

        Returns:
            Maximum decomposition energy of all compounds along the insertion
            path (a subset of the path can be chosen by the optional arguments)
        """
        data = []
        for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.decomp_e_charge is not None:
                data.append(pair.decomp_e_charge)
            if pair.decomp_e_discharge is not None:
                data.append(pair.decomp_e_discharge)
        return max(data) if len(data) > 0 else None

    def get_min_instability(self, min_voltage=None, max_voltage=None):
        """
        The minimum instability along a path for a specific voltage range.

        Args:
            min_voltage: The minimum allowable voltage.
            max_voltage: The maximum allowable voltage.

        Returns:
            Minimum decomposition energy of all compounds along the insertion
            path (a subset of the path can be chosen by the optional arguments)
        """
        data = []
        for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.decomp_e_charge is not None:
                data.append(pair.decomp_e_charge)
            if pair.decomp_e_discharge is not None:
                data.append(pair.decomp_e_discharge)
        return min(data) if len(data) > 0 else None

    def get_max_muO2(self, min_voltage=None, max_voltage=None):
        """
        Maximum critical oxygen chemical potential along path.

        Args:
            min_voltage: The minimum allowable voltage.
            max_voltage: The maximum allowable voltage.

        Returns:
            Maximum critical oxygen chemical of all compounds along the
            insertion path (a subset of the path can be chosen by the optional
            arguments).
        """
        data = []
        for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.muO2_discharge is not None:
                data.extend([d['chempot'] for d in pair.muO2_discharge])
            if pair.muO2_charge is not None:
                # BUG FIX: this branch previously read pair.muO2_discharge
                # (copy-paste error), ignoring the charge-side data.
                data.extend([d['chempot'] for d in pair.muO2_charge])
        return max(data) if len(data) > 0 else None

    def get_min_muO2(self, min_voltage=None, max_voltage=None):
        """
        Minimum critical oxygen chemical potential along path.

        Args:
            min_voltage: The minimum allowable voltage for a given step
            max_voltage: The maximum allowable voltage allowable for a given
                step

        Returns:
            Minimum critical oxygen chemical of all compounds along the
            insertion path (a subset of the path can be chosen by the optional
            arguments).
        """
        data = []
        for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.muO2_discharge is not None:
                data.extend([d['chempot'] for d in pair.muO2_discharge])
            if pair.muO2_charge is not None:
                # BUG FIX: this branch previously read pair.muO2_discharge
                # (copy-paste error), ignoring the charge-side data.
                data.extend([d['chempot'] for d in pair.muO2_charge])
        return min(data) if len(data) > 0 else None

    def get_sub_electrodes(self, adjacent_only=True, include_myself=True):
        """
        If this electrode contains multiple voltage steps, then it is possible
        to use only a subset of the voltage steps to define other electrodes.
        For example, an LiTiO2 electrode might contain three subelectrodes:
        [LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
        This method can be used to return all the subelectrodes with some
        options

        Args:
            adjacent_only: Only return electrodes from compounds that are
                adjacent on the convex hull, i.e. no electrodes returned
                will have multiple voltage steps if this is set True.
            include_myself: Include this identical electrode in the list of
                results.

        Returns:
            A list of InsertionElectrode objects
        """
        battery_list = []
        pair_it = self._vpairs if adjacent_only \
            else itertools.combinations_with_replacement(self._vpairs, 2)

        ion = self._working_ion

        for pair in pair_it:
            entry_charge = pair.entry_charge if adjacent_only \
                else pair[0].entry_charge
            entry_discharge = pair.entry_discharge if adjacent_only \
                else pair[1].entry_discharge

            chg_frac = entry_charge.composition.get_atomic_fraction(ion)
            dischg_frac = entry_discharge.composition.get_atomic_fraction(ion)

            def in_range(entry):
                # Keep entries whose working-ion fraction lies within this
                # (charge, discharge) window.
                frac = entry.composition.get_atomic_fraction(ion)
                return chg_frac <= frac <= dischg_frac

            if include_myself or entry_charge != self.fully_charged_entry \
                    or entry_discharge != self.fully_discharged_entry:
                unstable_entries = filter(in_range,
                                          self.get_unstable_entries())
                stable_entries = filter(in_range, self.get_stable_entries())
                all_entries = list(stable_entries)
                all_entries.extend(unstable_entries)
                battery_list.append(self.__class__(all_entries,
                                                   self.working_ion_entry))
        return battery_list

    def as_dict_summary(self, print_subelectrodes=True):
        """
        Generate a summary dict.

        Args:
            print_subelectrodes: Also print data on all the possible
                subelectrodes.

        Returns:
            A summary of this electrode's properties in dict format.
        """
        chg_comp = self.fully_charged_entry.composition
        dischg_comp = self.fully_discharged_entry.composition
        ion = self.working_ion
        d = {"average_voltage": self.get_average_voltage(),
             "max_voltage": self.max_voltage,
             "min_voltage": self.min_voltage,
             "max_delta_volume": self.max_delta_volume,
             "max_voltage_step": self.max_voltage_step,
             "capacity_grav": self.get_capacity_grav(),
             "capacity_vol": self.get_capacity_vol(),
             "energy_grav": self.get_specific_energy(),
             "energy_vol": self.get_energy_density(),
             "working_ion": self._working_ion.symbol,
             "nsteps": self.num_steps,
             "framework": self._vpairs[0].framework.to_data_dict,
             "formula_charge": chg_comp.reduced_formula,
             "id_charge": self.fully_charged_entry.entry_id,
             "formula_discharge": dischg_comp.reduced_formula,
             "id_discharge": self.fully_discharged_entry.entry_id,
             "fracA_charge": chg_comp.get_atomic_fraction(ion),
             "fracA_discharge": dischg_comp.get_atomic_fraction(ion),
             "max_instability": self.get_max_instability(),
             "min_instability": self.get_min_instability(),
             "material_ids": [itr_ent.entry_id for itr_ent in self._entries],
             "stable_material_ids": [itr_ent.entry_id for itr_ent in self.get_stable_entries()],
             "unstable_material_ids": [itr_ent.entry_id for itr_ent in self.get_unstable_entries()],
             }

        # Optional per-entry stability / muO2 data, included only when every
        # entry carries it.
        if all(['decomposition_energy' in itr_ent.data for itr_ent in self._entries]):
            d.update({"stability_charge": self.fully_charged_entry.data['decomposition_energy'],
                      "stability_discharge": self.fully_discharged_entry.data['decomposition_energy'],
                      "stability_data": {itr_ent.entry_id: itr_ent.data['decomposition_energy'] for itr_ent in
                                         self._entries},
                      })

        if all(['muO2' in itr_ent.data for itr_ent in self._entries]):
            d.update({"muO2_data": {itr_ent.entry_id: itr_ent.data['muO2'] for itr_ent in self._entries}})

        if print_subelectrodes:
            def f_dict(c):
                return c.as_dict_summary(print_subelectrodes=False)

            d["adj_pairs"] = list(map(f_dict,
                                      self.get_sub_electrodes(adjacent_only=True)))
            d["all_pairs"] = list(map(f_dict,
                                      self.get_sub_electrodes(adjacent_only=False)))
        return d

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        output = []
        chg_form = self.fully_charged_entry.composition.reduced_formula
        dischg_form = self.fully_discharged_entry.composition.reduced_formula
        output.append("InsertionElectrode with endpoints at {} and {}".format(
            chg_form, dischg_form))
        output.append("Avg. volt. = {} V".format(self.get_average_voltage()))
        output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav()))
        output.append("Vol. cap. = {}".format(self.get_capacity_vol()))
        return "\n".join(output)

    @classmethod
    def from_dict(cls, d):
        """
        Reconstruct an InsertionElectrode from its as_dict() representation.
        """
        from monty.json import MontyDecoder
        dec = MontyDecoder()
        return cls(dec.process_decoded(d["entries"]),
                   dec.process_decoded(d["working_ion_entry"]))

    def as_dict(self):
        """
        MSONable dict representation (inverse of from_dict).
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entries": [entry.as_dict() for entry in self._entries],
                "working_ion_entry": self.working_ion_entry.as_dict()}
class InsertionVoltagePair(AbstractVoltagePair):
"""
Defines an Insertion Voltage Pair.
Args:
entry1: Entry corresponding to one of the entries in the voltage step.
entry2: Entry corresponding to the other entry in the voltage step.
working_ion_entry: A single ComputedEntry or PDEntry representing
the element that carries charge across the battery, e.g. Li.
"""
def __init__(self, entry1, entry2, working_ion_entry):
# initialize some internal variables
working_element = working_ion_entry.composition.elements[0]
entry_charge = entry1
entry_discharge = entry2
if entry_charge.composition.get_atomic_fraction(working_element) \
> entry2.composition.get_atomic_fraction(working_element):
(entry_charge, entry_discharge) = (entry_discharge, entry_charge)
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
ion_sym = working_element.symbol
frame_charge_comp = Composition({el: comp_charge[el]
for el in comp_charge
if el.symbol != ion_sym})
frame_discharge_comp = Composition({el: comp_discharge[el]
for el in comp_discharge
if el.symbol != ion_sym})
# Data validation
# check that the ion is just a single element
if not working_ion_entry.composition.is_element:
raise ValueError("VoltagePair: The working ion specified must be "
"an element")
# check that at least one of the entries contains the working element
if not comp_charge.get_atomic_fraction(working_element) > 0 and \
not comp_discharge.get_atomic_fraction(working_element) > 0:
raise ValueError("VoltagePair: The working ion must be present in "
"one of the entries")
# check that the entries do not contain the same amount of the workin
# element
if comp_charge.get_atomic_fraction(working_element) == comp_discharge.get_atomic_fraction(working_element):
raise ValueError("VoltagePair: The working ion atomic percentage "
"cannot be the same in both the entries")
# check that the frameworks of the entries are equivalent
if not frame_charge_comp.reduced_formula == frame_discharge_comp.reduced_formula:
raise ValueError("VoltagePair: the specified entries must have the"
" same compositional framework")
# Initialize normalization factors, charged and discharged entries
valence_list = Element(ion_sym).oxidation_states
working_ion_valence = abs(max(valence_list))
(self.framework,
norm_charge) = frame_charge_comp.get_reduced_composition_and_factor()
norm_discharge = \
frame_discharge_comp.get_reduced_composition_and_factor()[1]
self._working_ion_entry = working_ion_entry
# Initialize normalized properties
self._vol_charge = entry_charge.structure.volume / norm_charge
self._vol_discharge = entry_discharge.structure.volume / norm_discharge
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
self._mass_charge = comp_charge.weight / norm_charge
self._mass_discharge = comp_discharge.weight / norm_discharge
self._num_ions_transferred = (comp_discharge[working_element] / norm_discharge) \
- (comp_charge[working_element] / norm_charge)
self._voltage = \
(((entry_charge.energy / norm_charge) -
(entry_discharge.energy / norm_discharge)) /
self._num_ions_transferred + working_ion_entry.energy_per_atom) / working_ion_valence
self._mAh = self._num_ions_transferred * Charge(1, "e").to("C") * \
Time(1, "s").to("h") * N_A * 1000 * working_ion_valence
# Step 4: add (optional) hull and muO2 data
self.decomp_e_charge = \
entry_charge.data.get("decomposition_energy", None)
self.decomp_e_discharge = \
entry_discharge.data.get("decomposition_energy", None)
self.muO2_charge = entry_charge.data.get("muO2", None)
self.muO2_discharge = entry_discharge.data.get("muO2", None)
self.entry_charge = entry_charge
self.entry_discharge = entry_discharge
self.normalization_charge = norm_charge
self.normalization_discharge = norm_discharge
self._frac_charge = comp_charge.get_atomic_fraction(working_element)
self._frac_discharge = \
comp_discharge.get_atomic_fraction(working_element)
    @property
    def frac_charge(self):
        """Atomic fraction of the working element in the charged composition."""
        return self._frac_charge
    @property
    def frac_discharge(self):
        """Atomic fraction of the working element in the discharged composition."""
        return self._frac_discharge
    @property
    def voltage(self):
        """Voltage of this charge/discharge pair (computed in __init__ from the entry energies)."""
        return self._voltage
    @property
    def mAh(self):
        """Capacity of this pair in mAh (ions transferred x elementary charge x valence)."""
        return self._mAh
    @property
    def mass_charge(self):
        """Mass of the charged composition, normalized by the framework reduction factor."""
        return self._mass_charge
    @property
    def mass_discharge(self):
        """Mass of the discharged composition, normalized by the framework reduction factor."""
        return self._mass_discharge
    @property
    def vol_charge(self):
        """Volume of the charged structure, normalized by the framework reduction factor."""
        return self._vol_charge
    @property
    def vol_discharge(self):
        """Volume of the discharged structure, normalized by the framework reduction factor."""
        return self._vol_discharge
    @property
    def working_ion_entry(self):
        """Entry of the working ion supplied at construction time."""
        return self._working_ion_entry
def __repr__(self):
output = ["Insertion voltage pair with working ion {}".format(
self._working_ion_entry.composition.reduced_formula),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"mass_charge = {}, mass_discharge = {}".format(
self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}".format(
self.vol_charge, self.vol_discharge),
"frac_charge = {}, frac_discharge = {}".format(
self.frac_charge, self.frac_discharge)]
return "\n".join(output)
def __str__(self):
return self.__repr__()
|
fraricci/pymatgen
|
pymatgen/apps/battery/insertion_battery.py
|
Python
|
mit
| 21,772
|
[
"pymatgen"
] |
e18f04251b007369232b79831b4d4f11defe7b14078b32838cda91ab2f52b2ae
|
"""
Notifications for Android TV notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.nfandroidtv/
"""
import logging
import io
import base64
import requests
from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_DATA, BaseNotificationService,
PLATFORM_SCHEMA)
from homeassistant.const import CONF_TIMEOUT
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Platform configuration option keys.
CONF_IP = 'host'
CONF_DURATION = 'duration'
CONF_FONTSIZE = 'fontsize'
CONF_POSITION = 'position'
CONF_TRANSPARENCY = 'transparency'
CONF_COLOR = 'color'
CONF_INTERRUPT = 'interrupt'

# Defaults applied when an option is omitted from the configuration.
DEFAULT_DURATION = 5
DEFAULT_FONTSIZE = 'medium'
DEFAULT_POSITION = 'bottom-right'
DEFAULT_TRANSPARENCY = 'default'
DEFAULT_COLOR = 'grey'
DEFAULT_INTERRUPT = False
DEFAULT_TIMEOUT = 5
# Base64-encoded minimal PNG used as the default notification icon.
DEFAULT_ICON = (
    'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP6zwAAAgcBApo'
    'cMXEAAAAASUVORK5CYII=')

# Keys accepted in a service call's ``data`` dict (per-message overrides).
ATTR_DURATION = 'duration'
ATTR_FONTSIZE = 'fontsize'
ATTR_POSITION = 'position'
ATTR_TRANSPARENCY = 'transparency'
ATTR_COLOR = 'color'
ATTR_BKGCOLOR = 'bkgcolor'
ATTR_INTERRUPT = 'interrupt'
# Multipart form field name the app expects for an attached image.
ATTR_IMAGE = 'filename2'
ATTR_FILE = 'file'
# Attributes contained in file
ATTR_FILE_URL = 'url'
ATTR_FILE_PATH = 'path'
ATTR_FILE_USERNAME = 'username'
ATTR_FILE_PASSWORD = 'password'
ATTR_FILE_AUTH = 'auth'
# Any other value or absence of 'auth' lead to basic authentication being used
ATTR_FILE_AUTH_DIGEST = 'digest'

# Mappings from user-facing option values to the codes the app expects.
FONTSIZES = {
    'small': 1,
    'medium': 0,
    'large': 2,
    'max': 3
}
POSITIONS = {
    'bottom-right': 0,
    'bottom-left': 1,
    'top-right': 2,
    'top-left': 3,
    'center': 4,
}
TRANSPARENCIES = {
    'default': 0,
    '0%': 1,
    '25%': 2,
    '50%': 3,
    '75%': 4,
    '100%': 5,
}
# Color name -> hex value sent as the notification background color.
COLORS = {
    'grey': '#607d8b',
    'black': '#000000',
    'indigo': '#303F9F',
    'green': '#4CAF50',
    'red': '#F44336',
    'cyan': '#00BCD4',
    'teal': '#009688',
    'amber': '#FFC107',
    'pink': '#E91E63',
}

# Voluptuous schema validating the platform's configuration entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP): cv.string,
    vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Coerce(int),
    vol.Optional(CONF_FONTSIZE, default=DEFAULT_FONTSIZE):
        vol.In(FONTSIZES.keys()),
    vol.Optional(CONF_POSITION, default=DEFAULT_POSITION):
        vol.In(POSITIONS.keys()),
    vol.Optional(CONF_TRANSPARENCY, default=DEFAULT_TRANSPARENCY):
        vol.In(TRANSPARENCIES.keys()),
    vol.Optional(CONF_COLOR, default=DEFAULT_COLOR):
        vol.In(COLORS.keys()),
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
    vol.Optional(CONF_INTERRUPT, default=DEFAULT_INTERRUPT): cv.boolean,
})
def get_service(hass, config, discovery_info=None):
    """Get the Notifications for Android TV notification service."""
    # Hand the validated options straight through, in the constructor's
    # positional order.
    return NFAndroidTVNotificationService(
        config.get(CONF_IP),
        config.get(CONF_DURATION),
        config.get(CONF_FONTSIZE),
        config.get(CONF_POSITION),
        config.get(CONF_TRANSPARENCY),
        config.get(CONF_COLOR),
        config.get(CONF_INTERRUPT),
        config.get(CONF_TIMEOUT),
        hass.config.is_allowed_path)
class NFAndroidTVNotificationService(BaseNotificationService):
    """Notification service for Notifications for Android TV.

    Sends multipart HTTP POST requests to the companion app listening on
    port 7676 of the Android TV device.
    """

    def __init__(self, remoteip, duration, fontsize, position, transparency,
                 color, interrupt, timeout, is_allowed_path):
        """Initialize the service.

        remoteip: host/IP of the Android TV device.
        duration, fontsize, position, transparency, color, interrupt:
            defaults used when a message carries no overriding data.
        timeout: POST request timeout in seconds.
        is_allowed_path: callable checking that a local file path is
            whitelisted in the Home Assistant configuration.
        """
        self._target = 'http://{}:7676'.format(remoteip)
        self._default_duration = duration
        self._default_fontsize = fontsize
        self._default_position = position
        self._default_transparency = transparency
        self._default_color = color
        self._default_interrupt = interrupt
        self._timeout = timeout
        # Decoded default icon, kept in memory and reused for every message.
        self._icon_file = io.BytesIO(base64.b64decode(DEFAULT_ICON))
        self.is_allowed_path = is_allowed_path

    def send_message(self, message="", **kwargs):
        """Send a message to a Android TV device.

        Per-message overrides (duration, fontsize, position, transparency,
        color, interrupt) and an image (file/URL) may be supplied through
        the service-call data dict; invalid values are logged and the
        configured default is kept.
        """
        _LOGGER.debug("Sending notification to: %s", self._target)
        # Base multipart payload built from the configured defaults.
        payload = dict(filename=('icon.png', self._icon_file,
                                 'application/octet-stream',
                                 {'Expires': '0'}), type='0',
                       title=kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
                       msg=message, duration='%i' % self._default_duration,
                       fontsize='%i' % FONTSIZES.get(self._default_fontsize),
                       position='%i' % POSITIONS.get(self._default_position),
                       bkgcolor='%s' % COLORS.get(self._default_color),
                       transparency='%i' % TRANSPARENCIES.get(
                           self._default_transparency),
                       offset='0', app=ATTR_TITLE_DEFAULT, force='true',
                       interrupt='%i' % self._default_interrupt,)
        data = kwargs.get(ATTR_DATA)
        if data:
            # Apply any valid per-message override; warn and keep the
            # default otherwise.
            if ATTR_DURATION in data:
                duration = data.get(ATTR_DURATION)
                try:
                    payload[ATTR_DURATION] = '%i' % int(duration)
                except ValueError:
                    _LOGGER.warning("Invalid duration-value: %s",
                                    str(duration))
            if ATTR_FONTSIZE in data:
                fontsize = data.get(ATTR_FONTSIZE)
                if fontsize in FONTSIZES:
                    payload[ATTR_FONTSIZE] = '%i' % FONTSIZES.get(fontsize)
                else:
                    _LOGGER.warning("Invalid fontsize-value: %s",
                                    str(fontsize))
            if ATTR_POSITION in data:
                position = data.get(ATTR_POSITION)
                if position in POSITIONS:
                    payload[ATTR_POSITION] = '%i' % POSITIONS.get(position)
                else:
                    _LOGGER.warning("Invalid position-value: %s",
                                    str(position))
            if ATTR_TRANSPARENCY in data:
                transparency = data.get(ATTR_TRANSPARENCY)
                if transparency in TRANSPARENCIES:
                    payload[ATTR_TRANSPARENCY] = '%i' % TRANSPARENCIES.get(
                        transparency)
                else:
                    _LOGGER.warning("Invalid transparency-value: %s",
                                    str(transparency))
            if ATTR_COLOR in data:
                color = data.get(ATTR_COLOR)
                if color in COLORS:
                    payload[ATTR_BKGCOLOR] = '%s' % COLORS.get(color)
                else:
                    _LOGGER.warning("Invalid color-value: %s", str(color))
            if ATTR_INTERRUPT in data:
                interrupt = data.get(ATTR_INTERRUPT)
                try:
                    payload[ATTR_INTERRUPT] = '%i' % cv.boolean(interrupt)
                except vol.Invalid:
                    _LOGGER.warning("Invalid interrupt-value: %s",
                                    str(interrupt))
        filedata = data.get(ATTR_FILE) if data else None
        if filedata is not None:
            # Load from file or URL
            file_as_bytes = self.load_file(
                url=filedata.get(ATTR_FILE_URL),
                local_path=filedata.get(ATTR_FILE_PATH),
                username=filedata.get(ATTR_FILE_USERNAME),
                password=filedata.get(ATTR_FILE_PASSWORD),
                auth=filedata.get(ATTR_FILE_AUTH))
            if file_as_bytes:
                payload[ATTR_IMAGE] = (
                    'image', file_as_bytes,
                    'application/octet-stream', {'Expires': '0'})
        try:
            _LOGGER.debug("Payload: %s", str(payload))
            response = requests.post(
                self._target, files=payload, timeout=self._timeout)
            if response.status_code != 200:
                _LOGGER.error("Error sending message: %s", str(response))
        except requests.exceptions.ConnectionError as err:
            _LOGGER.error("Error communicating with %s: %s",
                          self._target, str(err))

    def load_file(self, url=None, local_path=None, username=None,
                  password=None, auth=None):
        """Load image/document/etc from a local path or URL.

        Returns the file content as bytes, or None on failure. The
        local-path branch previously returned an open file handle that was
        never closed; it now reads and returns bytes, which both closes
        the descriptor and matches the URL branch (requests accepts bytes
        in multipart payloads).
        """
        try:
            if url is not None:
                # Check whether authentication parameters are provided
                if username is not None and password is not None:
                    # Use digest or basic authentication
                    if ATTR_FILE_AUTH_DIGEST == auth:
                        auth_ = HTTPDigestAuth(username, password)
                    else:
                        auth_ = HTTPBasicAuth(username, password)
                    # Load file from URL with authentication
                    req = requests.get(
                        url, auth=auth_, timeout=DEFAULT_TIMEOUT)
                else:
                    # Load file from URL without authentication
                    req = requests.get(url, timeout=DEFAULT_TIMEOUT)
                return req.content
            if local_path is not None:
                # Check whether path is whitelisted in configuration.yaml
                if self.is_allowed_path(local_path):
                    # Context manager closes the descriptor (the old code
                    # leaked it by returning the open handle).
                    with open(local_path, "rb") as local_file:
                        return local_file.read()
                _LOGGER.warning("'%s' is not secure to load data from!",
                                local_path)
            else:
                _LOGGER.warning("Neither URL nor local path found in params!")
        except OSError as error:
            _LOGGER.error("Can't load from url or local path: %s", error)
        return None
|
nugget/home-assistant
|
homeassistant/components/notify/nfandroidtv.py
|
Python
|
apache-2.0
| 10,033
|
[
"Amber"
] |
3e4511afb6460362b48feb3a508ba6f7b8ec143cc60e20af994fcd78a19b616e
|
# -*- coding: utf-8 -*-
import os
import sys
import contextlib
import logging
from os.path import join as pjoin
from hashlib import md5
from shutil import copyfileobj
import numpy as np
import nibabel as nib
import tarfile
import zipfile
from dipy.core.gradients import (gradient_table,
gradient_table_from_gradient_strength_bvecs)
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, load_nifti_data
from urllib.request import urlopen
# Set a user-writeable file-system location to put files:
# DIPY_HOME in the environment overrides the default cache dir (~/.dipy).
dipy_home = os.environ.get('DIPY_HOME',
                           pjoin(os.path.expanduser('~'), '.dipy'))
# The URL to the University of Washington Researchworks repository:
UW_RW_URL = \
    "https://digital.lib.washington.edu/researchworks/bitstream/handle/"
class FetcherError(Exception):
    """Raised when a downloaded dataset file fails its md5 integrity check."""
    pass
def _log(msg):
"""Helper function used as short hand for logging.
"""
logger = logging.getLogger(__name__)
logger.info(msg)
def update_progressbar(progress, total_length):
    """Render a download progress bar on stdout.

    *progress* is a fraction between 0 and 1; *total_length* is the total
    download size in bytes (displayed in MB).
    """
    # TODO: To improve bar management, https://gist.github.com/jtriley/1108174
    bar_length = 40
    filled = int(round(bar_length * progress))
    bar = "#" * filled + "-" * (bar_length - filled)
    size_string = "{0:.2f} MB".format(float(total_length) / (1024 * 1024))
    sys.stdout.write("\rDownload Progress: [{0}] {1:.2f}% of {2}".format(
        bar, progress * 100, size_string))
    sys.stdout.flush()
def copyfileobj_withprogress(fsrc, fdst, total_length, length=16 * 1024):
    """Copy *fsrc* to *fdst* in chunks of *length* bytes, updating the
    progress bar after each chunk."""
    copied = 0
    while True:
        chunk = fsrc.read(length)
        if not chunk:
            break
        fdst.write(chunk)
        copied += len(chunk)
        update_progressbar(float(copied) / float(total_length), total_length)
def _already_there_msg(folder):
    """Log that *folder* already holds the requested dataset."""
    _log('Dataset is already in place. If you want to fetch it again '
         'please first remove the folder %s ' % folder)
def _get_file_md5(filename):
"""Compute the md5 checksum of a file"""
md5_data = md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5_data.block_size), b''):
md5_data.update(chunk)
return md5_data.hexdigest()
def check_md5(filename, stored_md5=None):
    """
    Computes the md5 of filename and check if it matches with the supplied
    string md5

    Parameters
    -----------
    filename : string
        Path to a file.
    stored_md5 : string
        Known md5 of filename to check against. If None (default), checking is
        skipped
    """
    # Only verify when a reference checksum was supplied.
    if stored_md5 is not None:
        computed_md5 = _get_file_md5(filename)
        if stored_md5 != computed_md5:
            msg = """The downloaded file, %s, does not have the expected md5
checksum of "%s". Instead, the md5 checksum was: "%s". This could mean that
something is wrong with the file or that the upstream file has been updated.
You can try downloading the file again or updating to the newest version of
dipy.""" % (filename, stored_md5,
            computed_md5)
            raise FetcherError(msg)
def _get_file_data(fname, url):
    """Download *url* into the local file *fname*, with a progress bar when
    the server reports a content length."""
    with contextlib.closing(urlopen(url)) as opener:
        # email.message-style headers return None for a missing key.
        response_size = opener.headers.get('content-length')
        with open(fname, 'wb') as out:
            if response_size is None:
                copyfileobj(opener, out)
            else:
                copyfileobj_withprogress(opener, out, response_size)
def fetch_data(files, folder, data_size=None):
    """Download files into *folder*, verifying their md5 checksums.

    Parameters
    ----------
    files : dict
        Maps each local filename to a ``(url, md5)`` pair. A file is only
        downloaded when it is missing or its checksum does not match.
    folder : str
        Destination directory; created when absent.
    data_size : str, optional
        Human-readable size of the data (e.g. "91 MB") logged before the
        download starts; ``None`` logs nothing.

    Raises
    ------
    FetcherError
        When a downloaded file fails its md5 check. The downloaded file is
        not deleted when this error is raised.
    """
    if not os.path.exists(folder):
        _log("Creating new folder %s" % (folder))
        os.makedirs(folder)
    if data_size is not None:
        _log('Data size is approximately %s' % data_size)
    all_skip = True
    for fname, (url, md5_sum) in files.items():
        fullpath = pjoin(folder, fname)
        # Skip files that are already present with the right checksum.
        if os.path.exists(fullpath) and _get_file_md5(fullpath) == md5_sum:
            continue
        all_skip = False
        _log('Downloading "%s" to %s' % (fname, folder))
        _get_file_data(fullpath, url)
        check_md5(fullpath, md5_sum)
    if all_skip:
        _already_there_msg(folder)
    else:
        _log("Files successfully downloaded to %s" % (folder))
def _make_fetcher(name, folder, baseurl, remote_fnames, local_fnames,
                  md5_list=None, doc="", data_size=None, msg=None,
                  unzip=False):
    """ Create a new fetcher

    Parameters
    ----------
    name : str
        The name of the fetcher function.
    folder : str
        The full path to the folder in which the files would be placed locally.
        Typically, this is something like 'pjoin(dipy_home, 'foo')'
    baseurl : str
        The URL from which this fetcher reads files
    remote_fnames : list of strings
        The names of the files in the baseurl location
    local_fnames : list of strings
        The names of the files to be saved on the local filesystem
    md5_list : list of strings, optional
        The md5 checksums of the files. Used to verify the content of the
        files. Default: None, skipping checking md5.
    doc : str, optional.
        Documentation of the fetcher.
    data_size : str, optional.
        If provided, is sent as a message to the user before downloading
        starts.
    msg : str, optional.
        A message to print to screen when fetching takes place. Default (None)
        is to print nothing
    unzip : bool, optional
        Whether to unzip the file(s) after downloading them. Supports zip, gz,
        and tar.gz files.

    returns
    -------
    fetcher : function
        A function that, when called, fetches data according to the designated
        inputs
    """
    def fetcher():
        # Map local filename -> (remote URL, expected md5 or None).
        files = {}
        for i, (f, n), in enumerate(zip(remote_fnames, local_fnames)):
            files[n] = (baseurl + f, md5_list[i] if
                        md5_list is not None else None)
        fetch_data(files, folder, data_size)
        if msg is not None:
            _log(msg)
        if unzip:
            for f in local_fnames:
                split_ext = os.path.splitext(f)
                if split_ext[-1] == '.gz' or split_ext[-1] == '.bz2':
                    # Only tar.gz / tar.bz2 archives are supported here.
                    if os.path.splitext(split_ext[0])[-1] == '.tar':
                        ar = tarfile.open(pjoin(folder, f))
                        ar.extractall(path=folder)
                        ar.close()
                    else:
                        raise ValueError('File extension is not recognized')
                elif split_ext[-1] == '.zip':
                    z = zipfile.ZipFile(pjoin(folder, f), 'r')
                    # Append the archive's member names to this file's entry
                    # so callers can locate the extracted files.
                    files[f] += (tuple(z.namelist()), )
                    z.extractall(folder)
                    z.close()
                else:
                    raise ValueError('File extension is not recognized')
        return files, folder
    # Give the generated function the requested public name and docstring.
    fetcher.__name__ = name
    fetcher.__doc__ = doc
    return fetcher
# --- Pre-built dataset fetchers -------------------------------------------
# Each call wires up a fetcher function: (public name, local cache folder,
# base URL, remote filenames, local filenames, md5 checksums, docs/metadata).
fetch_isbi2013_2shell = _make_fetcher(
    "fetch_isbi2013_2shell",
    pjoin(dipy_home, 'isbi2013'),
    UW_RW_URL + '1773/38465/',
    ['phantom64.nii.gz',
     'phantom64.bval',
     'phantom64.bvec'],
    ['phantom64.nii.gz', 'phantom64.bval', 'phantom64.bvec'],
    ['42911a70f232321cf246315192d69c42',
     '90e8cf66e0f4d9737a3b3c0da24df5ea',
     '4b7aa2757a1ccab140667b76e8075cb1'],
    doc="Download a 2-shell software phantom dataset",
    data_size="")

fetch_stanford_labels = _make_fetcher(
    "fetch_stanford_labels",
    pjoin(dipy_home, 'stanford_hardi'),
    'https://stacks.stanford.edu/file/druid:yx282xq2090/',
    ["aparc-reduced.nii.gz", "label_info.txt"],
    ["aparc-reduced.nii.gz", "label_info.txt"],
    ['742de90090d06e687ce486f680f6d71a',
     '39db9f0f5e173d7a2c2e51b07d5d711b'],
    doc="Download reduced freesurfer aparc image from stanford web site")

fetch_sherbrooke_3shell = _make_fetcher(
    "fetch_sherbrooke_3shell",
    pjoin(dipy_home, 'sherbrooke_3shell'),
    UW_RW_URL + "1773/38475/",
    ['HARDI193.nii.gz', 'HARDI193.bval', 'HARDI193.bvec'],
    ['HARDI193.nii.gz', 'HARDI193.bval', 'HARDI193.bvec'],
    ['0b735e8f16695a37bfbd66aab136eb66',
     'e9b9bb56252503ea49d31fb30a0ac637',
     '0c83f7e8b917cd677ad58a078658ebb7'],
    doc="Download a 3shell HARDI dataset with 192 gradient direction")

# Note: remote and local filenames intentionally differ for this dataset.
fetch_stanford_hardi = _make_fetcher(
    "fetch_stanford_hardi",
    pjoin(dipy_home, 'stanford_hardi'),
    'https://stacks.stanford.edu/file/druid:yx282xq2090/',
    ['dwi.nii.gz', 'dwi.bvals', 'dwi.bvecs'],
    ['HARDI150.nii.gz', 'HARDI150.bval', 'HARDI150.bvec'],
    ['0b18513b46132b4d1051ed3364f2acbc',
     '4e08ee9e2b1d2ec3fddb68c70ae23c36',
     '4c63a586f29afc6a48a5809524a76cb4'],
    doc="Download a HARDI dataset with 160 gradient directions")

fetch_stanford_t1 = _make_fetcher(
    "fetch_stanford_t1",
    pjoin(dipy_home, 'stanford_hardi'),
    'https://stacks.stanford.edu/file/druid:yx282xq2090/',
    ['t1.nii.gz'],
    ['t1.nii.gz'],
    ['a6a140da6a947d4131b2368752951b0a'])

fetch_stanford_pve_maps = _make_fetcher(
    "fetch_stanford_pve_maps",
    pjoin(dipy_home, 'stanford_hardi'),
    'https://stacks.stanford.edu/file/druid:yx282xq2090/',
    ['pve_csf.nii.gz', 'pve_gm.nii.gz', 'pve_wm.nii.gz'],
    ['pve_csf.nii.gz', 'pve_gm.nii.gz', 'pve_wm.nii.gz'],
    ['2c498e4fed32bca7f726e28aa86e9c18',
     '1654b20aeb35fc2734a0d7928b713874',
     '2e244983cf92aaf9f9d37bc7716b37d5'])

fetch_taiwan_ntu_dsi = _make_fetcher(
    "fetch_taiwan_ntu_dsi",
    pjoin(dipy_home, 'taiwan_ntu_dsi'),
    UW_RW_URL + "1773/38480/",
    ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt'],
    ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt'],
    ['950408c0980a7154cb188666a885a91f',
     '602e5cb5fad2e7163e8025011d8a6755',
     'a95eb1be44748c20214dc7aa654f9e6b',
     '7fa1d5e272533e832cc7453eeba23f44'],
    doc="Download a DSI dataset with 203 gradient directions",
    msg="See DSI203_license.txt for LICENSE. For the complete datasets" +
        " please visit http://dsi-studio.labsolver.org",
    data_size="91MB")

fetch_syn_data = _make_fetcher(
    "fetch_syn_data",
    pjoin(dipy_home, 'syn_test'),
    UW_RW_URL + "1773/38476/",
    ['t1.nii.gz', 'b0.nii.gz'],
    ['t1.nii.gz', 'b0.nii.gz'],
    ['701bda02bb769655c7d4a9b1df2b73a6',
     'e4b741f0c77b6039e67abb2885c97a78'],
    data_size="12MB",
    doc="Download t1 and b0 volumes from the same session")

fetch_mni_template = _make_fetcher(
    "fetch_mni_template",
    pjoin(dipy_home, 'mni_template'),
    'https://ndownloader.figshare.com/files/',
    ['5572676?private_link=4b8666116a0128560fb5',
     '5572673?private_link=93216e750d5a7e568bda',
     '5572670?private_link=33c92d54d1afb9aa7ed2',
     '5572661?private_link=584319b23e7343fed707'],
    ['mni_icbm152_t2_tal_nlin_asym_09a.nii',
     'mni_icbm152_t1_tal_nlin_asym_09a.nii',
     'mni_icbm152_t1_tal_nlin_asym_09c_mask.nii',
     'mni_icbm152_t1_tal_nlin_asym_09c.nii'],
    ['f41f2e1516d880547fbf7d6a83884f0d',
     '1ea8f4f1e41bc17a94602e48141fdbc8',
     'a243e249cd01a23dc30f033b9656a786',
     '3d5dd9b0cd727a17ceec610b782f66c1'],
    doc="fetch the MNI 2009a T1 and T2, and 2009c T1 and T1 mask files",
    data_size="70MB")

fetch_scil_b0 = _make_fetcher(
    "fetch_scil_b0",
    dipy_home,
    UW_RW_URL + "1773/38479/",
    ['datasets_multi-site_all_companies.zip'],
    ['datasets_multi-site_all_companies.zip'],
    ["e9810fa5bf21b99da786647994d7d5b7"],
    doc="Download b=0 datasets from multiple MR systems (GE, Philips, " +
        "Siemens) and different magnetic fields (1.5T and 3T)",
    data_size="9.2MB",
    unzip=True)

fetch_bundles_2_subjects = _make_fetcher(
    "fetch_bundles_2_subjects",
    pjoin(dipy_home, 'exp_bundles_and_maps'),
    UW_RW_URL + '1773/38477/',
    ['bundles_2_subjects.tar.gz'],
    ['bundles_2_subjects.tar.gz'],
    ['97756fbef11ce2df31f1bedf1fc7aac7'],
    data_size="234MB",
    doc="Download 2 subjects from the SNAIL dataset with their bundles",
    unzip=True)

fetch_ivim = _make_fetcher(
    "fetch_ivim",
    pjoin(dipy_home, 'ivim'),
    'https://ndownloader.figshare.com/files/',
    ['5305243', '5305246', '5305249'],
    ['ivim.nii.gz', 'ivim.bval', 'ivim.bvec'],
    ['cda596f89dc2676af7d9bf1cabccf600',
     'f03d89f84aa9a9397103a400e43af43a',
     'fb633a06b02807355e49ccd85cb92565'],
    doc="Download IVIM dataset")

fetch_cfin_multib = _make_fetcher(
    "fetch_cfin_multib",
    pjoin(dipy_home, 'cfin_multib'),
    UW_RW_URL + '/1773/38488/',
    ['T1.nii',
     '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii',
     '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval',
     '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec'],
    ['T1.nii',
     '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii',
     '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval',
     '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec'],
    ['889883b5e7d93a6e372bc760ea887e7c',
     '9daea1d01d68fd0055a3b34f5ffd5f6e',
     '3ee44135fde7ea5c9b8c801414bdde2c',
     '948373391de950e7cc1201ba9f696bf0'],
    doc="Download CFIN multi b-value diffusion data",
    msg=("This data was provided by Brian Hansen and Sune Jespersen" +
         " More details about the data are available in their paper: " +
         " https://www.nature.com/articles/sdata201672"))

fetch_file_formats = _make_fetcher(
    "bundle_file_formats_example",
    pjoin(dipy_home, 'bundle_file_formats_example'),
    'https://zenodo.org/record/3352379/files/',
    ['cc_m_sub.trk', 'laf_m_sub.tck', 'lpt_m_sub.fib',
     'raf_m_sub.vtk', 'rpt_m_sub.dpy', 'template0.nii.gz'],
    ['cc_m_sub.trk', 'laf_m_sub.tck', 'lpt_m_sub.fib',
     'raf_m_sub.vtk', 'rpt_m_sub.dpy', 'template0.nii.gz'],
    ['78ed7bead3e129fb4b4edd6da1d7e2d2', '20009796ccd43dc8d2d5403b25dff717',
     '8afa8419e2efe04ede75cce1f53c77d8', '9edcbea30c7a83b467c3cdae6ce963c8',
     '42bff2538a650a7ff1e57bfd9ed90ad6', '99c37a2134026d2c4bbb7add5088ddc6'],
    doc="Download 5 bundles in various file formats and their reference",
    data_size="25MB")

fetch_bundle_atlas_hcp842 = _make_fetcher(
    "fetch_bundle_atlas_hcp842",
    pjoin(dipy_home, 'bundle_atlas_hcp842'),
    'https://ndownloader.figshare.com/files/',
    ['13638644'],
    ['Atlas_80_Bundles.zip'],
    ['78331d527a10ec000d4f33bac472e099'],
    doc="Download atlas tractogram from the hcp842 dataset with 80 bundles",
    data_size="300MB",
    unzip=True)

fetch_target_tractogram_hcp = _make_fetcher(
    "fetch_target_tractogram_hcp",
    pjoin(dipy_home, 'target_tractogram_hcp'),
    'https://ndownloader.figshare.com/files/',
    ['12871127'],
    ['hcp_tractogram.zip'],
    ['fa25ef19c9d3748929b6423397963b6a'],
    doc="Download tractogram of one of the hcp dataset subjects",
    data_size="541MB",
    unzip=True)

fetch_bundle_fa_hcp = _make_fetcher(
    "fetch_bundle_fa_hcp",
    pjoin(dipy_home, 'bundle_fa_hcp'),
    'https://ndownloader.figshare.com/files/',
    ['14035265'],
    ['hcp_bundle_fa.nii.gz'],
    ['2d5c0036b0575597378ddf39191028ea'],
    doc=("Download map of FA within two bundles in one" +
         "of the hcp dataset subjects"),
    data_size="230kb")

fetch_qtdMRI_test_retest_2subjects = _make_fetcher(
    "fetch_qtdMRI_test_retest_2subjects",
    pjoin(dipy_home, 'qtdMRI_test_retest_2subjects'),
    'https://zenodo.org/record/996889/files/',
    ['subject1_dwis_test.nii.gz', 'subject2_dwis_test.nii.gz',
     'subject1_dwis_retest.nii.gz', 'subject2_dwis_retest.nii.gz',
     'subject1_ccmask_test.nii.gz', 'subject2_ccmask_test.nii.gz',
     'subject1_ccmask_retest.nii.gz', 'subject2_ccmask_retest.nii.gz',
     'subject1_scheme_test.txt', 'subject2_scheme_test.txt',
     'subject1_scheme_retest.txt', 'subject2_scheme_retest.txt'],
    ['subject1_dwis_test.nii.gz', 'subject2_dwis_test.nii.gz',
     'subject1_dwis_retest.nii.gz', 'subject2_dwis_retest.nii.gz',
     'subject1_ccmask_test.nii.gz', 'subject2_ccmask_test.nii.gz',
     'subject1_ccmask_retest.nii.gz', 'subject2_ccmask_retest.nii.gz',
     'subject1_scheme_test.txt', 'subject2_scheme_test.txt',
     'subject1_scheme_retest.txt', 'subject2_scheme_retest.txt'],
    ['ebd7441f32c40e25c28b9e069bd81981',
     'dd6a64dd68c8b321c75b9d5fb42c275a',
     '830a7a028a66d1b9812f93309a3f9eae',
     'd7f1951e726c35842f7ea0a15d990814',
     'ddb8dfae908165d5e82c846bcc317cab',
     '5630c06c267a0f9f388b07b3e563403c',
     '02e9f92b31e8980f658da99e532e14b5',
     '6e7ce416e7cfda21cecce3731f81712b',
     '957cb969f97d89e06edd7a04ffd61db0',
     '5540c0c9bd635c29fc88dd599cbbf5e6',
     '5540c0c9bd635c29fc88dd599cbbf5e6',
     '5540c0c9bd635c29fc88dd599cbbf5e6'],
    doc="Downloads test-retest qt-dMRI acquisitions of two C57Bl6 mice.",
    data_size="298.2MB")

fetch_gold_standard_io = _make_fetcher(
    "fetch_gold_standard_io",
    pjoin(dipy_home, 'gold_standard_io'),
    'https://zenodo.org/record/2651349/files/',
    ['gs.trk', 'gs.tck', 'gs.fib', 'gs.dpy', 'gs.nii', 'gs_3mm.nii',
     'gs_rasmm_space.txt', 'gs_voxmm_space.txt', 'gs_vox_space.txt',
     'points_data.txt', 'streamlines_data.txt'],
    ['gs.trk', 'gs.tck', 'gs.fib', 'gs.dpy', 'gs.nii', 'gs_3mm.nii',
     'gs_rasmm_space.txt', 'gs_voxmm_space.txt', 'gs_vox_space.txt',
     'points_data.json', 'streamlines_data.json'],
    ['3acf565779f4d5107f96b2ef90578d64',
     '151a30cf356c002060d720bf9d577245',
     'e9818e07bef5bd605dea0877df14a2b0',
     '248606297e400d1a9b1786845aad8de3',
     'a2d4d8f62d1de0ab9927782c7d51cb27',
     '217b3ae0712a02b2463b8eedfe9a0a68',
     'ca193a5508d3313d542231aaf262960f',
     '3284de59dfd9ca3130e6e01258ed9022',
     'a2a89c387f45adab733652a92f6602d5',
     '4bcca0c6195871fc05e93cdfabec22b4',
     '578f29052ac03a6d8a98580eb7c70d97'],
    doc="Downloads the gold standard for streamlines io testing.",
    data_size="47.KB")
def get_fnames(name='small_64D'):
    """Provide full paths to example or test datasets.

    Parameters
    ----------
    name : str
        the filename/s of which dataset to return, one of:

        - 'small_64D' small region of interest nifti,bvecs,bvals 64 directions
        - 'small_101D' small region of interest nifti, bvecs, bvals
          101 directions
        - 'aniso_vox' volume with anisotropic voxel size as Nifti
        - 'fornix' 300 tracks in Trackvis format (from Pittsburgh
          Brain Competition)
        - 'gqi_vectors' the scanner wave vectors needed for a GQI acquisitions
          of 101 directions tested on Siemens 3T Trio
        - 'small_25' small ROI (10x8x2) DTI data (b value 2000, 25 directions)
        - 'test_piesno' slice of N=8, K=14 diffusion data
        - 'reg_c' small 2D image used for validating registration
        - 'reg_o' small 2D image used for validation registration
        - 'cb_2' two vectorized cingulum bundles

    Returns
    -------
    fnames : tuple
        filenames for dataset

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.io.image import load_nifti
    >>> from dipy.data import get_fnames
    >>> fimg, fbvals, fbvecs = get_fnames('small_101D')
    >>> bvals=np.loadtxt(fbvals)
    >>> bvecs=np.loadtxt(fbvecs).T
    >>> data, affine = load_nifti(fimg)
    >>> data.shape == (6, 10, 10, 102)
    True
    >>> bvals.shape == (102,)
    True
    >>> bvecs.shape == (102, 3)
    True
    """
    # Small bundled datasets live next to this module; larger ones are
    # fetched (and cached) on demand by the fetch_* helpers.
    DATA_DIR = pjoin(os.path.dirname(__file__), 'files')
    if name == 'small_64D':
        fbvals = pjoin(DATA_DIR, 'small_64D.bval')
        fbvecs = pjoin(DATA_DIR, 'small_64D.bvec')
        fimg = pjoin(DATA_DIR, 'small_64D.nii')
        return fimg, fbvals, fbvecs
    if name == '55dir_grad.bvec':
        return pjoin(DATA_DIR, '55dir_grad.bvec')
    if name == 'small_101D':
        fbvals = pjoin(DATA_DIR, 'small_101D.bval')
        fbvecs = pjoin(DATA_DIR, 'small_101D.bvec')
        fimg = pjoin(DATA_DIR, 'small_101D.nii.gz')
        return fimg, fbvals, fbvecs
    if name == 'aniso_vox':
        return pjoin(DATA_DIR, 'aniso_vox.nii.gz')
    if name == 'ascm_test':
        return pjoin(DATA_DIR, 'ascm_out_test.nii.gz')
    if name == 'fornix':
        return pjoin(DATA_DIR, 'tracks300.trk')
    if name == 'gqi_vectors':
        return pjoin(DATA_DIR, 'ScannerVectors_GQI101.txt')
    if name == 'dsi515btable':
        return pjoin(DATA_DIR, 'dsi515_b_table.txt')
    if name == 'dsi4169btable':
        return pjoin(DATA_DIR, 'dsi4169_b_table.txt')
    if name == 'grad514':
        return pjoin(DATA_DIR, 'grad_514.txt')
    if name == "small_25":
        fbvals = pjoin(DATA_DIR, 'small_25.bval')
        fbvecs = pjoin(DATA_DIR, 'small_25.bvec')
        fimg = pjoin(DATA_DIR, 'small_25.nii.gz')
        return fimg, fbvals, fbvecs
    if name == 'small_25_streamlines':
        fstreamlines = pjoin(DATA_DIR, 'EuDX_small_25.trk')
        return fstreamlines
    if name == "S0_10":
        fimg = pjoin(DATA_DIR, 'S0_10slices.nii.gz')
        return fimg
    if name == "test_piesno":
        fimg = pjoin(DATA_DIR, 'test_piesno.nii.gz')
        return fimg
    if name == "reg_c":
        return pjoin(DATA_DIR, 'C.npy')
    if name == "reg_o":
        return pjoin(DATA_DIR, 'circle.npy')
    if name == 'cb_2':
        return pjoin(DATA_DIR, 'cb_2.npz')
    if name == "t1_coronal_slice":
        return pjoin(DATA_DIR, 't1_coronal_slice.npy')
    if name == "t-design":
        N = 45
        return pjoin(DATA_DIR, 'tdesign' + str(N) + '.txt')
    if name == 'scil_b0':
        files, folder = fetch_scil_b0()
        files = files['datasets_multi-site_all_companies.zip'][2]
        files = [pjoin(folder, f) for f in files]
        return [f for f in files if os.path.isfile(f)]
    if name == 'stanford_hardi':
        files, folder = fetch_stanford_hardi()
        fraw = pjoin(folder, 'HARDI150.nii.gz')
        fbval = pjoin(folder, 'HARDI150.bval')
        fbvec = pjoin(folder, 'HARDI150.bvec')
        return fraw, fbval, fbvec
    if name == 'taiwan_ntu_dsi':
        files, folder = fetch_taiwan_ntu_dsi()
        fraw = pjoin(folder, 'DSI203.nii.gz')
        fbval = pjoin(folder, 'DSI203.bval')
        fbvec = pjoin(folder, 'DSI203.bvec')
        return fraw, fbval, fbvec
    if name == 'sherbrooke_3shell':
        files, folder = fetch_sherbrooke_3shell()
        fraw = pjoin(folder, 'HARDI193.nii.gz')
        fbval = pjoin(folder, 'HARDI193.bval')
        fbvec = pjoin(folder, 'HARDI193.bvec')
        return fraw, fbval, fbvec
    if name == 'isbi2013_2shell':
        files, folder = fetch_isbi2013_2shell()
        fraw = pjoin(folder, 'phantom64.nii.gz')
        fbval = pjoin(folder, 'phantom64.bval')
        fbvec = pjoin(folder, 'phantom64.bvec')
        return fraw, fbval, fbvec
    if name == 'stanford_labels':
        files, folder = fetch_stanford_labels()
        return pjoin(folder, "aparc-reduced.nii.gz")
    if name == 'syn_data':
        files, folder = fetch_syn_data()
        t1_name = pjoin(folder, 't1.nii.gz')
        b0_name = pjoin(folder, 'b0.nii.gz')
        return t1_name, b0_name
    if name == 'stanford_t1':
        files, folder = fetch_stanford_t1()
        return pjoin(folder, 't1.nii.gz')
    if name == 'stanford_pve_maps':
        files, folder = fetch_stanford_pve_maps()
        f_pve_csf = pjoin(folder, 'pve_csf.nii.gz')
        f_pve_gm = pjoin(folder, 'pve_gm.nii.gz')
        f_pve_wm = pjoin(folder, 'pve_wm.nii.gz')
        return f_pve_csf, f_pve_gm, f_pve_wm
    if name == 'ivim':
        files, folder = fetch_ivim()
        fraw = pjoin(folder, 'ivim.nii.gz')
        fbval = pjoin(folder, 'ivim.bval')
        fbvec = pjoin(folder, 'ivim.bvec')
        return fraw, fbval, fbvec
    if name == 'tissue_data':
        files, folder = fetch_tissue_data()
        t1_name = pjoin(folder, 't1_brain.nii.gz')
        t1d_name = pjoin(folder, 't1_brain_denoised.nii.gz')
        ap_name = pjoin(folder, 'power_map.nii.gz')
        return t1_name, t1d_name, ap_name
    if name == 'cfin_multib':
        files, folder = fetch_cfin_multib()
        t1_name = pjoin(folder, 'T1.nii')
        fraw = pjoin(folder, '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii')
        fbval = pjoin(folder,
                      '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval')
        fbvec = pjoin(folder,
                      '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec')
        return fraw, fbval, fbvec, t1_name
    if name in ('target_tractogram_hcp', 'target_tractrogram_hcp'):
        # Previously only the misspelled 'target_tractrogram_hcp' was
        # matched, so the documented spelling silently returned None.
        # Both spellings are accepted for backward compatibility.
        files, folder = fetch_target_tractogram_hcp()
        return pjoin(folder, 'target_tractogram_hcp', 'hcp_tractogram',
                     'streamlines.trk')
    if name == 'bundle_atlas_hcp842':
        files, folder = fetch_bundle_atlas_hcp842()
        return get_bundle_atlas_hcp842()
def read_qtdMRI_test_retest_2subjects():
    r""" Load test-retest qt-dMRI acquisitions of two C57Bl6 mice.

    These datasets were used to study test-retest reproducibility of
    time-dependent q-space indices (q$\tau$-indices) in the corpus callosum
    of two mice [1]. The data itself and its details are publicly available
    and can be cited at [2].

    The test-retest diffusion MRI spin echo sequences were acquired from two
    C57Bl6 wild-type mice on an 11.7 Tesla Bruker scanner. The test and
    retest acquisition were taken 48 hours from each other. The (processed)
    data consists of 80x160x5 voxels of size 110x110x500μm. Each data set
    consists of 515 Diffusion-Weighted Images (DWIs) spread over 35
    acquisition shells. The shells are spread over 7 gradient strength shells
    with a maximum gradient strength of 491 mT/m, 5 pulse separation shells
    between [10.8 - 20.0]ms, and a pulse length of 5ms. We manually created a
    brain mask and corrected the data from eddy currents and motion artifacts
    using FSL's eddy. A region of interest was then drawn in the middle slice
    in the corpus callosum, where the tissue is reasonably coherent.

    Returns
    -------
    data : list of length 4
        contains the dwi datasets ordered as
        (subject1_test, subject1_retest, subject2_test, subject2_retest)
    cc_masks : list of length 4
        contains the corpus callosum masks ordered in the same order as data.
    gtabs : list of length 4
        contains the qt-dMRI gradient tables of the data sets.

    References
    ----------
    .. [1] Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
           Representation of dMRI in Space and Time", Medical Image Analysis,
           2017.
    .. [2] Wassermann, Demian, et al., "Test-Retest qt-dMRI datasets for
           `Non-Parametric GraphNet-Regularized Representation of dMRI in
           Space and Time'". doi:10.5281/zenodo.996889, 2017.
    """
    base_dir = pjoin(dipy_home, 'qtdMRI_test_retest_2subjects')
    # Fixed ordering: subject1 test/retest, then subject2 test/retest.
    acquisitions = [(subj, sess)
                    for subj in ('subject1', 'subject2')
                    for sess in ('test', 'retest')]

    data = [load_nifti_data(pjoin(base_dir, '%s_dwis_%s.nii.gz' % pair))
            for pair in acquisitions]
    cc_masks = [load_nifti_data(pjoin(base_dir, '%s_ccmask_%s.nii.gz' % pair))
                for pair in acquisitions]

    gtabs = []
    for pair in acquisitions:
        scheme = np.loadtxt(pjoin(base_dir, '%s_scheme_%s.txt' % pair),
                            skiprows=1)
        directions = scheme[:, 1:4]
        gradient_strength = scheme[:, 4] / 1e3  # dipy takes T/mm, not T/m
        small_delta = scheme[:, 5]
        big_delta = scheme[:, 6]
        gtabs.append(gradient_table_from_gradient_strength_bvecs(
            gradient_strength, directions, big_delta, small_delta
        ))
    return data, cc_masks, gtabs
def read_scil_b0():
    """Load GE 3T b0 image form the scil b0 dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    """
    # The GE 3T acquisition is the first entry of the scil_b0 file list.
    ge_fname = get_fnames('scil_b0')[0]
    return nib.load(ge_fname)
def read_siemens_scil_b0():
    """Load Siemens 1.5T b0 image from the scil b0 dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    """
    # The Siemens 1.5T acquisition is the second entry of the file list.
    siemens_fname = get_fnames('scil_b0')[1]
    return nib.load(siemens_fname)
def read_isbi2013_2shell():
    """Load ISBI 2013 2-shell synthetic dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    gtab : obj,
        GradientTable
    """
    dwi_fname, bval_fname, bvec_fname = get_fnames('isbi2013_2shell')
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    return nib.load(dwi_fname), gradient_table(bvals, bvecs)
def read_sherbrooke_3shell():
    """Load Sherbrooke 3-shell HARDI dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    gtab : obj,
        GradientTable
    """
    dwi_fname, bval_fname, bvec_fname = get_fnames('sherbrooke_3shell')
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    return nib.load(dwi_fname), gradient_table(bvals, bvecs)
def read_stanford_labels():
    """Read stanford hardi data and label map.

    Returns
    -------
    hardi_img : Nifti1Image with the HARDI volume
    gtab : GradientTable for the HARDI acquisition
    labels_img : Nifti1Image with the (reduced) aparc label map
    """
    # Reuse the HARDI reader for the diffusion data and its gradient table.
    hardi_img, gtab = read_stanford_hardi()
    # Fetch and load the matching label volume.
    labels_img = nib.load(get_fnames('stanford_labels'))
    return hardi_img, gtab, labels_img
def read_stanford_hardi():
    """Load Stanford HARDI dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    gtab : obj,
        GradientTable
    """
    dwi_fname, bval_fname, bvec_fname = get_fnames('stanford_hardi')
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    return nib.load(dwi_fname), gradient_table(bvals, bvecs)
def read_stanford_t1():
    """Load the Stanford T1-weighted volume as a Nifti1Image."""
    return nib.load(get_fnames('stanford_t1'))
def read_stanford_pve_maps():
    """Load the Stanford partial-volume maps (CSF, GM, WM) as Nifti1Images.

    Returns
    -------
    tuple of three Nifti1Image objects, ordered (csf, gm, wm).
    """
    pve_fnames = get_fnames('stanford_pve_maps')
    return tuple(nib.load(fname) for fname in pve_fnames)
def read_taiwan_ntu_dsi():
    """Load Taiwan NTU dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    gtab : obj,
        GradientTable
    """
    dwi_fname, bval_fname, bvec_fname = get_fnames('taiwan_ntu_dsi')
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    # Normalize every direction except the first (the b0) to unit length.
    norms = np.sqrt((bvecs[1:] ** 2).sum(axis=1))
    bvecs[1:] = bvecs[1:] / norms[:, None]
    return nib.load(dwi_fname), gradient_table(bvals, bvecs)
def read_syn_data():
    """Load t1 and b0 volumes from the same session.

    Returns
    -------
    t1 : obj,
        Nifti1Image
    b0 : obj,
        Nifti1Image
    """
    t1_fname, b0_fname = get_fnames('syn_data')
    return nib.load(t1_fname), nib.load(b0_fname)
def fetch_tissue_data():
    """ Download images to be used for tissue classification.

    Returns
    -------
    fname_list : list of the three downloaded file names
    folder : local folder the files live in
    """
    folder = pjoin(dipy_home, 'tissue_data')
    fname_list = ['t1_brain.nii.gz', 't1_brain_denoised.nii.gz',
                  'power_map.nii.gz']
    # Download URLs and md5 checksums, in the same order as fname_list.
    url_list = ['https://ndownloader.figshare.com/files/6965969',  # t1
                'https://ndownloader.figshare.com/files/6965981',  # t1d
                'https://ndownloader.figshare.com/files/6965984']  # ap
    md5_list = ['99c4b77267a6855cbfd96716d5d65b70',  # t1
                '4b87e1b02b19994fbd462490cc784fa3',  # t1d
                'c0ea00ed7f2ff8b28740f18aa74bff6a']  # ap

    if os.path.exists(folder):
        _already_there_msg(folder)
    else:
        _log('Creating new directory %s' % folder)
        os.makedirs(folder)
        _log('Downloading 3 Nifti1 images (9.3MB)...')
        for fname, url, md5 in zip(fname_list, url_list, md5_list):
            local_path = pjoin(folder, fname)
            _get_file_data(local_path, url)
            check_md5(local_path, md5)
        _log('Done.')
        _log('Files copied in folder %s' % folder)
    return fname_list, folder
def read_tissue_data(contrast='T1'):
    """ Load images to be used for tissue classification.

    Parameters
    ----------
    constrast : str
        'T1', 'T1 denoised' or 'Anisotropic Power'

    Returns
    -------
    image : obj,
        Nifti1Image
    """
    folder = pjoin(dipy_home, 'tissue_data')
    # md5 checks run for all three volumes regardless of contrast choice,
    # mirroring the checksums used by fetch_tissue_data.
    checksums = {
        't1_brain.nii.gz': '99c4b77267a6855cbfd96716d5d65b70',
        't1_brain_denoised.nii.gz': '4b87e1b02b19994fbd462490cc784fa3',
        'power_map.nii.gz': 'c0ea00ed7f2ff8b28740f18aa74bff6a',
    }
    for fname, md5 in checksums.items():
        check_md5(pjoin(folder, fname), md5)

    # Any unrecognized contrast falls back to the plain T1 volume.
    selection = {'T1 denoised': 't1_brain_denoised.nii.gz',
                 'Anisotropic Power': 'power_map.nii.gz'}
    chosen = selection.get(contrast, 't1_brain.nii.gz')
    return nib.load(pjoin(folder, chosen))
mni_notes = \
"""
Notes
-----
The templates were downloaded from the MNI (McGill University)
`website <http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009>`_
in July 2015.
The following publications should be referenced when using these templates:
.. [1] VS Fonov, AC Evans, K Botteron, CR Almli, RC McKinstry, DL Collins
and BDCG, Unbiased average age-appropriate atlases for pediatric
studies, NeuroImage, 54:1053-8119,
DOI: 10.1016/j.neuroimage.2010.07.033
.. [2] VS Fonov, AC Evans, RC McKinstry, CR Almli and DL Collins,
Unbiased nonlinear average age-appropriate brain templates from
birth to adulthood, NeuroImage, 47:S102
Organization for Human Brain Mapping 2009 Annual Meeting,
DOI: https://doi.org/10.1016/S1053-8119(09)70884-5
**License for the MNI templates:**
Copyright (C) 1993-2004, Louis Collins McConnell Brain Imaging Centre,
Montreal Neurological Institute, McGill University. Permission to use,
copy, modify, and distribute this software and its documentation for any
purpose and without fee is hereby granted, provided that the above
copyright notice appear in all copies. The authors and McGill University
make no representations about the suitability of this software for any
purpose. It is provided "as is" without express or implied warranty. The
authors are not responsible for any data loss, equipment damage, property
loss, or injury to subjects or patients resulting from the use or misuse
of this software package.
"""
def read_mni_template(version="a", contrast="T2"):
    """Read the MNI template from disk.

    Parameters
    ----------
    version: string
        There are two MNI templates 2009a and 2009c, so options available are:
        "a" and "c".
    contrast : list or string, optional
        Which of the contrast templates to read. For version "a" two contrasts
        are available: "T1" and "T2". Similarly for version "c" there are two
        options, "T1" and "mask". You can input contrast as a string or a list

    Returns
    -------
    list : contains the nibabel.Nifti1Image objects requested, according to the
        order they were requested in the input.

    Raises
    ------
    ValueError
        If ``version`` is not "a"/"c", or if a requested contrast is not
        available for the chosen version.

    Examples
    --------
    >>> # Get only the T1 file for version c:
    >>> T1 = read_mni_template("c", contrast = "T1") # doctest: +SKIP
    >>> # Get both files in this order for version a:
    >>> T1, T2 = read_mni_template(contrast = ["T1", "T2"]) # doctest: +SKIP
    """
    files, folder = fetch_mni_template()
    file_dict_a = {"T1": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09a.nii'),
                   "T2": pjoin(folder, 'mni_icbm152_t2_tal_nlin_asym_09a.nii')}
    file_dict_c = {
        "T1": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09c.nii'),
        "mask": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09c_mask.nii')}

    # Normalize to a list so string and list inputs share one validation
    # path. The original only rejected an unavailable contrast given as a
    # string (or "T2" in a list for version "c"); a list containing "mask"
    # with version "a" slipped through and raised a bare KeyError later.
    contrasts = [contrast] if isinstance(contrast, str) else list(contrast)

    if version == "a":
        file_dict = file_dict_a
        if "mask" in contrasts:
            raise ValueError("No template mask available for MNI 2009a")
    elif version == "c":
        file_dict = file_dict_c
        if "T2" in contrasts:
            raise ValueError("No T2 image for MNI template 2009c")
    else:
        raise ValueError("Only 2009a and 2009c versions are available")

    if isinstance(contrast, str):
        return nib.load(file_dict[contrast])
    return [nib.load(file_dict[k]) for k in contrasts]
# Add the references (mni_notes, defined above) to both MNI-related
# functions' docstrings:
read_mni_template.__doc__ += mni_notes
fetch_mni_template.__doc__ += mni_notes
def fetch_cenir_multib(with_raw=False):
    """Fetch 'HCP-like' data, collected at multiple b-values.

    Parameters
    ----------
    with_raw : bool
        Whether to fetch the raw data. Per default, this is False, which means
        that only eddy-current/motion corrected data is fetched

    Returns
    -------
    files : dict
        Maps each file name to its (url, md5) pair, as used by fetch_data.
    folder : str
        Local folder the files are downloaded into.
    """
    folder = pjoin(dipy_home, 'cenir_multib')
    # One corrected DWI volume plus bvals/bvecs per b-value shell.
    fname_list = ['4D_dwi_eddycor_B200.nii.gz',
                  'dwi_bvals_B200', 'dwi_bvecs_B200',
                  '4D_dwieddycor_B400.nii.gz',
                  'bvals_B400', 'bvecs_B400',
                  '4D_dwieddycor_B1000.nii.gz',
                  'bvals_B1000', 'bvecs_B1000',
                  '4D_dwieddycor_B2000.nii.gz',
                  'bvals_B2000', 'bvecs_B2000',
                  '4D_dwieddycor_B3000.nii.gz',
                  'bvals_B3000', 'bvecs_B3000']
    # md5 checksums, in the same order as fname_list.
    md5_list = ['fd704aa3deb83c1c7229202cb3db8c48',
                '80ae5df76a575fe5bf9f1164bb0d4cfb',
                '18e90f8a3e6a4db2457e5b1ba1cc98a9',
                '3d0f2b8ef7b6a4a3aa5c4f7a90c9cfec',
                'c38056c40c9cc42372232d6e75c47f54',
                '810d79b4c30cb7dff3b2000017d5f72a',
                'dde8037601a14436b2173f4345b5fd17',
                '97de6a492ae304f39e0b418b6ebac64c',
                'f28a0faa701bdfc66e31bde471a5b992',
                'c5e4b96e3afdee99c0e994eff3b2331a',
                '9c83b8d5caf9c3def240f320f2d2f56c',
                '05446bd261d57193d8dbc097e06db5ff',
                'f0d70456ce424fda2cecd48e64f3a151',
                '336accdb56acbbeff8dac1748d15ceb8',
                '27089f3baaf881d96f6a9da202e3d69b']
    if with_raw:
        # Optionally also fetch the uncorrected 4D volumes.
        fname_list.extend(['4D_dwi_B200.nii.gz', '4D_dwi_B400.nii.gz',
                           '4D_dwi_B1000.nii.gz', '4D_dwi_B2000.nii.gz',
                           '4D_dwi_B3000.nii.gz'])
        md5_list.extend(['a8c36e76101f2da2ca8119474ded21d5',
                         'a0e7939f6d977458afbb2f4659062a79',
                         '87fc307bdc2e56e105dffc81b711a808',
                         '7c23e8a5198624aa29455f0578025d4f',
                         '4e4324c676f5a97b3ded8bbb100bf6e5'])
    files = {}
    baseurl = UW_RW_URL + '1773/33311/'
    for f, m in zip(fname_list, md5_list):
        files[f] = (baseurl + f, m)
    fetch_data(files, folder)
    return files, folder
def read_cenir_multib(bvals=None):
    """Read CENIR multi b-value data.

    Parameters
    ----------
    bvals : list or int
        The b-values to read from file (200, 400, 1000, 2000, 3000).

    Returns
    -------
    gtab : a GradientTable class instance
    img : nibabel.Nifti1Image
    """
    files, folder = fetch_cenir_multib(with_raw=False)
    if bvals is None:
        bvals = [200, 400, 1000, 2000, 3000]
    if isinstance(bvals, int):
        bvals = [bvals]
    # Per-shell file triples; note the B200 shell uses a different file
    # naming scheme ('dwi_' prefix, '4D_dwi_eddycor') than the other shells.
    file_dict = {200: {'DWI': pjoin(folder, '4D_dwi_eddycor_B200.nii.gz'),
                       'bvals': pjoin(folder, 'dwi_bvals_B200'),
                       'bvecs': pjoin(folder, 'dwi_bvecs_B200')},
                 400: {'DWI': pjoin(folder, '4D_dwieddycor_B400.nii.gz'),
                       'bvals': pjoin(folder, 'bvals_B400'),
                       'bvecs': pjoin(folder, 'bvecs_B400')},
                 1000: {'DWI': pjoin(folder, '4D_dwieddycor_B1000.nii.gz'),
                        'bvals': pjoin(folder, 'bvals_B1000'),
                        'bvecs': pjoin(folder, 'bvecs_B1000')},
                 2000: {'DWI': pjoin(folder, '4D_dwieddycor_B2000.nii.gz'),
                        'bvals': pjoin(folder, 'bvals_B2000'),
                        'bvecs': pjoin(folder, 'bvecs_B2000')},
                 3000: {'DWI': pjoin(folder, '4D_dwieddycor_B3000.nii.gz'),
                        'bvals': pjoin(folder, 'bvals_B3000'),
                        'bvecs': pjoin(folder, 'bvecs_B3000')}}
    data = []
    bval_list = []
    bvec_list = []
    for bval in bvals:
        data.append(load_nifti_data(file_dict[bval]['DWI']))
        bval_list.extend(np.loadtxt(file_dict[bval]['bvals']))
        bvec_list.append(np.loadtxt(file_dict[bval]['bvecs']))

    # All affines are the same, so grab the last one:
    aff = nib.load(file_dict[bval]['DWI']).affine
    # Shell volumes are concatenated along the last (volume) axis and the
    # bvec arrays along their last axis to build one combined acquisition.
    return (nib.Nifti1Image(np.concatenate(data, -1), aff),
            gradient_table(bval_list, np.concatenate(bvec_list, -1)))
CENIR_notes = \
"""
Notes
-----
Details of the acquisition and processing, and additional meta-data are
available through UW researchworks:
https://digital.lib.washington.edu/researchworks/handle/1773/33311
"""
fetch_cenir_multib.__doc__ += CENIR_notes
read_cenir_multib.__doc__ += CENIR_notes
def read_bundles_2_subjects(subj_id='subj_1', metrics=None, bundles=None):
    r"""Read images and streamlines from 2 subjects of the SNAIL dataset.

    Parameters
    ----------
    subj_id : string
        Either ``subj_1`` or ``subj_2``.
    metrics : list, optional
        Either ['fa'] or ['t1'] or ['fa', 't1']. Default: ['fa'].
    bundles : list, optional
        E.g., ['af.left', 'cst.right', 'cc_1']. See all the available bundles
        in the ``exp_bundles_maps/bundles_2_subjects`` directory of your
        ``$HOME/.dipy`` folder. Default: ['af.left', 'cst.right', 'cc_1'].

    Returns
    -------
    dix : dict
        Dictionary with data of the metrics and the bundles as keys.

    Notes
    -----
    If you are using these datasets please cite the following publications.

    References
    ----------
    .. [1] Renauld, E., M. Descoteaux, M. Bernier, E. Garyfallidis,
           K. Whittingstall, "Morphology of thalamus, LGN and optic radiation
           do not influence EEG alpha waves", Plos One (under submission),
           2015.
    .. [2] Garyfallidis, E., O. Ocegueda, D. Wassermann, M. Descoteaux.
           Robust and efficient linear registration of fascicles in the
           space of streamlines, Neuroimage, 117:124-140, 2015.
    """
    # Mutable default arguments are shared across calls; use None sentinels
    # and fall back to the documented defaults here instead.
    if metrics is None:
        metrics = ['fa']
    if bundles is None:
        bundles = ['af.left', 'cst.right', 'cc_1']

    dname = pjoin(dipy_home, 'exp_bundles_and_maps', 'bundles_2_subjects')

    from dipy.io.streamline import load_tractogram
    from dipy.tracking.streamline import Streamlines

    res = {}

    if 't1' in metrics:
        data, affine = load_nifti(pjoin(dname, subj_id, 't1_warped.nii.gz'))
        res['t1'] = data

    if 'fa' in metrics:
        fa, affine = load_nifti(pjoin(dname, subj_id, 'fa_1x1x1.nii.gz'))
        res['fa'] = fa

    # NOTE: relies on at least one metric being requested; otherwise
    # `affine` is unbound (same behavior as the original implementation).
    res['affine'] = affine

    for bun in bundles:
        streams = load_tractogram(pjoin(dname, subj_id,
                                        'bundles', 'bundles_' + bun + '.trk'),
                                  'same',
                                  bbox_valid_check=False).streamlines
        streamlines = Streamlines(streams)
        res[bun] = streamlines
    return res
def read_ivim():
    """Load IVIM dataset.

    Returns
    -------
    img : obj,
        Nifti1Image
    gtab : obj,
        GradientTable
    """
    dwi_fname, bval_fname, bvec_fname = get_fnames('ivim')
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    # b0_threshold=0 is kept from the original implementation.
    gtab = gradient_table(bvals, bvecs, b0_threshold=0)
    return nib.load(dwi_fname), gtab
def read_cfin_dwi():
    """Load CFIN multi b-value DWI data.

    Returns
    -------
    img : obj,
        Nifti1Image
    gtab : obj,
        GradientTable
    """
    # The last entry of the cfin_multib file list is the T1, unused here.
    dwi_fname, bval_fname, bvec_fname, _ = get_fnames('cfin_multib')
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    return nib.load(dwi_fname), gradient_table(bvals, bvecs)
def read_cfin_t1():
    """Load CFIN T1-weighted data.

    Returns
    -------
    img : obj,
        Nifti1Image
    """
    # The T1 file name is the fourth entry of the cfin_multib file list.
    t1_fname = get_fnames('cfin_multib')[3]
    return nib.load(t1_fname)
def get_file_formats():
    """Return example bundle paths in several formats plus a reference image.

    Returns
    -------
    bundles_list : all bundles (list)
    ref_anat : reference
    """
    example_dir = pjoin(dipy_home, 'bundle_file_formats_example')
    ref_anat = pjoin(example_dir, 'template0.nii.gz')
    bundle_fnames = ('cc_m_sub.trk', 'laf_m_sub.tck', 'lpt_m_sub.fib',
                     'raf_m_sub.vtk', 'rpt_m_sub.dpy')
    bundles_list = [pjoin(example_dir, fname) for fname in bundle_fnames]
    return bundles_list, ref_anat
def get_bundle_atlas_hcp842():
    """Return the whole-brain atlas path and a glob over its bundle files.

    Returns
    -------
    file1 : string
    file2 : string
    """
    atlas_dir = pjoin(dipy_home, 'bundle_atlas_hcp842', 'Atlas_80_Bundles')
    file1 = pjoin(atlas_dir, 'whole_brain', 'whole_brain_MNI.trk')
    # file2 is a glob pattern, not a concrete file.
    file2 = pjoin(atlas_dir, 'bundles', '*.trk')
    return file1, file2
def get_two_hcp842_bundles():
    """Return the paths of two example atlas bundles (AF_L and CST_L).

    Returns
    -------
    file1 : string
    file2 : string
    """
    bundles_dir = pjoin(dipy_home, 'bundle_atlas_hcp842',
                        'Atlas_80_Bundles', 'bundles')
    file1 = pjoin(bundles_dir, 'AF_L.trk')
    file2 = pjoin(bundles_dir, 'CST_L.trk')
    return file1, file2
def get_target_tractogram_hcp():
    """Return the local path of the HCP target tractogram.

    Returns
    -------
    file1 : string
    """
    return pjoin(dipy_home, 'target_tractogram_hcp', 'hcp_tractogram',
                 'streamlines.trk')
|
FrancoisRheaultUS/dipy
|
dipy/data/fetcher.py
|
Python
|
bsd-3-clause
| 47,733
|
[
"Brian",
"VTK",
"VisIt"
] |
0fc04600d5058eff023a1ef3d7a489d4318a75e2ff27ffe730db467d680ecb89
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
License: GPLv3 or any later version.
"""
import argparse
import ConfigParser
import datetime
import glob
import json
import logging
import os
import rpm
import subprocess
import shutil
import time
import warnings
from datetime import date
import requests
try:
import pygit2
except ImportError:
pass
try:
import hglib
except ImportError:
pass
# Default location of the dgroc configuration file.
DEFAULT_CONFIG = os.path.expanduser('~/.config/dgroc')
# Base URL of the COPR build service.
COPR_URL = 'https://copr.fedorainfracloud.org/'

# Initial simple logging stuff
logging.basicConfig(format='%(message)s')
# Module-wide logger; its level is adjusted later from the --debug flag.
LOG = logging.getLogger("dgroc")
class DgrocException(Exception):
    """Project-specific exception raised by dgroc.

    Catching this type lets callers handle dgroc's own errors without
    swallowing unrelated exceptions.
    """
class GitReader(object):
    """Default version control backend: git."""
    short = 'git'

    @classmethod
    def init(cls):
        """Re-import pygit2 so a missing dependency raises immediately."""
        import pygit2

    @classmethod
    def clone(cls, url, folder):
        """Clone the repository at ``url`` into ``folder``."""
        pygit2.clone_repository(url, folder)

    @classmethod
    def pull(cls):
        """Run ``git pull`` in the current directory; return the process."""
        return subprocess.Popen(
            ["git", "pull"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

    @classmethod
    def commit_hash(cls, folder):
        """Return the first 8 hex digits of the HEAD commit in ``folder``."""
        repository = pygit2.Repository(folder)
        head_commit = repository[repository.head.target]
        return head_commit.oid.hex[:8]

    @classmethod
    def archive_cmd(cls, project, archive_name):
        """Build the ``git archive`` command producing the source tarball."""
        output_flag = "-o%s/%s" % (get_rpm_sourcedir(), archive_name)
        return ["git", "archive", "--format=tar",
                "--prefix=%s/" % project, output_flag, "HEAD"]
class MercurialReader(object):
    '''Alternative version control system to use: hg.'''
    short = 'hg'

    @classmethod
    def init(cls):
        '''Import the stuff Mercurial needs again and let it raise an
        exception now.'''
        import hglib

    @classmethod
    def clone(cls, url, folder):
        '''Clone the repository at `url` into `folder`.'''
        hglib.clone(url, folder)

    @classmethod
    def pull(cls):
        '''Run `hg pull` in the current directory and return the process.'''
        return subprocess.Popen(["hg", "pull"], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)

    @classmethod
    def commit_hash(cls, folder):
        '''Get the latest commit hash (first 12 hex digits of tip).'''
        repo = hglib.open(folder)
        # Fixed: the original line read ``commit = commit = repo.log(...)``,
        # a harmless but confusing duplicated assignment.
        commit = repo.log('tip')[0]
        return commit.node[:12]

    @classmethod
    def archive_cmd(cls, project, archive_name):
        '''Command to generate the source archive in the rpm _sourcedir.'''
        return ["hg", "archive", "--type=tar", "--prefix=%s/" % project,
                "%s/%s" % (get_rpm_sourcedir(), archive_name)]
def _get_copr_auth(copr_file):
    ''' Return the username, login and API token from the copr configuration
    file.
    '''
    LOG.debug('Reading configuration for copr')
    ## Copr config check
    copr_config_file = os.path.expanduser(copr_file or '~/.config/copr')
    if not os.path.exists(copr_config_file):
        raise DgrocException('No `~/.config/copr` file found.')

    copr_config = ConfigParser.ConfigParser()
    copr_config.read(copr_config_file)

    # All three credentials are mandatory; fail with a targeted message
    # naming whichever one is missing.
    credentials = []
    for option in ('username', 'login', 'token'):
        if not copr_config.has_option('copr-cli', option):
            raise DgrocException(
                'No `%s` specified in the `copr-cli` section of the copr '
                'configuration file.' % option)
        credentials.append(copr_config.get('copr-cli', option))
    return tuple(credentials)
def get_arguments():
    ''' Build the command line parser and return the arguments parsed from
    the command line.
    '''
    parser = argparse.ArgumentParser(description='Daily Git Rebuild On Copr')
    parser.add_argument('--config', dest='config', default=DEFAULT_CONFIG,
                        help='Configuration file to use for dgroc.')
    parser.add_argument('--debug', dest='debug', action='store_true',
                        default=False,
                        help='Expand the level of data returned')
    parser.add_argument('--srpm-only', dest='srpmonly', action='store_true',
                        default=False,
                        help='Generate the new source rpm but do not build '
                             'on copr')
    parser.add_argument('--no-monitoring', dest='monitoring',
                        action='store_false', default=True,
                        help='Upload the srpm to copr and exit (do not '
                             'monitor the build)')
    return parser.parse_args()
def update_spec(spec_file, commit_hash, archive_name, packager, email, reader):
    ''' Update the release tag and changelog of the specified spec file
    to work with the specified commit_hash.

    Parameters
    ----------
    spec_file : path of the RPM spec file, rewritten in place
    commit_hash : short VCS hash embedded in the new release tag
    archive_name : file name written into the Source0 line
    packager, email : identity used for the new changelog entry
    reader : VCS backend class (GitReader/MercurialReader); its ``short``
             attribute is part of the release tag
    '''
    LOG.debug('Update spec file: %s', spec_file)
    # Release suffix of the form YYYYMMDD<scm><hash>, e.g. 20140101gitabcd1234.
    release = '%s%s%s' % (date.today().strftime('%Y%m%d'), reader.short, commit_hash)
    output = []
    version = None
    # Parse the spec up-front (result unused) — presumably validates it and
    # primes rpm's macros for expandMacro below; NOTE(review): confirm.
    rpm.spec(spec_file)
    with open(spec_file) as stream:
        for row in stream:
            row = row.rstrip()
            if row.startswith('Version:'):
                version = row.split('Version:')[1].strip()
            if row.startswith('Release:'):
                # Nothing to do if this commit is already in the release tag.
                if commit_hash in row:
                    raise DgrocException('Spec already up to date')
                LOG.debug('Release line before: %s', row)
                # Keep the part between 'Release:' and '%{?dist' (the
                # possibly dotted numeric release).
                rel_num = row.split('ase:')[1].strip().split('%{?dist')[0]
                rel_list = rel_num.split('.')
                # Drop a previous auto-generated scm component before bumping.
                if reader.short in rel_list[-1]:
                    rel_list = rel_list[:-1]
                if rel_list[-1].isdigit():
                    rel_list[-1] = str(int(rel_list[-1])+1)
                rel_num = '.'.join(rel_list)
                LOG.debug('Release number: %s', rel_num)
                row = 'Release: %s.%s%%{?dist}' % (rel_num, release)
                LOG.debug('Release line after: %s', row)
            if row.startswith('Source0:'):
                row = 'Source0: %s' % (archive_name)
                LOG.debug('Source0 line after: %s', row)
            if row.startswith('%changelog'):
                # Insert a fresh changelog entry right below the header.
                output.append(row)
                output.append(rpm.expandMacro('* %s %s <%s> - %s-%s.%s' % (
                    date.today().strftime('%a %b %d %Y'), packager, email,
                    version, rel_num, release)
                ))
                output.append('- Update to %s: %s' % (reader.short, commit_hash))
                row = ''
            output.append(row)
    with open(spec_file, 'w') as stream:
        for row in output:
            stream.write(row + '\n')
    LOG.info('Spec file updated: %s', spec_file)
def get_rpm_sourcedir():
    ''' Retrieve the _sourcedir for rpm.
    '''
    proc = subprocess.Popen(['rpm', '-E', '%_sourcedir'],
                            stdout=subprocess.PIPE)
    # Drop the trailing newline rpm prints after the expanded macro.
    return proc.stdout.read()[:-1]
def generate_new_srpm(config, project, first=True):
    ''' For a given project in the configuration file generate a new srpm
    if it is possible.

    Parameters
    ----------
    config : ConfigParser instance with the parsed dgroc configuration
    project : name of the configuration section describing the project
    first : internal flag — on a failed pull the checkout is wiped and this
            function recurses once with ``first=False``

    Returns
    -------
    Path of the freshly built source rpm, or None when nothing changed or
    the pull/rpmbuild failed.
    '''
    # Pick the version-control backend; git is the default.
    if not config.has_option(project, 'scm') or config.get(project, 'scm') == 'git':
        reader = GitReader
    elif config.get(project, 'scm') == 'hg':
        reader = MercurialReader
    else:
        raise DgrocException(
            'Project "%s" tries to use unknown "scm" option'
            % project)
    reader.init()

    LOG.debug('Generating new source rpm for project: %s', project)
    if not config.has_option(project, '%s_folder' % reader.short):
        raise DgrocException(
            'Project "%s" does not specify a "%s_folder" option'
            % (project, reader.short))
    if not config.has_option(project, '%s_url' % reader.short) and not os.path.exists(
            config.get(project, '%s_folder' % reader.short)):
        raise DgrocException(
            'Project "%s" does not specify a "%s_url" option and its '
            '"%s_folder" option does not exists' % (project, reader.short, reader.short))
    if not config.has_option(project, 'spec_file'):
        raise DgrocException(
            'Project "%s" does not specify a "spec_file" option'
            % project)

    # Clone if needed
    git_folder = config.get(project, '%s_folder' % reader.short)
    if '~' in git_folder:
        git_folder = os.path.expanduser(git_folder)

    if not os.path.exists(git_folder):
        git_url = config.get(project, '%s_url' % reader.short)
        LOG.info('Cloning %s', git_url)
        reader.clone(git_url, git_folder)

    # Update the local checkout
    cwd = os.getcwd()
    os.chdir(git_folder)
    pull = reader.pull()
    out = pull.communicate()
    os.chdir(cwd)
    if pull.returncode:
        LOG.info('Strange result of the %s pull:\n%s', reader.short, out[0])
        if first:
            # One retry: wipe the checkout and re-clone from scratch.
            LOG.info('Gonna try to re-clone the project')
            shutil.rmtree(git_folder)
            generate_new_srpm(config, project, first=False)
        return

    # Retrieve last commit
    commit_hash = reader.commit_hash(git_folder)
    LOG.info('Last commit: %s', commit_hash)

    # Check if the commit changed since the last run.
    # BUGFIX: the original passed the literal string '%s_hash % reader.short'
    # (the % operator was INSIDE the quotes) to config.set/config.get, so the
    # stored option name was wrong and the "hash differs" branch raised
    # ConfigParser.NoOptionError instead of triggering a rebuild.
    hash_option = '%s_hash' % reader.short
    changed = False
    if not config.has_option(project, hash_option):
        config.set(project, hash_option, commit_hash)
        changed = True
    elif config.get(project, hash_option) != commit_hash:
        changed = True

    if not changed:
        return

    # Build the source archive
    cwd = os.getcwd()
    os.chdir(git_folder)
    archive_name = "%s-%s.tar" % (project, commit_hash)
    cmd = reader.archive_cmd(project, archive_name)
    LOG.debug('Command to generate archive: %s', ' '.join(cmd))
    pull = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out = pull.communicate()
    os.chdir(cwd)

    # Update spec file
    spec_file = config.get(project, 'spec_file')
    if '~' in spec_file:
        spec_file = os.path.expanduser(spec_file)
    update_spec(
        spec_file,
        commit_hash,
        archive_name,
        config.get('main', 'username'),
        config.get('main', 'email'),
        reader)

    # Copy patches next to the sources so rpmbuild can find them.
    if config.has_option(project, 'patch_files'):
        LOG.info('Copying patches')
        candidates = config.get(project, 'patch_files').split(',')
        candidates = [candidate.strip() for candidate in candidates]
        for candidate in candidates:
            LOG.debug('Expanding path: %s', candidate)
            candidate = os.path.expanduser(candidate)
            patches = glob.glob(candidate)
            if not patches:
                LOG.info('Could not expand path: `%s`', candidate)
            for patch in patches:
                filename = os.path.basename(patch)
                dest = os.path.join(get_rpm_sourcedir(), filename)
                LOG.debug('Copying from %s, to %s', patch, dest)
                shutil.copy(patch, dest)

    # Generate the SRPM; LANG=C keeps rpmbuild's output parseable below.
    env = os.environ
    env['LANG'] = 'C'
    build = subprocess.Popen(
        ["rpmbuild", "-bs", spec_file],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env)
    out = build.communicate()
    os.chdir(cwd)
    if build.returncode:
        LOG.info(
            'Strange result of the rpmbuild -bs:\n stdout:%s\n stderr:%s',
            out[0],
            out[1]
        )
        return
    # rpmbuild prints 'Wrote: <path>' on success; extract the path.
    srpm = out[0].split('Wrote:')[1].strip()
    LOG.info('SRPM built: %s', srpm)
    return srpm
def upload_srpms(config, srpms):
    ''' Using the information provided in the configuration file,
    upload the src.rpm generated somewhere.
    '''
    if not config.has_option('main', 'upload_command'):
        LOG.info(
            'No `upload_command` specified in the `main` section of the '
            'configuration file. Attempting to build by direct upload.')
        return

    upload_command = config.get('main', 'upload_command')
    for srpm in srpms:
        LOG.debug('Uploading source rpm: %s', srpm)
        # The command template contains a single %s for the srpm path.
        command = upload_command % srpm
        if subprocess.call(command, shell=True):
            LOG.info('Strange result with the command: `%s`', command)
def get_project_id(copr_url, username, copr, insecure=False):
    ''' Given username and COPR name, find its internal id. '''
    query = dict(owner=username, name=copr)
    try:
        response = requests.get('%s/api_2/projects' % copr_url,
                                params=query, verify=not insecure)
        # The API returns a list of matches; the first one is ours.
        first_match = response.json()['projects'][0]
        return first_match['project']['id']
    except (ValueError, KeyError, IndexError):
        raise DgrocException(
            'Failed to find project id of %s/%s' % (username, copr))
def get_chroots(copr_url, project_id, insecure=False):
    ''' Given a project id, obtain list of names of enabled chroots. '''
    endpoint = '%s/api_2/projects/%s/chroots' % (copr_url, project_id)
    try:
        response = requests.get(endpoint, verify=not insecure)
        chroots = response.json()['chroots']
        return [entry['chroot']['name'] for entry in chroots]
    except (ValueError, KeyError, IndexError):
        raise DgrocException(
            'Failed to find chroots for project %s.' % project_id)
def copr_build(config, srpms):
    ''' Using the information provided in the configuration file,
    run the build in copr.

    Parameters
    ----------
    config : ConfigParser instance with the parsed dgroc configuration
    srpms : mapping of project name -> path of the srpm built for it

    Returns
    -------
    build_ids : list of identifiers of the builds submitted to copr
    '''
    # dgroc config check
    if config.has_option('main', 'upload_command') and \
            not config.has_option('main', 'upload_url'):
        raise DgrocException(
            'No `upload_url` specified in the `main` section of the dgroc '
            'configuration file.')
    if not config.has_option('main', 'copr_url'):
        warnings.warn(
            'No `copr_url` option set in the `main` section of the dgroc '
            'configuration file, using default: %s' % COPR_URL)
        copr_url = COPR_URL
    else:
        copr_url = config.get('main', 'copr_url')
    copr_url = copr_url.rstrip('/')
    insecure = False
    if config.has_option('main', 'no_ssl_check') \
            and config.get('main', 'no_ssl_check'):
        warnings.warn(
            "Option `no_ssl_check` was set to True, we won't check the ssl "
            "certificate when submitting the builds to copr")
        insecure = config.get('main', 'no_ssl_check')

    copr_config = config.get('main', 'copr_config')
    username, login, token = _get_copr_auth(copr_config)

    build_ids = []
    # Build project/srpm in copr
    for project in srpms:
        # A project may build into a differently-named copr via the
        # per-project `copr` option; otherwise the project name is used.
        if config.has_option(project, 'copr'):
            copr = config.get(project, 'copr')
        else:
            copr = project
        project_id = get_project_id(copr_url, username, copr, insecure=insecure)
        metadata = {
            'project_id': project_id,
            'chroots': get_chroots(copr_url, project_id, insecure=insecure),
        }
        url = '%s/api_2/builds' % (copr_url)
        srpm_name = os.path.basename(srpms[project])
        if config.has_option('main', 'upload_command'):
            # SRPMs are uploaded to remote location.
            srpm_file = config.get('main', 'upload_url') % srpm_name
            metadata['srpm_url'] = srpm_file
            req = requests.post(
                url, auth=(login, token), json=metadata, verify=not insecure)
        else:
            # Directly upload SRPM to COPR
            files = {
                'srpm': (srpm_name, open(srpms[project], 'rb'),
                         'application/x-rpm'),
                'metadata': ('', json.dumps(metadata)),
            }
            req = requests.post(
                url, auth=(login, token), files=files, verify=not insecure)
        if req.status_code != requests.codes.created:
            LOG.error('Failed to start build in COPR')
            LOG.error('Status code was %d: %s', req.status_code, req.reason)
            try:
                LOG.error(req.json()['message'])
            except ValueError:
                LOG.error(req.text)
        # The build id is the trailing path segment of the Location header.
        # NOTE(review): this is read even when the POST failed above, where
        # the header may be absent — confirm intended behavior.
        build_url = req.headers['Location']
        build_id = build_url.split('/')[-1]
        build_ids.append(build_id)
    return build_ids
def check_copr_build(config, build_ids):
    ''' Check the status of builds running in copr.

    :arg config: ConfigParser object holding the dgroc configuration
        (``main`` section: ``copr_url``, ``no_ssl_check``, ``copr_config``).
    :arg build_ids: list of copr build identifiers to poll.
    :return: the subset of ``build_ids`` whose status is still ``pending``
        or ``running``, or ``None`` when polling is aborted (invalid API
        token, unreadable server response, or a non-200 status code).
    '''
    ## dgroc config check
    if not config.has_option('main', 'copr_url'):
        warnings.warn(
            'No `copr_url` option set in the `main` section of the dgroc '
            'configuration file, using default: %s' % COPR_URL)
        copr_url = COPR_URL
    else:
        copr_url = config.get('main', 'copr_url')
        # This legacy /api/ endpoint expects a trailing slash.
        if not copr_url.endswith('/'):
            copr_url = '%s/' % copr_url
    insecure = False
    if config.has_option('main', 'no_ssl_check') \
            and config.get('main', 'no_ssl_check'):
        warnings.warn(
            "Option `no_ssl_check` was set to True, we won't check the ssl "
            "certificate when submitting the builds to copr")
        # Any truthy string disables certificate verification below.
        insecure = config.get('main', 'no_ssl_check')
    copr_config = config.get('main', 'copr_config')
    username, login, token = _get_copr_auth(copr_config)
    build_ip = []
    ## Build project/srpm in copr
    for build_id in build_ids:
        URL = '%s/api/coprs/build_status/%s/' % (
            copr_url,
            build_id)
        req = requests.get(
            URL, auth=(login, token), verify=not insecure)
        # An expired/invalid token redirects to the sign-in page.
        if '<title>Sign in Coprs</title>' in req.text:
            LOG.info("Invalid API token")
            return
        if req.status_code == 404:
            LOG.info("Build %s not found.", build_id)
        # NOTE(review): a 404 falls through to the JSON parse below
        # instead of skipping this build id — confirm that is intended.
        try:
            output = req.json()
        except ValueError:
            LOG.info("Unknown response from server.")
            LOG.debug(req.url)
            LOG.debug(req.text)
            return
        if req.status_code != 200:
            LOG.info("Something went wrong:\n %s", output['error'])
            return
        LOG.info(' Build %s: %s', build_id, output)
        # Keep polling only the builds that have not finished yet.
        if output['status'] in ('pending', 'running'):
            build_ip.append(build_id)
    return build_ip
def main():
'''
'''
# Retrieve arguments
args = get_arguments()
global LOG
#global LOG
if args.debug:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.INFO)
# Read configuration file
config = ConfigParser.ConfigParser(defaults={'copr_config': None})
config.read(args.config)
if not config.has_option('main', 'username'):
raise DgrocException(
'No `username` specified in the `main` section of the '
'configuration file.')
if not config.has_option('main', 'email'):
raise DgrocException(
'No `email` specified in the `main` section of the '
'configuration file.')
srpms = {}
for project in config.sections():
if project == 'main':
continue
LOG.info('Processing project: %s', project)
try:
srpm = generate_new_srpm(config, project)
if srpm:
srpms[project] = srpm
except DgrocException, err:
LOG.info('%s: %s', project, err)
LOG.info('%s srpms generated', len(srpms))
if not srpms:
return
if args.srpmonly:
return
try:
upload_srpms(config, srpms.values())
except DgrocException, err:
LOG.info(err)
try:
build_ids = copr_build(config, srpms)
except DgrocException, err:
LOG.info(err)
if args.monitoring:
LOG.info('Monitoring %s builds...', len(build_ids))
while build_ids:
time.sleep(45)
LOG.info(datetime.datetime.now())
build_ids = check_copr_build(config, build_ids)
if __name__ == '__main__':
    main()

# Commented-out snippet the author kept for manually re-checking a known
# build id without going through main(); intentionally left in place.
#build_ids = [6065]
#config = ConfigParser.ConfigParser()
#config.read(DEFAULT_CONFIG)
#print 'Monitoring builds...'
#build_ids = check_copr_build(config, build_ids)
#while build_ids:
    #time.sleep(45)
    #print datetime.datetime.now()
    #build_ids = check_copr_build(config, build_ids)
|
pypingou/dgroc
|
dgroc.py
|
Python
|
gpl-3.0
| 20,755
|
[
"ASE"
] |
b0132aa6a07ef604bdabf090dab42755eeff5f3124ab9c3f056a7287f4cf3b55
|
import numpy as np
from ase.asec.command import Command
class ReactionCommand(Command):
    """asec sub-command that balances a reaction between the named
    systems and reports the resulting reaction energy.

    Stoichiometric coefficients are taken from the SVD of the
    (systems x elements) composition matrix, using the singular
    direction with the smallest singular value (approximate null
    direction), normalised so the first system has coefficient 1.
    """

    @classmethod
    def add_parser(cls, subparser):
        # Register the `reaction` sub-command with the CLI parser.
        parser = subparser.add_parser('reaction', help='reaction ...')

    def __init__(self, logfile, args):
        Command.__init__(self, logfile, args)
        # name -> Counter of atomic numbers for that system.
        self.count = {}
        # Previously computed results (energies) read from the data store.
        self.data = self.read()

    def run(self, atoms, name):
        from collections import Counter
        # Record the elemental composition of this system.
        self.count[name] = Counter(atoms.numbers)

    def finalize(self):
        # Union of all atomic numbers seen in any system; set iteration
        # order is fixed within this process, so the same column order is
        # used for every row of the matrix below.
        numbers = set()
        for c in self.count.values():
            numbers.update(c.keys())
        print numbers
        # Composition matrix: one row per named system, one column per
        # element (count of that element in the system).
        a = []
        for name in self.args.names:
            a.append([self.count[name].get(Z, 0) for Z in numbers])
        print a
        u, s, v = np.linalg.svd(a)
        # Last left-singular vector ~ balanced-reaction coefficients.
        coefs = u[:, -1] / u[0, -1]
        energy = 0.0
        for c, name in zip(coefs, self.args.names):
            e = self.data[name]['energy']
            energy += c * e
            print '%10.5f %f %s' % (c, e, name)
        # NOTE(review): unit label printed as 'ev' — presumably eV.
        print energy, 'ev'
|
alexei-matveev/ase-local
|
ase/asec/reaction.py
|
Python
|
gpl-2.0
| 1,068
|
[
"ASE"
] |
f070dd73d4d98d58849631e13b5028f2860978a39267bae3a976b7e566569d2c
|
#!/usr/bin/python
import os, sys
ncs_lib_path = ('../../../')
sys.path.append(ncs_lib_path)
import ncs
def run(argv):
    """Build and run a one-neuron NCS simulation of an Izhikevich
    'fast spiking' cell driven by a rectangular current stimulus, and
    dump the membrane voltage report to ./fast_spiking.txt.
    """
    sim = ncs.Simulation()
    # Izhikevich model parameters for the fast-spiking phenotype.
    fast_spiking_parameters = sim.addNeuron("fast_spiking","izhikevich",
                                {
                                 "a": 0.1,
                                 "b": 0.3,
                                 "c": -55.0,
                                 "d": 2.0,
                                 "u": -12.0,
                                 "v": -65.0,
                                 "threshold": 30,
                                })
    group_1=sim.addNeuronGroup("group_1",1,fast_spiking_parameters,None)
    if not sim.init(argv):
        print "failed to initialize simulation."
        return
    # Rectangular current: amplitude 10, applied to the whole group
    # from t=0.01 to t=1.0 (units per NCS convention — confirm).
    sim.addStimulus("rectangular_current",{"amplitude":10,"width": 1, "frequency": 1},group_1,1,0.01,1.0)
    voltage_report=sim.addReport("group_1","neuron","neuron_voltage",1.0,0.0,0.01)
    #Place report file in current directory
    voltage_report.toAsciiFile("./fast_spiking.txt")
    sim.run(duration=0.01)
    return

if __name__ == "__main__":
    run(sys.argv)
|
BrainComputationLab/ncs
|
python/samples/models/izh/fast_spiking_izh.py
|
Python
|
bsd-2-clause
| 881
|
[
"NEURON"
] |
26e6dc5ac3ff0423d933599de54028757492d728a77b19fc5792ae24d6f3bb5a
|
#!/usr/bin/env python3
#
# Copyright 2014 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# This script generates either header or implementation file from
# a CUDA header files.
#
# Usage: cuew hdr|impl [/path/to/cuda/includes]
# - hdr means header file will be generated and printed to stdout.
# - impl means implementation file will be generated and printed to stdout.
# - /path/to/cuda/includes is a path to a folder with cuda.h and cudaGL.h
# for which wrangler will be generated.
import os
import sys
from cuda_errors import CUDA_ERRORS
from pycparser import c_parser, c_ast, parse_file
from subprocess import Popen, PIPE
INCLUDE_DIR = "/usr/include"
LIB = "CUEW"
REAL_LIB = "CUDA"
VERSION_MAJOR = "1"
VERSION_MINOR = "2"
COPYRIGHT = """/*
* Copyright 2011-2014 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/"""
FILES = ["cuda.h", "cudaGL.h"]
TYPEDEFS = []
FUNC_TYPEDEFS = []
SYMBOLS = []
DEFINES = []
DEFINES_V2 = []
ERRORS = []
class FuncDefVisitor(c_ast.NodeVisitor):
    """pycparser AST visitor that walks the preprocessed CUDA headers and
    fills the module-level accumulators: TYPEDEFS (plain/struct/enum
    typedefs), FUNC_TYPEDEFS (function-pointer typedefs), SYMBOLS
    (function names to wrangle) and ERRORS (CUDA_ERROR_* enumerators).
    """
    indent = 0            # current nesting depth for pretty-printed members
    prev_complex = False  # whether the previous typedef was a struct/enum
    dummy_typedefs = ['size_t', 'CUdeviceptr']  # handled specially, skipped

    def _get_quals_string(self, node):
        # Stringify type qualifiers (const, volatile, ...), if any.
        if node.quals:
            return ' '.join(node.quals) + ' '
        return ''

    def _get_ident_type(self, node):
        # Recursively render the C type of a declaration node as text.
        if isinstance(node, c_ast.PtrDecl):
            return self._get_ident_type(node.type.type) + '*'
        if isinstance(node, c_ast.ArrayDecl):
            return self._get_ident_type(node.type)
        elif isinstance(node, c_ast.Struct):
            if node.name:
                return 'struct ' + node.name
            else:
                # Anonymous struct: expand its members inline.
                self.indent += 1
                struct = self._stringify_struct(node)
                self.indent -= 1
                return "struct {\n" + \
                       struct + (" " * self.indent) + "}"
        elif isinstance(node, c_ast.Union):
            self.indent += 1
            union = self._stringify_struct(node)
            self.indent -= 1
            return "union {\n" + union + (" " * self.indent) + "}"
        elif isinstance(node, c_ast.Enum):
            return 'enum ' + node.name
        elif isinstance(node, c_ast.TypeDecl):
            return self._get_ident_type(node.type)
        else:
            return node.names[0]

    def _stringify_param(self, param):
        # Render one function parameter or struct member as C source.
        param_type = param.type
        result = self._get_quals_string(param)
        result += self._get_ident_type(param_type)
        if param.name:
            result += ' ' + param.name
        if isinstance(param_type, c_ast.ArrayDecl):
            # TODO(sergey): Workaround to deal with the
            # preprocessed file where array size got
            # substituded.
            dim = param_type.dim.value
            if param.name == "reserved" and dim == "64":
                dim = "CU_IPC_HANDLE_SIZE"
            result += '[' + dim + ']'
        return result

    def _stringify_params(self, params):
        # Comma-separated parameter list.
        result = []
        for param in params:
            result.append(self._stringify_param(param))
        return ', '.join(result)

    def _stringify_struct(self, node):
        # One "  member;" line per struct/union field.
        result = ""
        children = node.children()
        for child in children:
            member = self._stringify_param(child[1])
            result += (" " * self.indent) + member + ";\n"
        return result

    def _stringify_enum(self, node):
        # One "  NAME = value," line per enumerator; also harvest the
        # CUDA_ERROR_* names for later cuewErrorString() generation.
        result = ""
        children = node.children()
        for child in children:
            if isinstance(child[1], c_ast.EnumeratorList):
                enumerators = child[1].enumerators
                for enumerator in enumerators:
                    result += (" " * self.indent) + enumerator.name
                    if enumerator.value:
                        result += " = " + enumerator.value.value
                    result += ",\n"
                    if enumerator.name.startswith("CUDA_ERROR_"):
                        ERRORS.append(enumerator.name)
        return result

    def visit_Decl(self, node):
        # Each function declaration becomes `typedef <ret> CUDAAPI
        # t<name>(<params>);` plus an entry in SYMBOLS.
        if node.type.__class__.__name__ == 'FuncDecl':
            if isinstance(node.type, c_ast.FuncDecl):
                func_decl = node.type
                func_decl_type = func_decl.type
                typedef = 'typedef '
                symbol_name = None
                if isinstance(func_decl_type, c_ast.TypeDecl):
                    # Function returning a plain type.
                    symbol_name = func_decl_type.declname
                    typedef += self._get_quals_string(func_decl_type)
                    typedef += self._get_ident_type(func_decl_type.type)
                    typedef += ' CUDAAPI'
                    typedef += ' t' + symbol_name
                elif isinstance(func_decl_type, c_ast.PtrDecl):
                    # Function returning a pointer type.
                    ptr_type = func_decl_type.type
                    symbol_name = ptr_type.declname
                    typedef += self._get_quals_string(ptr_type)
                    typedef += self._get_ident_type(func_decl_type)
                    typedef += ' CUDAAPI'
                    typedef += ' t' + symbol_name
                typedef += '(' + \
                    self._stringify_params(func_decl.args.params) + \
                    ');'
                SYMBOLS.append(symbol_name)
                FUNC_TYPEDEFS.append(typedef)

    def visit_Typedef(self, node):
        if node.name in self.dummy_typedefs:
            return
        complex = False
        type = self._get_ident_type(node.type)
        quals = self._get_quals_string(node)
        if isinstance(node.type.type, c_ast.Struct):
            self.indent += 1
            struct = self._stringify_struct(node.type.type)
            self.indent -= 1
            typedef = quals + type + " {\n" + struct + "} " + node.name
            complex = True
        elif isinstance(node.type.type, c_ast.Enum):
            self.indent += 1
            enum = self._stringify_enum(node.type.type)
            self.indent -= 1
            typedef = quals + type + " {\n" + enum + "} " + node.name
            complex = True
        else:
            typedef = quals + type + " " + node.name
        # Blank line before struct/enum typedefs for readable output.
        if complex or self.prev_complex:
            typedef = "\ntypedef " + typedef + ";"
        else:
            typedef = "typedef " + typedef + ";"
        TYPEDEFS.append(typedef)
        self.prev_complex = complex
def get_latest_cpp():
    """Return the path of the newest installed GCC 4.x C preprocessor
    (searching cpp-4.9 down to cpp-4.4 in /usr/bin), or None if absent.
    """
    candidates = ("/usr/bin/cpp-4.%s" % minor
                  for minor in ("9", "8", "7", "6", "5", "4"))
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
def preprocess_file(filename, cpp_path):
    """Run the C preprocessor *cpp_path* on *filename* and return its
    stdout as text.

    Raises RuntimeError when the preprocessor binary cannot be invoked.
    """
    command = [cpp_path, "-I./"]
    if filename.endswith("GL.h"):
        # cudaGL.h is preprocessed with CUDAAPI stubbed out.
        command.append("-DCUDAAPI= ")
    command.append(filename)
    try:
        process = Popen(command,
                        stdout=PIPE,
                        universal_newlines=True)
        output, _ = process.communicate()
    except OSError as error:
        raise RuntimeError("Unable to invoke 'cpp'. "
                           'Make sure its path was passed correctly\n'
                           'Original error: %s' % error)
    return output
def parse_files():
    """Preprocess and parse every header in FILES, populating the
    module-level TYPEDEFS, FUNC_TYPEDEFS, SYMBOLS, DEFINES, DEFINES_V2
    and ERRORS accumulators.
    """
    parser = c_parser.CParser()
    cpp_path = get_latest_cpp()
    for filename in FILES:
        filepath = os.path.join(INCLUDE_DIR, filename)
        dummy_typedefs = {}
        text = preprocess_file(filepath, cpp_path)
        if filepath.endswith("GL.h"):
            # cudaGL.h relies on types from cuda.h and OpenGL; inject
            # minimal stand-in typedefs so it parses on its own.
            dummy_typedefs = {
                "CUresult": "int",
                "CUgraphicsResource": "void *",
                "CUdevice": "void *",
                "CUcontext": "void *",
                "CUdeviceptr": "void *",
                "CUstream": "void *"
            }
            text = "typedef int GLint;\n" + text
            text = "typedef unsigned int GLuint;\n" + text
            text = "typedef unsigned int GLenum;\n" + text
            text = "typedef long size_t;\n" + text
        for typedef in sorted(dummy_typedefs):
            text = "typedef " + dummy_typedefs[typedef] + " " + \
                   typedef + ";\n" + text
        ast = parser.parse(text, filepath)
        with open(filepath) as f:
            lines = f.readlines()
        # Collect top-level #define lines from the raw (un-preprocessed)
        # header; [8:-1] strips the "#define " prefix and trailing newline.
        for line in lines:
            if line.startswith("#define"):
                line = line[8:-1]
                token = line.split()
                if token[0] not in ("__cuda_cuda_h__",
                                    "CUDA_CB",
                                    "CUDAAPI"):
                    DEFINES.append(token)
        # Collect indented #define lines mapping old names to _v2 names.
        for line in lines:
            # TODO(sergey): Use better matching rule for _v2 symbols.
            if line[0].isspace() and line.lstrip().startswith("#define"):
                line = line[12:-1]
                token = line.split()
                if len(token) == 2 and (token[1].endswith("_v2") or
                                        token[1].endswith("_v2)")):
                    # Strip the __CUDA_API_PTDS(...)/__CUDA_API_PTSZ(...)
                    # wrapper around the symbol name.
                    if token[1].startswith('__CUDA_API_PTDS') or \
                       token[1].startswith('__CUDA_API_PTSZ'):
                        token[1] = token[1][16:-1]
                    DEFINES_V2.append(token)
        v = FuncDefVisitor()
        for typedef in dummy_typedefs:
            v.dummy_typedefs.append(typedef)
        v.visit(ast)
        # Empty entries produce blank separator lines between the
        # symbols of consecutive headers in the generated output.
        FUNC_TYPEDEFS.append('')
        SYMBOLS.append('')
def print_copyright():
    """Emit the license header followed by a blank line."""
    print(COPYRIGHT, end="\n\n")
def open_header_guard():
    """Emit the opening include guard and the extern "C" block."""
    prologue = [
        "#ifndef __%s_H__" % (LIB),
        "#define __%s_H__" % (LIB),
        "",
        "#ifdef __cplusplus",
        'extern "C" {',
        "#endif",
        "",
    ]
    print("\n".join(prologue))
def close_header_guard():
    """Emit the closing extern "C" block and include guard."""
    epilogue = [
        "",
        "#ifdef __cplusplus",
        "}",
        "#endif",
        "",
        "#endif /* __%s_H__ */" % (LIB),
    ]
    print("\n".join(epilogue))
def print_header():
    """Generate the cuew header file (cuew.h) on stdout from the
    accumulators filled by parse_files().
    """
    print_copyright()
    open_header_guard()
    # For size_t.
    print("#include <stdlib.h>")
    print("")
    print("/* Defines. */")
    print("#define %s_VERSION_MAJOR %s" % (LIB, VERSION_MAJOR))
    print("#define %s_VERSION_MINOR %s" % (LIB, VERSION_MINOR))
    print("")
    for define in DEFINES:
        print('#define %s' % (' '.join(define)))
    print("")
    print("""/* Functions which changed 3.1 -> 3.2 for 64 bit stuff,
 * the cuda library has both the old ones for compatibility and new
 * ones with _v2 postfix,
 */""")
    for define in DEFINES_V2:
        print('#define %s' % (' '.join(define)))
    print("")
    print("/* Types. */")
    # We handle this specially because of the file is
    # getting preprocessed.
    print("""#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
typedef unsigned long long CUdeviceptr;
#else
typedef unsigned int CUdeviceptr;
#endif
""")
    for typedef in TYPEDEFS:
        print('%s' % (typedef))
    # TODO(sergey): This is only specific to CUDA wrapper.
    print("""
#ifdef _WIN32
#  define CUDAAPI __stdcall
#  define CUDA_CB __stdcall
#else
#  define CUDAAPI
#  define CUDA_CB
#endif
""")
    print("/* Function types. */")
    for func_typedef in FUNC_TYPEDEFS:
        print('%s' % (func_typedef))
    print("")
    print("/* Function declarations. */")
    for symbol in SYMBOLS:
        if symbol:
            # Each API entry point is exposed as a function pointer.
            print('extern t%s *%s;' % (symbol, symbol))
        else:
            # Empty entry -> blank separator line between headers.
            print("")
    print("")
    print("enum {")
    print("  CUEW_SUCCESS = 0,")
    print("  CUEW_ERROR_OPEN_FAILED = -1,")
    print("  CUEW_ERROR_ATEXIT_FAILED = -2,")
    print("};")
    print("")
    print("int %sInit(void);" % (LIB.lower()))
    # TODO(sergey): Get rid of hardcoded CUresult.
    print("const char *%sErrorString(CUresult result);" % (LIB.lower()))
    print("const char *cuewCompilerPath(void);")
    print("int cuewCompilerVersion(void);")
    close_header_guard()
def print_dl_wrapper():
    """Emit the cross-platform dynamic-library macros (LoadLibrary on
    Windows, dlopen elsewhere) used by the generated loader.
    """
    print("""#ifdef _WIN32
#  define WIN32_LEAN_AND_MEAN
#  define VC_EXTRALEAN
#  include <windows.h>
/* Utility macros. */
typedef HMODULE DynamicLibrary;
#  define dynamic_library_open(path)         LoadLibrary(path)
#  define dynamic_library_close(lib)         FreeLibrary(lib)
#  define dynamic_library_find(lib, symbol)  GetProcAddress(lib, symbol)
#else
#  include <dlfcn.h>
typedef void* DynamicLibrary;
#  define dynamic_library_open(path)         dlopen(path, RTLD_NOW)
#  define dynamic_library_close(lib)         dlclose(lib)
#  define dynamic_library_find(lib, symbol)  dlsym(lib, symbol)
#endif
""")
def print_dl_helper_macro():
    """Emit the symbol-lookup helper macros (checked and unchecked) and
    the static library handle used by the generated loader.
    """
    print("""#define %s_LIBRARY_FIND_CHECKED(name) \\
        name = (t##name *)dynamic_library_find(lib, #name); \\
        assert(name);

#define %s_LIBRARY_FIND(name) \\
        name = (t##name *)dynamic_library_find(lib, #name);

static DynamicLibrary lib;""" % (REAL_LIB, REAL_LIB))
    print("")
def print_dl_close():
    """Emit the atexit handler that closes the dynamic library."""
    print("""static void %sExit(void) {
  if(lib != NULL) {
    /* Ignore errors. */
    dynamic_library_close(lib);
    lib = NULL;
  }
}""" % (LIB.lower()))
    print("")
def print_lib_path():
    """Emit the per-platform default path of the CUDA driver library."""
    # TODO(sergey): get rid of hardcoded libraries.
    print("""#ifdef _WIN32
  /* Expected in c:/windows/system or similar, no path needed. */
  const char *path = "nvcuda.dll";
#elif defined(__APPLE__)
  /* Default installation path. */
  const char *path = "/usr/local/cuda/lib/libcuda.dylib";
#else
  const char *path = "libcuda.so";
#endif""")
def print_init_guard():
    """Emit the one-shot initialisation guard, the atexit registration
    and the dynamic-library open sequence of cuewInit().
    """
    print("""  static int initialized = 0;
  static int result = 0;
  int error, driver_version;
  if (initialized) {
    return result;
  }
  initialized = 1;
  error = atexit(cuewExit);
  if (error) {
    result = CUEW_ERROR_ATEXIT_FAILED;
    return result;
  }
  /* Load library. */
  lib = dynamic_library_open(path);
  if (lib == NULL) {
    result = CUEW_ERROR_OPEN_FAILED;
    return result;
  }""")
    print("")
def print_driver_version_guard():
    """Emit the runtime check that the loaded CUDA driver is >= 4.0."""
    # TODO(sergey): Currently it's hardcoded for CUDA only.
    print("""  /* Detect driver version. */
  driver_version = 1000;
  %s_LIBRARY_FIND_CHECKED(cuDriverGetVersion);
  if (cuDriverGetVersion) {
    cuDriverGetVersion(&driver_version);
  }
  /* We require version 4.0. */
  if (driver_version < 4000) {
    result = CUEW_ERROR_OPEN_FAILED;
    return result;
  }""" % (REAL_LIB))
def print_dl_init():
    """Emit the cuewInit() implementation: open the driver library and
    resolve every collected symbol into its function pointer.
    """
    print("int %sInit(void) {" % (LIB.lower()))
    print("  /* Library paths. */")
    print_lib_path()
    print_init_guard()
    print_driver_version_guard()
    print("  /* Fetch all function pointers. */")
    for symbol in SYMBOLS:
        if symbol:
            print("  %s_LIBRARY_FIND(%s);" % (REAL_LIB, symbol))
        else:
            # Empty entry -> blank separator line between headers.
            print("")
    print("")
    print("  result = CUEW_SUCCESS;")
    print("  return result;")
    print("}")
def print_implementation():
    """Generate the cuew implementation file (cuew.c) on stdout: the
    loader, the function-pointer definitions and cuewErrorString().
    """
    print_copyright()
    # TODO(sergey): Get rid of hardcoded header.
    print("""#ifdef _MSC_VER
#  define snprintf _snprintf
#  define popen _popen
#  define pclose _pclose
#  define _CRT_SECURE_NO_WARNINGS
#endif
""")
    print("#include <cuew.h>")
    print("#include <assert.h>")
    print("#include <stdio.h>")
    print("#include <string.h>")
    print("#include <sys/stat.h>")
    print("")
    print_dl_wrapper()
    print_dl_helper_macro()
    print("/* Function definitions. */")
    for symbol in SYMBOLS:
        if symbol:
            print('t%s *%s;' % (symbol, symbol))
        else:
            # Empty entry -> blank separator line between headers.
            print("")
    print("")
    print_dl_close()
    print("/* Implementation function. */")
    print_dl_init()
    print("")
    # TODO(sergey): Get rid of hardcoded CUresult.
    print("const char *%sErrorString(CUresult result) {" % (LIB.lower()))
    print("  switch(result) {")
    print("    case CUDA_SUCCESS: return \"No errors\";")
    for error in ERRORS:
        # Prefer a hand-written message from CUDA_ERRORS, otherwise
        # derive one from the enumerator name ("CUDA_ERROR_FOO_BAR" ->
        # "Foo bar").  NOTE(review): `str` shadows the builtin here.
        if error in CUDA_ERRORS:
            str = CUDA_ERRORS[error]
        else:
            temp = error[11:].replace('_', ' ')
            str = temp[0] + temp[1:].lower()
        print("    case %s: return \"%s\";" % (error, str))
    print("    default: return \"Unknown CUDA error value\";")
    print("  }")
    print("}")
    # Hand-written extra helpers appended verbatim at the end.
    from cuda_extra import extra_code
    print(extra_code)
if __name__ == "__main__":
    # Usage: cuew_gen.py hdr|impl [/path/to/cuda/toolkit/include]
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print("Usage: %s hdr|impl [/path/to/cuda/toolkit/include]" %
              (sys.argv[0]))
        exit(1)
    if len(sys.argv) == 3:
        # Optional second argument overrides the default include dir.
        INCLUDE_DIR = sys.argv[2]
    parse_files()
    if sys.argv[1] == "hdr":
        print_header()
    elif sys.argv[1] == "impl":
        print_implementation()
    else:
        print("Unknown command %s" % (sys.argv[1]))
        exit(1)
|
pawkoz/dyplom
|
blender/extern/cuew/auto/cuew_gen.py
|
Python
|
gpl-2.0
| 17,548
|
[
"VisIt"
] |
7baf4d9672361937d22cc5e7a66d7b755dfbad8049d0db9433eed9a2d47ed548
|
#!/usr/bin/env python
from pyscf import gto
from pyscf import dft
'''
Tune DFT grids
By default, the DFT integration grids use
* Bragg radius for atom
* Treutler-Ahlrichs radial grids
* Becke partition for grid weights
* NWChem pruning scheme
* mesh grids
===================================
Elements radial part angular part
-------- ----------- ------------
H, He 50 302
Li - Ne 75 302
Na - Ar 80 434
K - Kr 90 434
Rb - Xe 95 434
Cs - Rn 100 434
===================================
See pyscf/dft/gen_grid.py "class Grids" for more details.
'''
mol = gto.M(
verbose = 0,
atom = '''
o 0 0. 0.
h 0 -0.757 0.587
h 0 0.757 0.587''',
basis = '6-31g')
method = dft.RKS(mol)
print('Default DFT(LDA). E = %.12f' % method.kernel())
# See pyscf/dft/radi.py for more radial grid schemes
#grids.radi_method = dft.gauss_chebeshev
#grids.radi_method = dft.delley
method = dft.RKS(mol)
method.grids.radi_method = dft.mura_knowles
print('Changed radial grids for DFT. E = %.12f' % method.kernel())
# See pyscf/dft/gen_grid.py for detail of the grid weight scheme
#method.grids.becke_scheme = dft.original_becke
# Stratmann-Scuseria weight scheme
method = dft.RKS(mol)
method.grids.becke_scheme = dft.stratmann
print('Changed grid partition funciton. E = %.12f' % method.kernel())
# Grids level 0 - 9. Big number indicates dense grids. Default is 3
method = dft.RKS(mol)
method.grids.level = 4
print('Dense grids. E = %.12f' % method.kernel())
# Specify mesh grid for certain atom
method = dft.RKS(mol)
method.grids.atom_grid = {'O': (100, 770)}
print('Dense grids for O atom. E = %.12f' % method.kernel())
# Specify mesh grid for all atoms
method = dft.RKS(mol)
method.grids.atom_grid = (100, 770)
print('Dense grids for all atoms. E = %.12f' % method.kernel())
# Disable pruning grids near core region
#grids.prune = dft.sg1_prune
method = dft.RKS(mol)
method.grids.prune = None
print('Changed grid partition funciton. E = %.12f' % method.kernel())
|
gkc1000/pyscf
|
examples/dft/11-grid_scheme.py
|
Python
|
apache-2.0
| 2,132
|
[
"NWChem",
"PySCF"
] |
145d769f37c95ad04a2d8379f0083ee43100f852f9f650d8c02aefa7e8d79158
|
import random
import math
import string
class AdoptionCenter:
    """An animal shelter.

    Holds a display name, an (x, y) location (coerced to floats) and a
    mapping from species name to the number of animals of that species
    currently available.
    """

    def __init__(self, name, species_types, location):
        self.name = name
        self.location = tuple(map(float, location))
        self.species_types = species_types

    def get_number_of_species(self, animal):
        """Count of *animal* currently held, 0 when none."""
        return self.species_types.get(animal, 0)

    def get_location(self):
        """The center's coordinates as a tuple of floats."""
        return self.location

    def get_species_count(self):
        """A defensive copy of the species -> count mapping."""
        return dict(self.species_types)

    def get_name(self):
        """The center's display name."""
        return self.name

    def adopt_pet(self, species):
        """Hand out one animal of *species*; drop the entry when the
        count reaches exactly zero."""
        if species in self.species_types:
            new_count = self.species_types[species] - 1
            self.species_types[species] = new_count
            if new_count == 0:
                del self.species_types[species]
class Adopter:
    """A prospective adopter with a single desired species.

    The score of an adoption center is simply the number of animals of
    the desired species that the center holds (as a float).
    """

    def __init__(self, name, desired_species):
        self.name = name
        self.desired_species = desired_species

    def get_name(self):
        """The adopter's name."""
        return self.name

    def get_desired_species(self):
        """The species this adopter is looking for."""
        return self.desired_species

    def get_score(self, adoption_center):
        """Score = 1.0 x count of the desired species at the center."""
        count = adoption_center.get_number_of_species(self.desired_species)
        return 1.0 * count
class FlexibleAdopter(Adopter):
    """Adopter with a preferred species plus acceptable alternatives.

    Score = 1.0 x desired-species count + 0.3 x total count of the
    considered (alternative) species.
    """

    def __init__(self, name, desired_species, considered_species):
        Adopter.__init__(self, name, desired_species)
        self.considered_species = considered_species

    def get_score(self, adoption_center):
        """Base score plus a 0.3-weighted bonus for alternatives."""
        alternative_counts = (adoption_center.get_number_of_species(s)
                              for s in self.considered_species)
        return Adopter.get_score(self, adoption_center) + \
            0.3 * sum(alternative_counts)
class FearfulAdopter(Adopter):
    """Adopter deterred by the presence of one feared species.

    Score = 1.0 x desired-species count - 0.3 x feared-species count,
    floored at 0.0.
    """

    def __init__(self, name, desired_species, feared_species):
        Adopter.__init__(self, name, desired_species)
        self.feared_species = feared_species

    def get_score(self, adoption_center):
        """Base score minus a 0.3-weighted fear penalty, never negative."""
        penalty = 0.3 * adoption_center.get_number_of_species(
            self.feared_species)
        score = Adopter.get_score(self, adoption_center) - penalty
        return score if score > 0 else 0.0
class AllergicAdopter(Adopter):
    """Adopter who cannot visit a center holding any allergen species.

    Score = 0.0 when any allergic species is present at the center,
    otherwise the base desired-species score.
    """

    def __init__(self, name, desired_species, allergic_species):
        Adopter.__init__(self, name, desired_species)
        self.allergic_species = allergic_species

    def get_score(self, adoption_center):
        """Zero if an allergen is present, else the base score."""
        base_score = Adopter.get_score(self, adoption_center)
        for allergen in self.allergic_species:
            if allergen in adoption_center.get_species_count():
                return 0.0
        return base_score
class MedicatedAllergicAdopter(AllergicAdopter):
    """Allergic adopter whose medicine partially offsets each allergen.

    The score is the base desired-species score multiplied by the LOWEST
    medicine effectiveness among the allergen species the center holds
    (worst case governs); when the center holds no allergens the base
    score is returned unchanged.
    """

    def __init__(self, name, desired_species, allergic_species,
                 medicine_effectiveness):
        AllergicAdopter.__init__(self, name, desired_species,
                                 allergic_species)
        # species -> effectiveness multiplier applied to the score.
        self.medicine_effectiveness = medicine_effectiveness

    def get_score(self, adoption_center):
        """Base score scaled by the worst medicine effectiveness among
        the allergens present at the center."""
        desired_score = Adopter.get_score(self, adoption_center)
        # Allergen species the adoption center actually holds.
        allergic_intersection = set(
            adoption_center.get_species_count()) & set(self.allergic_species)
        if not allergic_intersection:
            # No allergen present: no medicine needed, full base score.
            return desired_score
        # BUG FIX: the original called min() on an undefined name
        # (`contains_medicine` instead of `intersection_medicine`) and a
        # bare `except` silently swallowed the NameError, so the medicine
        # penalty was never applied.
        worst_effectiveness = min(self.medicine_effectiveness[species]
                                  for species in allergic_intersection)
        return desired_score * worst_effectiveness
class SluggishAdopter(Adopter):
    """Adopter who dislikes travelling: the further away the center,
    the lower (and the more random) the score.

    distance < 1  ->  1.0            x desired count
    distance < 3  ->  U(0.7, 0.9)    x desired count
    distance < 5  ->  U(0.5, 0.7)    x desired count
    otherwise     ->  U(0.1, 0.5)    x desired count
    """

    def __init__(self, name, desired_species, location):
        Adopter.__init__(self, name, desired_species)
        self.location = location

    def get_linear_distance(self, to_location):
        """Euclidean (straight-line) distance from the adopter's home."""
        here_x, here_y = self.location
        there_x, there_y = to_location
        return math.sqrt((there_x - here_x) ** 2 +
                         (there_y - here_y) ** 2)

    def get_score(self, adoption_center):
        """Desired-species count scaled by a distance-dependent (and
        partly random) reluctance modifier."""
        distance = self.get_linear_distance(adoption_center.get_location())
        count = adoption_center.get_number_of_species(self.desired_species)
        if distance < 1:
            modifier = 1
        elif distance < 3:
            modifier = random.uniform(0.7, 0.9)
        elif distance < 5:
            modifier = random.uniform(0.5, 0.7)
        else:
            modifier = random.uniform(0.1, 0.5)
        return modifier * count
def get_ordered_adoption_center_list(adopter, list_of_adoption_centers):
    """Return the adoption centers ordered from highest to lowest score
    for *adopter*; equal scores are broken alphabetically by center name.
    """
    scored = [(adopter.get_score(center), center)
              for center in list_of_adoption_centers]
    # Sort by name first so the subsequent stable sort on descending
    # score keeps equal-score centers in alphabetical order.
    scored.sort(key=lambda pair: pair[1].get_name())
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [center for _, center in scored]
def get_adopters_for_advertisement(adoption_center, list_of_adopters, n):
    """Return the top *n* adopters by score for *adoption_center*,
    highest first; equal scores are broken alphabetically by name.
    """
    scored = [(person.get_score(adoption_center), person)
              for person in list_of_adopters]
    # Alphabetical pre-sort + stable descending-score sort = ties in
    # name order.
    scored.sort(key=lambda pair: pair[1].get_name())
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [person for _, person in scored[:n]]
|
NicholasAsimov/courses
|
6.00.1x/pset7/ps7.py
|
Python
|
mit
| 8,760
|
[
"VisIt"
] |
4a4f1a375591f897c98d45c6101a6de48af3ac37a061ed4fa87e137f901f7ca7
|
from __future__ import division
import numpy as np
from functools import partial
from pyhsmm.internals.hmm_states import HMMStatesPython, HMMStatesEigen
from pyhsmm.internals.hsmm_states import HSMMStatesPython, HSMMStatesEigen, \
GeoHSMMStates
from pylds.lds_messages_interface import info_E_step, info_sample
from pyslds.util import hmm_entropy, lds_entropy, expected_regression_log_prob, expected_gaussian_logprob
class _SLDSStates(object):
def __init__(self,model,T=None,data=None,inputs=None,stateseq=None,gaussian_states=None,
generate=True,initialize_from_prior=True,fixed_stateseq=None):
self.model = model
self.T = T if T is not None else data.shape[0]
self.data = data
self.inputs = np.zeros((self.T, 0)) if inputs is None else inputs
self.fixed_stateseq = fixed_stateseq
self.clear_caches()
# store gaussian states and state sequence if passed in
if gaussian_states is not None and stateseq is not None:
self.gaussian_states = gaussian_states
self.stateseq = np.array(stateseq, dtype=np.int32)
elif generate:
self.generate_states(stateseq=stateseq)
if data is not None and not initialize_from_prior:
self.resample()
def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
"""
Jointly sample the discrete and continuous states
"""
from pybasicbayes.util.stats import sample_discrete
# Generate from the prior and raise exception if unstable
T, K, n = self.T, self.num_states, self.D_latent
A = self.trans_matrix
# Initialize discrete state sequence
dss = -1 * np.ones(T, dtype=np.int32) if stateseq is None else stateseq.astype(np.int32)
assert dss.shape == (T,)
gss = np.empty((T,n), dtype='double')
if initial_condition is None:
if dss[0] == -1:
dss[0] = sample_discrete(self.pi_0)
gss[0] = self.init_dynamics_distns[dss[0]].rvs()
else:
dss[0] = initial_condition[0]
gss[0] = initial_condition[1]
for t in range(1,T):
# Sample discrete state given previous continuous state
if with_noise:
# Sample discre=te state from recurrent transition matrix
if dss[t] == -1:
dss[t] = sample_discrete(A[dss[t-1], :])
# Sample continuous state given current discrete state
gss[t] = self.dynamics_distns[dss[t-1]].\
rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
return_xy=False)
else:
# Pick the most likely next discrete state and continuous state
if dss[t] == -1:
dss[t] = np.argmax(A[dss[t-1], :])
gss[t] = self.dynamics_distns[dss[t-1]]. \
predict(np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])))
assert np.all(np.isfinite(gss[t])), "SLDS appears to be unstable!"
self.stateseq = dss
self.gaussian_states = gss
def generate_obs(self):
# Go through each time bin, get the discrete latent state,
# use that to index into the emission_distns to get samples
T, p = self.T, self.D_emission
dss, gss = self.stateseq, self.gaussian_states
data = np.empty((T,p),dtype='double')
for t in range(self.T):
ed = self.emission_distns[0] if self.model._single_emission \
else self.emission_distns[dss[t]]
data[t] = \
ed.rvs(x=np.hstack((gss[t][None, :], self.inputs[t][None,:])),
return_xy=False)
return data
    ## convenience properties
    # Dimensions, read off the first dynamics/emission distribution.
    @property
    def D_latent(self):
        # Latent (continuous) state dimension.
        return self.dynamics_distns[0].D_out
    @property
    def D_input(self):
        # Exogenous input dimension: the dynamics regression takes [x; u].
        return self.dynamics_distns[0].D_in - self.dynamics_distns[0].D_out
    @property
    def D_emission(self):
        # Observation dimension.
        return self.emission_distns[0].D_out
    # Pass-throughs to the owning model.
    @property
    def dynamics_distns(self):
        return self.model.dynamics_distns
    @property
    def emission_distns(self):
        return self.model.emission_distns
    @property
    def single_emission(self):
        # True if all discrete states share one emission distribution.
        return self.model._single_emission
    @property
    def init_dynamics_distns(self):
        return self.model.init_dynamics_distns
    @property
    def diagonal_noise(self):
        return self.model.diagonal_noise
    # Initial-state parameters, selected by the first discrete state.
    @property
    def mu_init(self):
        return self.init_dynamics_distns[self.stateseq[0]].mu
    @property
    def sigma_init(self):
        return self.init_dynamics_distns[self.stateseq[0]].sigma
    # Stacked per-state parameters ("*_set", shape (K, ...)) and their
    # per-time-step versions ("*s", indexed by self.stateseq).
    # Each regression's A matrix holds [A | B] (resp. [C | D]) side by side.
    @property
    def A_set(self):
        return np.concatenate([d.A[None, :, :self.D_latent] for d in self.dynamics_distns])
    @property
    def As(self):
        return self.A_set[self.stateseq]
    @property
    def B_set(self):
        return np.concatenate([d.A[None, :, self.D_latent:] for d in self.dynamics_distns])
    @property
    def Bs(self):
        return self.B_set[self.stateseq]
    @property
    def Q_set(self):
        # Dynamics noise covariances.
        return np.concatenate([d.sigma[None,...] for d in self.dynamics_distns])
    @property
    def sigma_statess(self):
        return self.Q_set[self.stateseq]
    @property
    def C_set(self):
        return np.concatenate([d.A[None,:,:self.D_latent] for d in self.emission_distns])
    @property
    def Cs(self):
        return self.C_set[self.stateseq]
    @property
    def D_set(self):
        return np.concatenate([d.A[None, :, self.D_latent:] for d in self.emission_distns])
    @property
    def Ds(self):
        return self.D_set[self.stateseq]
    @property
    def R_set(self):
        # Observation noise covariances.
        return np.concatenate([d.sigma[None,...] for d in self.emission_distns])
    @property
    def Rinv_set(self):
        # Inverse observation covariances; exploit diagonal noise when possible.
        if self.diagonal_noise:
            return np.concatenate([np.diag(1. / d.sigmasq_flat)[None,...] for d in self.emission_distns])
        else:
            return np.concatenate([np.linalg.inv(d.sigma)[None,...] for d in self.emission_distns])
    @property
    def sigma_obss(self):
        return self.R_set[self.stateseq]
    @property
    def _kwargs(self):
        # Extend the base-class serialization kwargs with the SLDS states.
        return dict(super(_SLDSStates, self)._kwargs,
                    stateseq=self.stateseq,
                    gaussian_states=self.gaussian_states)
@property
def info_init_params(self):
J_init = np.linalg.inv(self.sigma_init)
h_init = np.linalg.solve(self.sigma_init, self.mu_init)
log_Z_init = -1. / 2 * h_init.dot(np.linalg.solve(J_init, h_init))
log_Z_init += 1. / 2 * np.linalg.slogdet(J_init)[1]
log_Z_init -= self.D_latent / 2. * np.log(2 * np.pi)
return J_init, h_init, log_Z_init
    @property
    def info_dynamics_params(self):
        """
        Information-form pairwise potentials of the dynamics, one per time
        step, selected by the sampled discrete sequence. Returns
        (J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair),
        each with leading dimension T-1.
        """
        # z picks the dynamics distribution per transition; u is the input.
        z, u = self.stateseq[:-1], self.inputs[:-1]
        expand = lambda a: a[None,...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        A_set, B_set, Q_set = self.A_set, self.B_set, self.Q_set
        # Get the pairwise potentials
        J_pair_22_set = [np.linalg.inv(Q) for Q in Q_set]
        J_pair_21_set = [-J22.dot(A) for A,J22 in zip(A_set, J_pair_22_set)]
        J_pair_11_set = [A.T.dot(-J21) for A,J21 in zip(A_set, J_pair_21_set)]
        J_pair_11 = stack_set(J_pair_11_set)[z]
        J_pair_21 = stack_set(J_pair_21_set)[z]
        J_pair_22 = stack_set(J_pair_22_set)[z]
        # Check if diagonal and avoid inverting D_obs x D_obs matrix
        h_pair_1_set = [B.T.dot(J) for B, J in zip(B_set, J_pair_21_set)]
        h_pair_2_set = [B.T.dot(Qi) for B, Qi in zip(B_set, J_pair_22_set)]
        h_pair_1 = stack_set(h_pair_1_set)[z]
        h_pair_2 = stack_set(h_pair_2_set)[z]
        # Contract the per-state linear terms against the inputs.
        h_pair_1 = np.einsum('ni,nij->nj', u, h_pair_1)
        h_pair_2 = np.einsum('ni,nij->nj', u, h_pair_2)
        # Compute the log normalizer
        log_Z_pair = -self.D_latent / 2. * np.log(2 * np.pi) * np.ones(self.T-1)
        logdet = [np.linalg.slogdet(Q)[1] for Q in Q_set]
        logdet = stack_set(logdet)[z]
        log_Z_pair += -1. / 2 * logdet
        # Quadratic input term: u^T B^T Q^{-1} B u per time step.
        hJh_pair = [B.T.dot(np.linalg.solve(Q, B)) for B, Q in zip(B_set, Q_set)]
        hJh_pair = stack_set(hJh_pair)[z]
        log_Z_pair -= 1. / 2 * np.einsum('tij,ti,tj->t', hJh_pair, u, u)
        return J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair
    @property
    def info_emission_params(self):
        """
        Information-form node potentials of the emissions, selected by the
        sampled discrete sequence. Returns (J_node, h_node, log_Z_node);
        J_node and h_node have leading dimension T (J_node is constant when
        a single emission distribution is shared).
        """
        expand = lambda a: a[None,...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        C_set, D_set, Ri_set = self.C_set, self.D_set, self.Rinv_set
        # Precompute the per-state products used in both branches.
        RiC_set = [Ri.dot(C) for C,Ri in zip(C_set, Ri_set)]
        RiD_set = [Ri.dot(D) for D,Ri in zip(D_set, Ri_set)]
        CRiC_set = [C.T.dot(RiC) for C,RiC in zip(C_set, RiC_set)]
        DRiC_set = [D.T.dot(RiC) for D,RiC in zip(D_set, RiC_set)]
        DRiD_set = [D.T.dot(RiD) for D,RiD in zip(D_set, RiD_set)]
        if self.single_emission:
            # Shared emission model: plain matrix products over time.
            Ri = Ri_set[0]
            RiC = RiC_set[0]
            RiD = RiD_set[0]
            DRiC = DRiC_set[0]
            DRiD = DRiD_set[0]
            J_node = CRiC_set[0]
            h_node = np.dot(self.data, RiC)
            h_node -= np.dot(self.inputs, DRiC)
            logdet = np.linalg.slogdet(Ri_set[0])[1]
            log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
            log_Z_node += 1. / 2 * logdet
            # E[(y-Du)^T R^{-1} (y-Du)]
            log_Z_node -= 1. / 2 * np.sum(np.dot(self.data, Ri) * self.data, axis=1)
            log_Z_node += np.sum(np.dot(self.data, RiD) * self.inputs, axis=1)
            log_Z_node -= 1. / 2 * np.sum(np.dot(self.inputs, DRiD) * self.inputs, axis=1)
        else:
            # Per-state emissions: select the parameters at each time step.
            Ri = stack_set(Ri_set)[self.stateseq]
            RiC = stack_set(RiC_set)[self.stateseq]
            RiD = stack_set(RiD_set)[self.stateseq]
            DRiC = stack_set(DRiC_set)[self.stateseq]
            DRiD = stack_set(DRiD_set)[self.stateseq]
            J_node = stack_set(CRiC_set)[self.stateseq]
            h_node = np.einsum('ni,nij->nj', self.data, RiC)
            h_node -= np.einsum('ni,nij->nj', self.inputs, DRiC)
            logdet = stack_set([np.linalg.slogdet(R)[1] for R in Ri_set])[self.stateseq]
            log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
            log_Z_node += 1. / 2 * logdet
            # E[(y-Du)^T R^{-1} (y-Du)]
            log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', Ri, self.data, self.data)
            log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', -2 * RiD, self.data, self.inputs)
            log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', DRiD, self.inputs, self.inputs)
        return J_node, h_node, log_Z_node
@property
def info_params(self):
return self.info_init_params + self.info_dynamics_params + self.info_emission_params
    @property
    def aBl(self):
        """
        Per-time-step log likelihoods of the continuous states and data under
        each discrete state (T x K). Cached in self._aBl; invalidated by
        setting self._aBl = None. Convention: row t of the dynamics term
        scores the transition x_t -> x_{t+1} under state z_t.
        """
        if self._aBl is None:
            aBl = self._aBl = np.zeros((self.T, self.num_states))
            ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, self.emission_distns
            for idx, (d1, d2) in enumerate(zip(ids, dds)):
                # Initial state distribution
                aBl[0, idx] = d1.log_likelihood(self.gaussian_states[0])
                # Dynamics
                xs = np.hstack((self.gaussian_states[:-1], self.inputs[:-1]))
                aBl[:-1, idx] = d2.log_likelihood((xs, self.gaussian_states[1:]))
            # Emissions
            xs = np.hstack((self.gaussian_states, self.inputs))
            if self.single_emission:
                # Shared emission model contributes the same term to every column.
                d3 = self.emission_distns[0]
                aBl += d3.log_likelihood((xs, self.data))[:, None]
            else:
                for idx, d3 in enumerate(eds):
                    aBl[:, idx] += d3.log_likelihood((xs, self.data))
            # Zero out rows containing NaNs (e.g. from missing data).
            aBl[np.isnan(aBl).any(1)] = 0.
        return self._aBl
### Mean-field and VBEM base functions.
def smooth(self):
# Use the info E step because it can take advantage of diagonal noise
# The standard E step could but we have not implemented it
self.info_E_step()
xu = np.column_stack((self.smoothed_mus, self.inputs))
if self.single_emission:
return xu.dot(self.emission_distns[0].A.T)
else:
return np.array([C.dot(x) + D.dot(u) for C, D, x, u in
zip(self.Cs, self.Ds, self.smoothed_mus, self.inputs)])
def info_E_step(self):
self._gaussian_normalizer, self.smoothed_mus, \
self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(*self.info_params)
def _init_mf_from_gibbs(self):
# Base class sets the expected HMM stats
# the first meanfield step will update the HMM params accordingly
super(_SLDSStates, self)._init_mf_from_gibbs()
self._normalizer = None
self._mf_lds_normalizer = 0
self.smoothed_mus = self.gaussian_states.copy()
self.smoothed_sigmas = np.tile(0.01 * np.eye(self.D_latent)[None, :, :], (self.T, 1, 1))
E_xtp1_xtT = self.smoothed_mus[1:,:,None] * self.smoothed_mus[:-1,None,:]
self._set_gaussian_expected_stats(
self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
    def _set_gaussian_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
        """
        Both meanfield and VBEM require expected statistics of the continuous latent
        states, x. This is a helper function to take E[x_t], E[x_t x_t^T] and E[x_{t+1}, x_t^T]
        and compute the expected sufficient statistics for the initial distribution,
        dynamics distribution, and Gaussian observation distribution.

        Sets self.E_init_stats, self.E_dynamics_stats, and self.E_emission_stats.
        """
        assert not np.isnan(E_xtp1_xtT).any()
        assert not np.isnan(smoothed_mus).any()
        assert not np.isnan(smoothed_sigmas).any()
        assert smoothed_mus.shape == (self.T, self.D_latent)
        assert smoothed_sigmas.shape == (self.T, self.D_latent, self.D_latent)
        assert E_xtp1_xtT.shape == (self.T-1, self.D_latent, self.D_latent)
        # This is like LDSStates._set_expected_states but doesn't sum over time
        T, D_obs = self.T, self.D_emission
        # Second moments of x; inputs u are observed, so outer products suffice.
        E_x_xT = smoothed_sigmas + smoothed_mus[:, :, None] * smoothed_mus[:, None, :]
        E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :]
        E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
        # Block moment matrix of the stacked regressor [x; u].
        E_xu_xuT = np.concatenate((
            np.concatenate((E_x_xT, E_x_uT), axis=2),
            np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
            axis=1)
        E_xut_xutT = E_xu_xuT[:-1]
        E_xtp1_xtp1T = E_x_xT[1:]
        E_xtp1_utT = (smoothed_mus[1:, :, None] * self.inputs[:-1, None, :])
        E_xtp1_xutT = np.concatenate((E_xtp1_xtT, E_xtp1_utT), axis=-1)
        # Initial state stats
        self.E_init_stats = (self.smoothed_mus[0], E_x_xT[0], 1.)
        # Dynamics stats
        self.E_dynamics_stats = (E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, np.ones(self.T-1))
        # Emission stats -- special case diagonal noise
        E_yyT = self.data**2 if self.diagonal_noise else self.data[:, :, None] * self.data[:, None, :]
        E_yxT = self.data[:, :, None] * self.smoothed_mus[:, None, :]
        E_yuT = self.data[:, :, None] * self.inputs[:, None, :]
        E_yxuT = np.concatenate((E_yxT, E_yuT), axis=-1)
        E_n = np.ones((T, D_obs)) if self.diagonal_noise else np.ones(T)
        self.E_emission_stats = (E_yyT, E_yxuT, E_xu_xuT, E_n)
######################
# algorithm mixins #
######################
class _SLDSStatesGibbs(_SLDSStates):
    """Gibbs sampling updates: alternate discrete- and continuous-state resampling."""

    def resample(self, niter=1):
        """Run `niter` joint sweeps (or `self.niter` sweeps if the instance defines it)."""
        sweeps = getattr(self, 'niter', niter)
        for _ in range(sweeps):
            self.resample_discrete_states()
            self.resample_gaussian_states()

    def _init_gibbs_from_mf(self):
        raise NotImplementedError

    def resample_discrete_states(self):
        """Resample z | x with the standard HMM update from the base class."""
        super(_SLDSStatesGibbs, self).resample()

    def resample_gaussian_states(self):
        """Jointly resample x | z via info-form forward-filter backward-sample."""
        self._aBl = None  # invalidate the cached likelihoods
        self._gaussian_normalizer, self.gaussian_states = \
            info_sample(*self.info_params)
class _SLDSStatesVBEM(_SLDSStates):
    """
    VBEM updates for the SLDS: q(z) is a discrete chain and q(x) is a
    Gaussian chain. The vbem_info_* properties assemble the information-form
    potentials of q(x) by averaging the per-state parameters under q(z).
    """
    def __init__(self, model, **kwargs):
        super(_SLDSStatesVBEM, self).__init__(model, **kwargs)
        # Initialize q(x) with zero means and identity covariances.
        self.smoothed_mus = np.zeros((self.T, self.D_latent))
        # One (D_latent x D_latent) covariance per time step.
        # Bug fix: reps was (self.T, self.D_latent); np.tile pads short reps
        # with leading 1s, which produced an array of the wrong shape.
        self.smoothed_sigmas = np.tile(np.eye(self.D_latent)[None, :, :], (self.T, 1, 1))

    @property
    def vbem_info_init_params(self):
        """Initial-state info parameters (J, h, log Z) averaged under q(z_0)."""
        E_z0 = self.expected_states[0]
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        mu_set = [d.mu for d in self.init_dynamics_distns]
        sigma_set = [d.sigma for d in self.init_dynamics_distns]
        J_init_set = stack_set([np.linalg.inv(sigma) for sigma in sigma_set])
        h_init_set = stack_set([J.dot(mu) for J, mu in zip(J_init_set, mu_set)])
        # Average natural parameters under q(z_0).
        J_init = np.tensordot(E_z0, J_init_set, axes=1)
        h_init = np.tensordot(E_z0, h_init_set, axes=1)
        hJh_init = np.array([h.T.dot(S).dot(h) for S, h in zip(sigma_set, h_init_set)])
        logdet = np.array([np.linalg.slogdet(J)[1] for J in J_init_set])
        log_Z_init = -1. / 2 * np.dot(E_z0, hJh_init)
        log_Z_init += 1. / 2 * np.dot(E_z0, logdet)
        log_Z_init -= self.D_latent / 2. * np.log(2 * np.pi)
        return J_init, h_init, log_Z_init

    @property
    def vbem_info_dynamics_params(self):
        """Pairwise dynamics info parameters averaged under q(z_t), t < T."""
        E_z = self.expected_states[:-1]
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        A_set, B_set, Q_set = self.A_set, self.B_set, self.Q_set
        # Get the pairwise potentials
        J_pair_22_set = stack_set([np.linalg.inv(Q) for Q in Q_set])
        J_pair_21_set = stack_set([-J22.dot(A) for A, J22 in zip(A_set, J_pair_22_set)])
        J_pair_11_set = stack_set([A.T.dot(-J21) for A, J21 in zip(A_set, J_pair_21_set)])
        J_pair_22 = np.tensordot(E_z, J_pair_22_set, axes=1)
        J_pair_21 = np.tensordot(E_z, J_pair_21_set, axes=1)
        J_pair_11 = np.tensordot(E_z, J_pair_11_set, axes=1)
        h_pair_1_set = stack_set([B.T.dot(J) for B, J in zip(B_set, J_pair_21_set)])
        h_pair_2_set = stack_set([B.T.dot(Qi) for B, Qi in zip(B_set, J_pair_22_set)])
        h_pair_1 = np.tensordot(E_z, h_pair_1_set, axes=1)
        h_pair_2 = np.tensordot(E_z, h_pair_2_set, axes=1)
        # Contract the linear terms against the inputs.
        h_pair_1 = np.einsum('ni,nij->nj', self.inputs[:-1], h_pair_1)
        h_pair_2 = np.einsum('ni,nij->nj', self.inputs[:-1], h_pair_2)
        # Compute the log normalizer
        log_Z_pair = -self.D_latent / 2. * np.log(2 * np.pi) * np.ones(self.T - 1)
        logdet = np.array([np.linalg.slogdet(Q)[1] for Q in Q_set])
        logdet = np.dot(E_z, logdet)
        log_Z_pair += -1. / 2 * logdet
        hJh_pair = np.array([B.T.dot(np.linalg.solve(Q, B)) for B, Q in zip(B_set, Q_set)])
        hJh_pair = np.tensordot(E_z, hJh_pair, axes=1)
        log_Z_pair -= 1. / 2 * np.einsum('tij,ti,tj->t',
                                         hJh_pair,
                                         self.inputs[:-1],
                                         self.inputs[:-1])
        return J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair

    @property
    def vbem_info_emission_params(self):
        """Emission node info parameters averaged under q(z_t)."""
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        C_set, D_set, Ri_set = self.C_set, self.D_set, self.Rinv_set
        RiC_set = stack_set([Ri.dot(C) for C, Ri in zip(C_set, Ri_set)])
        RiD_set = stack_set([Ri.dot(D) for D, Ri in zip(D_set, Ri_set)])
        CRiC_set = stack_set([C.T.dot(RiC) for C, RiC in zip(C_set, RiC_set)])
        DRiC_set = stack_set([D.T.dot(RiC) for D, RiC in zip(D_set, RiC_set)])
        DRiD_set = stack_set([D.T.dot(RiD) for D, RiD in zip(D_set, RiD_set)])
        if self.single_emission:
            # Shared emission model: no averaging needed.
            Ri = Ri_set[0]
            RiC = RiC_set[0]
            RiD = RiD_set[0]
            DRiC = DRiC_set[0]
            DRiD = DRiD_set[0]
            logdet = np.linalg.slogdet(Ri_set[0])[1]
            J_node = CRiC_set[0]
            h_node = np.dot(self.data, RiC)
            h_node -= np.dot(self.inputs, DRiC)
            log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
            log_Z_node += 1. / 2 * logdet
            log_Z_node -= 1. / 2 * np.sum(np.dot(self.data, Ri) * self.data, axis=1)
            log_Z_node += np.sum(np.dot(self.data, RiD) * self.inputs, axis=1)
            log_Z_node -= 1. / 2 * np.sum(np.dot(self.inputs, DRiD) * self.inputs, axis=1)
        else:
            E_z = self.expected_states
            Ri = np.tensordot(E_z, Ri_set, axes=1)
            RiC = np.tensordot(E_z, RiC_set, axes=1)
            RiD = np.tensordot(E_z, RiD_set, axes=1)
            DRiC = np.tensordot(E_z, DRiC_set, axes=1)
            DRiD = np.tensordot(E_z, DRiD_set, axes=1)
            logdet = np.dot(E_z, np.array([np.linalg.slogdet(_Ri)[1] for _Ri in Ri_set]))
            J_node = np.tensordot(E_z, CRiC_set, axes=1)
            h_node = np.einsum('ni,nij->nj', self.data, RiC)
            h_node -= np.einsum('ni,nij->nj', self.inputs, DRiC)
            log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
            log_Z_node += 1. / 2 * logdet
            # E[(y-Du)^T R^{-1} (y-Du)]
            log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', Ri, self.data, self.data)
            log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', -2 * RiD, self.data, self.inputs)
            log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', DRiD, self.inputs, self.inputs)
        return J_node, h_node, log_Z_node

    @property
    def vbem_info_params(self):
        """All VBEM info-form parameters: init + dynamics + emission tuples."""
        return self.vbem_info_init_params + \
               self.vbem_info_dynamics_params + \
               self.vbem_info_emission_params

    @property
    def vbem_aBl(self):
        """
        These are the expected log likelihoods (node potentials)
        as seen from the discrete states. In other words,
        E_{q(x)} [log p(y, x | z)]
        """
        vbem_aBl = np.zeros((self.T, self.num_states))
        ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, self.emission_distns
        # (renamed loop vars: `id` shadowed the builtin)
        for k, (d0, dd) in enumerate(zip(ids, dds)):
            vbem_aBl[0, k] = expected_gaussian_logprob(d0.mu, d0.sigma, self.E_init_stats)
            vbem_aBl[:-1, k] += expected_regression_log_prob(dd, self.E_dynamics_stats)
        if self.single_emission:
            ed = self.emission_distns[0]
            vbem_aBl += expected_regression_log_prob(ed, self.E_emission_stats)[:,None]
        else:
            for k, ed in enumerate(self.emission_distns):
                vbem_aBl[:, k] += expected_regression_log_prob(ed, self.E_emission_stats)
        # Zero out rows containing NaNs (e.g. from missing data).
        vbem_aBl[np.isnan(vbem_aBl).any(1)] = 0.
        return vbem_aBl

    def vb_E_step(self):
        """One VBEM E step: update q(z), then q(x), and record the entropy."""
        H_z = self.vb_E_step_discrete_states()
        H_x = self.vb_E_step_gaussian_states()
        self._variational_entropy = H_z + H_x

    def vb_E_step_discrete_states(self):
        """Update q(z); returns the entropy of the discrete chain."""
        # Call pyhsmm to do message passing and compute expected suff stats
        aBl = self.vbem_aBl
        self.all_expected_stats = self._expected_statistics(self.trans_matrix, self.pi_0, aBl)
        params = (np.log(self.trans_matrix), np.log(self.pi_0), aBl, self._normalizer)
        return hmm_entropy(params, self.all_expected_stats)

    def vb_E_step_gaussian_states(self):
        """Update q(x); returns the entropy of the Gaussian chain."""
        info_params = self.vbem_info_params
        # Call pylds to do message passing and compute expected suff stats
        stats = info_E_step(*info_params)
        self._lds_normalizer, self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT = stats
        self._set_gaussian_expected_stats(self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
        # Set the gaussian states to smoothed mus
        self.gaussian_states = self.smoothed_mus
        # NOTE(review): sets the cache to 0 rather than clearing it with None;
        # code that checks `self._aBl is None` will then use it as-is -- confirm.
        self._aBl = 0
        # Compute the variational entropy
        return lds_entropy(info_params, stats)

    def vb_elbo(self):
        """Evidence lower bound: expected log joint plus variational entropy."""
        return self.expected_log_joint_probability() + self._variational_entropy

    def expected_log_joint_probability(self):
        """
        Compute E_{q(z) q(x)} [log p(z) + log p(x | z) + log p(y | x, z)]
        """
        # E_{q(z)}[log p(z)]
        from pyslds.util import expected_hmm_logprob
        elp = expected_hmm_logprob(
            self.pi_0, self.trans_matrix,
            (self.expected_states, self.expected_transcounts, self._normalizer))
        # E_{q(x)}[log p(y, x | z)] is given by aBl
        # To get E_{q(x)}[ aBl ] we multiply and sum
        elp += np.sum(self.expected_states * self.vbem_aBl)
        return elp
class _SLDSStatesMeanField(_SLDSStates):
    """
    Mean-field updates for the SLDS: the expected_info_* properties assemble
    the info-form potentials of q(x) by averaging expected natural parameters
    of the variational factors over q(z).
    """
    def __init__(self, model, **kwargs):
        super(_SLDSStatesMeanField, self).__init__(model, **kwargs)
        # Initialize q(x) with zero means and identity covariances.
        self.smoothed_mus = np.zeros((self.T, self.D_latent))
        # One (D_latent x D_latent) covariance per time step.
        # Bug fix: reps was (self.T, self.D_latent); np.tile pads short reps
        # with leading 1s, which produced an array of the wrong shape.
        self.smoothed_sigmas = np.tile(np.eye(self.D_latent)[None, :, :], (self.T, 1, 1))

    @property
    def expected_info_init_params(self):
        """Expected initial-state info parameters under q(z_0) and the NIW factors."""
        from pybasicbayes.util.stats import niw_expectedstats
        def get_paramseq(distns):
            contract = partial(np.tensordot, self.expected_states[0], axes=1)
            params = [niw_expectedstats(d.nu_mf, d.sigma_mf, d.mu_mf, d.kappa_mf)
                      for d in distns]
            return list(map(contract, zip(*params)))
        J_init, h_init, hJih_init, logdet_J_init = \
            get_paramseq(self.init_dynamics_distns)
        log_Z_init = -1. / 2 * hJih_init
        log_Z_init += 1. / 2 * logdet_J_init
        log_Z_init -= self.D_latent / 2. * np.log(2 * np.pi)
        return J_init, h_init, log_Z_init

    @property
    def expected_info_dynamics_params(self):
        """Expected pairwise dynamics info parameters under q(z) and q(theta)."""
        def get_paramseq(distns):
            contract = partial(np.tensordot, self.expected_states[:-1], axes=1)
            params = [d.meanfield_expectedstats() for d in distns]
            return list(map(contract, zip(*params)))
        J_pair_22, J_pair_21, J_pair_11, logdet_pair = \
            get_paramseq(self.dynamics_distns)
        # Compute E[B^T Q^{-1}] and E[B^T Q^{-1} A]
        n = self.D_latent
        # Slice the stacked [A | B] blocks out of the expected statistics.
        E_Qinv = J_pair_22
        E_Qinv_A = J_pair_21[:,:,:n]
        E_Qinv_B = J_pair_21[:,:,n:]
        E_BT_Qinv_A = J_pair_11[:,n:,:n]
        E_BT_Qinv_B = J_pair_11[:,n:,n:]
        E_AT_Qinv_A = J_pair_11[:,:n,:n].copy("C")
        h_pair_1 = -np.einsum('ti,tij->tj', self.inputs[:-1], E_BT_Qinv_A)
        h_pair_2 = np.einsum('ti,tji->tj', self.inputs[:-1], E_Qinv_B)
        log_Z_pair = 1./2 * logdet_pair
        log_Z_pair -= self.D_latent / 2. * np.log(2 * np.pi)
        log_Z_pair -= 1. / 2 * np.einsum('tij,ti,tj->t', E_BT_Qinv_B,
                                         self.inputs[:-1], self.inputs[:-1])
        return E_AT_Qinv_A, -E_Qinv_A, E_Qinv, h_pair_1, h_pair_2, log_Z_pair

    @property
    def expected_info_emission_params(self):
        """Expected emission node info parameters under q(z) and q(theta)."""
        # Now get the expected observation potentials
        def get_paramseq(distns):
            contract = partial(np.tensordot, self.expected_states, axes=1)
            params = [d.meanfield_expectedstats() for d in distns]
            return list(map(contract, zip(*params)))
        J_yy, J_yx, J_node, logdet_node = get_paramseq(self.emission_distns)
        n = self.D_latent
        # Slice the stacked [C | D] blocks out of the expected statistics.
        E_Rinv = J_yy
        E_Rinv_C = J_yx[:,:,:n].copy("C")
        E_Rinv_D = J_yx[:,:,n:].copy("C")
        E_DT_Rinv_C = J_node[:,n:,:n]
        E_CT_Rinv_C = J_node[:,:n,:n].copy("C")
        E_DT_Rinv_D = J_node[:,n:,n:]
        h_node = np.einsum('ni,nij->nj', self.data, E_Rinv_C)
        h_node -= np.einsum('ni,nij->nj', self.inputs, E_DT_Rinv_C)
        log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
        log_Z_node += 1. / 2 * logdet_node
        # E[(y-Du)^T R^{-1} (y-Du)]
        log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', E_Rinv,
                                         self.data, self.data)
        log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', -2*E_Rinv_D,
                                         self.data, self.inputs)
        log_Z_node -= 1. / 2 * np.einsum('tij,ti,tj->t', E_DT_Rinv_D,
                                         self.inputs, self.inputs)
        return E_CT_Rinv_C, h_node, log_Z_node

    @property
    def expected_info_params(self):
        """All expected info-form parameters: init + dynamics + emission tuples."""
        return self.expected_info_init_params + \
               self.expected_info_dynamics_params + \
               self.expected_info_emission_params

    @property
    def mf_aBl(self):
        """
        These are the expected log likelihoods (node potentials)
        as seen from the discrete states.
        """
        mf_aBl = self._mf_aBl = np.zeros((self.T, self.num_states))
        ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \
            self.emission_distns
        for idx, (d1, d2, d3) in enumerate(zip(ids, dds, eds)):
            mf_aBl[0,idx] = d1.expected_log_likelihood(
                stats=self.E_init_stats)
            mf_aBl[:-1,idx] += d2.expected_log_likelihood(
                stats=self.E_dynamics_stats)
            mf_aBl[:,idx] += d3.expected_log_likelihood(
                stats=self.E_emission_stats)
        # Zero out rows containing NaNs (e.g. from missing data).
        mf_aBl[np.isnan(mf_aBl).any(1)] = 0.
        return mf_aBl

    def meanfieldupdate(self, niter=1):
        """One mean-field sweep over q(z) then q(x). `niter` is kept for API
        compatibility but a single sweep is performed."""
        H_z = self.meanfield_update_discrete_states()
        H_x = self.meanfield_update_gaussian_states()
        self._variational_entropy = H_z + H_x

    def meanfield_update_discrete_states(self):
        """Update q(z); returns its entropy."""
        super(_SLDSStatesMeanField, self).meanfieldupdate()
        # Save the states
        self.stateseq = np.argmax(self.expected_states, axis=1)
        # Compute the variational entropy
        return hmm_entropy(self._mf_param_snapshot, self.all_expected_stats)

    def meanfield_update_gaussian_states(self):
        """Update q(x); returns its entropy."""
        info_params = self.expected_info_params
        # Call pylds to do message passing and compute expected suff stats
        stats = info_E_step(*self.expected_info_params)
        self._lds_normalizer, self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT = stats
        self._set_gaussian_expected_stats(self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
        # Save the states
        self.gaussian_states = self.smoothed_mus.copy()
        # Compute the variational entropy
        return lds_entropy(info_params, stats)

    def get_vlb(self, most_recently_updated=False):
        """Variational lower bound for this states object."""
        # E_{q(z)}[log p(z)]
        from pyslds.util import expected_hmm_logprob
        vlb = expected_hmm_logprob(
            self.mf_pi_0, self.mf_trans_matrix,
            (self.expected_states, self.expected_transcounts, self._normalizer))
        # E_{q(x)}[log p(y, x | z)] is given by aBl
        # To get E_{q(x)}[ aBl ] we multiply and sum
        vlb += np.sum(self.expected_states * self.mf_aBl)
        # Add the variational entropy
        vlb += self._variational_entropy
        return vlb

    def meanfield_smooth(self):
        """Smoothed E[y_t] using expected emission matrices under q(z), q(theta)."""
        # Use the smoothed latent states in combination with the expected
        # discrete states and observation matrices
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        if self.single_emission:
            EC = self.emission_distns[0].mf_expectations[0]
            return self.smoothed_mus.dot(EC.T)
        else:
            mf_params = [d.mf_expectations for d in self.emission_distns]
            ECs = stack_set([prms[0] for prms in mf_params])
            ECs = np.tensordot(self.expected_states, ECs, axes=1)
            return np.array([C.dot(mu) for C, mu in zip(ECs, self.smoothed_mus)])
class _SLDSStatesMaskedData(_SLDSStatesGibbs, _SLDSStatesVBEM, _SLDSStatesMeanField):
    r"""
    This mixin allows arbitrary patterns of missing data. Currently,
    we only support the simplest case in which the observation noise
    has diagonal covariance, such that,
    y_{t,n} ~ N(c_n \dot x_t, \sigma^2_n).
    In this case, missing data corresponds to fewer emission potentials.
    The missing data can either be indicated by NaN's in the data or by
    an explicit, Boolean mask passed to the constructor. The mixin works
    by overriding the info_emission_parameters and the corresponding
    emission likelihoods. If no mask is present, it passes through
    to the base mixings (Gibbs, VBEM, MeanField).
    """
    def __init__(self, model, data=None, mask=None, **kwargs):
        if mask is not None:
            # assert mask.shape == data.shape
            self.mask = mask
        elif data is not None and \
                isinstance(data, np.ndarray) \
                and np.any(np.isnan(data)):
            from warnings import warn
            warn("data includes NaN's. Treating these as missing data.")
            self.mask = ~np.isnan(data)
            # NOTE: mutates the caller's array in place, zeroing the NaNs.
            data[np.isnan(data)] = 0
        else:
            self.mask = None
        super(_SLDSStatesMaskedData, self).__init__(model, data=data, **kwargs)
        # If masked, make sure we have diagonal observations
        # we do not currently support arbitrary masking with dense observation cov.
        if self.mask is not None and not self.diagonal_noise:
            raise Exception("PySLDS only supports diagonal observation noise with masked data")

    def heldout_log_likelihood(self, test_mask=None):
        """
        Compute the log likelihood of the masked data given the latent
        discrete and continuous states.
        """
        if test_mask is None:
            # If a test mask is not supplied, use the negation of this object's mask
            if self.mask is None:
                return 0
            else:
                test_mask = ~self.mask
        xs = np.hstack((self.gaussian_states, self.inputs))
        if self.single_emission:
            return self.emission_distns[0].\
                log_likelihood((xs, self.data), mask=test_mask).sum()
        else:
            hll = 0
            z = self.stateseq
            for idx, ed in enumerate(self.emission_distns):
                hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
                                         mask=test_mask[z == idx]).sum()
            # Bug fix: this branch previously fell through and returned None.
            return hll

    ### Gibbs
    @property
    def info_emission_params(self):
        """Masked emission node potentials (falls back to base when unmasked)."""
        if self.mask is None:
            return super(_SLDSStatesMaskedData, self).info_emission_params
        # Otherwise, compute masked potentials
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        C_set, D_set = self.C_set, self.D_set
        # Bug fix: wrap in an array so it can be fancy-indexed by the state
        # sequence below (a plain list raises TypeError on array indexing).
        sigmasq_inv_set = np.array([np.diag(Ri) for Ri in self.Rinv_set])
        CCT_set = stack_set(
            [np.array([np.outer(cp, cp) for cp in C]).
                 reshape((self.D_emission, self.D_latent ** 2)) for C in C_set])
        # Compute expectations wrt q(z)
        if self.single_emission:
            sigmasq_inv = sigmasq_inv_set[0]
            C = C_set[0]
            D = D_set[0]
            CCT = CCT_set[0]
        else:
            z = self.stateseq
            sigmasq_inv = sigmasq_inv_set[z]
            C = C_set[z]
            D = D_set[z]
            CCT = CCT_set[z]
        # Finally, we can compute the emission potential with the mask
        T, D_latent, data, inputs, mask = self.T, self.D_latent, self.data, self.inputs, self.mask
        centered_data = data - inputs.dot(np.swapaxes(D, -2, -1))
        J_node = np.dot(mask * sigmasq_inv, CCT).reshape((T, D_latent, D_latent))
        h_node = (mask * centered_data * sigmasq_inv).dot(C)
        log_Z_node = -mask.sum(1) / 2. * np.log(2 * np.pi) * np.ones(T)
        # Bug fix: sum per time step (axis=1), matching the VBEM and
        # mean-field versions; summing over everything added a scalar total.
        log_Z_node += 1. / 2 * np.sum(mask * np.log(sigmasq_inv), axis=1)
        log_Z_node += -1. / 2 * np.sum(mask * centered_data ** 2 * sigmasq_inv, axis=1)
        return J_node, h_node, log_Z_node

    @property
    def aBl(self):
        """Masked per-state log likelihoods (falls back to base when unmasked)."""
        if self.mask is None:
            return super(_SLDSStatesMaskedData, self).aBl
        if self._aBl is None:
            aBl = self._aBl = np.zeros((self.T, self.num_states))
            ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \
                self.emission_distns
            for idx, (d1, d2) in enumerate(zip(ids, dds)):
                # Initial state distribution
                aBl[0, idx] = d1.log_likelihood(self.gaussian_states[0])
                # Dynamics
                xs = np.hstack((self.gaussian_states[:-1], self.inputs[:-1]))
                aBl[:-1, idx] = d2.log_likelihood((xs, self.gaussian_states[1:]))
            # Emissions
            xs = np.hstack((self.gaussian_states, self.inputs))
            if self.single_emission:
                d3 = self.emission_distns[0]
                aBl += d3.log_likelihood((xs, self.data), mask=self.mask)[:, None]
            else:
                for idx, d3 in enumerate(eds):
                    aBl[:, idx] += d3.log_likelihood((xs, self.data), mask=self.mask)
            aBl[np.isnan(aBl).any(1)] = 0.
        return self._aBl

    ### VBEM
    @property
    def vbem_info_emission_params(self):
        """Masked VBEM emission potentials (falls back to base when unmasked)."""
        if self.mask is None:
            return super(_SLDSStatesMaskedData, self).vbem_info_emission_params
        # Otherwise, compute masked potentials
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        C_set, D_set = self.C_set, self.D_set
        sigmasq_inv_set = [np.diag(Ri) for Ri in self.Rinv_set]
        CCT_set = stack_set(
            [np.array([np.outer(cp, cp) for cp in C]).
                 reshape((self.D_emission, self.D_latent ** 2)) for C in C_set])
        # Compute expectations wrt q(z)
        if self.single_emission:
            sigmasq_inv = sigmasq_inv_set[0]
            C = C_set[0]
            D = D_set[0]
            CCT = CCT_set[0]
        else:
            E_z = self.expected_states
            sigmasq_inv = np.tensordot(E_z, sigmasq_inv_set, axes=1)
            C = np.tensordot(E_z, C_set, axes=1)
            D = np.tensordot(E_z, D_set, axes=1)
            CCT = np.tensordot(E_z, CCT_set, axes=1)
        # Finally, we can compute the emission potential with the mask
        T, D_latent, data, inputs, mask = self.T, self.D_latent, self.data, self.inputs, self.mask
        centered_data = data - inputs.dot(np.swapaxes(D, -2, -1))
        J_node = np.dot(mask * sigmasq_inv, CCT).reshape((T, D_latent, D_latent))
        h_node = (mask * centered_data * sigmasq_inv).dot(C)
        log_Z_node = -mask.sum(1) / 2. * np.log(2 * np.pi)
        log_Z_node += 1. / 2 * np.sum(mask * np.log(sigmasq_inv), axis=1)
        log_Z_node += -1. / 2 * np.sum(mask * centered_data ** 2 * sigmasq_inv, axis=1)
        return J_node, h_node, log_Z_node

    ### Mean field
    @property
    def expected_info_emission_params(self):
        """Masked mean-field emission potentials (falls back to base when unmasked)."""
        if self.mask is None:
            return super(_SLDSStatesMaskedData, self).expected_info_emission_params
        # Otherwise, compute masked potentials
        expand = lambda a: a[None, ...]
        stack_set = lambda x: np.concatenate(list(map(expand, x)))
        # mf_expectations: mf_E_A, mf_E_AAT, mf_E_sigmasq_inv, mf_E_log_sigmasq
        mf_stats = [ed.mf_expectations for ed in self.emission_distns]
        n = self.D_latent
        E_C_set = stack_set([s[0][:, :n] for s in mf_stats])
        E_D_set = stack_set([s[0][:, n:] for s in mf_stats])
        E_CCT_set = stack_set([s[1][:, :n, :n] for s in mf_stats])
        E_sigmasq_inv_set = stack_set(s[2] for s in mf_stats)
        E_log_sigmasq_inv_set = stack_set(s[3] for s in mf_stats)
        # Compute expectations wrt q(z)
        if self.single_emission:
            E_C = E_C_set[0]
            E_D = E_D_set[0]
            E_CCT = E_CCT_set[0]
            E_sigmasq_inv = E_sigmasq_inv_set[0]
            E_log_sigmasq_inv = E_log_sigmasq_inv_set[0]
        else:
            E_z = self.expected_states
            E_C = np.tensordot(E_z, E_C_set, axes=1)
            E_D = np.tensordot(E_z, E_D_set, axes=1)
            E_CCT = np.tensordot(E_z, E_CCT_set, axes=1)
            E_sigmasq_inv = np.tensordot(E_z, E_sigmasq_inv_set, axes=1)
            E_log_sigmasq_inv = np.tensordot(E_z, E_log_sigmasq_inv_set, axes=1)
        # Finally, we can compute the emission potential with the mask
        T, D_latent, data, inputs, mask = self.T, self.D_latent, self.data, self.inputs, self.mask
        centered_data = data - inputs.dot(np.swapaxes(E_D, -2, -1))
        J_node = np.tensordot(mask * E_sigmasq_inv, E_CCT, axes=1).\
            reshape((T, D_latent, D_latent))
        h_node = (mask * centered_data * E_sigmasq_inv).dot(E_C)
        log_Z_node = -mask.sum(1) / 2. * np.log(2 * np.pi)
        log_Z_node += 1. / 2 * np.sum(mask * E_log_sigmasq_inv, axis=1)
        log_Z_node += -1. / 2 * np.sum(mask * centered_data ** 2 * E_sigmasq_inv, axis=1)
        return J_node, h_node, log_Z_node

    ### VBEM and Mean Field
    def _set_gaussian_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
        """Masked version of the expected-statistics helper; emission stats are
        reweighted by the observation mask."""
        if self.mask is None:
            return super(_SLDSStatesMaskedData, self). \
                _set_gaussian_expected_stats(smoothed_mus, smoothed_sigmas, E_xtp1_xtT)
        assert not np.isnan(E_xtp1_xtT).any()
        assert not np.isnan(smoothed_mus).any()
        assert not np.isnan(smoothed_sigmas).any()
        assert smoothed_mus.shape == (self.T, self.D_latent)
        assert smoothed_sigmas.shape == (self.T, self.D_latent, self.D_latent)
        assert E_xtp1_xtT.shape == (self.T - 1, self.D_latent, self.D_latent)
        # This is like LDSStates._set_expected_states but doesn't sum over time
        E_x_xT = smoothed_sigmas + smoothed_mus[:, :, None] * smoothed_mus[:, None, :]
        E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :]
        E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
        E_xu_xuT = np.concatenate((
            np.concatenate((E_x_xT, E_x_uT), axis=2),
            np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
            axis=1)
        E_xut_xutT = E_xu_xuT[:-1]
        E_xtp1_xtp1T = E_x_xT[1:]
        E_xtp1_utT = (smoothed_mus[1:, :, None] * self.inputs[:-1, None, :])
        E_xtp1_xutT = np.concatenate((E_xtp1_xtT, E_xtp1_utT), axis=-1)
        # Initial state stats
        self.E_init_stats = (self.smoothed_mus[0], E_x_xT[0], 1.)
        # Dynamics stats
        self.E_dynamics_stats = (E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, np.ones(self.T - 1))
        # Emission stats
        masked_data = self.data * self.mask
        E_yyT = masked_data ** 2
        E_yxT = masked_data[:, :, None] * self.smoothed_mus[:, None, :]
        E_yuT = masked_data[:, :, None] * self.inputs[:, None, :]
        E_yxuT = np.concatenate((E_yxT, E_yuT), axis=-1)
        # We can't just reuse E[xu \dot xu^T]. Now we need to reweight by mask.
        # Now E[xu \dot xu^T] must be T x D_obs x (D_latent + D_input) x (D_latent + D_input)
        E_xu_xuT_masked = self.mask[:, :, None, None] * E_xu_xuT[:, None, :, :]
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        E_n = self.mask.astype(float)
        self.E_emission_stats = (E_yyT, E_yxuT, E_xu_xuT_masked, E_n)
####################
# states classes #
####################
class HMMSLDSStatesPython(
    _SLDSStatesMaskedData,
    _SLDSStatesGibbs,
    _SLDSStatesVBEM,
    _SLDSStatesMeanField,
    HMMStatesPython):
    """SLDS states with an HMM discrete-state prior (pure-Python messages)."""
    pass
class HMMSLDSStatesEigen(
    _SLDSStatesMaskedData,
    _SLDSStatesGibbs,
    _SLDSStatesVBEM,
    _SLDSStatesMeanField,
    HMMStatesEigen):
    """SLDS states with an HMM discrete-state prior (Eigen-backed messages)."""
    pass
class HSMMSLDSStatesPython(
    _SLDSStatesMaskedData,
    _SLDSStatesGibbs,
    _SLDSStatesVBEM,
    _SLDSStatesMeanField,
    HSMMStatesPython):
    """SLDS states with an HSMM discrete-state prior (pure-Python messages)."""
    pass
class HSMMSLDSStatesEigen(
    _SLDSStatesMaskedData,
    _SLDSStatesGibbs,
    _SLDSStatesVBEM,
    _SLDSStatesMeanField,
    HSMMStatesEigen):
    """SLDS states with an HSMM discrete-state prior (Eigen-backed messages)."""
    pass
class GeoHSMMSLDSStates(
    _SLDSStatesMaskedData,
    _SLDSStatesGibbs,
    _SLDSStatesVBEM,
    _SLDSStatesMeanField,
    GeoHSMMStates):
    """SLDS states with a geometric-duration HSMM discrete-state prior."""
    pass
class _SLDSStatesCountData(_SLDSStatesMaskedData, _SLDSStatesGibbs):
    """SLDS states for count-valued observations.

    When the emission distributions are Polya-gamma (PG) logistic/count
    regressions, each observation is augmented with a PG auxiliary
    variable ``omega`` that makes the emission potential Gaussian in the
    continuous latent states, so the standard info-form message passing
    applies.  For any other emission distribution this class defers
    entirely to its parents.
    """

    def __init__(self, model, data=None, mask=None, **kwargs):
        super(_SLDSStatesCountData, self). \
            __init__(model, data=data, mask=mask, **kwargs)

        # Check if the emission matrix is a count regression
        import pypolyagamma as ppg
        from pypolyagamma.distributions import _PGLogisticRegressionBase
        if isinstance(self.emission_distns[0], _PGLogisticRegressionBase):
            self.has_count_data = True

            # Initialize the Polya-gamma samplers (one per OMP thread)
            num_threads = ppg.get_omp_num_threads()
            seeds = np.random.randint(2 ** 16, size=num_threads)
            self.ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds]

            # Initialize auxiliary variables, omega.
            # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float gives the same float64 dtype.
            self.omega = np.ones((self.T, self.D_emission), dtype=float)
        else:
            self.has_count_data = False

    @property
    def diagonal_noise(self):
        """True iff every emission distribution has diagonal observation noise."""
        from pypolyagamma.distributions import _PGLogisticRegressionBase
        from pybasicbayes.distributions import DiagonalRegression
        return all([isinstance(ed, (_PGLogisticRegressionBase, DiagonalRegression))
                    for ed in self.emission_distns])

    @property
    def sigma_obss(self):
        """Observation noise covariances; undefined for count models."""
        if self.has_count_data:
            raise Exception("Count data does not have sigma_obs")
        return super(_SLDSStatesCountData, self).sigma_obss

    @property
    def info_emission_params(self):
        """Information-form emission potentials (J_node, h_node, log_Z_node).

        For count data, use the Polya-gamma augmentation:

          log p(y_{tn} | x, om)
            = -0.5 * om_{tn} * (c_n^T x_t + d_n^T u_t + b_n)**2
              + kappa * (c_n * x_t + d_n^Tu_t + b_n)
            = -0.5 * om_{tn} * (x_t^T c_n c_n^T x_t
                                + 2 x_t^T c_n d_n^T u_t
                                + 2 x_t^T c_n b_n)
              + x_t^T (kappa_{tn} * c_n)
            = -0.5 x_t^T (c_n c_n^T * om_{tn}) x_t
              + x_t^T * (kappa_{tn} - d_n^T u_t * om_{tn} - b_n * om_{tn}) * c_n

        Thus
            J = (om * mask).dot(CCT)
            h = ((kappa - om * d) * mask).dot(C)
        """
        if not self.has_count_data:
            return super(_SLDSStatesCountData, self).info_emission_params

        T, D_latent, D_emission = self.T, self.D_latent, self.D_emission
        data, inputs, mask, omega = self.data, self.inputs, self.mask, self.omega
        if self.single_emission:
            emission_distn = self.emission_distns[0]
            C = emission_distn.A[:, :D_latent]
            D = emission_distn.A[:, D_latent:]
            b = emission_distn.b
            CCT = np.array([np.outer(cp, cp) for cp in C]). \
                reshape((D_emission, D_latent ** 2))

            J_node = np.dot(omega * mask, CCT)
            J_node = J_node.reshape((T, D_latent, D_latent))

            kappa = emission_distn.kappa_func(data)
            h_node = ((kappa - omega * b.T - omega * inputs.dot(D.T)) * mask).dot(C)
        else:
            C_set = [d.A[:, :self.D_latent] for d in self.emission_distns]
            D_set = [d.A[:, self.D_latent:] for d in self.emission_distns]
            b_set = [d.b for d in self.emission_distns]
            CCT_set = [np.array([np.outer(cp, cp) for cp in C]).
                       reshape((self.D_emission, self.D_latent**2))
                       for C in C_set]

            J_node = np.zeros((self.T, self.D_latent**2))
            h_node = np.zeros((self.T, self.D_latent))

            for i in range(len(self.emission_distns)):
                ti = np.where(self.stateseq == i)[0]
                J_obs = omega[ti] * mask[ti]
                kappa = self.emission_distns[i].kappa_func(data[ti])

                J_node[ti] = np.dot(J_obs, CCT_set[i])
                # BUGFIX: the input term must use omega[ti] (the rows assigned
                # to state i) like the neighboring terms; the original used the
                # full (T x D_emission) omega, which has the wrong shape.
                h_node[ti] = ((kappa
                               - omega[ti] * b_set[i].T
                               - omega[ti] * inputs[ti].dot(D_set[i].T)
                               ) * mask[ti]).dot(C_set[i])

        # See pylds/states.py for info on the log normalizer
        # terms for Polya-gamma augmented states
        return J_node, h_node, np.zeros(self.T)

    def log_likelihood(self):
        """Log likelihood of the data given the sampled latent states."""
        if self.has_count_data:
            if self.single_emission:
                ll = self.emission_distns[0].log_likelihood(
                    (np.hstack((self.gaussian_states, self.inputs)),
                     self.data), mask=self.mask).sum()
            else:
                ll = 0
                z, xs, u, y = self.stateseq, self.gaussian_states, self.inputs, self.data
                for k, ed in enumerate(self.emission_distns):
                    xuk = np.hstack((xs[z == k], u[z == k]))
                    yk = y[z == k]
                    # BUGFIX: restrict the mask to the rows assigned to state
                    # k so that it lines up with yk; the original passed the
                    # full (T x D_emission) mask.
                    mk = self.mask[z == k] if self.mask is not None else None
                    ll += ed.log_likelihood((xuk, yk), mask=mk).sum()
            return ll
        else:
            return super(_SLDSStatesCountData, self).log_likelihood()

    @staticmethod
    def empirical_rate(data, sigma=3.0):
        """
        Smooth count data to get an empirical rate
        """
        # BUGFIX: scipy.ndimage.filters was deprecated and later removed;
        # gaussian_filter1d lives directly in scipy.ndimage.
        from scipy.ndimage import gaussian_filter1d
        # The 0.001 floor keeps the returned rate strictly positive.
        return 0.001 + gaussian_filter1d(data.astype(float), sigma, axis=0)

    def resample(self, niter=1):
        """Run Gibbs sweeps over discrete, Gaussian, and PG auxiliary variables."""
        niter = self.niter if hasattr(self, 'niter') else niter
        for itr in range(niter):
            self.resample_discrete_states()
            self.resample_gaussian_states()

            if self.has_count_data:
                self.resample_auxiliary_variables()

    def resample_auxiliary_variables(self):
        """Resample omega | psi in place with the parallel PG sampler."""
        import pypolyagamma as ppg
        if self.single_emission:
            ed = self.emission_distns[0]
            C, D = ed.A[:, :self.D_latent], ed.A[:, self.D_latent:]
            psi = self.gaussian_states.dot(C.T) + self.inputs.dot(D.T) + ed.b.T
            b = ed.b_func(self.data)
        else:
            C_set = [d.A[:, :self.D_latent] for d in self.emission_distns]
            D_set = [d.A[:, self.D_latent:] for d in self.emission_distns]
            b_set = [d.b for d in self.emission_distns]

            psi = np.zeros((self.T, self.D_emission))
            b = np.zeros((self.T, self.D_emission))

            for i in range(len(self.emission_distns)):
                ti = np.where(self.stateseq == i)[0]
                psi[ti] = self.gaussian_states[ti].dot(C_set[i].T)
                psi[ti] += self.inputs[ti].dot(D_set[i].T)
                psi[ti] += b_set[i].T

                b[ti] = self.emission_distns[i].b_func(self.data[ti])

        # Draws directly into self.omega's buffer.
        ppg.pgdrawvpar(self.ppgs, b.ravel(), psi.ravel(), self.omega.ravel())

    def smooth(self):
        """Return the conditional mean of the observations given the states."""
        if not self.has_count_data:
            return super(_SLDSStatesCountData, self).smooth()

        X = np.column_stack((self.gaussian_states, self.inputs))
        if self.single_emission:
            ed = self.emission_distns[0]
            mean = ed.mean(X)
        else:
            mean = np.zeros((self.T, self.D_emission))
            for i, ed in enumerate(self.emission_distns):
                ti = np.where(self.stateseq == i)[0]
                mean[ti] = ed.mean(X[ti])

        return mean

    ### VBEM
    @property
    def vbem_info_emission_params(self):
        raise NotImplementedError("VBEM not implemented for Polya-gamma augmented states.")

    def vb_E_step(self):
        raise NotImplementedError("VBEM not implemented for Polya-gamma augmented states.")
class HMMCountSLDSStatesPython(
    _SLDSStatesCountData,
    HMMStatesPython):
    """Count-observation SLDS states with an HMM prior (pure Python)."""
    pass
class HMMCountSLDSStatesEigen(
    _SLDSStatesCountData,
    HMMStatesEigen):
    """Count-observation SLDS states with an HMM prior (Eigen backend)."""
    pass
class HSMMCountSLDSStatesPython(
    _SLDSStatesCountData,
    HSMMStatesPython):
    """Count-observation SLDS states with an HSMM prior (pure Python)."""
    pass
class HSMMCountSLDSStatesEigen(
    _SLDSStatesCountData,
    HSMMStatesEigen):
    """Count-observation SLDS states with an HSMM prior (Eigen backend)."""
    pass
class GeoHSMMCountSLDSStates(
    _SLDSStatesCountData,
    GeoHSMMStates):
    """Count-observation SLDS states with a geometric-duration HSMM prior."""
    pass
|
mattjj/pyhsmm-slds
|
pyslds/states.py
|
Python
|
mit
| 52,521
|
[
"Gaussian"
] |
d23e5f683e1519bb1181f0140feead1c3a0aec77093c9ef408602e2a4d352b76
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""some various utilities and helper classes, most of them used in the
main pylint class
"""
import sys
from warnings import warn
from os.path import dirname, basename, splitext, exists, isdir, join, normpath
from logilab.common.interface import implements
from logilab.common.modutils import modpath_from_file, get_module_files, \
file_from_modpath
from logilab.common.textutils import normalize_text
from logilab.common.configuration import rest_format_section
from logilab.common.ureports import Section
from logilab.astng import nodes, Module
from pylint.checkers import EmptyReport
from pylint.interfaces import IRawChecker
class UnknownMessage(Exception):
    """raised when an unregistered message id is encountered"""
# Map one-letter message category ids to their long names.
MSG_TYPES = {
    'I' : 'info',
    'C' : 'convention',
    'R' : 'refactor',
    'W' : 'warning',
    'E' : 'error',
    'F' : 'fatal'
    }
# Reverse mapping: long category name -> one-letter id.
MSG_TYPES_LONG = dict([(v, k) for k, v in MSG_TYPES.iteritems()])

# Exit-status bit flag associated with each message category.
MSG_TYPES_STATUS = {
    'I' : 0,
    'C' : 16,
    'R' : 8,
    'W' : 4,
    'E' : 2,
    'F' : 1
    }
# Display order of categories in reports (most severe first).
_MSG_ORDER = 'EWRCIF'
# Scope constants: where a message was enabled/disabled.
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1

# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = 'FR'
class WarningScope(object):
    """Constants describing whether a message is line-based or node-based."""
    LINE = 'line-based-msg'
    NODE = 'node-based-msg'
def sort_msgs(msgids):
    """sort message identifiers according to their category first"""
    # Bucket the ids by their leading category letter.
    by_category = {}
    for msgid in msgids:
        by_category.setdefault(msgid[0], []).append(msgid)
    # Emit categories in severity order, ids sorted within each bucket.
    ordered = []
    for category in _MSG_ORDER:
        ordered.extend(sorted(by_category.get(category, [])))
    return ordered
def get_module_and_frameid(node):
    """return the module name and the frame id in the module"""
    frame = node.frame()
    module, obj = '', []
    # Walk up the frame chain, collecting enclosing scope names until the
    # module frame is reached.
    while frame:
        if isinstance(frame, Module):
            module = frame.name
        else:
            obj.append(getattr(frame, 'name', '<lambda>'))
        try:
            frame = frame.parent.frame()
        except AttributeError:
            # reached the top of the tree
            frame = None
    # Names were collected innermost-first; reverse for dotted-path order.
    obj.reverse()
    return module, '.'.join(obj)
def category_id(id):
    """Return the one-letter message category id for `id`.

    Accepts either the short form (e.g. 'W', case-insensitive) or the long
    form (e.g. 'warning'); returns None for anything that is not a category.
    """
    id = id.upper()
    if id in MSG_TYPES:
        return id
    # BUGFIX: MSG_TYPES_LONG is keyed by the lower-case long names
    # ('warning', 'error', ...), so looking up the upper-cased id could
    # never match; normalize to lower case instead.
    return MSG_TYPES_LONG.get(id.lower())
class Message:
    """Description of a single lint message: id, template, help text,
    defining checker, symbolic name and scope.
    """
    def __init__(self, checker, msgid, msg, descr, symbol, scope):
        # msgid is e.g. 'W0613': one category letter, a 2-digit checker id
        # and a 2-digit message number.
        assert len(msgid) == 5, 'Invalid message id %s' % msgid
        assert msgid[0] in MSG_TYPES, \
            'Bad message type %s in %r' % (msgid[0], msgid)
        self.msgid = msgid
        self.msg = msg
        self.descr = descr
        self.checker = checker
        self.symbol = symbol
        self.scope = scope
class MessagesHandlerMixIn:
    """a mix-in class containing all the messages related methods for the main
    lint class
    """

    def __init__(self):
        # dictionary of registered messages
        self._messages = {}
        # dictionary from string symbolic id to Message object.
        self._messages_by_symbol = {}
        # global (configuration-level) enabled/disabled state per msgid
        self._msgs_state = {}
        # per-module state: msgid -> {line: bool}
        self._module_msgs_state = {}
        self._raw_module_msgs_state = {}
        self._msgs_by_category = {}
        # bit-or of MSG_TYPES_STATUS for every emitted message category
        self.msg_status = 0
        self._ignored_msgs = {}
        self._suppression_mapping = {}

    def register_messages(self, checker):
        """register a dictionary of messages

        Keys are message ids, values are a 2-uple with the message type and the
        message itself

        message ids should be a string of len 4, where the two first characters
        are the checker id and the two last the message id in this checker
        """
        msgs_dict = checker.msgs
        chkid = None
        for msgid, msg_tuple in msgs_dict.iteritems():
            # raw checkers produce line-based messages by default
            if implements(checker, IRawChecker):
                scope = WarningScope.LINE
            else:
                scope = WarningScope.NODE
            if len(msg_tuple) > 2:
                (msg, msgsymbol, msgdescr) = msg_tuple[:3]
                assert msgsymbol not in self._messages_by_symbol, \
                    'Message symbol %r is already defined' % msgsymbol
                if len(msg_tuple) > 3 and 'scope' in msg_tuple[3]:
                    scope = msg_tuple[3]['scope']
            else:
                # messages should have a symbol, but for backward compatibility
                # they may not.
                (msg, msgdescr) = msg_tuple
                warn("[pylint 0.26] description of message %s doesn't include "
                     "a symbolic name" % msgid, DeprecationWarning)
                msgsymbol = None
            # avoid duplicate / malformed ids
            assert msgid not in self._messages, \
                'Message id %r is already defined' % msgid
            assert chkid is None or chkid == msgid[1:3], \
                'Inconsistent checker part in message id %r' % msgid
            chkid = msgid[1:3]
            msg = Message(checker, msgid, msg, msgdescr, msgsymbol, scope)
            self._messages[msgid] = msg
            self._messages_by_symbol[msgsymbol] = msg
            self._msgs_by_category.setdefault(msgid[0], []).append(msgid)

    def get_message_help(self, msgid, checkerref=False):
        """return the help string for the given message id"""
        msg = self.check_message_id(msgid)
        desc = normalize_text(' '.join(msg.descr.split()), indent=' ')
        if checkerref:
            desc += ' This message belongs to the %s checker.' % \
                msg.checker.name
        title = msg.msg
        if msg.symbol:
            symbol_part = ' (%s)' % msg.symbol
        else:
            symbol_part = ''
        if title != '%s':
            # use the first line of the message template as a ReST title
            title = title.splitlines()[0]
            return ':%s%s: *%s*\n%s' % (msg.msgid, symbol_part, title, desc)
        return ':%s%s:\n%s' % (msg.msgid, symbol_part, desc)

    def disable(self, msgid, scope='package', line=None):
        """don't output message of the given id"""
        assert scope in ('package', 'module')
        # handle disable=all by disabling all categories
        if msgid == 'all':
            for msgid in MSG_TYPES:
                self.disable(msgid, scope, line)
            return
        # msgid is a category?
        catid = category_id(msgid)
        if catid is not None:
            for _msgid in self._msgs_by_category.get(catid):
                self.disable(_msgid, scope, line)
            return
        # msgid is a checker name?
        if msgid.lower() in self._checkers:
            for checker in self._checkers[msgid.lower()]:
                for _msgid in checker.msgs:
                    self.disable(_msgid, scope, line)
            return
        # msgid is report id?
        if msgid.lower().startswith('rp'):
            self.disable_report(msgid)
            return
        # msgid is a symbolic or numeric msgid.
        msg = self.check_message_id(msgid)
        if scope == 'module':
            assert line > 0
            try:
                self._module_msgs_state[msg.msgid][line] = False
            except KeyError:
                self._module_msgs_state[msg.msgid] = {line: False}
            # emit the "locally disabling" info message (I0011), except
            # for I0011 itself to avoid recursion
            if msgid != 'I0011':
                self.add_message('I0011', line=line, args=msg.msgid)
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = False
            # sync configuration object
            self.config.disable_msg = [mid for mid, val in msgs.iteritems()
                                       if not val]

    def enable(self, msgid, scope='package', line=None):
        """reenable message of the given id"""
        assert scope in ('package', 'module')
        catid = category_id(msgid)
        # msgid is a category?
        if catid is not None:
            for msgid in self._msgs_by_category.get(catid):
                self.enable(msgid, scope, line)
            return
        # msgid is a checker name?
        if msgid.lower() in self._checkers:
            for checker in self._checkers[msgid.lower()]:
                for msgid in checker.msgs:
                    self.enable(msgid, scope, line)
            return
        # msgid is report id?
        if msgid.lower().startswith('rp'):
            self.enable_report(msgid)
            return
        # msgid is a symbolic or numeric msgid.
        msg = self.check_message_id(msgid)
        if scope == 'module':
            assert line > 0
            try:
                self._module_msgs_state[msg.msgid][line] = True
            except KeyError:
                self._module_msgs_state[msg.msgid] = {line: True}
            # I0012: "locally enabling" info message
            self.add_message('I0012', line=line, args=msg.msgid)
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = True
            # sync configuration object
            self.config.enable = [mid for mid, val in msgs.iteritems() if val]

    def check_message_id(self, msgid):
        """returns the Message object for this message.

        msgid may be either a numeric or symbolic id.

        Raises UnknownMessage if the message id is not defined.
        """
        if msgid in self._messages_by_symbol:
            return self._messages_by_symbol[msgid]
        msgid = msgid.upper()
        try:
            return self._messages[msgid]
        except KeyError:
            raise UnknownMessage('No such message id %s' % msgid)

    def get_msg_display_string(self, msgid):
        """Generates a user-consumable representation of a message.

        Can be just the message ID or the ID and the symbol.
        """
        if self.config.symbols:
            symbol = self.check_message_id(msgid).symbol
            if symbol:
                msgid += '(%s)' % symbol
        return msgid

    def get_message_state_scope(self, msgid, line=None):
        """Returns the scope at which a message was enabled/disabled."""
        try:
            if line in self._module_msgs_state[msgid]:
                return MSG_STATE_SCOPE_MODULE
        except (KeyError, TypeError):
            return MSG_STATE_SCOPE_CONFIG

    def is_message_enabled(self, msgid, line=None):
        """return true if the message associated to the given message id is
        enabled

        msgid may be either a numeric or symbolic message id.
        """
        if msgid in self._messages_by_symbol:
            msgid = self._messages_by_symbol[msgid].msgid
        if line is None:
            return self._msgs_state.get(msgid, True)
        # per-line module state overrides the configuration state
        try:
            return self._module_msgs_state[msgid][line]
        except (KeyError, TypeError):
            return self._msgs_state.get(msgid, True)

    def handle_ignored_message(self, state_scope, msgid, line, node, args):
        """Report an ignored message.

        state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
        depending on whether the message was disabled locally in the module,
        or globally. The other arguments are the same as for add_message.
        """
        if state_scope == MSG_STATE_SCOPE_MODULE:
            try:
                orig_line = self._suppression_mapping[(msgid, line)]
                self._ignored_msgs.setdefault((msgid, orig_line), set()).add(line)
            except KeyError:
                pass

    def add_message(self, msgid, line=None, node=None, args=None):
        """add the message corresponding to the given id.

        If provided, msg is expanded using args

        astng checkers should provide the node argument, raw checkers should
        provide the line argument.
        """
        msg_info = self._messages[msgid]
        # Fatal messages and reports are special, the node/scope distinction
        # does not apply to them.
        if msgid[0] not in _SCOPE_EXEMPT:
            if msg_info.scope == WarningScope.LINE:
                assert node is None and line is not None, (
                    'Message %s must only provide line, got line=%s, node=%s' % (msgid, line, node))
            elif msg_info.scope == WarningScope.NODE:
                # Node-based warnings may provide an override line.
                assert node is not None, 'Message %s must provide Node, got None'
        if line is None and node is not None:
            line = node.fromlineno
        if hasattr(node, 'col_offset'):
            col_offset = node.col_offset # XXX measured in bytes for utf-8, divide by two for chars?
        else:
            col_offset = None
        # should this message be displayed
        if not self.is_message_enabled(msgid, line):
            self.handle_ignored_message(
                self.get_message_state_scope(msgid, line), msgid, line, node, args)
            return
        # update stats
        msg_cat = MSG_TYPES[msgid[0]]
        self.msg_status |= MSG_TYPES_STATUS[msgid[0]]
        self.stats[msg_cat] += 1
        self.stats['by_module'][self.current_name][msg_cat] += 1
        try:
            self.stats['by_msg'][msgid] += 1
        except KeyError:
            self.stats['by_msg'][msgid] = 1
        # expand message ?
        msg = msg_info.msg
        if args:
            msg %= args
        # get module and object
        if node is None:
            module, obj = self.current_name, ''
            path = self.current_file
        else:
            module, obj = get_module_and_frameid(node)
            path = node.root().file
        # add the message
        self.reporter.add_message(msgid, (path, module, obj, line or 1, col_offset or 0), msg)

    def help_message(self, msgids):
        """display help messages for the given message identifiers"""
        for msgid in msgids:
            try:
                print self.get_message_help(msgid, True)
                print
            except UnknownMessage, ex:
                print ex
                print
                continue

    def print_full_documentation(self):
        """output a full documentation in ReST format"""
        by_checker = {}
        for checker in self.get_checkers():
            if checker.name == 'master':
                # the master checker holds the global options
                prefix = 'Main '
                print "Options"
                print '-------\n'
                if checker.options:
                    for section, options in checker.options_by_section():
                        if section is None:
                            title = 'General options'
                        else:
                            title = '%s options' % section.capitalize()
                        print title
                        print '~' * len(title)
                        rest_format_section(sys.stdout, None, options)
                        print
            else:
                # aggregate options/messages/reports of same-named checkers
                try:
                    by_checker[checker.name][0] += checker.options_and_values()
                    by_checker[checker.name][1].update(checker.msgs)
                    by_checker[checker.name][2] += checker.reports
                except KeyError:
                    by_checker[checker.name] = [list(checker.options_and_values()),
                                                dict(checker.msgs),
                                                list(checker.reports)]
        for checker, (options, msgs, reports) in by_checker.iteritems():
            prefix = ''
            title = '%s checker' % checker
            print title
            print '-' * len(title)
            print
            if options:
                title = 'Options'
                print title
                print '~' * len(title)
                rest_format_section(sys.stdout, None, options)
                print
            if msgs:
                title = ('%smessages' % prefix).capitalize()
                print title
                print '~' * len(title)
                for msgid in sort_msgs(msgs.iterkeys()):
                    print self.get_message_help(msgid, False)
                    print
            if reports:
                title = ('%sreports' % prefix).capitalize()
                print title
                print '~' * len(title)
                for report in reports:
                    print ':%s: %s' % report[:2]
                print
            print

    def list_messages(self):
        """output full messages list documentation in ReST format"""
        msgids = []
        for checker in self.get_checkers():
            for msgid in checker.msgs.iterkeys():
                msgids.append(msgid)
        msgids.sort()
        for msgid in msgids:
            print self.get_message_help(msgid, False)
            print
class ReportsHandlerMixIn:
    """a mix-in class containing all the reports and stats manipulation
    related methods for the main lint class
    """
    def __init__(self):
        # checker -> list of (reportid, title, callback)
        self._reports = {}
        # reportid -> enabled flag
        self._reports_state = {}

    def register_report(self, reportid, r_title, r_cb, checker):
        """register a report

        reportid is the unique identifier for the report
        r_title the report's title
        r_cb the method to call to make the report
        checker is the checker defining the report
        """
        reportid = reportid.upper()
        self._reports.setdefault(checker, []).append( (reportid, r_title, r_cb) )

    def enable_report(self, reportid):
        """enable the report of the given id"""
        reportid = reportid.upper()
        self._reports_state[reportid] = True

    def disable_report(self, reportid):
        """disable the report of the given id"""
        reportid = reportid.upper()
        self._reports_state[reportid] = False

    def report_is_enabled(self, reportid):
        """return true if the report associated to the given identifier is
        enabled
        """
        # reports are enabled by default
        return self._reports_state.get(reportid, True)

    def make_reports(self, stats, old_stats):
        """render registered reports"""
        sect = Section('Report',
                       '%s statements analysed.'% (self.stats['statement']))
        for checker in self._reports:
            for reportid, r_title, r_cb in self._reports[checker]:
                if not self.report_is_enabled(reportid):
                    continue
                report_sect = Section(r_title)
                try:
                    r_cb(report_sect, stats, old_stats)
                except EmptyReport:
                    # a checker may signal it has nothing to report
                    continue
                report_sect.report_id = reportid
                sect.append(report_sect)
        return sect

    def add_stats(self, **kwargs):
        """add some stats entries to the statistic dictionary
        raise an AssertionError if there is a key conflict
        """
        for key, value in kwargs.iteritems():
            # a trailing underscore lets callers pass reserved words as keys
            if key[-1] == '_':
                key = key[:-1]
            assert key not in self.stats
            self.stats[key] = value
        return self.stats
def expand_modules(files_or_modules, black_list):
    """take a list of files/modules/packages and return the list of tuple
    (file, module name) which have to be actually checked
    """
    result = []
    errors = []
    for something in files_or_modules:
        if exists(something):
            # this is a file or a directory
            try:
                modname = '.'.join(modpath_from_file(something))
            except ImportError:
                # fall back to the bare file name as the module name
                modname = splitext(basename(something))[0]
            if isdir(something):
                filepath = join(something, '__init__.py')
            else:
                filepath = something
        else:
            # suppose it's a module or package
            modname = something
            try:
                filepath = file_from_modpath(modname.split('.'))
                if filepath is None:
                    # F0003: unable to load the module
                    errors.append( {'key' : 'F0003', 'mod': modname} )
                    continue
            except (ImportError, SyntaxError), ex:
                # FIXME p3k : the SyntaxError is a Python bug and should be
                # removed as soon as possible http://bugs.python.org/issue10588
                errors.append( {'key': 'F0001', 'mod': modname, 'ex': ex} )
                continue
        filepath = normpath(filepath)
        result.append( {'path': filepath, 'name': modname,
                        'basepath': filepath, 'basename': modname} )
        # if the given target is a package, also collect its submodules
        if not (modname.endswith('.__init__') or modname == '__init__') \
                and '__init__.py' in filepath:
            for subfilepath in get_module_files(dirname(filepath), black_list):
                if filepath == subfilepath:
                    continue
                submodname = '.'.join(modpath_from_file(subfilepath))
                result.append( {'path': subfilepath, 'name': submodname,
                                'basepath': filepath, 'basename': modname} )
    return result, errors
class PyLintASTWalker(object):
    """Dispatch visit/leave events to checker methods while walking an AST."""

    def __init__(self, linter):
        # callbacks per node types
        self.nbstatements = 1
        self.visit_events = {}
        self.leave_events = {}
        self.linter = linter

    def add_checker(self, checker):
        """walk to the checker's dir and collect visit and leave methods"""
        # XXX : should be possible to merge needed_checkers and add_checker
        vcids = set()
        lcids = set()
        visits = self.visit_events
        leaves = self.leave_events
        msgs = self.linter._msgs_state
        for member in dir(checker):
            # 'visit_' and 'leave_' are both 6 characters long, so the node
            # class id is everything after that prefix
            cid = member[6:]
            if cid == 'default':
                continue
            if member.startswith('visit_'):
                v_meth = getattr(checker, member)
                # don't use visit_methods with no activated message:
                if hasattr(v_meth, 'checks_msgs'):
                    if not any(msgs.get(m, True) for m in v_meth.checks_msgs):
                        continue
                visits.setdefault(cid, []).append(v_meth)
                vcids.add(cid)
            elif member.startswith('leave_'):
                l_meth = getattr(checker, member)
                # don't use leave_methods with no activated message:
                if hasattr(l_meth, 'checks_msgs'):
                    if not any(msgs.get(m, True) for m in l_meth.checks_msgs):
                        continue
                leaves.setdefault(cid, []).append(l_meth)
                lcids.add(cid)
        visit_default = getattr(checker, 'visit_default', None)
        if visit_default:
            # register the fallback for every node type without a dedicated
            # visit method
            for cls in nodes.ALL_NODE_CLASSES:
                cid = cls.__name__.lower()
                if cid not in vcids:
                    visits.setdefault(cid, []).append(visit_default)
        # for now we have no "leave_default" method in Pylint

    def walk(self, astng):
        """call visit events of astng checkers for the given node, recurse on
        its children, then leave events.
        """
        cid = astng.__class__.__name__.lower()
        if astng.is_statement:
            self.nbstatements += 1
        # generate events for this node on each checker
        for cb in self.visit_events.get(cid, ()):
            cb(astng)
        # recurse on children
        for child in astng.get_children():
            self.walk(child)
        for cb in self.leave_events.get(cid, ()):
            cb(astng)
|
hpfem/agros2d
|
resources/python/pylint/utils.py
|
Python
|
gpl-2.0
| 23,927
|
[
"VisIt"
] |
03416be1df7fa55814bdbd30734b6cd247bfe895eb9ac7959dba393e17ab2dc9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PyNNLess -- Yet Another PyNN Abstraction Layer
# Copyright (C) 2015 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Builds and runs a neuron synfire chain.
"""
import sys
import common.setup # Common example code (checks command line parameters)
import common.params # Parameters for the models which work with all systems
import common.utils # Output functions
import pynnless as pynl
# Create a new pl instance with the given backend
backend = sys.argv[1]
sim = pynl.PyNNLess(backend)

# Build and run the synfire chain
print("Simulating network...")
synfire_len = 100  # number of neurons in the chain
w_syn = 0.024      # synaptic weight used for every connection
# Network layout: a single spike source (population 0) kicks off neuron 0
# of the chain (population 1); each neuron excites its successor, and the
# last neuron closes the ring back to neuron 0.
# NOTE: xrange implies this script targets Python 2.
res = sim.run(pynl.Network()
        .add_source(spike_times=[10.0])
        .add_population(
            pynl.IfCondExpPopulation(
                count=synfire_len,
                params=common.params.IF_cond_exp)
            .record_spikes()
        )
        .add_connections([
            ((0, 0), (1, 0), w_syn, 0.0),
            ((1, synfire_len - 1), (1, 0), w_syn, 0.0)
        ] + [((1, i - 1), (1, i), w_syn, 0.0) for i in xrange(1, synfire_len)]),
        1000.0)
print("Done!")

# Write the spike times for each neuron to disk
print("Writing spike times to " + common.setup.outfile)
common.utils.write_spike_times(common.setup.outfile, res[1]["spikes"])
|
hbp-sanncs/pynnless
|
examples/synfire_chain.py
|
Python
|
gpl-3.0
| 1,950
|
[
"NEURON"
] |
c3a407fadbb060d669967040e2af639efb282d3d792607735e2c45a29b012e14
|
import numpy
import mlpy
import time
import scipy
import os
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioBasicIO
import matplotlib.pyplot as plt
from scipy.spatial import distance
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.lda import LDA
import csv
import os.path
import sklearn
import sklearn.hmm
import cPickle
import glob
""" General utility functions """
def smoothMovingAvg(inputSignal, windowLen=11):
    """Smooth a 1-D signal with a centered moving average.

    The signal is extended at both ends with mirrored copies so the
    returned array has the same length as the input.

    ARGUMENTS:
        - inputSignal:  1-D numpy array
        - windowLen:    averaging window length, in samples
    RETURNS:
        - the smoothed signal (same length as inputSignal)
    """
    windowLen = int(windowLen)
    if inputSignal.ndim != 1:
        # BUGFIX: the original raised ValueError("") with an empty,
        # uninformative message.
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if inputSignal.size < windowLen:
        raise ValueError("Input vector needs to be bigger than window size.")
    if windowLen < 3:
        # Window too small to smooth anything; return the signal untouched.
        return inputSignal
    # Mirror-pad the signal at both ends to reduce edge artifacts.
    s = numpy.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1], inputSignal, 2*inputSignal[-1]-inputSignal[-1:-windowLen:-1]]
    w = numpy.ones(windowLen, 'd')
    y = numpy.convolve(w/w.sum(), s, mode='same')
    # Strip the padding so the output length matches the input length.
    return y[windowLen:-windowLen+1]
def selfSimilarityMatrix(featureVectors):
    '''
    Compute the cosine self-similarity matrix of a feature-vector sequence.
    ARGUMENTS:
     - featureVectors:     a numpy matrix (nDims x nVectors) whose i-th column
                           corresponds to the i-th feature vector
    RETURNS:
     - S:                  the self-similarity matrix (nVectors x nVectors)
    '''
    [nDims, nVectors] = featureVectors.shape
    # Normalize the features (normalizeFeatures works on row-wise samples).
    normalized, MEAN, STD = aT.normalizeFeatures([featureVectors.T])
    normalized = normalized[0].T
    # Cosine distance -> similarity: S = 1 - D.
    return 1.0 - distance.squareform(distance.pdist(normalized.T, 'cosine'))
def flags2segs(Flags, window):
    '''
    ARGUMENTS:
     - Flags:     a sequence of class flags (per time window)
     - window:    window duration (in seconds)

    RETURNS:
     - segs:      a sequence of segment's limits: segs[i,0] is start and
                  segs[i,1] are start and end point of segment i
     - classes:   a sequence of class flags: class[i] is the class ID of
                  the i-th segment
    '''
    segment_ends = []
    classes = []
    cur = 0
    cur_label = Flags[cur]
    # Scan the flag sequence, closing a segment each time the label changes
    # (or the end of the sequence is reached).
    while cur < len(Flags) - 1:
        prev_label = cur_label
        boundary_found = False
        while not boundary_found:
            cur += 1
            candidate = Flags[cur]
            if candidate != cur_label or cur == len(Flags) - 1:
                boundary_found = True
                cur_label = Flags[cur]
                segment_ends.append(cur * window)
                classes.append(prev_label)
    # Convert the list of segment end times into (start, end) rows; each
    # segment starts where the previous one ended (the first starts at 0).
    segs = numpy.zeros((len(segment_ends), 2))
    for i in range(len(segment_ends)):
        if i > 0:
            segs[i, 0] = segment_ends[i - 1]
        segs[i, 1] = segment_ends[i]
    return (segs, classes)
def segs2flags(segStart, segEnd, segLabel, winSize):
    '''
    This function converts segment endpoints and respective segment
    labels to fix-sized class labels.
    ARGUMENTS:
     - segStart:    segment start points (in seconds)
     - segEnd:      segment endpoints (in seconds)
     - segLabel:    segment labels
     - winSize:     fix-sized window (in seconds)

    RETURNS:
     - flags:       numpy array of class indices
     - classNames:  list of classnames (strings)
    '''
    flags = []
    classNames = list(set(segLabel))
    # Walk window centers across the labelled region.
    center = winSize / 2.0
    while center < segEnd[-1]:
        # Find the first segment containing this window center; if none
        # matches, the last examined segment's label is used (original
        # fall-through behavior).
        for idx in range(len(segStart)):
            if segStart[idx] < center <= segEnd[idx]:
                break
        flags.append(classNames.index(segLabel[idx]))
        center += winSize
    return numpy.array(flags), classNames
def readSegmentGT(gtFile):
    '''
    This function reads a segmentation ground truth file, following a simple CSV format with the following columns:
    <segment start>,<segment end>,<class label>
    ARGUMENTS:
     - gtFile:       the path of the CSV segment file
    RETURNS:
     - segStart:     a numpy array of segments' start positions
     - segEnd:       a numpy array of segments' ending positions
     - segLabel:     a list of respective class labels (strings)
    '''
    segStart = []
    segEnd = []
    segLabel = []
    # Open in text mode (the csv module requires it on Python 3; identical to
    # binary mode on Python 2/Unix) and close the handle deterministically —
    # the previous version leaked the file descriptor.
    with open(gtFile, "r") as f:
        reader = csv.reader(f, delimiter=',')
        for row in reader:
            if len(row) == 3:           # silently skip malformed rows
                segStart.append(float(row[0]))
                segEnd.append(float(row[1]))
                segLabel.append(row[2])
    return numpy.array(segStart), numpy.array(segEnd), segLabel
def plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, ONLY_EVALUATE=False):
    '''
    This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method.
    It also computes the overall accuracy achieved by the respective method if ground-truth is available.
    ARGUMENTS:
        - flagsInd:       numpy array of predicted class indices (one per mid-term window)
        - flagsIndGT:     numpy array of ground-truth class indices (empty array if unavailable)
        - classNames:     list of class names (strings)
        - mtStep:         mid-term window step (in seconds)
        - ONLY_EVALUATE:  if True, skip plotting/printing and only compute the accuracy
    RETURNS:
        - accuracy:       window-level accuracy in [0, 1], or -1 if no ground truth was given
    '''
    flags = [classNames[int(f)] for f in flagsInd]
    (segs, classes) = flags2segs(flags, mtStep)
    # accuracy is computed over the overlapping prefix of prediction and GT
    minLength = min(flagsInd.shape[0], flagsIndGT.shape[0])
    if minLength > 0:
        accuracy = numpy.count_nonzero(flagsInd[0:minLength] == flagsIndGT[0:minLength]) / float(minLength)
    else:
        accuracy = -1
    if not ONLY_EVALUATE:
        Duration = segs[-1, 1]
        # per-class statistics: total duration, percentage of total, average segment duration
        SPercentages = numpy.zeros((len(classNames), 1))
        Percentages = numpy.zeros((len(classNames), 1))
        AvDurations = numpy.zeros((len(classNames), 1))
        for iSeg in range(segs.shape[0]):
            SPercentages[classNames.index(classes[iSeg])] += (segs[iSeg, 1]-segs[iSeg, 0])
        for i in range(SPercentages.shape[0]):
            Percentages[i] = 100.0 * SPercentages[i] / Duration
            S = sum(1 for c in classes if c == classNames[i])
            if S > 0:
                AvDurations[i] = SPercentages[i] / S
            else:
                AvDurations[i] = 0.0
        for i in range(Percentages.shape[0]):
            print classNames[i], Percentages[i], AvDurations[i]
        font = {'family': 'fantasy', 'size': 10}
        plt.rc('font', **font)
        # top subplot: predicted class sequence over time (GT overlaid as dashed red)
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax1.set_yticks(numpy.array(range(len(classNames))))
        ax1.axis((0, Duration, -1, len(classNames)))
        ax1.set_yticklabels(classNames)
        ax1.plot(numpy.array(range(len(flagsInd))) * mtStep + mtStep / 2.0, flagsInd)
        if flagsIndGT.shape[0] > 0:
            ax1.plot(numpy.array(range(len(flagsIndGT))) * mtStep + mtStep / 2.0, flagsIndGT + 0.05, '--r')
        plt.xlabel("time (seconds)")
        if accuracy >= 0:
            plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        # bottom-left subplot: percentage of total duration per class
        ax2 = fig.add_subplot(223)
        plt.title("Classes percentage durations")
        ax2.axis((0, len(classNames) + 1, 0, 100))
        ax2.set_xticks(numpy.array(range(len(classNames) + 1)))
        ax2.set_xticklabels([" "] + classNames)
        ax2.bar(numpy.array(range(len(classNames))) + 0.5, Percentages)
        # bottom-right subplot: average segment duration per class
        ax3 = fig.add_subplot(224)
        plt.title("Segment average duration per class")
        ax3.axis((0, len(classNames)+1, 0, AvDurations.max()))
        ax3.set_xticks(numpy.array(range(len(classNames) + 1)))
        ax3.set_xticklabels([" "] + classNames)
        ax3.bar(numpy.array(range(len(classNames))) + 0.5, AvDurations)
        fig.tight_layout()
        plt.show()
    return accuracy
def evaluateSpeakerDiarization(flags, flagsGT):
    '''
    Evaluate a speaker diarization result against ground truth.
    ARGUMENTS:
        - flags:    numpy array of predicted cluster indices (one per window)
        - flagsGT:  numpy array of ground-truth speaker indices (one per window)
    RETURNS:
        - purityClusterMean:  cluster purity, averaged weighted by cluster size
        - puritySpeakerMean:  speaker purity, averaged weighted by speaker size
    '''
    # compare only the overlapping prefix of the two sequences
    nWin = min(flags.shape[0], flagsGT.shape[0])
    flags = flags[0:nWin]
    flagsGT = flagsGT[0:nWin]
    predIds = numpy.unique(flags)
    refIds = numpy.unique(flagsGT)
    # contingency table: rows = predicted clusters, columns = reference speakers
    contingency = numpy.zeros((predIds.shape[0], refIds.shape[0]))
    for k in range(nWin):
        row = int(numpy.nonzero(predIds == flags[k])[0])
        col = int(numpy.nonzero(refIds == flagsGT[k])[0])
        contingency[row, col] += 1.0
    rowTotals = numpy.sum(contingency, axis=1)    # windows per predicted cluster
    colTotals = numpy.sum(contingency, axis=0)    # windows per reference speaker
    total = numpy.sum(contingency)
    # purity of a cluster (speaker) = its dominant cell over its total
    purityCluster = numpy.max(contingency, axis=1) / rowTotals
    puritySpeaker = numpy.max(contingency, axis=0) / colTotals
    # size-weighted averages
    purityClusterMean = numpy.sum(purityCluster * rowTotals) / total
    puritySpeakerMean = numpy.sum(puritySpeaker * colTotals) / total
    return purityClusterMean, puritySpeakerMean
def trainHMM_computeStatistics(features, labels):
    '''
    This function computes the statistics used to train an HMM joint segmentation-classification model
    using a sequence of sequential features and respective labels
    ARGUMENTS:
        - features:    a numpy matrix of feature vectors (numOfDimensions x numOfWindows)
        - labels:      a numpy array of class indices (numOfWindows x 1)
    RETURNS:
        - startprob:   vector of prior class probabilities (numOfClasses x 1)
        - transmat:    transition matrix (numOfClasses x numOfClasses)
        - means:       class-conditional means (numOfClasses x numOfDimensions)
        - cov:         class-conditional standard deviations (numOfClasses x numOfDimensions);
                       diagonal covariance is assumed
    '''
    uLabels = numpy.unique(labels)
    nComps = len(uLabels)
    nFeatures = features.shape[0]
    # truncate labels if there are more labels than feature vectors
    if features.shape[1] < labels.shape[0]:
        print "trainHMM warning: number of short-term feature vectors must be greater or equal to the labels length!"
        labels = labels[0:features.shape[1]]
    # compute prior probabilities:
    startprob = numpy.zeros((nComps,))
    for i, u in enumerate(uLabels):
        startprob[i] = numpy.count_nonzero(labels == u)
    startprob = startprob / startprob.sum()        # normalize prior probabilities
    # compute transition matrix (counts of label i followed by label j):
    transmat = numpy.zeros((nComps, nComps))
    for i in range(labels.shape[0]-1):
        transmat[int(labels[i]), int(labels[i + 1])] += 1
    for i in range(nComps):     # normalize rows of transition matrix:
        transmat[i, :] /= transmat[i, :].sum()
    # per-class mean of the feature vectors
    means = numpy.zeros((nComps, nFeatures))
    for i in range(nComps):
        means[i, :] = numpy.matrix(features[:, numpy.nonzero(labels == uLabels[i])[0]].mean(axis=1))
    # per-class standard deviation (diagonal "covariance")
    cov = numpy.zeros((nComps, nFeatures))
    for i in range(nComps):
        #cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==uLabels[i])[0]])  # use this lines if HMM using full gaussian distributions are to be used!
        cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == uLabels[i])[0]], axis=1)
    return startprob, transmat, means, cov
def trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
    '''
    This function trains a HMM model for segmentation-classification using a single annotated audio file
    ARGUMENTS:
     - wavFile:        the path of the audio filename
     - gtFile:         the path of the ground truth filename
                       (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row
     - hmmModelName:   the name of the HMM model to be stored
     - mtWin:          mid-term window size
     - mtStep:         mid-term window step
    RETURNS:
     - hmm:            an object to the resulting HMM
     - classNames:     a list of classNames
    After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
    (pickled in that exact order; hmmSegmentation relies on this order when loading).
    '''
    [segStart, segEnd, segLabels] = readSegmentGT(gtFile)                   # read ground truth data
    flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep)     # convert to fix-sized sequence of flags
    [Fs, x] = audioBasicIO.readAudioFile(wavFile)                           # read audio data
    #F = aF.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
    [F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))  # feature extraction
    startprob, transmat, means, cov = trainHMM_computeStatistics(F, flags)  # compute HMM statistics (priors, transition matrix, etc)
    # NOTE(review): sklearn.hmm was removed from recent scikit-learn releases
    # (moved to the separate hmmlearn package) — confirm the pinned version.
    hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat)  # hmm training
    hmm.means_ = means
    hmm.covars_ = cov
    fo = open(hmmModelName, "wb")                                           # output to file
    cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(classNames, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    fo.close()
    return hmm, classNames
def trainHMM_fromDir(dirPath, hmmModelName, mtWin, mtStep):
    '''
    This function trains a HMM model for segmentation-classification using a directory where WAV files and .segments (ground-truth files) are stored
    ARGUMENTS:
     - dirPath:        the path of the data diretory
     - hmmModelName:   the name of the HMM model to be stored
     - mtWin:          mid-term window size
     - mtStep:         mid-term window step
    RETURNS:
     - hmm:            an object to the resulting HMM
     - classNames:     a list of classNames
    After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
    (pickled in that exact order; hmmSegmentation relies on this order when loading).
    '''
    flagsAll = numpy.array([])      # concatenated label sequence over all files
    classesAll = []                 # union of class names over all files
    for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')):           # for each WAV file
        wavFile = f
        gtFile = f.replace('.wav', '.segments')                             # open for annotated file
        if not os.path.isfile(gtFile):                                      # if current WAV file does not have annotation -> skip
            continue
        [segStart, segEnd, segLabels] = readSegmentGT(gtFile)               # read GT data
        flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
        for c in classNames:                                                # update classnames:
            if c not in classesAll:
                classesAll.append(c)
        [Fs, x] = audioBasicIO.readAudioFile(wavFile)                       # read audio data
        [F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))  # feature extraction
        # trim features and labels to the same length before concatenating
        lenF = F.shape[1]
        lenL = len(flags)
        MIN = min(lenF, lenL)
        F = F[:, 0:MIN]
        flags = flags[0:MIN]
        # remap per-file class indices to the global (classesAll) index space
        flagsNew = []
        for j, fl in enumerate(flags):                                      # append features and labels
            flagsNew.append(classesAll.index(classNames[flags[j]]))
        flagsAll = numpy.append(flagsAll, numpy.array(flagsNew))
        if i == 0:
            Fall = F
        else:
            Fall = numpy.concatenate((Fall, F), axis=1)
    startprob, transmat, means, cov = trainHMM_computeStatistics(Fall, flagsAll)  # compute HMM statistics
    hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat)  # train HMM
    hmm.means_ = means
    hmm.covars_ = cov
    fo = open(hmmModelName, "wb")                                           # save HMM model
    cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(classesAll, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    fo.close()
    return hmm, classesAll
def hmmSegmentation(wavFileName, hmmModelName, PLOT=False, gtFileName=""):
[Fs, x] = audioBasicIO.readAudioFile(wavFileName) # read audio data
try:
fo = open(hmmModelName, "rb")
except IOError:
print "didn't find file"
return
try:
hmm = cPickle.load(fo)
classesAll = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
except:
fo.close()
fo.close()
#Features = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs); # feature extraction
[Features, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))
flagsInd = hmm.predict(Features.T) # apply model
#for i in range(len(flagsInd)):
# if classesAll[flagsInd[i]]=="silence":
# flagsInd[i]=classesAll.index("speech")
# plot results
if os.path.isfile(gtFileName):
[segStart, segEnd, segLabels] = readSegmentGT(gtFileName)
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)
flagsGTNew = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classesAll:
flagsGTNew.append(classesAll.index(classNamesGT[flagsGT[j]]))
else:
flagsGTNew.append(-1)
flagsIndGT = numpy.array(flagsGTNew)
else:
flagsIndGT = numpy.array([])
acc = plotSegmentationResults(flagsInd, flagsIndGT, classesAll, mtStep, not PLOT)
if acc >= 0:
print "Overall Accuracy: {0:.2f}".format(acc)
return flagsInd, classesAll, acc
def mtFileClassification(inputFile, modelName, modelType, plotResults=False, gtFile=""):
    '''
    This function performs mid-term classification of an audio stream.
    Towards this end, supervised knowledge is used, i.e. a pre-trained classifier.
    ARGUMENTS:
        - inputFile:        path of the input WAV file
        - modelName:        name of the classification model
        - modelType:        svm or knn depending on the classifier type
        - plotResults:      True if results are to be plotted using matplotlib along with a set of statistics
        - gtFile:           optional ground-truth .segments file used for evaluation
    RETURNS:
        - flagsInd:         numpy array of predicted class indices (one per mid-term window)
        - classNames:       list of class names
        - acc:              accuracy against ground truth (-1 if none provided)
        (returns (-1, -1, -1) on any input error)
    '''
    if not os.path.isfile(modelName):
        print "mtFileClassificationError: input modelType not found!"
        return (-1, -1, -1)
    # Load classifier:
    if modelType == 'svm':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
    elif modelType == 'knn':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
    # beat features are long-term and undefined on fixed mid-term windows
    if computeBEAT:
        print "Model " + modelName + " contains long-term music features (beat etc) and cannot be used in segmentation"
        return (-1, -1, -1)
    [Fs, x] = audioBasicIO.readAudioFile(inputFile)        # load input file
    if Fs == -1:                                           # could not read file
        return (-1, -1, -1)
    x = audioBasicIO.stereo2mono(x)                        # convert stereo (if) to mono
    Duration = len(x) / Fs
    # mid-term feature extraction:
    [MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
    flags = []
    Ps = []
    flagsInd = []
    for i in range(MidTermFeatures.shape[1]):              # for each feature vector (i.e. for each fix-sized segment):
        curFV = (MidTermFeatures[:, i] - MEAN) / STD       # normalize current feature vector
        [Result, P] = aT.classifierWrapper(Classifier, modelType, curFV)    # classify vector
        flagsInd.append(Result)
        flags.append(classNames[int(Result)])              # update class label matrix
        Ps.append(numpy.max(P))                            # update probability matrix
    flagsInd = numpy.array(flagsInd)
    # 1-window smoothing: isolated windows take the label of their neighbors
    for i in range(1, len(flagsInd) - 1):
        if flagsInd[i-1] == flagsInd[i + 1]:
            flagsInd[i] = flagsInd[i + 1]
    (segs, classes) = flags2segs(flags, mtStep)            # convert fix-sized flags to segments and classes
    segs[-1] = len(x) / float(Fs)                          # extend the last segment to the true signal end
    # Load grount-truth:
    if os.path.isfile(gtFile):
        [segStartGT, segEndGT, segLabelsGT] = readSegmentGT(gtFile)
        flagsGT, classNamesGT = segs2flags(segStartGT, segEndGT, segLabelsGT, mtStep)
        flagsIndGT = []
        for j, fl in enumerate(flagsGT):                   # "align" labels with GT
            if classNamesGT[flagsGT[j]] in classNames:
                flagsIndGT.append(classNames.index(classNamesGT[flagsGT[j]]))
            else:
                flagsIndGT.append(-1)                      # class unknown to the model
        flagsIndGT = numpy.array(flagsIndGT)
    else:
        flagsIndGT = numpy.array([])
    acc = plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, not plotResults)
    if acc >= 0:
        print "Overall Accuracy: {0:.3f}".format(acc)
    return (flagsInd, classNames, acc)
def evaluateSegmentationClassificationDir(dirName, modelName, methodName):
    '''
    Batch-evaluate segmentation-classification over a directory of WAV files.
    Each WAV file is expected to have a corresponding .segments ground-truth file.
    ARGUMENTS:
        - dirName:     directory containing the WAV (and .segments) files
        - modelName:   path of the classifier / HMM model to evaluate
        - methodName:  "svm" or "knn" for fix-sized classification; anything else uses the HMM method
    Prints per-file names and aggregate (average / median / min / max) accuracies; returns nothing.
    '''
    flagsAll = numpy.array([])
    classesAll = []
    accuracys = []
    for i, f in enumerate(glob.glob(dirName + os.sep + '*.wav')):    # for each WAV file
        wavFile = f
        print wavFile
        gtFile = f.replace('.wav', '.segments')                      # open for annotated file
        if methodName.lower() in ["svm", "knn"]:
            flagsInd, classNames, acc = mtFileClassification(wavFile, modelName, methodName, False, gtFile)
        else:
            flagsInd, classNames, acc = hmmSegmentation(wavFile, modelName, False, gtFile)
        if acc > -1:
            accuracys.append(acc)
    # NOTE(review): if no file yielded a valid accuracy, the .mean()/.min()/.max()
    # calls below operate on an empty array — confirm intended behavior
    print " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
    print "Average Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).mean())
    print "Median Accuracy: {0:.1f}".format(100.0*numpy.median(numpy.array(accuracys)))
    print "Min Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).min())
    print "Max Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).max())
def silenceRemoval(x, Fs, stWin, stStep, smoothWindow=0.5, Weight=0.5, plot=False):
    '''
    Event Detection (silence removal)
    ARGUMENTS:
         - x:                the input audio signal
         - Fs:               sampling freq
         - stWin, stStep:    window size and step in seconds
         - smoothWindow:     (optinal) smooth window (in seconds)
         - Weight:           (optinal) weight factor (0 < Weight < 1) the higher, the more strict
         - plot:             (optinal) True if results are to be plotted
    RETURNS:
         - segmentLimits:    list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that
                             the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
    '''
    # clamp the weight to the open interval (0, 1)
    if Weight >= 1:
        Weight = 0.99
    if Weight <= 0:
        Weight = 0.01
    # Step 1: feature extraction
    x = audioBasicIO.stereo2mono(x)                          # convert to mono
    ShortTermFeatures = aF.stFeatureExtraction(x, Fs, stWin * Fs, stStep * Fs)        # extract short-term features
    # Step 2: train binary SVM classifier of low vs high energy frames
    EnergySt = ShortTermFeatures[1, :]                       # keep only the energy short-term sequence (2nd feature)
    E = numpy.sort(EnergySt)                                 # sort the energy feature values:
    L1 = int(len(E) / 10)                                    # number of 10% of the total short-term windows
    T1 = numpy.mean(E[0:L1])                                 # compute "lower" 10% energy threshold
    T2 = numpy.mean(E[-L1:-1])                               # compute "higher" 10% energy threshold
    Class1 = ShortTermFeatures[:, numpy.where(EnergySt < T1)[0]]         # get all features that correspond to low energy
    Class2 = ShortTermFeatures[:, numpy.where(EnergySt > T2)[0]]         # get all features that correspond to high energy
    featuresSS = [Class1.T, Class2.T]                        # form the binary classification task and ...
    [featuresNormSS, MEANSS, STDSS] = aT.normalizeFeatures(featuresSS)   # normalize and ...
    SVM = aT.trainSVM(featuresNormSS, 1.0)                   # train the respective SVM probabilistic model (ONSET vs SILENCE)
    # Step 3: compute onset probability based on the trained SVM
    ProbOnset = []
    for i in range(ShortTermFeatures.shape[1]):              # for each frame
        curFV = (ShortTermFeatures[:, i] - MEANSS) / STDSS   # normalize feature vector
        ProbOnset.append(SVM.pred_probability(curFV)[1])     # get SVM probability (that it belongs to the ONSET class)
    ProbOnset = numpy.array(ProbOnset)
    ProbOnset = smoothMovingAvg(ProbOnset, smoothWindow / stStep)        # smooth probability
    # Step 4A: detect onset frame indices:
    ProbOnsetSorted = numpy.sort(ProbOnset)                  # find probability Threshold as a weighted average of top 10% and lower 10% of the values
    # NOTE(review): integer division here relies on Python 2 semantics; under
    # Python 3 `Nt` would be a float and the slices below would fail — confirm
    Nt = ProbOnsetSorted.shape[0] / 10
    T = (numpy.mean((1 - Weight) * ProbOnsetSorted[0:Nt]) + Weight * numpy.mean(ProbOnsetSorted[-Nt::]))
    MaxIdx = numpy.where(ProbOnset > T)[0]                   # get the indices of the frames that satisfy the thresholding
    i = 0
    timeClusters = []
    segmentLimits = []
    # Step 4B: group frame indices to onset segments
    while i < len(MaxIdx):                                   # for each of the detected onset indices
        curCluster = [MaxIdx[i]]
        if i == len(MaxIdx)-1:
            break
        # extend the cluster while consecutive indices are at most 2 frames apart
        while MaxIdx[i+1] - curCluster[-1] <= 2:
            curCluster.append(MaxIdx[i+1])
            i += 1
            if i == len(MaxIdx)-1:
                break
        i += 1
        timeClusters.append(curCluster)
        segmentLimits.append([curCluster[0] * stStep, curCluster[-1] * stStep])
    # Step 5: Post process: remove very small segments:
    minDuration = 0.2
    segmentLimits2 = []
    for s in segmentLimits:
        if s[1] - s[0] > minDuration:
            segmentLimits2.append(s)
    segmentLimits = segmentLimits2
    if plot:
        # top: waveform with detected segment boundaries; bottom: onset probability
        timeX = numpy.arange(0, x.shape[0] / float(Fs), 1.0 / Fs)
        plt.subplot(2, 1, 1)
        plt.plot(timeX, x)
        for s in segmentLimits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.subplot(2, 1, 2)
        plt.plot(numpy.arange(0, ProbOnset.shape[0] * stStep, stStep), ProbOnset)
        plt.title('Signal')
        for s in segmentLimits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.title('SVM Probability')
        plt.show()
    return segmentLimits
def speakerDiarization(fileName, numOfSpeakers, mtSize=2.0, mtStep=0.2, stWin=0.05, LDAdim=35, PLOT=False):
    '''
    Speaker diarization: cluster the mid-term windows of a recording into speakers.
    ARGUMENTS:
        - fileName:        the name of the WAV file to be analyzed
        - numOfSpeakers    the number of speakers (clusters) in the recording (<=0 for unknown)
        - mtSize (opt)     mid-term window size
        - mtStep (opt)     mid-term window step
        - stWin  (opt)     short-term window size
        - LDAdim (opt)     LDA dimension (0 for no LDA)
        - PLOT   (opt)     True for plotting the results
    NOTE(review): no value is returned — results are only plotted and/or printed
    (cluster/speaker purities when a .segments ground-truth file exists).
    '''
    [Fs, x] = audioBasicIO.readAudioFile(fileName)
    x = audioBasicIO.stereo2mono(x)
    Duration = len(x) / Fs
    # two pre-trained KNN models used to append supervised posteriors to the features
    [Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel("data/knnSpeakerAll")
    [Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel("data/knnSpeakerFemaleMale")
    [MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs * stWin), round(Fs*stWin * 0.5))
    # augment each mid-term vector with the two classifiers' class probabilities
    MidTermFeatures2 = numpy.zeros((MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1]))
    for i in range(MidTermFeatures.shape[1]):
        curF1 = (MidTermFeatures[:, i] - MEAN1) / STD1
        curF2 = (MidTermFeatures[:, i] - MEAN2) / STD2
        [Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
        [Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
        MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
        MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001
        MidTermFeatures2[MidTermFeatures.shape[0] + len(classNames1)::, i] = P2 + 0.0001
    MidTermFeatures = MidTermFeatures2    # TODO
    # SELECT FEATURES:
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20];                                                                                         # SET 0A
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100];                                                                                 # SET 0B
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,
    #                   97,98, 99,100];     # SET 0C
    iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]    # SET 1A
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100];                                          # SET 1B
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100];     # SET 1C
    #iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53];            # SET 2A
    #iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100];    # SET 2B
    #iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100];     # SET 2C
    #iFeaturesSelect = range(100);                                                                                                                     # SET 3
    #MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010
    MidTermFeatures = MidTermFeatures[iFeaturesSelect, :]
    (MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
    MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
    numOfWindows = MidTermFeatures.shape[1]
    # remove outliers (windows whose summed distance to all others is too high):
    DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
    MDistancesAll = numpy.mean(DistancesAll)
    iNonOutLiers = numpy.nonzero(DistancesAll < 1.2 * MDistancesAll)[0]
    # TODO: Combine energy threshold for outlier removal:
    #EnergyMin = numpy.min(MidTermFeatures[1,:])
    #EnergyMean = numpy.mean(MidTermFeatures[1,:])
    #Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
    #iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
    #print iNonOutLiers
    perOutLier = (100.0 * (numOfWindows - iNonOutLiers.shape[0])) / numOfWindows
    MidTermFeaturesNormOr = MidTermFeaturesNorm            # keep full (pre-outlier-removal) features for HMM smoothing
    MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]
    # LDA dimensionality reduction:
    if LDAdim > 0:
        #[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
        # extract mid-term features with minimum step:
        mtWinRatio = int(round(mtSize / stWin))
        mtStepRatio = int(round(stWin / stWin))
        mtFeaturesToReduce = []
        numOfFeatures = len(ShortTermFeatures)
        numOfStatistics = 2
        #for i in range(numOfStatistics * numOfFeatures + 1):
        for i in range(numOfStatistics * numOfFeatures):
            mtFeaturesToReduce.append([])
        for i in range(numOfFeatures):        # for each of the short-term features:
            curPos = 0
            N = len(ShortTermFeatures[i])
            while (curPos < N):
                N1 = curPos
                N2 = curPos + mtWinRatio
                if N2 > N:
                    N2 = N
                curStFeatures = ShortTermFeatures[i][N1:N2]
                # mean in the first half of the rows, std in the second half
                mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
                mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
                curPos += mtStepRatio
        mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
        # append the same supervised posteriors as above to these features too
        mtFeaturesToReduce2 = numpy.zeros((mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1]))
        for i in range(mtFeaturesToReduce.shape[1]):
            curF1 = (mtFeaturesToReduce[:, i] - MEAN1) / STD1
            curF2 = (mtFeaturesToReduce[:, i] - MEAN2) / STD2
            [Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
            [Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
            mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
            mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0] + len(classNames1), i] = P1 + 0.0001
            mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001
        mtFeaturesToReduce = mtFeaturesToReduce2
        mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect, :]
        #mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
        (mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
        mtFeaturesToReduce = mtFeaturesToReduce[0].T
        #DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
        #MDistancesAll = numpy.mean(DistancesAll)
        #iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
        #mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
        # pseudo-labels for LDA: one label per LDAstep-second chunk
        Labels = numpy.zeros((mtFeaturesToReduce.shape[1], ));
        LDAstep = 1.0
        LDAstepRatio = LDAstep / stWin
        #print LDAstep, LDAstepRatio
        for i in range(Labels.shape[0]):
            Labels[i] = int(i*stWin/LDAstepRatio);
        clf = LDA(n_components=LDAdim)
        clf.fit(mtFeaturesToReduce.T, Labels, tol=0.000001)
        MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T
    # candidate numbers of speakers: fixed if known, otherwise swept over 2..9
    if numOfSpeakers <= 0:
        sRange = range(2, 10)
    else:
        sRange = [numOfSpeakers]
    clsAll = []
    silAll = []
    centersAll = []
    for iSpeakers in sRange:
        cls, means, steps = mlpy.kmeans(MidTermFeaturesNorm.T, k=iSpeakers, plus=True)        # perform k-means clustering
        #YDist =   distance.pdist(MidTermFeaturesNorm.T, metric='euclidean')
        #print distance.squareform(YDist).shape
        #hc = mlpy.HCluster()
        #hc.linkage(YDist)
        #cls = hc.cut(14.5)
        #print cls
        # Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
        clsAll.append(cls)
        centersAll.append(means)
        silA = []; silB = []
        for c in range(iSpeakers):        # for each speaker (i.e. for each extracted cluster)
            clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
            if clusterPerCent < 0.020:
                # near-empty cluster: contributes zero silhouette terms
                silA.append(0.0)
                silB.append(0.0)
            else:
                MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c]            # get subset of feature vectors
                Yt = distance.pdist(MidTermFeaturesNormTemp.T)                     # compute average distance between samples that belong to the cluster (a values)
                silA.append(numpy.mean(Yt)*clusterPerCent)
                silBs = []
                for c2 in range(iSpeakers):                                        # compute distances from samples of other clusters
                    if c2!=c:
                        clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
                        MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
                        Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
                        silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
                silBs = numpy.array(silBs)
                silB.append(min(silBs))                                            # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
        silA = numpy.array(silA);
        silB = numpy.array(silB);
        sil = []
        for c in range(iSpeakers):                                                 # for each cluster (speaker)
            sil.append( ( silB[c] - silA[c]) / (max(silB[c],  silA[c])+0.00001)  )        # compute silhouette
        silAll.append(numpy.mean(sil))                                             # keep the AVERAGE SILLOUETTE
    #silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
    imax = numpy.argmax(silAll)                                                    # position of the maximum sillouette value
    nSpeakersFinal = sRange[imax]                                                  # optimal number of clusters
    # generate the final set of cluster labels
    # (important: need to retrieve the outlier windows: this is achieved by giving them the value of their nearest non-outlier window)
    cls = numpy.zeros((numOfWindows,))
    for i in range(numOfWindows):
        j = numpy.argmin(numpy.abs(i-iNonOutLiers))
        cls[i] = clsAll[imax][j]
    # Post-process method 1: hmm smoothing
    for i in range(1):
        startprob, transmat, means, cov = trainHMM_computeStatistics(MidTermFeaturesNormOr, cls)
        hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat)            # hmm training
        hmm.means_ = means; hmm.covars_ = cov
        cls = hmm.predict(MidTermFeaturesNormOr.T)
    # Post-process method 2: median filtering:
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)
    sil = silAll[imax]                                                             # final sillouette
    classNames = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];
    # load ground-truth if available
    gtFile = fileName.replace('.wav', '.segments');                                # open for annotated file
    if os.path.isfile(gtFile):                                                     # if groundturh exists
        [segStart, segEnd, segLabels] = readSegmentGT(gtFile)                      # read GT data
        flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)    # convert to flags
    if PLOT:
        fig = plt.figure()
        if numOfSpeakers>0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(numpy.array(range(len(classNames))))
        ax1.axis((0, Duration, -1, len(classNames)))
        ax1.set_yticklabels(classNames)
        ax1.plot(numpy.array(range(len(cls)))*mtStep+mtStep/2.0, cls)
    if os.path.isfile(gtFile):
        if PLOT:
            ax1.plot(numpy.array(range(len(flagsGT)))*mtStep+mtStep/2.0, flagsGT, 'r')
        purityClusterMean, puritySpeakerMean = evaluateSpeakerDiarization(cls, flagsGT)
        print "{0:.1f}\t{1:.1f}".format(100*purityClusterMean, 100*puritySpeakerMean)
        if PLOT:
            plt.title("Cluster purity: {0:.1f}% - Speaker purity: {1:.1f}%".format(100*purityClusterMean, 100*puritySpeakerMean) )
    if PLOT:
        plt.xlabel("time (seconds)")
        #print sRange, silAll
        if numOfSpeakers<=0:
            # when the speaker count was swept, also plot silhouette vs cluster count
            plt.subplot(212)
            plt.plot(sRange, silAll)
            plt.xlabel("number of clusters");
            plt.ylabel("average clustering's sillouette");
        plt.show()
def speakerDiarizationEvaluateScript(folderName, LDAs):
    '''
    This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth)
    ARGUMENTS:
        - folderName:     the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored
        - LDAs:           a list of LDA dimensions (0 for no LDA)
    Prints one purity line per file per LDA dimension; returns nothing.
    '''
    types = ('*.wav', )
    wavFilesList = []
    for files in types:
        wavFilesList.extend(glob.glob(os.path.join(folderName, files)))
    wavFilesList = sorted(wavFilesList)
    # get number of unique speakers per file (from ground-truth)
    N = []
    for wavFile in wavFilesList:
        gtFile = wavFile.replace('.wav', '.segments');
        if os.path.isfile(gtFile):
            [segStart, segEnd, segLabels] = readSegmentGT(gtFile)        # read GT data
            N.append(len(list(set(segLabels))))
        else:
            N.append(-1)                 # unknown speaker count -> sweep inside speakerDiarization
    for l in LDAs:
        print "LDA = {0:d}".format(l)
        for i, wavFile in enumerate(wavFilesList):
            speakerDiarization(wavFile, N[i], 2.0, 0.2, 0.05, l, PLOT = False)
        print
def musicThumbnailing(x, Fs, shortTermSize=1.0, shortTermStep=0.5, thumbnailSize=10.0):
    '''
    This function detects instances of the most representative part of a music recording, also called "music thumbnails".
    A technique similar to the one proposed in [1], however a wider set of audio features is used instead of chroma features.
    In particular the following steps are followed:
     - Extract short-term audio features. Typical short-term window size: 1 second
     - Compute the self-silimarity matrix, i.e. all pairwise similarities between feature vectors
     - Apply a diagonal mask is as a moving average filter on the values of the self-similarty matrix.
       The size of the mask is equal to the desirable thumbnail length.
     - Find the position of the maximum value of the new (filtered) self-similarity matrix.
       The audio segments that correspond to the diagonial around that position are the selected thumbnails
    ARGUMENTS:
     - x:            input signal
     - Fs:           sampling frequency
     - shortTermSize:     window size (in seconds)
     - shortTermStep:    window step (in seconds)
     - thumbnailSize:    desider thumbnail size (in seconds)
    RETURNS:
     - A1:            beginning of 1st thumbnail (in seconds)
     - A2:            ending of 1st thumbnail (in seconds)
     - B1:            beginning of 2nd thumbnail (in seconds)
     - B2:            ending of 2nd thumbnail (in seconds)
     - S:             the filtered self-similarity matrix (also returned, as a 5th value)
    USAGE EXAMPLE:
       import audioFeatureExtraction as aF
     [Fs, x] = basicIO.readAudioFile(inputFile)
     [A1, A2, B1, B2] = musicThumbnailing(x, Fs)
    [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations.
    Multimedia, IEEE Transactions on, 7(1), 96-104.
    '''
    x = audioBasicIO.stereo2mono(x);
    # feature extraction:
    stFeatures = aF.stFeatureExtraction(x, Fs, Fs*shortTermSize, Fs*shortTermStep)
    # self-similarity matrix
    S = selfSimilarityMatrix(stFeatures)
    # moving filter: a diagonal (identity) mask of the desired thumbnail length
    M = int(round(thumbnailSize / shortTermStep))
    B = numpy.eye(M,M)
    S = scipy.signal.convolve2d(S, B, 'valid')
    # post-processing (remove main diagonal elements)
    MIN = numpy.min(S)
    for i in range(S.shape[0]):
        for j in range(S.shape[1]):
            # suppress near-diagonal cells (self matches) and the lower triangle
            if abs(i-j) < 5.0 / shortTermStep or i > j:
                S[i,j] = MIN;
    # find max position:
    maxVal = numpy.max(S)
    I = numpy.argmax(S)
    [I, J] = numpy.unravel_index(S.argmax(), S.shape)
    # expand the single best cell into a thumbnail-length diagonal run
    i1 = I; i2 = I
    j1 = J; j2 = J
    # NOTE(review): this expansion indexes S[i1-1, ...] / S[i2+1, ...] without
    # bounds checks — confirm behavior when the maximum lies at a matrix edge
    while i2-i1<M:
        if S[i1-1, j1-1] > S[i2+1,j2+1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1
    return (shortTermStep*i1, shortTermStep*i2, shortTermStep*j1, shortTermStep*j2, S)
|
LoadedCoders/iHear
|
iHear-Py/pyAudioAnalysis/audioSegmentation.py
|
Python
|
mit
| 43,895
|
[
"Gaussian"
] |
e18b4c7a0189f0d56a3a38eb9fb0b6a23cb9789d3c775e743563c8c49aa6d9cf
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import sys
# Command-line interface: an optional list of files to restrict the check to;
# when empty, every non-binary file under the repo root is scanned.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
# NOTE: parsing happens at import time, so importing this module consumes sys.argv.
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return True if the given filename is binary.

    A file counts as binary when a NUL byte appears in its content, or when
    it cannot be opened/read at all (so callers skip unreadable files).

    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    try:
        # Open in binary mode: reading arbitrary bytes in text mode would
        # raise UnicodeDecodeError on Python 3 before we ever reach the
        # NUL-byte check, misclassifying many binary files as errors.
        with open(pathname, 'rb') as f:
            CHUNKSIZE = 1024
            while True:
                chunk = f.read(CHUNKSIZE)
                if b'\0' in chunk:  # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break  # done
    except OSError:
        # Missing/unreadable file: treat as binary so the caller skips it.
        # (Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        return True
    return False
def get_all_files(rootdir):
    """Walk *rootdir* and return the paths of every non-binary file,
    pruning vendored/generated directories and skipping BUILD files."""
    pruned_dirs = ('vendor', 'staging', '_output', '_gopath',
                   'third_party', '.git', '.make')
    collected = []
    for root, dirs, files in os.walk(rootdir):
        # don't visit certain dirs (in-place removal prunes the walk)
        for unwanted in pruned_dirs:
            if unwanted in dirs:
                dirs.remove(unwanted)
        if 'BUILD' in files:
            files.remove('BUILD')
        for filename in files:
            full_path = os.path.join(root, filename)
            if not is_binary(full_path):
                collected.append(full_path)
    return collected
# Collects all the flags used in golang files and verifies the flags do
# not contain underscore. If any flag needs to be excluded from this check,
# need to add that flag in hack/verify-flags/excluded-flags.txt.
def check_underscore_in_flags(rootdir, files):
    """Verify that Go flag declarations do not contain underscores.

    Collects every flag declared in the Go sources among *files* and checks
    that none contains an underscore, unless whitelisted in
    hack/verify-flags/excluded-flags.txt under *rootdir*.  Prints the
    offenders and exits the process with status 1 when any are found.
    """
    # preload the 'known' flags which don't follow the - standard
    pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
    # `with` closes the handle even if reading raises (the original leaked
    # the descriptor on an exception between open() and close()).
    with open(pathname, 'r') as f:
        excluded_flags = set(f.read().splitlines())
    # Raw strings so the regex backslashes stay literal.
    regexs = [re.compile(r'Var[P]?\([^,]*, "([^"]*)"'),
              re.compile(r'.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)')]
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        with open(pathname, 'r') as f:
            data = f.read()
        matches = []
        for regex in regexs:
            matches.extend(regex.findall(data))
        for flag in matches:
            # Substring match: a flag is excluded if it contains any
            # whitelisted entry.
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        print("%s" % "\n".join(sorted(new_excluded_flags)))
        sys.exit(1)
def main():
    """Entry point: resolve the repo root, pick the file set, run the check."""
    # Repo root is one level above this script's directory (keep the exact
    # string concatenation so behavior matches when dirname() is empty).
    rootdir = os.path.abspath(os.path.dirname(__file__) + "/../")
    # Explicit file list on the command line wins; otherwise scan everything.
    files = args.filenames if args.filenames else get_all_files(rootdir)
    check_underscore_in_flags(rootdir, files)
if __name__ == "__main__":
    # main() returns None, so the exit status is 0 unless
    # check_underscore_in_flags() already called sys.exit(1).
    sys.exit(main())
|
mikedanese/kubernetes
|
hack/verify-flags-underscore.py
|
Python
|
apache-2.0
| 4,662
|
[
"VisIt"
] |
05ab39a6648bc76f326ef9f9adb49ecb02349589d15dc5789171058b98774432
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import warnings
import numpy as np
from skbio import Sequence, Protein, DNA, RNA, TabularMSA
from skbio.alignment import (
global_pairwise_align_protein, local_pairwise_align_protein,
global_pairwise_align_nucleotide, local_pairwise_align_nucleotide,
make_identity_substitution_matrix, local_pairwise_align,
global_pairwise_align)
from skbio.alignment._pairwise import (
_init_matrices_sw, _init_matrices_nw,
_compute_score_and_traceback_matrices, _traceback, _first_largest,
_compute_substitution_score)
from skbio.sequence import GrammaredSequence
from skbio.util._decorator import classproperty, overrides
class CustomSequence(GrammaredSequence):
    """Minimal grammared sequence over the alphabet WXYZ, using '^' and '$'
    as gap characters ('^' is the default), for exercising the aligners
    with a non-biological alphabet."""

    @classproperty
    @overrides(GrammaredSequence)
    def gap_chars(cls):
        return {'^', '$'}

    @classproperty
    @overrides(GrammaredSequence)
    def default_gap_char(cls):
        return '^'

    @classproperty
    @overrides(GrammaredSequence)
    def definite_chars(cls):
        return {'W', 'X', 'Y', 'Z'}

    @classproperty
    @overrides(GrammaredSequence)
    def degenerate_map(cls):
        # No degenerate characters in this toy alphabet.
        return {}
class PairwiseAlignmentTests(TestCase):
"""
Note: In the high-level tests, the expected results were derived with
assistance from the EMBOSS web server:
http://www.ebi.ac.uk/Tools/psa/emboss_needle/
http://www.ebi.ac.uk/Tools/psa/emboss_water/
In some cases, placement of non-gap characters surrounded by gap
characters are slighly different between scikit-bio and the EMBOSS
server. These differences arise from arbitrary implementation
differences, and always result in the same score (which tells us that
the alignments are equivalent). In cases where the expected results
included here differ from those generated by the EMBOSS server, I note
the EMBOSS result as a comment below the expected value.
"""
    def setUp(self):
        """Ignore warnings during tests."""
        # The aligners emit efficiency warnings; silence them so test output
        # stays readable.
        warnings.simplefilter("ignore")
    def tearDown(self):
        """Clear the list of warning filters, so that no filters are active."""
        # NOTE: resetwarnings() clears ALL filters process-wide, not just the
        # one installed in setUp().
        warnings.resetwarnings()
def test_make_identity_substitution_matrix(self):
expected = {'A': {'A': 1, 'C': -2, 'G': -2, 'T': -2, 'U': -2},
'C': {'A': -2, 'C': 1, 'G': -2, 'T': -2, 'U': -2},
'G': {'A': -2, 'C': -2, 'G': 1, 'T': -2, 'U': -2},
'T': {'A': -2, 'C': -2, 'G': -2, 'T': 1, 'U': -2},
'U': {'A': -2, 'C': -2, 'G': -2, 'T': -2, 'U': 1}}
self.assertEqual(make_identity_substitution_matrix(1, -2), expected)
expected = {'A': {'A': 5, 'C': -4, 'G': -4, 'T': -4, 'U': -4},
'C': {'A': -4, 'C': 5, 'G': -4, 'T': -4, 'U': -4},
'G': {'A': -4, 'C': -4, 'G': 5, 'T': -4, 'U': -4},
'T': {'A': -4, 'C': -4, 'G': -4, 'T': 5, 'U': -4},
'U': {'A': -4, 'C': -4, 'G': -4, 'T': -4, 'U': 5}}
self.assertEqual(make_identity_substitution_matrix(5, -4), expected)
# TODO: duplicate of test_global_pairwise_align_custom_alphabet, remove
# when nondegenerate_chars is removed
def test_global_pairwise_align_custom_alphabet_nondegenerate_chars(self):
custom_substitution_matrix = make_identity_substitution_matrix(
1, -1, alphabet=CustomSequence.nondegenerate_chars)
custom_msa, custom_score, custom_start_end = global_pairwise_align(
CustomSequence("WXYZ"), CustomSequence("WXYYZZ"),
10.0, 5.0, custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),
CustomSequence('WXYYZZ')]))
self.assertEqual(custom_score, 2.0)
self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
def test_global_pairwise_align_custom_alphabet(self):
custom_substitution_matrix = make_identity_substitution_matrix(
1, -1, alphabet=CustomSequence.definite_chars)
custom_msa, custom_score, custom_start_end = global_pairwise_align(
CustomSequence("WXYZ"), CustomSequence("WXYYZZ"),
10.0, 5.0, custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),
CustomSequence('WXYYZZ')]))
self.assertEqual(custom_score, 2.0)
self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
# TODO: duplicate of test_local_pairwise_align_custom_alphabet, remove
# when nondegenerate_chars is removed.
def test_local_pairwise_align_custom_alphabet_nondegenerate_chars(self):
custom_substitution_matrix = make_identity_substitution_matrix(
5, -4, alphabet=CustomSequence.nondegenerate_chars)
custom_msa, custom_score, custom_start_end = local_pairwise_align(
CustomSequence("YWXXZZYWXXWYYZWXX"),
CustomSequence("YWWXZZZYWXYZWWX"), 5.0, 0.5,
custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(
custom_msa,
TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),
CustomSequence('WXZZZYWX^^^YZWWX')]))
self.assertEqual(custom_score, 41.0)
self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
def test_local_pairwise_align_custom_alphabet(self):
custom_substitution_matrix = make_identity_substitution_matrix(
5, -4, alphabet=CustomSequence.definite_chars)
custom_msa, custom_score, custom_start_end = local_pairwise_align(
CustomSequence("YWXXZZYWXXWYYZWXX"),
CustomSequence("YWWXZZZYWXYZWWX"), 5.0, 0.5,
custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(
custom_msa,
TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),
CustomSequence('WXZZZYWX^^^YZWWX')]))
self.assertEqual(custom_score, 41.0)
self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
def test_global_pairwise_align_invalid_type(self):
with self.assertRaisesRegex(TypeError,
"GrammaredSequence.*"
"TabularMSA.*'Sequence'"):
global_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
def test_global_pairwise_align_dtype_mismatch(self):
with self.assertRaisesRegex(TypeError,
"same dtype: 'DNA' != 'RNA'"):
global_pairwise_align(DNA('ACGT'), TabularMSA([RNA('ACGU')]),
1.0, 1.0, {})
with self.assertRaisesRegex(TypeError,
"same dtype: 'DNA' != 'RNA'"):
global_pairwise_align(TabularMSA([DNA('ACGT')]),
TabularMSA([RNA('ACGU')]),
1.0, 1.0, {})
def test_global_pairwise_align_protein(self):
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
gap_extend_penalty=5.)
self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
Protein("---PAW-HEAE")]))
self.assertEqual(obs_score, 23.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# EMBOSS result: P---AW-HEAE
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=5.,
gap_extend_penalty=0.5)
self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHE-E"),
Protein("---PAW-HEAE")]))
self.assertEqual(obs_score, 30.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# Protein sequences with metadata
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE", metadata={'id': "s1"}),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa,
TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
Protein("---PAW-HEAE", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 23.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# One TabularMSA and one Protein as input
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
TabularMSA([Protein("HEAGAWGHEE", metadata={'id': "s1"})]),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa,
TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
Protein("---PAW-HEAE", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 23.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# One single-sequence alignment as input and one double-sequence
# alignment as input. Score confirmed manually.
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
TabularMSA([Protein("HEAGAWGHEE", metadata={'id': "s1"}),
Protein("HDAGAWGHDE", metadata={'id': "s2"})]),
TabularMSA([Protein("PAWHEAE", metadata={'id': "s3"})]),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa,
TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
Protein("HDAGAWGHDE-", metadata={'id': "s2"}),
Protein("---PAW-HEAE", metadata={'id': "s3"})]))
self.assertEqual(obs_score, 21.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# TypeError on invalid input
self.assertRaises(TypeError, global_pairwise_align_protein,
42, Protein("HEAGAWGHEE"))
self.assertRaises(TypeError, global_pairwise_align_protein,
Protein("HEAGAWGHEE"), 42)
def test_global_pairwise_align_protein_invalid_dtype(self):
with self.assertRaisesRegex(TypeError,
"TabularMSA with Protein dtype.*dtype "
"'DNA'"):
global_pairwise_align_protein(TabularMSA([Protein('PAW')]),
TabularMSA([DNA('ACGT')]))
def test_global_pairwise_align_protein_penalize_terminal_gaps(self):
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
gap_extend_penalty=5., penalize_terminal_gaps=True)
self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE"),
Protein("---PAWHEAE")]))
self.assertEqual(obs_score, 1.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
def test_global_pairwise_align_nucleotide_penalize_terminal_gaps(self):
# in these tests one sequence is about 3x the length of the other.
# we toggle penalize_terminal_gaps to confirm that it results in
# different alignments and alignment scores.
seq1 = DNA("ACCGTGGACCGTTAGGATTGGACCCAAGGTTG")
seq2 = DNA("T"*25 + "ACCGTGGACCGTAGGATTGGACCAAGGTTA" + "A"*25)
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
match_score=5, mismatch_score=-4, penalize_terminal_gaps=False)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-------------------------ACCGTGGACCGTTAGGA"
"TTGGACCCAAGGTTG-------------------------"),
DNA("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
"TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")]))
self.assertEqual(obs_score, 131.0)
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
match_score=5, mismatch_score=-4, penalize_terminal_gaps=True)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-------------------------ACCGTGGACCGTTAGGA"
"TTGGACCCAAGGTT-------------------------G"),
DNA("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
"TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")]))
self.assertEqual(obs_score, 97.0)
def test_local_pairwise_align_protein(self):
obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
gap_extend_penalty=5.)
self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE"),
Protein("AW-HE")]))
self.assertEqual(obs_score, 26.0)
self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=5.,
gap_extend_penalty=0.5)
self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE-E"),
Protein("AW-HEAE")]))
self.assertEqual(obs_score, 32.0)
self.assertEqual(obs_start_end, [(4, 9), (1, 6)])
# Protein sequences with metadata
obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
Protein("HEAGAWGHEE", metadata={'id': "s1"}),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa, TabularMSA([Protein("AWGHE", metadata={'id': "s1"}),
Protein("AW-HE", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 26.0)
self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
# Fails when either input is passed as a TabularMSA
self.assertRaises(TypeError, local_pairwise_align_protein,
TabularMSA([Protein("HEAGAWGHEE",
metadata={'id': "s1"})]),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10.,
gap_extend_penalty=5.)
self.assertRaises(TypeError, local_pairwise_align_protein,
Protein("HEAGAWGHEE", metadata={'id': "s1"}),
TabularMSA([Protein("PAWHEAE",
metadata={'id': "s2"})]),
gap_open_penalty=10., gap_extend_penalty=5.)
# TypeError on invalid input
self.assertRaises(TypeError, local_pairwise_align_protein,
42, Protein("HEAGAWGHEE"))
self.assertRaises(TypeError, local_pairwise_align_protein,
Protein("HEAGAWGHEE"), 42)
def test_global_pairwise_align_nucleotide(self):
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("G-ACCTTGACCAGGTACC"),
DNA("GAACTTTGAC---GTAAC")]))
self.assertEqual(obs_score, 41.0)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
DNA("GAACTTTGAC---GTAAC")]))
self.assertEqual(obs_score, 32.0)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
# DNA sequences with metadata
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GAACTTTGAC---GTAAC", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 32.0)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
# Align one DNA sequence and one TabularMSA, score computed manually
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
TabularMSA([DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GACCATGACCAGGTACC", metadata={'id': "s2"})]),
DNA("GAACTTTGACGTAAC", metadata={'id': "s3"}),
gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("-GACCATGACCAGGTACC", metadata={'id': "s2"}),
DNA("GAACTTTGAC---GTAAC", metadata={'id': "s3"})]))
self.assertEqual(obs_score, 27.5)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
# TypeError on invalid input
self.assertRaises(TypeError, global_pairwise_align_nucleotide,
42, DNA("ACGT"))
self.assertRaises(TypeError, global_pairwise_align_nucleotide,
DNA("ACGT"), 42)
def test_global_pairwise_align_nucleotide_invalid_dtype(self):
with self.assertRaisesRegex(TypeError,
"TabularMSA with DNA or RNA dtype.*dtype "
"'Protein'"):
global_pairwise_align_nucleotide(TabularMSA([DNA('ACGT')]),
TabularMSA([Protein('PAW')]))
def test_local_pairwise_align_nucleotide(self):
obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGACCAGGTACC"),
DNA("ACTTTGAC---GTAAC")]))
self.assertEqual(obs_score, 41.0)
self.assertEqual(obs_start_end, [(1, 16), (2, 14)])
obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGAC"),
DNA("ACTTTGAC")]))
self.assertEqual(obs_score, 31.0)
self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
# DNA sequences with metadata
obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
self.assertEqual(
obs_msa,
TabularMSA([DNA("ACCTTGAC", metadata={'id': "s1"}),
DNA("ACTTTGAC", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 31.0)
self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
# Fails when either input is passed as a TabularMSA
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
TabularMSA([DNA("GACCTTGACCAGGTACC",
metadata={'id': "s1"})]),
DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.,
match_score=5, mismatch_score=-4)
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
TabularMSA([DNA("GAACTTTGACGTAAC",
metadata={'id': "s2"})]),
gap_open_penalty=10., gap_extend_penalty=5.,
match_score=5, mismatch_score=-4)
# TypeError on invalid input
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
42, DNA("ACGT"))
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
DNA("ACGT"), 42)
def test_nucleotide_aligners_use_substitution_matrices(self):
alt_sub = make_identity_substitution_matrix(10, -10)
# alternate substitution matrix yields different alignment (the
# aligned sequences and the scores are different) with local alignment
msa_no_sub, score_no_sub, start_end_no_sub = \
local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
msa_alt_sub, score_alt_sub, start_end_alt_sub = \
local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4, substitution_matrix=alt_sub)
self.assertNotEqual(msa_no_sub, msa_alt_sub)
self.assertNotEqual(score_no_sub, score_alt_sub)
self.assertNotEqual(start_end_no_sub, start_end_alt_sub)
# alternate substitution matrix yields different alignment (the
# aligned sequences and the scores are different) with global alignment
msa_no_sub, score_no_sub, start_end_no_sub = \
global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
msa_alt_sub, score_alt_sub, start_end_alt_sub = \
global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4, substitution_matrix=alt_sub)
self.assertNotEqual(msa_no_sub, msa_alt_sub)
self.assertNotEqual(score_no_sub, score_alt_sub)
self.assertEqual(start_end_no_sub, start_end_alt_sub)
def test_local_pairwise_align_invalid_type(self):
with self.assertRaisesRegex(TypeError,
'GrammaredSequence.*Sequence'):
local_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
def test_local_pairwise_align_type_mismatch(self):
with self.assertRaisesRegex(TypeError,
"same type: 'DNA' != 'RNA'"):
local_pairwise_align(DNA('ACGT'), RNA('ACGU'), 1.0, 1.0, {})
def test_init_matrices_sw(self):
expected_score_m = np.zeros((5, 4))
expected_tback_m = [[0, 0, 0, 0],
[0, -1, -1, -1],
[0, -1, -1, -1],
[0, -1, -1, -1],
[0, -1, -1, -1]]
actual_score_m, actual_tback_m = _init_matrices_sw(
TabularMSA([DNA('AAA', metadata={'id': 'id'})]),
TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
def test_init_matrices_nw(self):
expected_score_m = [[0, -5, -7, -9],
[-5, 0, 0, 0],
[-7, 0, 0, 0],
[-9, 0, 0, 0],
[-11, 0, 0, 0]]
expected_tback_m = [[0, 3, 3, 3],
[2, -1, -1, -1],
[2, -1, -1, -1],
[2, -1, -1, -1],
[2, -1, -1, -1]]
actual_score_m, actual_tback_m = _init_matrices_nw(
TabularMSA([DNA('AAA', metadata={'id': 'id'})]),
TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
def test_compute_substitution_score(self):
# these results were computed manually
subs_m = make_identity_substitution_matrix(5, -4)
gap_chars = set('-.')
self.assertEqual(
_compute_substitution_score(['A'], ['A'], subs_m, 0, gap_chars),
5.0)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A'], subs_m, 0,
gap_chars),
5.0)
self.assertEqual(
_compute_substitution_score(['A', 'C'], ['A'], subs_m, 0,
gap_chars),
0.5)
self.assertEqual(
_compute_substitution_score(['A', 'C'], ['A', 'C'], subs_m, 0,
gap_chars),
0.5)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,
gap_chars),
2.5)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 1,
gap_chars),
3)
# alt subs_m
subs_m = make_identity_substitution_matrix(1, -2)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,
gap_chars),
0.5)
def test_compute_score_and_traceback_matrices(self):
# these results were computed manually
expected_score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 6],
[-11, -7, -3, 1]]
expected_tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 2]]
m = make_identity_substitution_matrix(2, -1)
actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
TabularMSA([DNA('ACG', metadata={'id': 'id'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
# different sequences
# these results were computed manually
expected_score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 3],
[-11, -7, -3, -2]]
expected_tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 1]]
m = make_identity_substitution_matrix(2, -1)
actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
TabularMSA([DNA('ACC', metadata={'id': 'id'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
# four sequences provided in two alignments
# these results were computed manually
expected_score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 3],
[-11, -7, -3, -2]]
expected_tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 1]]
m = make_identity_substitution_matrix(2, -1)
actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
TabularMSA([DNA('ACC', metadata={'id': 's1'}),
DNA('ACC', metadata={'id': 's2'})]),
TabularMSA([DNA('ACGT', metadata={'id': 's3'}),
DNA('ACGT', metadata={'id': 's4'})]), 5, 2, m)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
def test_compute_score_and_traceback_matrices_invalid(self):
# if the sequence contains a character that is not in the
# substitution matrix, an informative error should be raised
m = make_identity_substitution_matrix(2, -1)
self.assertRaises(ValueError, _compute_score_and_traceback_matrices,
TabularMSA([DNA('AWG', metadata={'id': 'id'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'id'})]),
5, 2, m)
def test_traceback(self):
score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 6],
[-11, -7, -3, 1]]
score_m = np.array(score_m)
tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 2]]
tback_m = np.array(tback_m)
# start at bottom-right
expected = ([DNA("ACG-", metadata={'id': 'foo'})],
[DNA("ACGT", metadata={'id': 'bar'})], 1, 0, 0)
actual = _traceback(tback_m, score_m,
TabularMSA([DNA('ACG', metadata={'id': 'foo'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'bar'})]),
4, 3)
self.assertEqual(actual, expected)
# four sequences in two alignments
score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 6],
[-11, -7, -3, 1]]
score_m = np.array(score_m)
tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 2]]
tback_m = np.array(tback_m)
# start at bottom-right
expected = ([DNA("ACG-", metadata={'id': 's1'}),
DNA("ACG-", metadata={'id': 's2'})],
[DNA("ACGT", metadata={'id': 's3'}),
DNA("ACGT", metadata={'id': 's4'})],
1, 0, 0)
actual = _traceback(tback_m, score_m,
TabularMSA([DNA('ACG', metadata={'id': 's1'}),
DNA('ACG', metadata={'id': 's2'})]),
TabularMSA([DNA('ACGT', metadata={'id': 's3'}),
DNA('ACGT', metadata={'id': 's4'})]),
4, 3)
self.assertEqual(actual, expected)
# start at highest-score
expected = ([DNA("ACG", metadata={'id': 'foo'})],
[DNA("ACG", metadata={'id': 'bar'})], 6, 0, 0)
actual = _traceback(tback_m, score_m,
TabularMSA([DNA('ACG', metadata={'id': 'foo'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'bar'})]),
3, 3)
self.assertEqual(actual, expected)
# terminate traceback before top-right
tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 0, 3],
[2, 2, 2, 1],
[2, 2, 2, 2]]
tback_m = np.array(tback_m)
expected = ([DNA("G", metadata={'id': 'a'})],
[DNA("G", metadata={'id': 'a'})], 6, 2, 2)
actual = _traceback(tback_m, score_m,
TabularMSA([DNA('ACG', metadata={'id': 'a'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'a'})]),
3, 3)
self.assertEqual(actual, expected)
def test_first_largest(self):
l = [(5, 'a'), (5, 'b'), (5, 'c')]
self.assertEqual(_first_largest(l), (5, 'a'))
l = [(5, 'c'), (5, 'b'), (5, 'a')]
self.assertEqual(_first_largest(l), (5, 'c'))
l = [(5, 'c'), (6, 'b'), (5, 'a')]
self.assertEqual(_first_largest(l), (6, 'b'))
# works for more than three entries
l = [(5, 'c'), (6, 'b'), (5, 'a'), (7, 'd')]
self.assertEqual(_first_largest(l), (7, 'd'))
# Note that max([(5, 'a'), (5, 'c')]) == max([(5, 'c'), (5, 'a')])
# but for the purposes needed here, we want the max to be the same
# regardless of what the second item in the tuple is.
if __name__ == "__main__":
main()
|
kdmurray91/scikit-bio
|
skbio/alignment/tests/test_pairwise.py
|
Python
|
bsd-3-clause
| 34,993
|
[
"scikit-bio"
] |
a9fb52ef41b40431a58132ea4702f8a6294679e2cdbaca8a9b8882c6c451ea69
|
from numpy import *
from pylab import *
from datetime import datetime, time, timedelta
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
from ShiftTimes import *
import os
import matplotlib.patches as patches
import matplotlib.transforms as transforms
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
#from z_expansion_gulisano import z as z_exp
def flags2nan(VAR, FLAG):
    """Return a float copy of *VAR* with fill values replaced by NaN.

    Samples are valid only when strictly below *FLAG* (OMNI-style data uses
    large positive sentinels for missing samples); everything else becomes
    NaN.

    Parameters
    ----------
    VAR : array_like
        Data samples.
    FLAG : float
        Sentinel threshold; entries >= FLAG are flagged invalid.

    Returns
    -------
    numpy.ndarray
        Float copy of VAR (the caller's array is never modified).
    """
    # Force a float copy: assigning np.nan into an integer array raises
    # TypeError, and np.array(...) guarantees the input is left untouched.
    out = np.array(VAR, dtype=float)
    out[~(out < FLAG)] = np.nan
    return out
def date_to_utc(fecha):
    """Seconds elapsed between the Unix epoch and `fecha` (a naive datetime)."""
    epoch = datetime(1970, 1, 1, 0, 0, 0, 0)
    return (fecha - epoch).total_seconds()
def dates_from_omni(t):
    """Convert OMNI time rows [yyyy, mm, dd, HH, MM, SS, uSS] into datetimes."""
    return [datetime(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
            for row in t]
def utc_from_omni(file):
    """Read the 'time' variable of an OMNI netCDF file as Unix-UTC seconds."""
    raw = np.array(file.variables['time'].data)
    stamps = dates_from_omni(raw)
    out = np.zeros(len(stamps))
    for k, d in enumerate(stamps):
        out[k] = date_to_utc(d)
    return out
def selecc_data(data, tshk):
    """Cut (time, rate) to [-10, +30] days around the shock time `tshk`.

    `data[0]` holds Unix-UTC seconds; returned times are in days since the shock.
    """
    t_utc, rate = data[0], data[1]
    day = 86400.  # [s]
    epoch = datetime(1970, 1, 1, 0, 0, 0, 0)
    t0 = (tshk - epoch).total_seconds()
    lo = t0 - 10. * day
    hi = t0 + 30. * day
    keep = (t_utc > lo) & (t_utc < hi)
    return ((t_utc[keep] - t0) / day, rate[keep])
def selecc_window(data, tini, tend):
    """Cut (time, y) to the open window (tini, tend); times in days since tini."""
    t_utc, y = data[0], data[1]
    day = 86400.  # [s]
    epoch = datetime(1970, 1, 1, 0, 0, 0, 0)
    lo = (tini - epoch).total_seconds()  # [s] utc sec
    hi = (tend - epoch).total_seconds()  # [s] utc sec
    keep = (t_utc > lo) & (t_utc < hi)
    return ((t_utc[keep] - lo) / day, y[keep])
def enoughdata(var, fgap):
    """True when the fraction of non-NaN samples in `var` is at least (1 - fgap).

    `fgap` is the maximum tolerated gap fraction (e.g. 0.2 -> require 80%
    good data).

    NOTE: the original relied on pylab's `find()`, which was removed from
    matplotlib (3.1); `np.count_nonzero` is the supported equivalent.
    """
    n = len(var)
    ngood = np.count_nonzero(~np.isnan(var))  # number of non-gap samples
    fdata = 1. * ngood / n                    # fraction of data without gaps
    return fdata >= (1. - fgap)
def averages_and_std(n_icmes, t_shck, ti_icme, dTday, nbin, t_utc, VAR, fgap):
    """Superposed-epoch average of VAR over the sheath [t_shck, ti_icme] of each event.

    Events shorter than `dTday` days, or with more than 100*fgap % data gaps,
    are skipped.  Returns [nok, nbad, tnorm, VAR_avrg, VAR_std, ndata].

    NOTE(review): this calls the 4-argument `adaptar(nbin, dt, t, var)`, but
    `adaptar` is redefined below with 6 parameters, shadowing that signature;
    as the module stands this call would raise TypeError -- confirm which
    version is intended.
    """
    day = 86400.
    nok=0; nbad=0
    adap = []
    for i in range(n_icmes):
        dT = (ti_icme[i] - t_shck[i]).total_seconds()/day # [day]
        if dT>dTday:
            dt = dT/nbin
            t, var = selecc_window(
                [t_utc, VAR],
                t_shck[i], ti_icme[i]
                )
            if enoughdata(var, fgap): # require that more than 80% are NOT gaps
                adap += [adaptar(nbin, dt, t, var)]
                nok +=1
            else:
                continue
        else:
            print " i:%d ---> Este evento es muy chico!, dT/day:%g" % (i, dT)
            nbad +=1
    VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
    for i in range(nok):
        VAR_adap[i,:] = adap[i][1]
    VAR_avrg = zeros(nbin)
    VAR_std = zeros(nbin)
    ndata = zeros(nbin)
    for i in range(nbin):
        cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond)) # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # mean over the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond]) # std of the same set of values
    tnorm = adap[0][0]
    return [nok, nbad, tnorm, VAR_avrg, VAR_std, ndata]
def adaptar(n, dt, t, r):
    """Rebin the series (t, r) onto `n` bins of width `dt`; times normalized by n*dt.

    NOTE(review): this definition is shadowed by the 6-argument `adaptar`
    defined immediately below, so it is dead code as the module stands.
    """
    #n = int(5./dt) # number of points over the whole plotting interval
    tt = zeros(n)
    rr = zeros(n)
    for i in range(n):
        tmin = i*dt
        tmax = (i+1.)*dt
        cond = (t>tmin) & (t<tmax)
        tt[i] = mean(t[cond])
        rr[i] = mean(r[cond])
    return [tt/(n*dt), rr]
def adaptar(nwndw, dT, n, dt, t, r):
    """Rebin (t, r) onto n bins of width dt; times normalized by the sheath duration dT.

    nwndw = [n_before, n_after]: number of extra sheath-durations included
    before and after the structure.
    """
    tt = np.zeros(n)
    rr = np.zeros(n)
    bins_sheath = n/(1+nwndw[0]+nwndw[1])  # number of bins in the sheath proper
    for k in range(n):
        lo = (k - nwndw[0]*bins_sheath)*dt
        hi = lo + dt
        inside = (t > lo) & (t < hi)
        tt[k] = np.mean(t[inside])
        rr[k] = np.mean(r[inside])
    # time normalized by the sheath duration
    return [tt/dT, rr]
def adaptar_ii(nwndw, dT, n, dt, t, r, fgap):
    """Like `adaptar`, but also reports whether the event has enough good data.

    Returns (enough, [tnorm, rr]); when the gap fraction inside (0, dT)
    exceeds `fgap`, rr is all-NaN so the event contributes nothing.
    """
    #n = int(5./dt) # number of points over the whole plotting interval
    tt = zeros(n)
    rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1]) # number of bins in the sheath/mc
    cc = (t>0.) & (t<dT) # the sheath/mc interval
    enough = enoughdata(r[cc], fgap) # [bool] True if more than 80% of the data is good
    if not(enough): rr = nan*ones(n) # without enough data this event contributes nothing
    for i in range(n):
        tmin = (i-nwndw[0]*_nbin_)*dt
        tmax = tmin + dt
        cond = (t>tmin) & (t<tmax)
        tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1)
        if enough:
            cc = ~isnan(r[cond]) # don't forget to filter out the gaps
            rr[i] = mean(r[cond][cc])
    return enough, [tt/dT, rr] # time normalized by the sheath/mc/etc duration
def selecc_window_ii(nwndw, data, tini, tend):
    """Cut (time, y) to [tini, tend] widened by nwndw=[before, after] durations.

    Times are returned in days measured from tini.
    """
    t_utc, y = data[0], data[1]
    day = 86400.  # [s]
    epoch = datetime(1970, 1, 1, 0, 0, 0, 0)
    lo = (tini - epoch).total_seconds()  # [s] utc sec
    hi = (tend - epoch).total_seconds()  # [s] utc sec
    span = hi - lo
    keep = (t_utc > lo - nwndw[0]*span) & (t_utc < hi + nwndw[1]*span)
    return ((t_utc[keep] - lo) / day, y[keep])
def averages_and_std_ii(nwndw,
        SELECC, #MCsig, MCwant,
        n_icmes, tini, tend, dTday, nbin, t_utc, VAR):
    """Superposed-epoch mean/median/std of VAR over the selected events.

    Like `averages_and_std`, but windows are widened by nwndw=[before, after]
    event-durations and the boolean mask SELECC is applied.
    Returns [nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata].
    """
    day = 86400.
    nok=0; nbad=0
    adap = []
    for i in range(n_icmes):
        dT = (tend[i] - tini[i]).total_seconds()/day # [day]
        if ((dT>dTday) & SELECC[i]):# (MCsig[i]>=MCwant)):
            dt = dT*(1+nwndw[0]+nwndw[1])/nbin
            t, var = selecc_window_ii(
                nwndw, # number of durations backwards and forwards
                [t_utc, VAR],
                tini[i], tend[i]
                )
            adap += [adaptar(nwndw, dT, nbin, dt, t, var)] # rebin using 'dt' as the new bin width
            nok +=1
        else:
            print " i:%d ---> Filtramos este evento!, dT/day:%g" % (i, dT)
            nbad +=1
    VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
    for i in range(nok):
        VAR_adap[i,:] = adap[i][1]
    VAR_avrg = zeros(nbin)
    VAR_medi = zeros(nbin)
    VAR_std = zeros(nbin)
    ndata = zeros(nbin)
    for i in range(nbin):
        cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond)) # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # mean over the non-flagged values
        VAR_medi[i] = median(VAR_adap.T[i,cond])# median over the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond]) # std of the same set of values
    tnorm = adap[0][0]
    return [nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata]
def mvs_for_each_event(VAR_adap, nbin, nwndw, Enough):
    """Per-event mean of the rebinned profile, restricted to the structure window.

    Events flagged False in `Enough` get NaN.  NOTE(review): `binsPerTimeUnit`
    relies on Python-2 integer division to be usable as a slice bound --
    under Python 3 it becomes a float; confirm before porting.
    """
    nok = size(VAR_adap, axis=0)
    mvs = np.zeros(nok) # mean values for each event
    binsPerTimeUnit = nbin/(1+nwndw[0]+nwndw[1]) # number of bins per unit of time
    start = nwndw[0]*binsPerTimeUnit # the MC starts at this bin
    #print " ----> binsPerTimeUnit: ", binsPerTimeUnit
    #print " ----> nok: ", nok
    #print " ----> VAR_adap.shape: ", VAR_adap.shape
    #print " ----> VAR_adap: \n", VAR_adap
    #raw_input()
    for i in range(nok):
        aux = VAR_adap[i, start:start+binsPerTimeUnit] # (*)
        cc = ~isnan(aux) # pick good-data only
        #if len(find(cc))>1:
        if Enough[i]: # only print those that have *enough data*
            print ccl.G
            print "id %d/%d: "%(i+1, nok), aux[cc]
            print ccl.W
            mvs[i] = np.mean(aux[cc])
        else:
            mvs[i] = np.nan
    #(*): this is the time series (of this variable) for event "i"
    pause(1)
    return mvs
def diff_dates(tend, tini):
    """Elementwise (tend[i] - tini[i]) in seconds; NaN where either is not a datetime."""
    n = len(tend)
    out = np.nan * np.ones(n)
    for k in range(n):
        # both entries must be plain datetimes, otherwise the pair is skipped
        if type(tend[k]) == type(tini[k]) == datetime:
            out[k] = (tend[k] - tini[k]).total_seconds()
        else:
            out[k] = np.nan
    return out  # [sec]
def write_variable(fout, varname, dims, var, datatype, comments):
    """Create a netCDF variable, store `var` in it and record `comments` as units."""
    v = fout.createVariable(varname, datatype, dims)
    v[:] = var
    v.units = comments
def calc_beta(Temp, Pcc, B):
    """Plasma beta from proton temperature, density [#/cc] and B [nT].

    Uses OMNI's definition (http://pamela.roma2.infn.it/index.php):
        Beta = [(4.16e-5 * Tp) + 5.34] * Np / B**2   (B in nT)
    """
    thermal_term = 4.16*10**-5 * Temp + 5.34
    return thermal_term * Pcc / B**2
def thetacond(ThetaThres, ThetaSh):
    """Boolean mask selecting events whose Wang shock-normal angle exceeds ThetaThres.

    Raises SystemExit for a non-positive threshold (guards against applying a
    disabled filter by mistake).  The prints use the parenthesized
    single-argument form, which behaves identically under Python 2 and is
    valid Python 3 (the original py2-only print statements broke py3 parsing).
    """
    if ThetaThres<=0.:
        print(ccl.Rn + ' ----> BAD WANG FILTER!!: ThetaThres<=0.')
        print(' ----> Saliendo...' + ccl.Rn)
        raise SystemExit
        #return ones(len(ThetaSh), dtype=bool)
    else:
        return (ThetaSh > ThetaThres)
def wangflag(ThetaThres):
    """Filename tag for the Wang filter: 'NaN' when disabled (negative threshold)."""
    return 'NaN' if ThetaThres < 0 else str(ThetaThres)
def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm,
        SUBTITLE, YLIMS, YLAB, fname_fig):
    """Plot mean and median profiles with a standard-error band and save to PNG.

    The shaded rectangle spans the normalized structure interval [0, 1] in
    data coordinates (full axes height).
    """
    fig = figure(1, figsize=(13, 6))
    ax = fig.add_subplot(111)
    ax.plot(tnorm, avrVAR, 'o-', color='black', markersize=5, label='mean')
    ax.plot(tnorm, medVAR, 'o-', color='red', alpha=.5, markersize=5, markeredgecolor='none', label='median')
    # band = mean +/- std/sqrt(N): the standard error of the mean per bin
    inf = avrVAR + stdVAR/np.sqrt(nVAR)
    sup = avrVAR - stdVAR/np.sqrt(nVAR)
    ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)
    trans = transforms.blended_transform_factory(
        ax.transData, ax.transAxes)
    rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
        transform=trans, color='blue',
        alpha=0.3)
    ax.add_patch(rect1)
    ax.legend(loc='upper right')
    ax.grid()
    ax.set_ylim(YLIMS)
    TITLE = SUBTITLE
    ax.set_title(TITLE)
    ax.set_xlabel('time normalized to MC passage time [1]', fontsize=14)
    ax.set_ylabel(YLAB, fontsize=20)
    savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
    close()
class general:
    """Plain attribute container (namespace) for run configuration.

    Callers attach fields such as `data_name`, `dirs` and `fnames` after
    construction (see `events_mgr.__init__`).
    """
    def __init__(self):
        # The original assigned the bare local `name='name'`, which was
        # silently discarded; bind it to the instance as presumably intended.
        self.name = 'name'
class events_mgr:
    """Superposed-epoch analysis driver.

    Selects ICME/MC events, rebins each observable onto a normalized time
    grid, averages across events, and writes figures, ASCII tables and a
    netCDF parameter file.  Typical use: construct, then call `run_all()`.
    (Python 2 code: print statements, `range(...)+range(...)`, py2 `map`.)
    """
    def __init__(self, gral, FILTER, CUTS, bd, nBin, fgap, tb, z_exp):
        #self.fnames = fnames
        self.data_name = gral.data_name
        self.FILTER = FILTER
        self.CUTS = CUTS
        self.bd = bd
        self.nBin = nBin
        self.fgap = fgap
        self.tb = tb
        self.z_exp = z_exp
        self.dir_plots = gral.dirs['dir_plots']
        self.dir_ascii = gral.dirs['dir_ascii']
        # input netCDF files: spacecraft data and the Richardson event table
        self.f_sc = netcdf_file(gral.fnames['ACE'], 'r')
        self.f_events = netcdf_file(gral.fnames['table_richardson'], 'r')
        print " -------> archivos input leidos!"
    def run_all(self):
        """Full pipeline: filter events, load data, rebin/average, plot, write params."""
        #----- event selection
        self.filter_events()
        print "\n ---> filtrado de eventos (n:%d): OK\n" % (self.n_SELECC)
        #----- load data and the "omni" shift-times
        self.load_data_and_timeshift()
        #----- rebinning and averages
        self.rebine_and_avr()
        #----- make the plots
        self.make_plots()
        #----- "stuff" files
        self.build_params_file()
    def rebine_and_avr(self):
        """Rebin every variable around each selected event and average across events.

        Fills self.out with, per variable: [nok, nbad, tnorm, avg, median,
        std, ndata, per-event means], plus the per-variable counts of events
        with enough data (nEnough), the Enough mask and contributing IDs.
        """
        nvars = self.nvars #len(VARS)
        n_icmes = self.tb.n_icmes
        bd = self.bd
        VARS = self.VARS
        nbin = self.nBin['total']
        nwndw = [self.nBin['before'], self.nBin['after']]
        day = 86400.
        """print " ---> nbin: ", nbin
        print " ---> t_utc[0]: ", self.t_utc[0]
        print " ---> t_utc[-1]: ", self.t_utc[-1]
        print " ---> fgap: ", self.fgap
        print " ---> VARS[-1][1]: ", self.VARS[-1][1]
        print " ---> nwndw: ", nwndw
        print " ---> dTday: ", self.CUTS['dTday']
        print " ---> tini[0]: ", bd.tini[0]
        print " ---> tend[-110]: ", bd.tend[-110]"""
        #raw_input()
        ADAP = [] # collection of several 'adap' (one per variable)
        # loop over the events:
        nok=0; nbad=0;
        nEnough = np.zeros(nvars)
        Enough = np.zeros(n_icmes*nvars, dtype=bool).reshape(n_icmes, nvars)
        Enough = []
        nnn = 0 # number of events passing the a-priori filter
        #---- we want a list of the event-ids that enter each average :-)
        IDs = {}
        for j in range(nvars):
            varname = VARS[j][1]
            IDs[varname] = []
        for i in range(n_icmes):
            #nok=0; nbad=0;
            ok=False
            try: # not every element of 'tend' is a date (some events have no defined date)
                dT = (bd.tend[i] - bd.tini[i]).total_seconds()/day # [day]
                ok = True
            except:
                continue # skip to the next event 'i'
            #np.set_printoptions(4) # number of digits printed when using numpy.arrays
            if (ok & self.SELECC[i]):# (MCsig[i]>=MCwant)): ---FILTER--- (*1)
                nnn += 1
                print ccl.Gn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W
                nok +=1
                Enough += [ np.zeros(nvars, dtype=bool) ] # all False by default
                # loop over the variables:
                for j in range(nvars):
                    varname = VARS[j][1]
                    dt = dT*(1+nwndw[0]+nwndw[1])/nbin
                    t, var = selecc_window_ii(
                        nwndw, # plotting range
                        [self.t_utc, VARS[j][0]],
                        bd.tini[i], bd.tend[i]
                        )
                    # rebin using 'dt' as the width of the new binning
                    out = adaptar_ii(nwndw, dT, nbin, dt, t, var, self.fgap)
                    enough = out[0] # True: data with less than 100*'fgap'% of gaps
                    Enough[nok-1][j] = enough
                    ADAP += [ out[1] ]
                    #print " out01: ", out[1]; raw_input()
                    if enough:
                        IDs[varname] += [i]
                        nEnough[j] += 1
            else:
                print ccl.Rn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W
                nbad +=1
        print " ----> len.ADAP: %d" % len(ADAP)
        Enough = np.array(Enough)
        stuff = []
        #nok = len(ADAP)/nvars # (*)
        # (*) the dim of 'ADAP' is 'nvars' times the number of events passing the filter at (*1)
        for j in range(nvars):
            print ccl.On + " -------> procesando: %s" % VARS[j][3] + " (%d/%d)" % (j+1,nvars)
            print " nEnough/nok/(nok+nbad): %d/%d/%d " % (nEnough[j], nok, nok+nbad) + ccl.W
            VAR_adap = np.zeros((nok, nbin)) # rebinned profiles (*)
            # (*): one of these per variable
            # loop over the 'nok' events that passed the filter above:
            for i in range(nok):
                VAR_adap[i,:] = ADAP[i*nvars+j][1] # rebinned values of variable "j" for event "i"
            # mean values of this variable for each event
            avrVAR_adap = mvs_for_each_event(VAR_adap, nbin, nwndw, Enough.T[j])
            print " ---> (%d/%d) avrVAR_adap[]: \n" % (j+1,nvars), avrVAR_adap
            VAR_avrg = np.zeros(nbin)
            VAR_avrgNorm = np.zeros(nbin)
            VAR_medi = np.zeros(nbin)
            VAR_std = np.zeros(nbin)
            ndata = np.zeros(nbin)
            for i in range(nbin):
                cond = ~np.isnan(VAR_adap.T[i,:]) # filter out events contributing no data in this bin
                ndata[i] = len(find(cond)) # number of data points != nan
                VAR_avrg[i] = np.mean(VAR_adap.T[i,cond]) # mean over the non-flagged values
                VAR_avrgNorm[i] = np.mean(VAR_adap.T[i,cond]/avrVAR_adap[cond])
                VAR_medi[i] = np.median(VAR_adap.T[i,cond])# median over the non-flagged values
                VAR_std[i] = np.std(VAR_adap.T[i,cond]) # std of the same set of values
            #--- compute the normalized profile for each variable
            #ii = nwndw[0]*binsPerTimeUnit
            #AvrInWndw = mean(VAR_avrg[ii:ii+binsPerTimeUnit])
            tnorm = ADAP[0][0]
            stuff += [[nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata, avrVAR_adap]]
        #return stuff, nEnough, Enough, IDs
        self.out = OUT = {}
        OUT['dVARS'] = stuff
        OUT['nEnough'] = nEnough
        OUT['Enough'] = Enough
        OUT['IDs'] = IDs
        OUT['tnorm'] = OUT['dVARS'][0][2]
    def load_data_and_timeshift(self):
        """Dispatch to the loader matching self.data_name ('ACE' or 'McMurdo')."""
        if self.data_name=='ACE':
            self.load_data_ACE()
        elif self.data_name=='McMurdo':
            self.load_data_McMurdo()
        else:
            print " --------> BAD 'self.data_name'!!!"
            print " exiting.... "
            raise SystemExit
    def load_data_McMurdo(self):
        """Loader stub for McMurdo data (only unpacks members; not implemented)."""
        tb = self.tb
        nBin = self.nBin
        bd = self.bd
        day = 86400.
    def load_data_ACE(self):
        """Read ACE variables from the input netCDF, optionally apply the
        shock-time shift corrections, and build self.VARS entries of the form
        [data, short name, vertical limits, ylabel]."""
        tb = self.tb
        nBin = self.nBin
        bd = self.bd
        day = 86400.
        #----------------------------------------------------------
        print " leyendo tiempo..."
        t_utc = utc_from_omni(self.f_sc)
        print " Ready."
        #++++++++++++++++++++ EDGE CORRECTION +++++++++++++++++++++++++++++
        # IMPORTANT:
        # Only valid for the "63 events" (MCflag='2', and visible in ACE)
        # NOTE: shock jumps come out more pronounced with True.
        if self.FILTER['CorrShift']:
            ShiftCorrection(ShiftDts, tb.tshck)
            ShiftCorrection(ShiftDts, tb.tini_icme)
            ShiftCorrection(ShiftDts, tb.tend_icme)
            ShiftCorrection(ShiftDts, tb.tini_mc)
            ShiftCorrection(ShiftDts, tb.tend_mc)
        #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        B = np.array(self.f_sc.variables['Bmag'].data)
        Vsw = np.array(self.f_sc.variables['Vp'].data)
        Temp = np.array(self.f_sc.variables['Tp'].data)
        Pcc = np.array(self.f_sc.variables['Np'].data)
        rmsB = np.array(self.f_sc.variables['dBrms'].data)
        alphar = np.array(self.f_sc.variables['Alpha_ratio'].data)
        beta = calc_beta(Temp, Pcc, B)
        rmsBoB = rmsB/B
        print " -------> variables leidas!"
        #------------------------------------ VARIABLES
        self.t_utc = t_utc
        self.VARS = VARS = []
        # variable, file name, vertical limits, ylabel
        VARS += [[B, 'B', [5., 18.], 'B [nT]']]
        VARS += [[Vsw, 'V', [380., 650.], 'Vsw [km/s]']]
        VARS += [[rmsBoB, 'rmsBoB', [0.01, 0.2], 'rms($\hat B$/|B|) [1]']]
        VARS += [[beta, 'beta', [0.001, 5.], '$\\beta$ [1]']]
        VARS += [[Pcc, 'Pcc', [2, 17.], 'proton density [#/cc]']]
        VARS += [[Temp, 'Temp', [1e4, 4e5], 'Temp [K]']]
        VARS += [[alphar, 'AlphaRatio', [1e-3, 0.1], 'alpha ratio [1]']]
        self.nvars = len(VARS)
        #---------
        #nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime'] # [1] number of bins wanted in the average profile
        #fgap = 0.2 # tolerated gap fraction
        # nEnough: nmbr of events aporting good data in 80% of the window
        self.aux = aux = {}
        aux['SELECC'] = self.SELECC
        """aux['BETW1998_2006'] = BETW1998_2006
        aux['DURATION'] = DURATION
        if wang_filter: aux['ThetaCond'] = ThetaCond
        if vsw_filter: aux['SpeedCond'] = SpeedCond
        if z_filter_on: aux['z_cond'] = z_cond
        aux['dt_mc'] = dt_mc
        aux['dt_sh'] = dt_sh"""
        #---- OUTPUT:
        #self.VARS = VARS
        #self.out = out
        #self.aux = aux
    #---- generate figures and ascii files of the mean/median profiles
    def make_plots(self):
        """Build the output directories and, for each variable, save the
        average-profile figure, an ASCII table, and a per-variable
        event-count summary file."""
        nBin = self.nBin
        fgap = self.fgap
        MCwant = self.FILTER['MCwant']
        #dt_mc = self.aux['dt_mc']
        #dt_sh = self.aux['dt_sh']
        ThetaThres = self.CUTS['ThetaThres']
        v_lo = self.CUTS['v_lo']
        v_hi = self.CUTS['v_hi']
        z_lo = self.CUTS['z_lo']
        z_hi = self.CUTS['z_hi']
        nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime'] # [1] number of bins wanted in the average profile
        #-------------------- prefixes:
        # prefix for the Wang filter:
        #WangFlag = wangflag(ThetaThres) #'NaN' #wangflag(ThetaThres)
        if self.FILTER['wang']:
            WangFlag = str(ThetaThres)
        else:
            WangFlag = 'NaN'
        # general prefix for the figure names:
        if self.FILTER['CorrShift']:
            prexShift = 'wShiftCorr'
        else:
            prexShift = 'woShiftCorr'
        # z-expansion filter
        if not(self.FILTER['z_filter_on']):
            z_lo = z_hi = 0.0 # these values mean there is no z filter
        # filter by Vmc (MC speed)
        if not(self.FILTER['vsw_filter']):
            v_lo = v_hi = 0.0 # these values mean there is no Vmc filter
        #-------------------------------
        # generic names...
        DIR_FIGS = '%s/MCflag%s/%s' % (self.dir_plots, MCwant['alias'], prexShift)
        DIR_ASCII = '%s/MCflag%s/%s' % (self.dir_ascii, MCwant['alias'], prexShift)
        os.system('mkdir -p %s' % DIR_FIGS)
        os.system('mkdir -p %s' % DIR_ASCII)
        print ccl.On + " -------> creando: %s" % DIR_FIGS + ccl.W
        print ccl.On + " -------> creando: %s" % DIR_ASCII + ccl.W
        FNAMEs = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant['alias'], nBin['before'], nBin['after'], fgap)
        FNAMEs += '_Wang%s' % (WangFlag)
        FNAMEs += '_vlo.%03.1f.vhi.%04.1f' % (v_lo, v_hi)
        FNAMEs += '_zlo.%2.2f.zhi.%2.2f' % (z_lo, z_hi)
        FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
        FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)
        fname_nro = DIR_ASCII+'/'+'n.events_'+FNAMEs+'.txt'
        fnro = open(fname_nro, 'w')
        #--------------------------------------------------------------------------------
        nvars = len(self.VARS)
        for i in range(nvars):
            fname_fig = '%s_%s.png' % (FNAME_FIGS, self.VARS[i][1])
            print ccl.Rn+ " ------> %s" % fname_fig
            varname = self.VARS[i][1]
            ylims = self.VARS[i][2]
            ylabel = self.VARS[i][3]
            mediana = self.out['dVARS'][i][4]
            average = self.out['dVARS'][i][3]
            std_err = self.out['dVARS'][i][5]
            nValues = self.out['dVARS'][i][6] # nmbr of good values aporting data
            #binsPerTimeUnit = nbin #nbin/(1+nbefore+nafter)
            N_selec = self.out['dVARS'][i][0]
            N_final = self.out['nEnough'][i] #nEnough[i]
            SUBTITLE = '# of selected events: %d \n\
    events w/80%% of data: %d \n\
    bins per time unit: %d \n\
    MCflag: %s \n\
    WangFlag: %s' % (N_selec, N_final, nBin['bins_per_utime'], MCwant['alias'], WangFlag)
            makefig(mediana, average, std_err, nValues, self.out['tnorm'], SUBTITLE,
                ylims, ylabel, fname_fig)
            fdataout = '%s_%s.txt' % (FNAME_ASCII, self.VARS[i][1])
            dataout = np.array([self.out['tnorm'] , mediana, average, std_err, nValues])
            print " ------> %s\n" % fdataout + ccl.W
            np.savetxt(fdataout, dataout.T, fmt='%12.5f')
            #-------- record the number of selected events for this variable
            line = '%s %d %d\n' % (varname, N_final, N_selec)
            fnro.write(line)
        print ccl.Rn + " --> nro de eventos seleccionados: " + fname_nro + ccl.W
        fnro.close()
        #--- outputs (besides the .png files)
        self.DIR_ASCII = DIR_ASCII
        self.FNAMEs = FNAMEs
    #---- build the file that contains data on the selected events:
    #     - mean values of the observables (B, Vsw, Temp, beta, etc)
    #     - the IDs of the events
    #     - duration of the MCs and the sheaths
    def build_params_file(self):
        """Write a netCDF 'stuff' file with, per variable: per-event means,
        contributing event IDs, and sheath/MC durations."""
        DIR_ASCII = self.DIR_ASCII
        FNAMEs = self.FNAMEs
        #---------------------------------------------- begin: NC_FILE
        print "\n**************************************** begin: NC_FILE"
        #------- we generate a record of the ids of the
        #        events that entered the averages.
        #        Note: one record per variable.
        fname_out = DIR_ASCII+'/'+'_stuff_'+FNAMEs+'.nc' #'./test.nc'
        fout = netcdf_file(fname_out, 'w')
        print "\n ----> generando: %s\n" % fname_out
        IDs = self.out['IDs']
        for i in range(len(self.VARS)):
            varname = self.VARS[i][1]
            print " ----> " + varname
            n_events = len(IDs[varname])
            dimname = 'nevents_'+varname
            fout.createDimension(dimname, n_events)
            prom = self.out['dVARS'][i][7]
            cc = np.isnan(prom)
            prom = prom[~cc]
            dims = (dimname,)
            write_variable(fout, varname, dims,
                prom, 'd', 'average_values per event')
            #---------- IDs of this variable
            # NOTE(review): `map` returns a list only in Python 2.
            ids = map(int, IDs[varname])
            vname = 'IDs_'+varname
            write_variable(fout, vname, dims, ids, 'i',
                'event IDs that enter in this parameter average')
            #---------- duration of the structure
            # NOTE(review): the inner loop reuses `i`, shadowing the outer
            # loop variable; harmless here (the outer `for` reassigns each
            # iteration) but confusing.
            dtsh = np.zeros(len(ids))
            dtmc = np.zeros(len(ids))
            for i in range(len(ids)):
                id = ids[i]
                dtsh[i] = self.dt_sh[id]
                dtmc[i] = self.dt_mc[id]
            vname = 'dt_sheath_'+varname
            write_variable(fout, vname, dims, dtsh, 'd', '[days]')
            vname = 'dt_mc_'+varname
            write_variable(fout, vname, dims, dtmc, 'd', '[days]')
        fout.close()
        print "**************************************** end: NC_FILE"
    #---------------------------------------------- end: NC_FILE
    def filter_events(self):
        """Build self.SELECC, the boolean event-selection mask, by combining
        the date, MC-flag, multi-MC, duration, Wang, speed and z filters."""
        tb = self.tb
        FILTER = self.FILTER
        ThetaThres = self.CUTS['ThetaThres']
        dTday = self.CUTS['dTday']
        v_lo = self.CUTS['v_lo']
        v_hi = self.CUTS['v_hi']
        z_lo = self.CUTS['z_lo']
        z_hi = self.CUTS['z_hi']
        day = 86400.
        #------------------------------------ EVENTS's PARAMETERS
        #MCsig = array(f_events.variables['MC_sig'].data) # 2,1,0: MC, rotation, irregular
        #Vnsh = array(f_events.variables['wang_Vsh'].data) # shock normal velocity
        ThetaSh = np.array(self.f_events.variables['wang_theta_shock'].data) # orientation of the shock normal
        i_V = np.array(self.f_events.variables['i_V'].data) # icme speed
        #------------------------------------
        #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #++++++++++++++++++ begin: EVENT SELECTION ++++++++++++++++++++++
        #------- dates
        BETW1998_2006 = np.ones(tb.n_icmes, dtype=bool)
        # NOTE(review): `range(...)+range(...)` is Python-2-only (list
        # concatenation); under Python 3 this raises TypeError.
        for i in range(307, tb.n_icmes)+range(0, 26):
            BETW1998_2006[i]=False # 'False' to exclude events
        #------- select MCs with a catalog label (lepping=2, etc.)
        MC_FLAG = np.ones(tb.n_icmes, dtype=bool)
        for i in range(tb.n_icmes):
            MC_FLAG[i] = tb.MCsig[i] in FILTER['MCwant']['flags']
        #------- exclude events with 2 MCs
        EVENTS_with_2MCs= (26, 148, 259, 295)
        MCmultiple = FILTER['Mcmultiple'] #False #True to include multi-MC events
        MCmulti = np.ones(tb.n_icmes, dtype=bool) # False for multi-MC events (yes, written correctly)
        # NOTE(review): `~` on a Python bool yields -2/-1, which are BOTH
        # truthy, so this exclusion runs regardless of the flag;
        # `not FILTER['Mcmultiple']` was presumably intended -- confirm.
        if(~FILTER['Mcmultiple']):
            for i in EVENTS_with_2MCs:
                MCmulti[i] &= False
        #------- shock orientation (Wang catalog)
        if FILTER['wang']:
            ThetaCond = thetacond(ThetaThres, ThetaSh)
        #------- duration of sheaths
        self.dt_mc = diff_dates(tb.tend_mc, tb.tini_mc)/day # [day]
        self.dt_sh = diff_dates(tb.tini_mc, tb.tshck)/day # [day]
        dt = diff_dates(self.bd.tend, self.bd.tini)/day
        DURATION = dt > dTday # sheaths>0
        #------- speed of icmes
        if (FILTER['vsw_filter']) & (v_lo<v_hi):
            SpeedCond = (i_V>=v_lo) & (i_V<v_hi)
        #------- z expansion (a. gulisano)
        z_exp = self.z_exp
        if (FILTER['z_filter_on']) & (z_lo<z_hi):
            z_cond = (z_exp>=z_lo) & (z_exp<z_hi)
        #------- total filter
        SELECC = np.ones(tb.n_icmes, dtype=bool)
        SELECC &= BETW1998_2006 # we stay within this period of years
        SELECC &= MCmulti # multiple clouds
        SELECC &= MC_FLAG # cloud catalog
        SELECC &= DURATION # we don't want 1-hr sheaths: they only add noise
        if FILTER['wang']: SELECC &= ThetaCond # close to 180 is the nose of the shock
        if FILTER['vsw_filter']: SELECC &= SpeedCond
        if FILTER['z_filter_on']: SELECC &= z_cond # to disable this filter, comment this line
        """print "+++ eventos +++++++++++++++++++++++++++++++++++++++"
        for i in range(tb.n_icmes):
            if SELECC[i]:
                print i
        raw_input()"""
        self.SELECC = SELECC
        self.n_SELECC = len(find(SELECC))
        #+++++++++++++++++ end: EVENT SELECTION +++++++++++++++++++++++++
        #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        if self.n_SELECC<=0:
            print " --------> FATAL ERROR!!!: self.n_SELECC=<0"
            print " exiting....... \n"
            raise SystemExit
##
|
jimsrc/seatos
|
shared_lib/shared_funcs_ii.py
|
Python
|
mit
| 31,654
|
[
"NetCDF"
] |
1615f76fa8cc347ee29c687480a2b435d7ed1d4b40ae17bb5356a748ccc09556
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# Metadata consumed by the cocos test harness: autotest timing script and search tags.
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "particles, Fireworks"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.particle_systems import *
class L(Layer):
    """Demo layer showing the Fireworks particle system at a fixed position."""
    def __init__(self):
        super( L, self).__init__()
        p = Fireworks()
        # Alternative particle systems to try instead of Fireworks:
        # p = Explosion()
        # p = Fire()
        # p = Flower()
        # p = Sun()
        # p = Spiral()
        # p = Meteor()
        # p = Galaxy()
        p.position = (320,100)
        self.add( p )
def main():
    """Initialize the director and run a scene containing the particle layer."""
    director.init( resizable=True )
    main_scene = cocos.scene.Scene()
    main_scene.add( L() )
    director.run( main_scene )
if __name__ == '__main__':
    main()  # launch the demo only when run as a script
|
shadowmint/nwidget
|
lib/cocos2d-0.5.5/test/test_particle_fireworks.py
|
Python
|
apache-2.0
| 956
|
[
"Galaxy"
] |
a03e973e224d67b26ced6479db18fad90487c0b18812f252c7caadef1cbcba51
|
# stardard library
import os
import json
import cPickle as pck
import itertools as it
# external libraries
import numpy as np
from gala import imio
def add_anything(a, b):
    """Return a + b for any operands supporting addition (used with reduce)."""
    total = a + b
    return total
def write_synapse_to_vtk(neurons, coords, fn, im=None, margin=None):
    """Output neuron shapes around pre- and post-synapse coordinates.

    The coordinate array is a (n+1) x m array, where n is the number of
    post-synaptic sites (fly neurons are polyadic) and m = neurons.ndim, the
    number of dimensions of the image.
    """
    # NOTE(review): Python 2 idioms -- `zip(*coords)` used directly as a
    # fancy index and the builtin `reduce` -- both break under Python 3.
    neuron_ids = neurons[zip(*coords)]
    mean_coords = coords.mean(axis=0).astype(np.uint)
    neurons = get_box(neurons, mean_coords, margin)
    # label each participating neuron's voxels with its 1-based position
    synapse_volume = reduce(add_anything,
        [(i+1)*(neurons==j) for i, j in enumerate(neuron_ids)])
    imio.write_vtk(synapse_volume.astype(np.uint8), fn)
    if im is not None:
        im = get_box(im, mean_coords, margin)
        imio.write_vtk(im,
            os.path.join(os.path.dirname(fn), 'image.' + os.path.basename(fn)))
def all_sites(synapses):
    """Flatten all synapse arrays (T-bar plus partners) into a single list of sites."""
    return list(it.chain.from_iterable(tbar_post_pairs_to_arrays(synapses)))
def all_postsynaptic_sites(synapses):
    """Collect every post-synaptic site from (tbar, partners) pairs into one list."""
    partner_lists = (partners for _tbar, partners in synapses)
    return list(it.chain.from_iterable(partner_lists))
def get_box(a, coords, margin):
    """Obtain a box of size 2*margin+1 around coords in array a.

    Boxes close to the boundary are trimmed accordingly.  Returns a copy
    (or `a` itself when margin is None).
    """
    if margin is None:
        return a
    coords = np.array(coords)[np.newaxis, :].astype(int)
    origin = np.zeros(coords.shape, dtype=int)
    shape = np.array(a.shape)[np.newaxis, :]
    # clip the box corners to the array bounds
    topleft = np.concatenate((coords-margin, origin), axis=0).max(axis=0)
    bottomright = np.concatenate((coords+margin+1, shape), axis=0).min(axis=0)
    # index with a *tuple* of slices: indexing with a list of slices was
    # deprecated and then removed in NumPy
    box = tuple(slice(top, bottom) for top, bottom in zip(topleft, bottomright))
    return a[box].copy()
def tbar_post_pairs_to_arrays(pairs):
    """For each (tbar, partners) pair, stack the T-bar row on top of the partner rows."""
    arrays = []
    for tbar, partners in pairs:
        arrays.append(np.concatenate((tbar[np.newaxis, :], partners), axis=0))
    return arrays
def volume_synapse_view(pairs, shape):
    """Paint each synapse (T-bar + partners) into a label volume of the given shape.

    Voxels of synapse i receive label i+1.  When the first (pre-synaptic)
    coordinate along an axis is negative, that axis is re-anchored from the
    end of the volume.
    """
    v = np.zeros(shape, int)
    for i, (pre, post) in enumerate(pairs):
        coords = np.concatenate((pre[np.newaxis, :], post), axis=0)
        coords = [coords[:, j] for j in range(coords.shape[1])]
        for j in range(len(coords)):
            if coords[j][0] < 0:
                coords[j] = shape[j] + coords[j]
        # index with a tuple: NumPy removed support for indexing with a
        # list of index arrays
        v[tuple(coords)] = i+1
    return v
def synapses_from_raveler_session_data(fn, output_format='pairs',
        t=(2, 1, 0), s=(1, -1, 1), transform=True):
    """Extract (tbar, partners) coordinate pairs from a pickled Raveler session.

    Python-2-only as written (`cPickle`, list-returning `zip`/`map`).
    `t`/`s` are passed to `coord_transform` unless transform is False.
    """
    if not transform:
        t = (0, 1, 2)
        s = (1, 1, 1)
    with open(fn) as f:
        d = pck.load(f)
    annots = d['annotations']['point']
    tbars = [a for a in annots if annots[a]['kind'] == 'T-bar']
    posts = [annots[a] for a in tbars]
    # SECURITY NOTE(review): `eval` on annotation text executes arbitrary
    # code if the session file is untrusted; a JSON parse would be safer.
    posts = [eval(p['value'].replace('false', 'False').replace('true', 'True'))
        for p in posts]
    posts = [p['partners'] for p in posts]
    posts = [map(lambda x: x[0], p) for p in posts]
    tbars = [coord_transform(tbar, t, s) for tbar in tbars]
    posts = [coord_transform(post, t, s) for post in posts]
    pairs = zip(tbars, posts)
    if output_format == 'pairs':
        return pairs
    elif output_format == 'arrays':
        return tbar_post_pairs_to_arrays(pairs)
def raveler_synapse_annotations_to_coords(fn, output_format='pairs',
        t=(2, 1, 0), s=(1, -1, 1), transform=True):
    """Obtain pre- and post-synaptic coordinates from Raveler annotations.

    `fn` is a JSON annotation file; `t`/`s` are the axis permutation and
    per-axis sign passed to `coord_transform`.  Returns (tbar, partners)
    pairs or stacked arrays, per `output_format`.
    """
    if not transform:
        t = (0, 1, 2)
        s = (1, 1, 1)
    with open(fn, 'r') as f:
        syns = json.load(f)['data']
    tbars = [coord_transform(syn['T-bar']['location'], t, s) for syn in syns]
    posts = [coord_transform([p['location'] for p in syn['partners']], t, s)
        for syn in syns]
    # NOTE(review): under Python 3, zip() returns an iterator, not a list
    pairs = zip(tbars, posts)
    if output_format == 'pairs':
        return pairs
    elif output_format == 'arrays':
        return tbar_post_pairs_to_arrays(pairs)
def coord_transform(coords, t=(2, 1, 0), s=(1, -1, 1)):
    """Permute the last axis by `t` and apply per-axis signs `s`.

    Subtracting (s == -1) keeps sign-flipped axes 0-based after negation.
    """
    arr = np.array(coords)
    signs = np.array(s)
    permuted = arr[..., t]
    return permuted * signs - (signs == -1)
def write_all_synapses_to_vtk(neurons, list_of_coords, fn, im, margin=None,
        single_pairs=True):
    """Write one VTK volume per synapse (or per pre/post pair).

    `fn` must be a %-format template: it receives (i, j) when single_pairs
    is True, and i alone otherwise.
    """
    for i, coords in enumerate(list_of_coords):
        if single_pairs:
            # one output file per (pre, post) pair within synapse i
            pre = coords[0]
            for j, post in enumerate(coords[1:]):
                pair_coords = np.concatenate(
                    (pre[np.newaxis, :], post[np.newaxis, :]), axis=0)
                cfn = fn%(i, j)
                write_synapse_to_vtk(neurons, pair_coords, cfn, im, margin)
        else:
            cfn = fn%i
            write_synapse_to_vtk(neurons, coords, cfn, im, margin)
|
janelia-flyem/synapse-geometry
|
syngeo/io.py
|
Python
|
bsd-3-clause
| 4,730
|
[
"NEURON"
] |
d4bfc9437c1a897f545670694f7bd7b87b38407d88ac04a978ba78c03bfb963f
|
import os
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the text contents of `fname`, resolved relative to this file's directory.

    Uses a context manager so the file handle is closed promptly (the
    original left the handle open for the GC to reclaim).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Cython compiler directives applied to every extension module below.
CYTHON_COMPILER_DIRECTIVES = {
    "language_level": 3,
}
# Header search paths used while cythonizing: the source tree plus NumPy's headers.
CYTHON_EXTENSION_INCLUDES = ['.', np.get_include()]
CYTHON_EXTENSION_MODULES = [
    Extension('pycalphad.core.hyperplane', sources=['pycalphad/core/hyperplane.pyx']),
    Extension('pycalphad.core.eqsolver', sources=['pycalphad/core/eqsolver.pyx']),
    Extension('pycalphad.core.phase_rec', sources=['pycalphad/core/phase_rec.pyx']),
    Extension('pycalphad.core.composition_set', sources=['pycalphad/core/composition_set.pyx']),
    Extension('pycalphad.core.minimizer', sources=['pycalphad/core/minimizer.pyx']),
]
setup(
    name='pycalphad',
    author='Richard Otis',
    author_email='richard.otis@outlook.com',
    description='CALPHAD tools for designing thermodynamic models, calculating phase diagrams and investigating phase equilibria.',
    # Do NOT include pycalphad._dev here. It is for local development and should not be distributed.
    packages=['pycalphad', 'pycalphad.codegen', 'pycalphad.core', 'pycalphad.io', 'pycalphad.plot', 'pycalphad.plot.binary', 'pycalphad.tests'],
    ext_modules=cythonize(
        CYTHON_EXTENSION_MODULES,
        include_path=CYTHON_EXTENSION_INCLUDES,
        compiler_directives=CYTHON_COMPILER_DIRECTIVES,
    ),
    package_data={
        'pycalphad/core': ['*.pxd'],
    },
    # This include is for the compiler to find the *.h files during the build_ext phase
    # the include must contain a symengine directory with header files
    include_dirs=[np.get_include()],
    license='MIT',
    long_description=read('README.rst'),
    long_description_content_type='text/x-rst',
    url='https://pycalphad.org/',
    install_requires=[
        # NOTE: please try to keep any depedencies in alphabetic order so they
        # may be easily compared with other dependency lists
        # NOTE: these dependencies may differ in name from those in the
        # conda-forge Anaconda channel. For example, conda-forge/symengine
        # gives the C++ SymEngine library, while conda-forge/python-symengine
        # provides the Python package called `symengine`.
        'Cython>=0.24',
        'importlib_metadata', # drop when pycalphad drops support for Python<3.8
        'matplotlib>=3.3',
        'numpy>=1.13',
        'pyparsing>=2.4',
        'pytest',
        'pytest-cov',
        'scipy',
        'setuptools_scm[toml]>=6.0',
        'symengine==0.7.2', # python-symengine on conda-forge
        'sympy==1.8',
        'tinydb>=3.8',
        'xarray>=0.11.2',
    ],
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Physics',
        'Topic :: Scientific/Engineering :: Chemistry',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Supported Python versions
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
|
tkphd/pycalphad
|
setup.py
|
Python
|
mit
| 3,682
|
[
"pycalphad"
] |
a7b30884ee277237e2f590163b67fd42596c1ba4432302caeeb63646d9e91758
|
# Optional-dependency check: verify the GTKAgg matplotlib backend is built and
# importable before ase-gui tries to use it.  NOTE: Python 2 print syntax.
import os
import sys
# Human-readable installation help shown when the backend is missing.
msg = "\nThe Agg rendering GTK matplotlib backend is missing or not installed properly.\n"
msg += "See http://matplotlib.org/faq/usage_faq.html#what-is-a-backend.\n"
msg += "Is the PYTHONPATH environment variable set correctly?\n"
msg += "Please verify your installation by running on the command line:\n"
msg += "python -c 'from matplotlib.backends import backend_gtkagg'\n"
msg += "\n"
msg += "This module is optional and required in order to use "
msg += "ASE's simple GUI (ase-gui).\n"
msg += "If you don't wish to use ase-gui ignore this error, otherwise\n"
msg += "please install the matplotlib package containing the missing backend\n"
msg += "using your distribution package manager, i.e.:\n"
msg += "\n"
msg += "    Debian/Ubuntu: sudo apt-get python-matplotlib\n"
msg += "\n"
msg += "    OpenSUSE: yast -i python-matplotlib-gtk\n"
msg += "\n"
msg += "    Red Hat/Fedora: yum install python-matplotlib\n"
msg += "\n"
msg += "or perform manual installation, preferably as non-root user,\n"
msg += "following http://matplotlib.sourceforge.net/users/installing.html\n"
msg += "after installing the http://www.pygtk.org/downloads.html dependency first."
# 'display' is not defined in this snippet -- presumably injected by the test
# harness when an X display is available, so the whole check is skipped in
# headless environments.  TODO confirm against the caller.
if locals().get('display'):
    try:
        import matplotlib.backends as b
        # Probe for the compiled extension first so a missing shared object
        # surfaces as IOError (distinct message) rather than ImportError.
        f = os.path.join(os.path.dirname(b.__file__), '_gtkagg.so')
        open(f).close()
        from matplotlib.backends import backend_gtkagg
    except ImportError:
        # Python 2 print-to-stderr statement; this file predates Python 3.
        print >> sys.stderr, msg
        raise
    except IOError:
        print >> sys.stderr, ("\nThe backend file %s does not exist.\n" % f) + msg
        raise
|
askhl/ase
|
ase/test/dependency_backend_gtkagg.py
|
Python
|
gpl-2.0
| 1,588
|
[
"ASE"
] |
8ead343b0c5a58edc0648afa70eae1e284aea02184fa59042da8505c3473dc5d
|
# Round-trip test for ASE's .aff (affine file format) reader/writer.
# NOTE: 'open' here deliberately shadows the builtin with ase.io.aff.affopen.
from __future__ import print_function
import numpy as np
from ase.io.aff import affopen as open
class A:
    # Minimal object implementing the aff write/read protocol: write() pushes
    # attributes through the writer; read() reconstructs from a reader proxy.
    def write(self, writer):
        writer.write(x=np.ones((2, 3)))
    @staticmethod
    def read(reader):
        a = A()
        a.x = reader.x
        return a
# Write three synced items; each sync() presumably starts a new item -- TODO
# confirm against ase.io.aff docs.
w = open('a.aff', 'w')
w.write(a=A(), y=9)
w.write(s='abc')
w.sync()
w.write(s='abc2')
w.sync()
w.write(s='abc3', z=np.ones(7, int))
w.close()
print(w.data)
# Read back by attribute and by item index.
r = open('a.aff')
print(r.y, r.s)
print(A.read(r.a).x)
print(r.a.x)
print(r[1].s)
print(r[2].s)
print(r[2].z)
# Append mode: add a dict payload and an incrementally-filled array.
w = open('a.aff', 'a')
print(w.nitems, w.offsets)
w.write(d={'h': [1, 'asdf']})
w.add_array('psi', (4, 3))
w.fill(np.ones((1, 3)))
w.fill(np.ones((1, 3)) * 2)
w.fill(np.ones((2, 3)) * 3)
w.close()
# Re-open at specific indices and via lazy array proxy; iterate all items.
print(open('a.aff', 'r', 3).d)
print(open('a.aff')[2].z)
print(open('a.aff', index=3).proxy('psi')[0:3])
for d in open('a.aff'):
    print(d)
|
suttond/MODOI
|
ase/test/aff.py
|
Python
|
lgpl-3.0
| 886
|
[
"ASE"
] |
d6d169d54e9153eef2351e4d84f126ea8bf3c7a6ad4f633f6d193002e428158a
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import session, _
from frappe.utils import today
from erpnext.utilities.transaction_base import TransactionBase
class CustomerIssue(TransactionBase):
    """Controller for the Customer Issue doctype."""

    def validate(self):
        """Require a customer for logged-in users and stamp the resolution
        date when the issue transitions into the Closed state."""
        if session['user'] != 'Guest' and not self.customer:
            frappe.throw(_("Customer is required"))
        if self.status == "Closed":
            stored_status = frappe.db.get_value("Customer Issue", self.name, "status")
            if stored_status != "Closed":
                self.resolution_date = today()

    def on_cancel(self):
        """Refuse cancellation while non-cancelled Maintenance Visits still
        reference this issue; otherwise mark the issue Cancelled."""
        linked_visits = frappe.db.sql("""select t1.name
			from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
			where t2.parent = t1.name and t2.prevdoc_docname = %s and t1.docstatus!=2""",
            (self.name))
        if not linked_visits:
            frappe.db.set(self, 'status', 'Cancelled')
        else:
            visit_names = ','.join([row[0] for row in linked_visits])
            frappe.throw(_("Cancel Material Visit {0} before cancelling this Customer Issue").format(visit_names))

    def on_update(self):
        # No post-save processing required.
        pass
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
    """Map a Customer Issue to a new Maintenance Visit document.

    Returns target_doc unchanged when a fully-completed submitted visit
    already exists for the issue.
    """
    from frappe.model.mapper import get_mapped_doc, map_child_doc

    def _update_links(source_doc, target_doc, source_parent):
        # Keep a back-reference from the visit purpose row to the issue.
        target_doc.prevdoc_doctype = source_parent.doctype
        target_doc.prevdoc_docname = source_parent.name

    completed_visits = frappe.db.sql("""select t1.name
		from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
		where t2.parent=t1.name and t2.prevdoc_docname=%s
		and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
    if completed_visits:
        return target_doc

    mapping = {
        "Customer Issue": {
            "doctype": "Maintenance Visit",
            "field_map": {}
        }
    }
    target_doc = get_mapped_doc("Customer Issue", source_name, mapping, target_doc)
    source_doc = frappe.get_doc("Customer Issue", source_name)
    if source_doc.get("item_code"):
        purpose_map = {
            "doctype": "Maintenance Visit Purpose",
            "postprocess": _update_links
        }
        map_child_doc(source_doc, target_doc, purpose_map, source_doc)
    return target_doc
|
indictranstech/focal-erpnext
|
support/doctype/customer_issue/customer_issue.py
|
Python
|
agpl-3.0
| 2,083
|
[
"VisIt"
] |
a48e6dbd88c9ee77520163cdc988fa65590a7826601e7d8cf8faec6184a6ac38
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Development script of the ChemEnv utility to get the explicit permutations for coordination environments identified
with the separation plane algorithms (typically with coordination numbers >= 6)
"""
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane, collinear
import numpy as np
import itertools
import json
# Interactive Python 2 developer script (raw_input): regenerates the explicit
# permutation list for one coordination geometry's separation-plane algorithms
# and optionally rewrites its JSON definition file.
if __name__ == '__main__':
    # Choose the geometry
    allcg = AllCoordinationGeometries()
    while True:
        cg_symbol = raw_input('Enter symbol of the geometry for which you want to get the explicit permutations : ')
        try:
            cg = allcg[cg_symbol]
            break
        except LookupError:
            print('Wrong geometry, try again ...')
            continue
    # Check if the algorithm currently defined for this geometry corresponds to the explicit permutation algorithm
    for algo in cg.algorithms:
        if algo.algorithm_type != 'SEPARATION_PLANE':
            raise ValueError('WRONG ALGORITHM !')
    newalgos = []
    ialgo = 1
    for sepplanealgo in cg._algorithms:
        print('In ialgo = {:d}/{:d}'.format(ialgo, len(cg._algorithms)))
        ialgo += 1
        if sepplanealgo.algorithm_type != 'SEPARATION_PLANE':
            raise ValueError('Should all be separation plane')
        permsonfile = 'Permutations on file in this algorithm ({:d}) '.format(len(sepplanealgo._permutations))
        print(permsonfile)
        print(sepplanealgo._permutations)
        # Start from the safe (exhaustive within ordering constraints) set of
        # permutations; the loop below prunes it down to the distinct ones.
        permutations = sepplanealgo.safe_separation_permutations(ordered_plane=sepplanealgo.ordered_plane,
                                                                 ordered_point_groups=sepplanealgo.ordered_point_groups)
        sepplanealgo._permutations = permutations
        print('Test permutations ({:d}) :'.format(len(permutations)))
        print(permutations)
        # Build a slightly randomized model environment of this geometry to
        # break accidental degeneracies between permutations.
        lgf = LocalGeometryFinder()
        lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
        lgf.setup_test_perfect_environment(cg_symbol, randomness=True, indices=range(cg.coordination_number), max_random_dist=0.05)
        lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
        # (csms, perms, sep_perms) = lgf.coordination_geometry_symmetry_measures_separation_plane_newpmg(coordination_geometry=cg,
        #                                                                                                separation_plane_algo=sepplanealgo,
        #                                                                                                testing=True)
        # Setting up the plane of separation
        local_plane = None
        found = False
        # Try increasingly large point subsets (2..4) until a non-collinear
        # combination defines a usable plane.
        for npoints in range(sepplanealgo.minimum_number_of_points,
                             min(sepplanealgo.maximum_number_of_points, 4)+1):
            if found:
                break
            for ipoints in itertools.combinations(sepplanealgo.plane_points, npoints):
                points_combination = [lgf.local_geometry.coords[ipoint] for ipoint in ipoints]
                if npoints == 2:
                    # Two plane points: use the central site as the third point.
                    if collinear(points_combination[0], points_combination[1],
                                 lgf.local_geometry.central_site, tolerance=0.25):
                        continue
                    local_plane = Plane.from_3points(points_combination[0], points_combination[1],
                                                     lgf.local_geometry.central_site)
                    found = True
                    break
                elif npoints == 3:
                    if collinear(points_combination[0], points_combination[1], points_combination[2], tolerance=0.25):
                        continue
                    local_plane = Plane.from_3points(points_combination[0], points_combination[1], points_combination[2])
                    found = True
                    break
                elif npoints > 3:
                    local_plane = Plane.from_npoints(points_combination, best_fit='least_square_distance')
                    found = True
                    break
                else:
                    raise ValueError('Wrong number of points to initialize separation plane')
        points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()
        # Actual test of the permutations
        cgsm = lgf._cg_csm_separation_plane(coordination_geometry=cg,
                                            sepplane=sepplanealgo,
                                            local_plane=local_plane,
                                            plane_separations=[],
                                            dist_tolerances=[0.05, 0.1, 0.2, 0.3],
                                            testing=True,
                                            points_perfect=points_perfect)
        print(cgsm)
        if cgsm[0] is None:
            # Pause so the developer can see the failure before continuing.
            print('IS NONE !')
            raw_input()
            continue
        csms, perms, algos, sep_perms = cgsm[0], cgsm[1], cgsm[2], cgsm[3]
        print('Continuous symmetry measures')
        print(csms)
        # Keep one permutation per distinct CSM value: permutations yielding
        # a CSM already recorded (within 1e-6) are redundant.
        csms_with_recorded_permutation = []
        explicit_permutations = []
        for icsm, csm in enumerate(csms):
            found = False
            for csm2 in csms_with_recorded_permutation:
                if np.isclose(csm, csm2, rtol=0.0, atol=1.0e-6):
                    found = True
                    break
            if not found:
                print(perms[icsm], csm)
                csms_with_recorded_permutation.append(csm)
                explicit_permutations.append(sep_perms[icsm])
        print(permsonfile)
        print('Permutations found ({:d}) : '.format(len(explicit_permutations)))
        print(explicit_permutations)
        sepplanealgo.explicit_permutations = explicit_permutations
        newalgos.append(sepplanealgo)
    # Write update geometry file ?
    test = raw_input('Save it ? ("y" to confirm)')
    if test == 'y':
        cg._algorithms = newalgos
        cg_dict = cg.as_dict()
        f = open('../coordination_geometries_files_new/{}.json'.format(cg_symbol), 'w')
        json.dump(cg_dict, f)
        f.close()
|
matk86/pymatgen
|
dev_scripts/chemenv/explicit_permutations_plane_algorithm.py
|
Python
|
mit
| 6,600
|
[
"pymatgen"
] |
54dd736e21dee2e377a2d9e98c0e43c29683694d6fc62119fc071e26f76c40bf
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIlluminaio(RPackage):
    """Parsing Illumina Microarray Output Files.
    Tools for parsing Illumina's microarray output files, including IDAT."""
    homepage = "https://bioconductor.org/packages/illuminaio"
    git = "https://git.bioconductor.org/packages/illuminaio.git"
    # Bioconductor distributes via git, so each package version is pinned to
    # the commit of the matching Bioconductor release branch.
    version('0.26.0', commit='40c2f94df2ea64d745d25aadd2bfb33ac3e02f81')
    version('0.24.0', commit='47953c77713c2da00a610f39308f86c5b44f6c59')
    version('0.22.0', commit='dbd842340999569975ea593f47d70a729b3f68f2')
    version('0.20.0', commit='d226628133b2396be9e7a6bf043f0309bd70c4ec')
    version('0.18.0', commit='e6b8ab1f8eacb760aebdb4828e9cfbf07da06eda')
    # Needed both to build the package and at runtime (base64 decoding of IDAT).
    depends_on('r-base64', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-illuminaio/package.py
|
Python
|
lgpl-2.1
| 929
|
[
"Bioconductor"
] |
f8f7e8157bdd371a0948f32ac1090a659132c1cabf22bdf741865f86729c0b3a
|
"""
Write a function that takes an unsigned integer and
returns the number of '1' bits it has
(also known as the Hamming weight).
For example, the 32-bit integer '11' has binary
representation 00000000000000000000000000001011,
so the function should return 3.
T(n)- O(k) : k is the number of 1s present in binary representation.
NOTE: this complexity is better than O(log n).
e.g. for n = 00010100000000000000000000000000
only 2 iterations are required.
Number of loops is
equal to the number of 1s in the binary representation."""
def count_ones_recur(n):
    """Count set bits using Brian Kernighan's trick, recursively.

    Each step clears the lowest set bit via n & (n-1), so the recursion
    depth equals the number of 1-bits rather than the bit width.
    """
    return 0 if not n else 1 + count_ones_recur(n & (n - 1))
def count_ones_iter(n):
    """Count set bits using Brian Kernighan's trick, iteratively.

    The loop body runs once per 1-bit (each pass clears the lowest set
    bit), which beats the O(log n) shift-and-test approach.
    """
    ones = 0
    remaining = n
    while remaining:
        remaining &= remaining - 1
        ones += 1
    return ones
|
keon/algorithms
|
algorithms/bit/count_ones.py
|
Python
|
mit
| 878
|
[
"Brian"
] |
ba28e1f7d28752c96eccf0c22ac8a654ffbf6bd1d79cfc801d83ebf6c0ff63e0
|
"""High level summaries of samples and programs with MultiQC.
https://github.com/ewels/MultiQC
"""
import collections
import glob
import json
import mimetypes
import os
import pandas as pd
import shutil
import numpy as np
import pybedtools
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.bam import ref
from bcbio.structural import annotate
from bcbio.variation import bedutils
from bcbio.qc.variant import get_active_vcinfo
from bcbio.upload import get_all_upload_paths_from_sample
def summary(*samples):
    """Summarize all quality metrics together"""
    # samples arrive possibly nested (per-QC splits); flatten to one list.
    samples = list(utils.flatten(samples))
    work_dir = dd.get_work_dir(samples[0])
    multiqc = config_utils.get_program("multiqc", samples[0]["config"])
    if not multiqc:
        # Deliberately non-fatal: the run continues and fails later if multiqc
        # is genuinely required.
        logger.debug("multiqc not found. Update bcbio_nextgen.py tools to fix this issue.")
    out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "multiqc"))
    out_data = os.path.join(out_dir, "multiqc_data")
    out_file = os.path.join(out_dir, "multiqc_report.html")
    file_list = os.path.join(out_dir, "list_files.txt")
    # Work on deep copies with CWL tarballs unpacked so staging does not
    # mutate the caller's sample dicts.
    work_samples = [cwlutils.unpack_tarballs(utils.deepish_copy(x), x) for x in samples]
    work_samples = _report_summary(work_samples, os.path.join(out_dir, "report"))
    if not utils.file_exists(out_file):
        with tx_tmpdir(samples[0], work_dir) as tx_out:
            in_files = _get_input_files(work_samples, out_dir, tx_out)
            in_files += _merge_metrics(work_samples, out_dir)
            if _one_exists(in_files):
                with utils.chdir(out_dir):
                    _create_config_file(out_dir, work_samples)
                    input_list_file = _create_list_file(in_files, file_list)
                    if dd.get_tmp_dir(samples[0]):
                        export_tmp = "export TMPDIR=%s &&" % dd.get_tmp_dir(samples[0])
                    else:
                        export_tmp = ""
                    path_export = utils.local_path_export()
                    other_opts = config_utils.get_resources("multiqc", samples[0]["config"]).get("options", [])
                    other_opts = " ".join([str(x) for x in other_opts])
                    # cmd is filled via locals(): every {name} placeholder is a
                    # local variable defined above.
                    cmd = "{path_export}{export_tmp} {multiqc} -f -l {input_list_file} {other_opts} -o {tx_out}"
                    do.run(cmd.format(**locals()), "Run multiqc")
                    # Move results out of the transactional dir only on success.
                    if utils.file_exists(os.path.join(tx_out, "multiqc_report.html")):
                        shutil.move(os.path.join(tx_out, "multiqc_report.html"), out_file)
                        shutil.move(os.path.join(tx_out, "multiqc_data"), out_data)
    out = []
    # Attach the multiqc outputs to the first grouped sample only; the rest
    # pass through unchanged.
    for i, data in enumerate(_group_by_samplename(samples)):
        if i == 0:
            if utils.file_exists(out_file):
                data_files = glob.glob(os.path.join(out_dir, "multiqc_data", "*.txt"))
                data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.bed"))
                data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.txt"))
                data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.tsv"))
                data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.yaml"))
                data_files += glob.glob(os.path.join(out_dir, "report", "*.R*"))
                data_files += glob.glob(os.path.join(out_dir, "multiqc_config.yaml"))
                data_files.append(file_list)
                if "summary" not in data:
                    data["summary"] = {}
                data["summary"]["multiqc"] = {"base": out_file, "secondary": data_files}
                file_list_final = _save_uploaded_file_list(samples, file_list, out_dir)
                if file_list_final:
                    data["summary"]["multiqc"]["secondary"].append(file_list_final)
        out.append([data])
    return out
def _save_uploaded_file_list(samples, file_list_work, out_dir):
    """Rewrite the work-directory file list as upload-relative paths.

    Returns the path of the rewritten list, or None when there is nothing
    to translate (missing work list, no upload mapping, or no matches).
    """
    if not utils.file_exists(file_list_work):
        return None
    file_list_final = os.path.join(out_dir, "list_files_final.txt")
    mapping = {}
    for sample in samples:
        mapping.update(get_all_upload_paths_from_sample(sample))
    if not mapping:
        return None
    with open(file_list_work) as in_handle:
        existing = [line.strip() for line in in_handle.readlines()
                    if os.path.exists(line.strip())]
    relative_paths = []
    for work_path in existing:
        if work_path in mapping:
            # Express the upload location relative to the upload base dir.
            base_dir = samples[0]["upload"]["dir"]
            relative_paths.append(os.path.relpath(mapping[work_path], base_dir))
    if not relative_paths:
        return None
    with open(file_list_final, "w") as out_handle:
        for rel_path in relative_paths:
            out_handle.write(rel_path + '\n')
    return file_list_final
def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False
def _get_input_files(samples, base_dir, tx_out_dir):
    """Retrieve input files, keyed by sample and QC method name.
    Stages files into the work directory to ensure correct names for
    MultiQC sample assessment when running with CWL.
    """
    in_files = collections.defaultdict(list)
    for data in samples:
        # summary.qc can arrive in several shapes: missing, a "None" string,
        # a single path (CWL), or the normal program->files dict.
        sum_qc = tz.get_in(["summary", "qc"], data, {})
        if sum_qc in [None, "None"]:
            sum_qc = {}
        elif isinstance(sum_qc, basestring):  # basestring: Python 2 codebase
            sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
        elif not isinstance(sum_qc, dict):
            raise ValueError("Unexpected summary qc: %s" % sum_qc)
        for program, pfiles in sum_qc.items():
            if isinstance(pfiles, dict):
                pfiles = [pfiles["base"]] + pfiles.get("secondary", [])
            # CWL: presents output files as single file plus associated secondary files
            elif isinstance(pfiles, basestring):
                if os.path.exists(pfiles):
                    # Pull in every sibling of the base file.
                    pfiles = [os.path.join(os.path.dirname(pfiles), x) for x in os.listdir(os.path.dirname(pfiles))]
                else:
                    pfiles = []
            in_files[(dd.get_sample_name(data), program)].extend(pfiles)
    staged_files = []
    for (sample, program), files in in_files.items():
        cur_dir = utils.safe_makedir(os.path.join(tx_out_dir, sample, program))
        for f in files:
            if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):
                if _in_temp_directory(f):
                    # Copy out of temp space so the file survives and carries
                    # a sample/program-scoped name for MultiQC.
                    staged_f = os.path.join(cur_dir, os.path.basename(f))
                    shutil.copy(f, staged_f)
                    staged_files.append(staged_f)
                else:
                    staged_files.append(f)
    # Back compatible -- to migrate to explicit specifications in input YAML
    staged_files += ["trimmed", "htseq-count/*summary"]
    # Add in created target_info file
    if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")):
        staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")]
    # De-duplicate and return in stable sorted order.
    return sorted(list(set(staged_files)))
def _in_temp_directory(f):
return any(x.startswith("tmp") for x in f.split("/"))
def _group_by_samplename(samples):
    """Group samples split by QC method back into a single sample.
    """
    grouped = collections.defaultdict(list)
    for data in samples:
        batch = dd.get_batch(data) or dd.get_sample_name(data)
        if not isinstance(batch, (list, tuple)):
            batch = [batch]
        # Key on (name, alignment, batch) so per-QC copies of one sample
        # collapse together; tuple() makes the batch hashable.
        key = (dd.get_sample_name(data), dd.get_align_bam(data), tuple(batch))
        grouped[key].append(data)
    return [members[0] for members in grouped.values()]
def _create_list_file(paths, out_file):
with open(out_file, "w") as f:
for path in paths:
f.write(path + '\n')
return out_file
def _create_config_file(out_dir, samples):
    """Provide configuration file hiding duplicate columns.
    Future entry point for providing top level configuration of output reports.
    """
    out_file = os.path.join(out_dir, "multiqc_config.yaml")
    out = {"table_columns_visible": dict()}
    # Avoid duplicated bcbio columns with qualimap
    if any(("qualimap" in dd.get_tools_on(d) or "qualimap_full" in dd.get_tools_on(d)) for d in samples):
        out["table_columns_visible"]["bcbio"] = {"Average_insert_size": False}
        out["table_columns_visible"]["FastQC"] = {"percent_gc": False}
    # Setting the module order
    module_order = []
    module_order.extend([
        "bcbio",
        "samtools",
        "goleft_indexcov"
    ])
    out['bcftools'] = {'write_separate_table': True}
    # if germline calling was performed:
    if any("germline" in (get_active_vcinfo(s) or {})  # tumor-only somatic with germline extraction
           or dd.get_phenotype(s) == "germline"  # or paired somatic with germline calling for normal
           for s in samples):
        # Split somatic and germline variant stats into separate multiqc submodules,
        # with somatic going into General Stats, and germline going into a separate table:
        module_order.extend([{
            'bcftools': {
                'name': 'Bcftools (somatic)',
                'info': 'Bcftools stats for somatic variant calls only.',
                'path_filters': ['*_bcftools_stats.txt'],
                'write_general_stats': True,
            }},
            {'bcftools': {
                'name': 'Bcftools (germline)',
                'info': 'Bcftools stats for germline variant calls only.',
                'path_filters': ['*_bcftools_stats_germline.txt'],
                'write_general_stats': False
            }},
        ])
    else:
        # Single bcftools module when there is no somatic/germline split.
        module_order.append("bcftools")
    module_order.extend([
        "picard",
        "qualimap",
        "snpeff",
        "fastqc",
        "preseq",
    ])
    out["module_order"] = module_order
    # Only emit a preseq section when at least one sample ran preseq.
    preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
    if preseq_samples:
        out["preseq"] = _make_preseq_multiqc_config(preseq_samples)
    with open(out_file, "w") as out_handle:
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out_file
def _check_multiqc_input(path):
    """Return the path when it exists; otherwise None (falsy)."""
    return path if utils.file_exists(path) else None
# ## report and coverage
def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True
def _report_summary(samples, out_dir):
    """
    Run coverage report with bcbiocov package
    """
    # bcbreport is an optional dependency; skip the whole report if absent.
    try:
        import bcbreport.prepare as bcbreport
    except ImportError:
        logger.info("skipping report. No bcbreport installed.")
        return samples
    work_dir = dd.get_work_dir(samples[0])
    parent_dir = utils.safe_makedir(out_dir)
    # All collection below is relative-path based, hence the chdir blocks.
    with utils.chdir(parent_dir):
        logger.info("copy qsignature")
        qsignature_fn = os.path.join(work_dir, "qc", "qsignature", "qsignature.ma")
        if qsignature_fn:  # this need to be inside summary/qc dict
            if utils.file_exists(qsignature_fn) and not utils.file_exists("qsignature.ma"):
                shutil.copy(qsignature_fn, "bcbio_qsignature.ma")
        out_dir = utils.safe_makedir("fastqc")
        logger.info("summarize fastqc")
        with utils.chdir(out_dir):
            _merge_fastqc(samples)
        logger.info("summarize target information")
        if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
            samples = _merge_target_information(samples)
        preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
        if preseq_samples:
            out_dir = utils.safe_makedir("preseq")
            logger.info("summarize preseq")
            with utils.chdir(out_dir):
                _merge_preseq(preseq_samples)
        # Copy per-sample coverage files whose names contain "coverage_fixed".
        out_dir = utils.safe_makedir("coverage")
        logger.info("summarize coverage")
        for data in samples:
            pfiles = tz.get_in(["summary", "qc", "coverage"], data, [])
            if isinstance(pfiles, dict):
                pfiles = [pfiles["base"]] + pfiles["secondary"]
            elif pfiles:
                pfiles = [pfiles]
            for fn in pfiles:
                if os.path.basename(fn).find("coverage_fixed") > -1:
                    utils.copy_plus(fn, os.path.join(out_dir, os.path.basename(fn)))
        # Copy GC/depth parse tables used by the variants section.
        out_dir = utils.safe_makedir("variants")
        logger.info("summarize variants")
        for data in samples:
            pfiles = tz.get_in(["summary", "qc", "variants"], data, [])
            if isinstance(pfiles, dict):
                pfiles = [pfiles["base"]] + pfiles["secondary"]
            elif pfiles:
                pfiles = [pfiles]
            for fn in pfiles:
                if os.path.basename(fn).find("gc-depth-parse.tsv") > -1:
                    utils.copy_plus(fn, os.path.join(out_dir, os.path.basename(fn)))
        bcbreport.report(parent_dir)
        out_report = os.path.join(parent_dir, "qc-coverage-report.html")
        if not utils.file_exists(out_report):
            # Emit an R driver script for the Rmd report; rendering itself is
            # intentionally disabled (see commented block below).
            rmd_file = os.path.join(parent_dir, "report-ready.Rmd")
            run_file = "%s-run.R" % (os.path.splitext(out_report)[0])
            with open(run_file, "w") as out_handle:
                out_handle.write("""library(rmarkdown)\nrender("%s")\n""" % rmd_file)
            # cmd = "%s %s" % (utils.Rscript_cmd(), run_file)
            # Skip automated generation of coverage report to avoid error
            # messages. We need to generalize coverage reporting and re-include.
            # try:
            #     do.run(cmd, "Prepare coverage summary", log_error=False)
            # except subprocess.CalledProcessError as msg:
            #     logger.info("Skipping generation of coverage report: %s" % (str(msg)))
            if utils.file_exists("report-ready.html"):
                shutil.move("report-ready.html", out_report)
    return samples
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
def _add_disambiguate(sample):
    """Attach disambiguation read counts to the sample's summary metrics."""
    # check if disambiguation was run
    if "disambiguate" in sample:
        stats_file = sample["disambiguate"]["summary"]
        if utils.file_exists(stats_file):
            counts = _parse_disambiguate(stats_file)
            metrics = sample["summary"]["metrics"]
            metrics["Disambiguated %s reads" % str(sample["genome_build"])] = counts[0]
            disambig_conf = sample["config"]["algorithm"]["disambiguate"]
            if isinstance(disambig_conf, (list, tuple)):
                other_genome = disambig_conf[0]
            else:
                other_genome = disambig_conf
            metrics["Disambiguated %s reads" % other_genome] = counts[1]
            metrics["Disambiguated ambiguous reads"] = counts[2]
    return sample
def _fix_duplicated_rate(dt):
"""Get RNA duplicated rate if exists and replace by samtools metric"""
if "Duplication_Rate_of_Mapped" in dt:
dt["Duplicates_pct"] = 100.0 * dt["Duplication_Rate_of_Mapped"]
return dt
def _merge_metrics(samples, out_dir):
    """Merge metrics from multiple QC steps
    """
    logger.info("summarize metrics")
    out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
    sample_metrics = collections.defaultdict(dict)
    for s in samples:
        s = _add_disambiguate(s)
        m = tz.get_in(['summary', 'metrics'], s)
        if isinstance(m, basestring):  # basestring: Python 2 codebase
            # CWL passes metrics through as a JSON string.
            m = json.loads(m)
        if m:
            # Drop non-scalar metrics: only flat values fit the tsv output.
            # NOTE(review): pops while iterating m.keys() -- safe on Python 2
            # where keys() returns a list snapshot.
            for me in m.keys():
                if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
                    m.pop(me, None)
            sample_metrics[dd.get_sample_name(s)].update(m)
    out = []
    # Write one <sample>_bcbio.txt per sample, metrics transposed to rows.
    for sample_name, m in sample_metrics.items():
        sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
        with file_transaction(samples[0], sample_file) as tx_out_file:
            dt = pd.DataFrame(m, index=['1'])
            # Sanitize column names for MultiQC consumption.
            dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
            dt['sample'] = sample_name
            dt['rRNA_rate'] = m.get('rRNA_rate', "NA")
            dt = _fix_duplicated_rate(dt)
            dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
        out.append(sample_file)
    return out
def _merge_fastqc(samples):
    """
    merge all fastqc samples into one by module
    """
    by_metric = collections.defaultdict(list)
    processed = set()
    for data in samples:
        sample_name = dd.get_sample_name(data)
        # Samples can appear multiple times (per-QC splits); take each once.
        if sample_name in processed:
            continue
        processed.add(sample_name)
        fastqc_glob = os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*"
        for fn in glob.glob(fastqc_glob):
            if fn.endswith("tsv"):
                by_metric[os.path.basename(fn)].append([sample_name, fn])
    # Concatenate per-sample tables and write one file per metric into the
    # current directory (caller chdirs into the fastqc output dir).
    for metric in by_metric:
        frames = []
        for sample_name, fn in by_metric[metric]:
            frame = pd.read_csv(fn, sep="\t")
            frame['sample'] = sample_name
            frames.append(frame)
        merged = utils.rbind(frames)
        merged.to_csv(metric, sep="\t", index=False, mode='w')
    return samples
def _merge_preseq(samples):
    """Collect preseq read counts across samples into one tab-separated table."""
    metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
    real_counts_file = os.path.abspath(os.path.join("preseq_real_counts.txt"))
    with file_transaction(samples[0], real_counts_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for sample, m in zip(samples, metrics):
                fields = [dd.get_sample_name(sample), str(m["Preseq_read_count"])]
                # Unique counts are optional; append only when present.
                if m.get("Preseq_unique_count") is not None:
                    fields.append(str(m["Preseq_unique_count"]))
                out_handle.write("\t".join(fields) + "\n")
    # Register the table as a secondary file on the first sample.
    samples[0]["summary"]["qc"]["preseq"]["secondary"] = [real_counts_file]
def _make_preseq_multiqc_config(samples):
    """Build the preseq section for the MultiQC config.

    Uses the median read length across samples; the genome size is only
    reported when it agrees across all samples.
    """
    metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
    read_lengths = [m["Preseq_read_length"] for m in metrics]
    conf = {"read_length": float(np.median(read_lengths))}
    genome_sizes = set(m["Preseq_genome_size"] for m in metrics)
    if len(genome_sizes) == 1:
        conf["genome_size"] = genome_sizes.pop()
    return conf
def _merge_target_information(samples):
    """Collect genome/target BED information into metrics/target_info.yaml.

    Values are only recorded when they agree across all samples; runs in the
    report directory (caller chdirs), so "metrics" is a relative path.
    """
    metrics_dir = utils.safe_makedir("metrics")
    out_file = os.path.abspath(os.path.join(metrics_dir, "target_info.yaml"))
    if utils.file_exists(out_file):
        return samples
    genomes = set(dd.get_genome_build(data) for data in samples)
    coverage_beds = set(dd.get_coverage(data) for data in samples)
    original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)
    # Representative sample used for lookups once uniformity is established.
    data = samples[0]
    info = {}
    # Reporting in MultiQC only if the genome is the same across all samples
    if len(genomes) == 1:
        info["genome_info"] = {
            "name": dd.get_genome_build(data),
            "size": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]),
        }
    # Reporting in MultiQC only if the target is the same across all samples
    vcr_orig = None
    if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:
        vcr_orig = list(original_variant_regions)[0]
        vcr_clean = bedutils.clean_file(vcr_orig, data)
        info["variants_regions_info"] = {
            "bed": vcr_orig,
            # size from the merged regions; region count from the cleaned BED.
            "size": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),
            "regions": pybedtools.BedTool(vcr_clean).count(),
        }
        gene_num = annotate.count_genes(vcr_clean, data)
        if gene_num is not None:
            info["variants_regions_info"]["genes"] = gene_num
    else:
        info["variants_regions_info"] = {
            "bed": "callable regions",
        }
    # Reporting in MultiQC only if the target is the same across samples
    if len(coverage_beds) == 1:
        cov_bed = list(coverage_beds)[0]
        if cov_bed not in [None, "None"]:
            if vcr_orig and vcr_orig == cov_bed:
                # Coverage target identical to variant regions: reuse the info.
                info["coverage_bed_info"] = info["variants_regions_info"]
            else:
                clean_bed = bedutils.clean_file(cov_bed, data, prefix="cov-", simple=True)
                info["coverage_bed_info"] = {
                    "bed": cov_bed,
                    "size": pybedtools.BedTool(cov_bed).total_coverage(),
                    "regions": pybedtools.BedTool(clean_bed).count(),
                }
                gene_num = annotate.count_genes(clean_bed, data)
                if gene_num is not None:
                    info["coverage_bed_info"]["genes"] = gene_num
        else:
            info["coverage_bed_info"] = info["variants_regions_info"]
    coverage_intervals = set(data["config"]["algorithm"]["coverage_interval"] for data in samples)
    if len(coverage_intervals) == 1:
        info["coverage_interval"] = list(coverage_intervals)[0]
    if info:
        with open(out_file, "w") as out_handle:
            yaml.safe_dump(info, out_handle)
    return samples
|
biocyberman/bcbio-nextgen
|
bcbio/qc/multiqc.py
|
Python
|
mit
| 22,238
|
[
"HTSeq"
] |
57cccfbc790bc4d05d57cf0b410f4e4d31d01020169ff527d28fe19e4bddb9e6
|
#!/usr/bin/env python
#20-11-2017
#Authors:Sebastian ECHEVERRI RESTREPO,
# sebastian.echeverri.restrepo@skf.com, sebastianecheverrir@gmail.com
# James EWEN
# j.ewen14@imperial.ac.uk, jimmyewen@gmail.com
#################################################################################3
# This file contains a function to generate the rough Fe surfaces. The
# algorithm used is based on Tribol Lett 2011, 44, 279-85 and
# http://doi.org/10.1007/978-3-642-84574-1_34
#################################################################################3
from ase.spacegroup import crystal
import ase.io
import os
from ase import Atoms
from ase.visualize import view
from ase.build import surface
from ase import Atom
from random import randint
from random import shuffle
from ase.neighborlist import *
import numpy as np
import copy
import random
import math
def Rough(FractalLevels,RMSin,H,boxLenghtX,boxLenghtY,boxLenghtZ,aFe,Separation):
    """Generate two rough bcc-Fe slabs facing each other and write them
    to 'WEA.lt' for moltemplate.

    The rough surfaces are produced with a random midpoint-displacement
    (diamond-square style) fractal algorithm, following
    Tribol Lett 2011, 44, 279-85 and
    http://doi.org/10.1007/978-3-642-84574-1_34.

    Parameters:
        FractalLevels  -- number of recursive subdivision levels; the height
                          grid has 2**FractalLevels squares per side
        RMSin          -- target RMS roughness (Angstrom); each surface is
                          regenerated until its RMS is within +-5% of this
        H              -- Hurst exponent controlling how the random
                          displacement amplitude decays per level
        boxLenghtX/Y/Z -- box size expressed in Fe unit cells
        aFe            -- Fe lattice parameter (Angstrom)
        Separation     -- vertical gap left between the two slabs (Angstrom)

    Side effects: prints progress to stdout and writes the file 'WEA.lt'
    in the current working directory.

    NOTE(review): Python 2 only -- print statements, and the grid indexing
    (e.g. deltaSideSquares/2) relies on Py2 integer division semantics.
    """
    # converting the box length (unit cells) to angstroms
    boxLenghtXAngs=boxLenghtX*aFe
    boxLenghtYAngs=boxLenghtY*aFe
    boxLenghtZAngs=boxLenghtZ*aFe
    # mean for the random number generator
    mu=0.0
    # initial amplitude of the random midpoint displacements
    scaleFactorK=1.5*RMSin
    #####################################################################
    # Fractal Section For Bulk Fe (lower slab)
    #####################################################################
    print '#######################################'
    print 'Generating the Fractal surface for bulk Fe'
    # Simple 3D point container for the height grid.
    class Point:
        def __init__(self,x,y,z):
            self.x = x
            self.y = y
            self.z = z
    # Create an array of (2**FractalLevels + 1) x (2**FractalLevels + 1) grid nodes
    PointsArray = [[0.0 for x in range(0,int(math.sqrt(4**FractalLevels)+1))]
                   for y in range(0,int(math.sqrt(4**FractalLevels)+1))]
    xSize=len(PointsArray)
    ySize=len(PointsArray[0])
    # generating the corner points (all pinned to the same height so the
    # surface is periodic across the box edges)
    zCorner=0.0 #random.gauss(mu,scaleFactorK*2**(-(1)*H))
    PointsArray[0][0]             =Point(0.0,0.0,zCorner)
    PointsArray[xSize-1][0]       =Point(boxLenghtXAngs,0.0,zCorner)
    PointsArray[0][ySize-1]       =Point(0.0,boxLenghtYAngs,zCorner)
    PointsArray[xSize-1][ySize-1] =Point(boxLenghtXAngs,boxLenghtYAngs,zCorner)
    # Number of squares per side
    NSideSquares=2**(FractalLevels)
    RMS=0.0
    # Regenerate the whole surface until its RMS is within +-5% of RMSin.
    while (RMS > 1.05*RMSin or RMS < 0.95*RMSin):
        # Fractal cycle: each level halves the square size and adds a
        # Gaussian displacement whose amplitude decays as 2**(-(level+1)*H).
        for level in range(0, FractalLevels):
            deltaSideSquares=2**(FractalLevels-level)
            # calculate the central point of each square ("diamond" step)
            for i in range(0,NSideSquares,deltaSideSquares):
                for j in range(0,NSideSquares,deltaSideSquares):
                    PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2]=Point((PointsArray[i][j].x+PointsArray[i+deltaSideSquares][j].x)/2,
                        (PointsArray[i][j].y+PointsArray[i][j+deltaSideSquares].y)/2,
                        (PointsArray[i][j].z+PointsArray[i][j+deltaSideSquares].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+deltaSideSquares][j+deltaSideSquares].z)/4+
                        random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
            # calculate the side midpoints of each square ("square" step);
            # edge cases wrap around for periodicity
            for i in range(0,NSideSquares,deltaSideSquares):
                for j in range(0,NSideSquares,deltaSideSquares):
                    # point 0: left-edge midpoint
                    if i==0:
                        PointsArray[i][j+deltaSideSquares/2]=Point((i)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i-deltaSideSquares/2-1][j+deltaSideSquares/2].z+PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i][j+deltaSideSquares].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    else:
                        PointsArray[i][j+deltaSideSquares/2]=Point((i)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i-deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i][j+deltaSideSquares].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    # point 1: bottom-edge midpoint
                    if j==0:
                        PointsArray[i+deltaSideSquares/2][j]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j-deltaSideSquares/2-1].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    else:
                        PointsArray[i+deltaSideSquares/2][j]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j-deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    # point 2: right-edge midpoint (copied from the left edge
                    # when on the boundary, to keep the surface periodic)
                    if i+deltaSideSquares== xSize-1:
                        PointsArray[i+deltaSideSquares][j+deltaSideSquares/2]=Point((i+deltaSideSquares)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            PointsArray[0][j+deltaSideSquares/2].z)
                    else:
                        PointsArray[i+deltaSideSquares][j+deltaSideSquares/2]=Point((i+deltaSideSquares)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+3*deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j+deltaSideSquares].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    # point 3: top-edge midpoint (copied from the bottom edge
                    # when on the boundary, to keep the surface periodic)
                    if j+deltaSideSquares==ySize-1:
                        PointsArray[i+deltaSideSquares/2][j+deltaSideSquares]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares)*boxLenghtYAngs/NSideSquares,
                            PointsArray[i+deltaSideSquares/2][0].z)#copy.deepcopy(PointsArray[i+deltaSideSquares/2][j].z))
                    else:
                        PointsArray[i+deltaSideSquares/2][j+deltaSideSquares]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i][j+deltaSideSquares].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j+deltaSideSquares].z+PointsArray[i+deltaSideSquares/2][j+3*deltaSideSquares/2].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
        # forcing the min value to be at z=0
        # finding the min Z value, and accumulating the sum of squared
        # deviations for the RMS roughness check
        minZ=PointsArray[0][0].z
        sumsq=0
        npts=0
        for i in range(0,xSize):
            for j in range(0,ySize):
                sumsq=(PointsArray[i][j].z-mu)**2+sumsq
                if minZ>PointsArray[i][j].z:
                    minZ=PointsArray[i][j].z
        RMS=math.sqrt(sumsq/(xSize*ySize))
        print "RMS = ", RMS, "Angstrom"
    # shift the accepted surface so that its lowest point sits at z=0
    for i in range(0,xSize):
        for j in range(0,ySize):
            PointsArray[i][j].z=PointsArray[i][j].z-minZ
    # Making squares from the points: each Square stores its four corner
    # (x,y) coordinates and the mean height z of its corners.
    class Square:
        def __init__(self, x0,y0,x1,y1,x2,y2,x3,y3,z):
            self.x0 = x0
            self.y0 = y0
            self.x1 = x1
            self.y1 = y1
            self.x2 = x2
            self.y2 = y2
            self.x3 = x3
            self.y3 = y3
            self.z = z
    SquaresArray = [[0.0 for x in range(0,xSize-1)]
                    for y in range(0,ySize-1)]
    for i in range(0,xSize-1):
        for j in range(0,ySize-1):
            SquaresArray[i][j]=Square(PointsArray[i][j].x, PointsArray[i][j].y,
                PointsArray[i+1][j].x, PointsArray[i+1][j].y,
                PointsArray[i+1][j+1].x, PointsArray[i+1][j+1].y,
                PointsArray[i][j+1].x, PointsArray[i][j+1].y,
                (PointsArray[i][j].z+PointsArray[i+1][j].z+PointsArray[i+1][j+1].z+PointsArray[i][j+1].z)/4)
    # (disabled) Plotting the squares and the points, kept for debugging
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    x=[]
    y=[]
    z=[]
    for i in range(0,xSize):
        for j in range(0,ySize):
            x.append(PointsArray[i][j].x)
            y.append(PointsArray[i][j].y)
            z.append(PointsArray[i][j].z)
            #x.append(PointsArray[i][j].x+boxLenghtXAngs)
            #y.append(PointsArray[i][j].y)
            #z.append(PointsArray[i][j].z)
            #x.append(PointsArray[i][j].x)
            #y.append(PointsArray[i][j].y+boxLenghtYAngs)
            #z.append(PointsArray[i][j].z)
            #x.append(PointsArray[i][j].x+boxLenghtXAngs)
            #y.append(PointsArray[i][j].y+boxLenghtYAngs)
            #z.append(PointsArray[i][j].z)
            #print '%5.3f,%5.3f,%5.3f \t\t'%(PointsArray[i][j].x,PointsArray[i][j].y,PointsArray[i][j].z ),
        #print ""
    #for i in range(0,xSize-1):
    #  for j in range(0,ySize-1):
    #    ax.plot([SquaresArray[i][j].x0,SquaresArray[i][j].x1,SquaresArray[i][j].x2,SquaresArray[i][j].x3, SquaresArray[i][j].x0 ],
    #            [SquaresArray[i][j].y0,SquaresArray[i][j].y1,SquaresArray[i][j].y2,SquaresArray[i][j].y3, SquaresArray[i][j].y0], 0)
    #ax.scatter(x,y,z,c=z)
    #ax.plot([1,1], [2,2], 0)
    #ax.plot([SquaresArray[0][0].x0,SquaresArray[0][0].x1,SquaresArray[0][0].x2,SquaresArray[0][0].x3, SquaresArray[0][0].x0 ],
    #[SquaresArray[0][0].y0,SquaresArray[0][0].y1,SquaresArray[0][0].y2,SquaresArray[0][0].y3,SquaresArray[0][0].y0], 0)
    #ax.plot_trisurf(x, y, z, cmap=plt.cm.Spectral)
    #ax.set_xlabel('X axis')
    #ax.set_ylabel('Y axis')
    #plt.show()
    #####################################################################
    # generating a bulk Fe crystal (bcc, spacegroup 229) used as the
    # starting block for both rough slabs
    print '#######################################'
    print 'Generating the bulk Fe region'
    atomsBulk = crystal(spacegroup=229,
        symbols='Fe',
        basis=[0,0,0],
        cellpar=[aFe,aFe,aFe,90.0,90.0,90.0],
        size=(boxLenghtX,boxLenghtY,boxLenghtZ))
    #ase.io.write("FeBulk.cfg", atomsBulk, "cfg")
    #os.system("atomsk FeBulk.cfg lmp >& /dev/null")
    #os.system("mv FeBulk.lmp data.FeBulk")
    #####################################################################
    # Making the crystal surface rough: delete every atom lying BELOW the
    # fractal surface (slab keeps its top; the carved face points down)
    print '#######################################'
    print 'Applying the roughness to the bulk Fe region'
    atomsBulkRough = copy.deepcopy(atomsBulk)
    # iterate in reverse so deletions do not shift the indices still to visit
    for k in reversed(range(0, Atoms.get_number_of_atoms(atomsBulkRough))):
        deleted=False
        for i in range(0,xSize-1):
            for j in range(0,ySize-1):
                if atomsBulkRough[k].x>=SquaresArray[i][j].x0 and atomsBulkRough[k].x<SquaresArray[i][j].x1 and atomsBulkRough[k].y>=SquaresArray[i][j].y0 and atomsBulkRough[k].y<SquaresArray[i][j].y2 and atomsBulkRough[k].z<SquaresArray[i][j].z:
                    del atomsBulkRough[k]
                    deleted=True
                    break
            if deleted==True:
                break
    #ase.io.write("RoughFeBulk.cfg", atomsBulkRough, "cfg")
    #os.system("atomsk RoughFeBulk.cfg lmp >& /dev/null")
    #os.system("mv RoughFeBulk.lmp data.RoughFeBulk")
    #####################################################################
    # Fractal Section For Bulk2 Fe (upper slab) -- same algorithm as above,
    # generating an independent random surface
    #####################################################################
    print '#######################################'
    print 'Generating the Fractal surface for Bulk2 Fe'
    # Create a fresh height-grid array
    PointsArray = [[0.0 for x in range(0,int(math.sqrt(4**FractalLevels)+1))]
                   for y in range(0,int(math.sqrt(4**FractalLevels)+1))]
    xSize=len(PointsArray)
    ySize=len(PointsArray[0])
    # generating the corner points
    zCorner=0.0#random.gauss(mu,scaleFactorK*2**(-(1)*H))
    PointsArray[0][0]             =Point(0.0,0.0,zCorner)
    PointsArray[xSize-1][0]       =Point(boxLenghtXAngs,0.0,zCorner)
    PointsArray[0][ySize-1]       =Point(0.0,boxLenghtYAngs,zCorner)
    PointsArray[xSize-1][ySize-1] =Point(boxLenghtXAngs,boxLenghtYAngs,zCorner)
    # Number of squares per side
    NSideSquares=2**(FractalLevels)
    RMS=0.0
    # Regenerate until the RMS is within +-5% of the target, as before.
    while (RMS > 1.05*RMSin or RMS < 0.95*RMSin):
        # Fractal cycle
        for level in range(0, FractalLevels):
            deltaSideSquares=2**(FractalLevels-level)
            # calculate the central point
            for i in range(0,NSideSquares,deltaSideSquares):
                for j in range(0,NSideSquares,deltaSideSquares):
                    PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2]=Point((PointsArray[i][j].x+PointsArray[i+deltaSideSquares][j].x)/2,
                        (PointsArray[i][j].y+PointsArray[i][j+deltaSideSquares].y)/2,
                        (PointsArray[i][j].z+PointsArray[i][j+deltaSideSquares].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+deltaSideSquares][j+deltaSideSquares].z)/4+
                        random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
            # calculate the sides
            for i in range(0,NSideSquares,deltaSideSquares):
                for j in range(0,NSideSquares,deltaSideSquares):
                    # point 0
                    if i==0:
                        PointsArray[i][j+deltaSideSquares/2]=Point((i)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i-deltaSideSquares/2-1][j+deltaSideSquares/2].z+PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i][j+deltaSideSquares].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    else:
                        PointsArray[i][j+deltaSideSquares/2]=Point((i)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i-deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i][j+deltaSideSquares].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    # point 1
                    if j==0:
                        PointsArray[i+deltaSideSquares/2][j]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j-deltaSideSquares/2-1].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    else:
                        PointsArray[i+deltaSideSquares/2][j]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i][j].z+PointsArray[i+deltaSideSquares/2][j-deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    # point 2
                    if i+deltaSideSquares== xSize-1:
                        PointsArray[i+deltaSideSquares][j+deltaSideSquares/2]=Point((i+deltaSideSquares)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            PointsArray[0][j+deltaSideSquares/2].z)
                    else:
                        PointsArray[i+deltaSideSquares][j+deltaSideSquares/2]=Point((i+deltaSideSquares)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares/2)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j].z+PointsArray[i+3*deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j+deltaSideSquares].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
                    # point 3
                    if j+deltaSideSquares==ySize-1:
                        PointsArray[i+deltaSideSquares/2][j+deltaSideSquares]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares)*boxLenghtYAngs/NSideSquares,
                            PointsArray[i+deltaSideSquares/2][0].z)#copy.deepcopy(PointsArray[i+deltaSideSquares/2][j].z))
                    else:
                        PointsArray[i+deltaSideSquares/2][j+deltaSideSquares]=Point((i+deltaSideSquares/2)*boxLenghtXAngs/NSideSquares, (j+deltaSideSquares)*boxLenghtYAngs/NSideSquares,
                            (PointsArray[i][j+deltaSideSquares].z+PointsArray[i+deltaSideSquares/2][j+deltaSideSquares/2].z+PointsArray[i+deltaSideSquares][j+deltaSideSquares].z+PointsArray[i+deltaSideSquares/2][j+3*deltaSideSquares/2].z)/4+
                            random.gauss(mu,scaleFactorK*2**(-(level+1)*H)))
        # forcing the max value to be at z=boxLenghtZAngs
        # finding the max Z value and the RMS of this surface
        maxZ=PointsArray[0][0].z
        sumsq=0.0
        for i in range(0,xSize):
            for j in range(0,ySize):
                sumsq=(PointsArray[i][j].z-mu)**2+sumsq
                if maxZ<PointsArray[i][j].z:
                    maxZ=PointsArray[i][j].z
        RMS=math.sqrt(sumsq/(xSize*ySize))
        print "RMS = ", RMS, "Angstrom"
    # shift the accepted surface so its highest point sits at z=boxLenghtZAngs
    for i in range(0,xSize):
        for j in range(0,ySize):
            PointsArray[i][j].z=PointsArray[i][j].z+boxLenghtZAngs-maxZ
    # Making squares from the points (reuses the Square class defined above)
    SquaresArray = [[0.0 for x in range(0,xSize-1)]
                    for y in range(0,ySize-1)]
    for i in range(0,xSize-1):
        for j in range(0,ySize-1):
            SquaresArray[i][j]=Square(PointsArray[i][j].x, PointsArray[i][j].y,
                PointsArray[i+1][j].x, PointsArray[i+1][j].y,
                PointsArray[i+1][j+1].x, PointsArray[i+1][j+1].y,
                PointsArray[i][j+1].x, PointsArray[i][j+1].y,
                (PointsArray[i][j].z+PointsArray[i+1][j].z+PointsArray[i+1][j+1].z+PointsArray[i][j+1].z)/4)
    # (disabled) Plotting the squares and the points, kept for debugging
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    x=[]
    y=[]
    z=[]
    for i in range(0,xSize):
        for j in range(0,ySize):
            x.append(PointsArray[i][j].x)
            y.append(PointsArray[i][j].y)
            z.append(PointsArray[i][j].z)
            #x.append(PointsArray[i][j].x+boxLenghtXAngs)
            #y.append(PointsArray[i][j].y)
            #z.append(PointsArray[i][j].z)
            #x.append(PointsArray[i][j].x)
            #y.append(PointsArray[i][j].y+boxLenghtYAngs)
            #z.append(PointsArray[i][j].z)
            #x.append(PointsArray[i][j].x+boxLenghtXAngs)
            #y.append(PointsArray[i][j].y+boxLenghtYAngs)
            #z.append(PointsArray[i][j].z)
            #print '%5.3f,%5.3f,%5.3f \t\t'%(PointsArray[i][j].x,PointsArray[i][j].y,PointsArray[i][j].z ),
        #print ""
    #for i in range(0,xSize-1):
    #  for j in range(0,ySize-1):
    #    ax.plot([SquaresArray[i][j].x0,SquaresArray[i][j].x1,SquaresArray[i][j].x2,SquaresArray[i][j].x3, SquaresArray[i][j].x0 ],
    #            [SquaresArray[i][j].y0,SquaresArray[i][j].y1,SquaresArray[i][j].y2,SquaresArray[i][j].y3, SquaresArray[i][j].y0], 0)
    #ax.scatter(x,y,z,c=z)
    #ax.plot([1,1], [2,2], 0)
    #ax.plot([SquaresArray[0][0].x0,SquaresArray[0][0].x1,SquaresArray[0][0].x2,SquaresArray[0][0].x3, SquaresArray[0][0].x0 ],
    #[SquaresArray[0][0].y0,SquaresArray[0][0].y1,SquaresArray[0][0].y2,SquaresArray[0][0].y3,SquaresArray[0][0].y0], 0)
    #ax.plot_trisurf(x, y, z, cmap=plt.cm.Spectral)
    #ax.set_xlabel('X axis')
    #ax.set_ylabel('Y axis')
    #plt.show()
    #####################################################################
    # using atomeye to generate the second bulk region
    #print '#######################################'
    #print 'Generating the Bulk2 Fe region'
    #####################################################################
    # Making the crystal surface rough: this time delete atoms lying ABOVE
    # the fractal surface, so the rough face of Bulk2 points up
    print '#######################################'
    print 'Applying the roughness to the Bulk2 Fe region'
    atomsBulk2Rough = copy.deepcopy(atomsBulk)
    for k in reversed(range(0, Atoms.get_number_of_atoms(atomsBulk2Rough))):
        deleted=False
        for i in range(0,xSize-1):
            for j in range(0,ySize-1):
                if atomsBulk2Rough[k].x>=SquaresArray[i][j].x0 and atomsBulk2Rough[k].x<SquaresArray[i][j].x1 and atomsBulk2Rough[k].y>=SquaresArray[i][j].y0 and atomsBulk2Rough[k].y<SquaresArray[i][j].y2 and atomsBulk2Rough[k].z>SquaresArray[i][j].z:
                    del atomsBulk2Rough[k]
                    deleted=True
                    break
            if deleted==True:
                break
    #ase.io.write("RoughFeBulk2.cfg", atomsBulk2Rough, "cfg")
    #os.system("atomsk RoughFeBulk2.cfg lmp >& /dev/null")
    #os.system("mv RoughFeBulk2.lmp data.RoughFeBulk2")
    # Assembling the interface: Bulk2 (rough side up) at the bottom, Bulk1
    # (rough side down) stacked above it, separated by `Separation` Angstrom
    print '#######################################'
    print 'Assembling the Bulk1 and the Bulk2 Fe regions'
    atomsBulk2Rough.center(vacuum=0, axis=2)
    atomsBulkRough.center(vacuum=0, axis=2)
    atomsBulk2Rough.translate([0,0,0])
    atomsBulkRough.translate([0,0,boxLenghtZAngs+Separation])
    atomsWEA=atomsBulk2Rough+atomsBulkRough
    #atomsWEA.center(vacuum=50, axis=2)
    #ase.io.write("WEA.cfg", atomsWEA, "cfg")
    #print '#######################################'
    #print 'Writing data file WEA.cfg'
    #os.system("atomsk WEA.cfg lmp >& /dev/null")
    #os.system("mv WEA.lmp data.WEA")
    #############################################################
    #############################################################
    #############################################################
    print '#######################################'
    print 'Writing file WEA.lt for moltemplate'
    # Printing the .lt file for moltemplate: one "Data Atoms" entry per Fe atom
    f = open('WEA.lt', 'w')
    f.write("FESurface inherits LOPLSAA {\n")
    f.write("write(\"Data Atoms\") {\n")
    for k in range(0, Atoms.get_number_of_atoms(atomsWEA)):
        f.write("$atom:FE"+str(k)+" $mol:... @atom:100000 0.00 "+str(atomsWEA[k].x)+" "+str(atomsWEA[k].y)+" "+str(atomsWEA[k].z)+"\n")
    f.write("} } \n")
    f.close()
    # example line format: $atom:FE1 $mol:... @atom:10000 0.00 0.000 0.000 0.000
|
JE1314/LAMMPS_builder
|
root/Rough.py
|
Python
|
gpl-3.0
| 20,277
|
[
"ASE",
"CRYSTAL"
] |
c1a68d4a553b5544b1bb979982672466ba3e4754545c593e45ede833d1f691db
|
# Django Imports
from django.db.models import F
from django.shortcuts import redirect, render
from django.core.exceptions import ObjectDoesNotExist
# Python Imports
import random, decimal
import datetime as time
# WaW Imports
from wawmembers.models import *
from wawmembers.forms import *
from wawmembers.loggers import aidlogger
import wawmembers.tasks as newtask
import wawmembers.display as display
import wawmembers.outcomes_policies as outcomes
import wawmembers.newsgenerator as news
import wawmembers.taskgenerator as taskdata
import wawmembers.utilities as utilities
import wawmembers.variables as v
'''
Everything that deals with one world interacting with another. Wars, attacks, spies, etc.
'''

# Shorthand for exact decimal arithmetic on in-game currency/resource amounts.
D = decimal.Decimal
def stats_ind(request, userid):
'Displays information about other worlds.'
logged_in = request.user.is_authenticated()
if logged_in:
worlds = World.objects.select_related('preferences', 'alliance').filter(Q(pk=userid)|Q(user=request.user))
#1 query instead of 4
target = worlds.get(pk=userid)
world = worlds.get(user=request.user)
displayactions = True
else:
target = World.objects.select_related('preferences', 'alliance').get(pk=userid)
world = World() #anon user
if target.pk == world.pk:
return redirect('main') # redirect to your own page
# variable setup
message = ""
atwar = haswars = offlist = deflist = admin = None
warprotection = gdpprotection = targetprotection = None
peaceoffer = warfuelcost = raidcost = None
costforgeuaid = indefwar = None
nospies = spyintarget = spyform = spyintel = timeforintel = None
aidform = traderessend = sendtrade = receivetrade = None
defaultopen = 'domestic,economic,diplomacy,military'
lastseen = str(v.now() - target.lastloggedintime).split(',')[0]
alliance = target.alliance
# military levels setup
millevel = world.techlevel
displayactions = (True if logged_in else False)
try:
if world.preferences.vacation:
raise Exception # not allowed to take actions when you're on vacation
except:
pass
else:
defaultopen = world.statsopenprefs
# war status
if world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True):
atwar = True
war = War.objects.get(attacker=world, defender=target)
if war.peaceofferbyatk is not None:
peaceoffer = True
elif world.pk in War.objects.filter(attacker=target).values_list('defender', flat=True):
atwar = True
war = War.objects.get(attacker=target, defender=world)
if war.peaceofferbydef is not None:
peaceoffer = True
if abs(world.econsystem - target.econsystem) == 2:
costforgeuaid = True
if world.wardefender.count() > 0:
indefwar = True
# convenience admin links
if world.pk == 1:
admin = True
if request.method == 'POST' and logged_in:
form = request.POST
if "sendcomm" in form:
if SentComm.objects.filter(sender=world,
datetime__gte=v.now()-time.timedelta(seconds=v.delay)).count() > v.commlimit:
message = "Stop spamming"
else:
commdata = form['comm']
if len(commdata) > 500:
message = 'Maximum 500 characters!'
elif len(commdata) == 0:
message = 'You cannot send an empty comm!'
elif '<' in commdata or '>' in commdata:
message = 'The comm contains invalid characters!'
else:
Comm.objects.create(target=target, sender=world, content=commdata)
SentComm.objects.create(target=target, sender=world, content=commdata)
message = 'Communique sent.'
if "wardec" in form:
ownpower = utilities.totalpower(world)
targetpower = utilities.totalpower(target)
if target.preferences.vacation:
message = 'This world is in vacation mode. You cannot declare war on it.'
elif ownpower == 0:
message = 'You cannot declare war with no fleet power!'
elif atwar:
message = 'You are already at war with this world!'
elif target in world.declaredwars.all():
message = 'You have already warred this world recently!'
elif world.warsperturn == 3:
message = 'You have declared too many wars recently! Your logistics division needs time to organise the fleet.'
elif v.now() < (target.creationtime + time.timedelta(weeks=1)) and (target.noobprotect):
message = 'You cannot declare war on a world that has only just joined the galaxy!'
elif v.now() < target.warprotection:
message = 'Your generals refuse to attack this world which has recently lost a war! Their honour is at stake.'
elif (target.gdp < 0.75*(world.gdp)) and (v.now() > target.abovegdpprotection):
message = 'Your fleet refuses to attack such a puny world!'
elif target.wardefender.count() == 3:
message = 'Your fleet refuses to attack a world under such siege!'
elif world.warattacker.count() == 3:
message = 'Your fleet refuses to divide its attentions any further!'
elif ownpower < 0.1*targetpower:
message = 'Your fleet refuses to declare war on such a mighty enemy!'
elif len(form['warreason']) > 20:
message = 'Your war declaration message is too long.'
else:
endprotect = ''
aboveprotect = ''
warreason = form['warreason']
if v.now() < world.warprotection:
world.warprotection = v.now()
world.brokenwarprotect = v.now() + time.timedelta(days=1)
world.noobprotect = False
world.save(update_fields=['warprotection','brokenwarprotect','noobprotect'])
endprotect = '<br>Your war protection is now over, and you may not regain it for 24 hours!'
if target.gdp > 3 * world.gdp:
world.abovegdpprotection = v.now() + time.timedelta(days=5)
world.save(update_fields=['abovegdpprotection'])
aboveprotect = '<br>Other worlds of any GDP may attack you, for 5 days.'
War.objects.create(attacker=world, defender=target, reason=warreason)
htmldata = news.wardec(world,warreason)
NewsItem.objects.create(target=target, content=htmldata)
world.declaredwars.add(target)
action = {'warsperturn': {'action': 'add', 'amount': 1}}
utilities.atomic_world(world.pk, action)
message = 'You have declared WAR!' + endprotect + aboveprotect
if "attack" in form and atwar is None:
message = 'You are not at war with this world!'
elif "attack" in form:
form = attackform(world, request.POST['attack'], request.POST)
if form.is_valid():
fuelcost = 0
enemybp = 0
sector = form.cleaned_data['fleets'][0].sector
ineligiblefleets = []
for fleeet in form.cleaned_data['fleets']:
fuelcost += fleeet.fuelcost()
if fleeet.attacked:
ineligiblefleets.append(['', fleeet.name])
for f in fleet.objects.filter(controller=target, sector=sector):
enemybp += f.power()
if world.warpfuel < fuelcost:
message = 'You do not have enough warpfuel to support this attack!'
elif len(ineligiblefleets) > 0:
message = utilities.resource_text(ineligiblefleets).replace(' ', ' ')
message += 'need to reorganise and rearm until they can attack again!'
elif enemybp == 0 and sector != target.sector:
message = "Can't attack enemy fleets in %s, there are none!" % sector
else:
if world in War.objects.filter(defender=target) and v.now() < world.warprotection:
world.warprotection = v.now()
actions = {'warpfuel': {'action': 'subtract', 'amount': fuelcost}}
utilities.atomic_world(world.pk, actions)
#extract sector from fleet location
return attack(request, world, target, form.cleaned_data['fleets'], war)
# freighter raid
if "raid" in form and atwar is None:
message = 'You are not at war with this world!'
elif "raid" in form:
power = utilities.militarypowerwfuel(world, war.region)
raidlist = utilities.regionshiplist(world, war.region)[:2] + [0, 0, 0, 0, 0, 0, 0]
raidcost = utilities.warpfuelcost(raidlist)
if utilities.noweariness(world, war.region, 5):
message = 'Your fleet is too exhausted to raid!'
elif power == 0:
message = 'You do not have a fleet to attack with!'
elif world.warpfuel < raidcost:
message = 'You do not have enough warpfuel to support this attack!'
elif world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True) and war.timetonextattack > v.now():
timedifference = war.timetonextattack - v.now()
hours, minutes, seconds = utilities.timedeltadivide(timedifference)
message = 'You cannot launch another attack so soon! Your fleet still needs %s:%s:%s to \
regroup before it can launch another attack.' % (hours, minutes, seconds)
elif world.pk in War.objects.filter(attacker=target).values_list('defender', flat=True) and war.timetonextdefense > v.now():
timedifference = war.timetonextdefense - v.now()
hours, minutes, seconds = utilities.timedeltadivide(timedifference)
message = 'You cannot launch another attack so soon! Your fleet still needs %s:%s:%s to \
regroup before it can launch another attack.' % (hours, minutes, seconds)
else:
if world in War.objects.filter(defender=target) and v.now() < world.warprotection:
world.warprotection = v.now()
world.save(update_fields=['warprotection'])
if world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True):
war.timetonextattack = v.now() + time.timedelta(hours=8)
elif world.pk in War.objects.filter(attacker=target).values_list('defender', flat=True):
war.timetonextdefense = v.now() + time.timedelta(hours=8)
war.save()
utilities.wearinesschange(world, war.region, -5)
world.warpfuel = F('warpfuel') - raidcost
world.save(update_fields=['warpfuel'])
world = World.objects.get(pk=world.pk)
return raid(request, world, target, war)
if 'peace' in form:
if atwar is None:
message = 'You are not at war with this world!'
else:
if form['peace'] == 'offerpeace':
htmldata = news.offerpeace(world, target)
newsitem = ActionNewsItem.objects.create(target=target, content=htmldata, actiontype=1)
else:
htmldata = news.peacerevoke(world)
NewsItem.objects.create(target=target, content=htmldata)
if world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True):
if form['peace'] == 'offerpeace':
war.peaceofferbyatk = newsitem
else:
try:
war.peaceofferbyatk.delete()
except: pass
elif world.pk in War.objects.filter(attacker=target).values_list('defender', flat=True):
if form['peace'] == 'offerpeace':
war.peaceofferbydef = newsitem
else:
try:
war.peaceofferbydef.delete()
except: pass
war.save()
if form['peace'] == 'offerpeace':
message = 'An offer of peace has been sent, which your enemy will have to accept.'
else:
peaceoffer = None
message = 'You have revoked your peace offer.'
if 'directaid' in form:
form = Aidform(world, request.POST)
if world.gdp < 250:
message = 'Your world\'s economy is too weak to support your humanitarian efforts!'
elif form.is_valid():
actions = {}
tgtactions = {}
resources = []
required_capacity = 0
data = form.cleaned_data
reference = []
reference += v.resources
geu = False
for resource in reference:
if data.has_key(resource) and resource == 'budget': #instant transfer
if data[resource] == 0:
continue
action = {'budget': {'action': 'add', 'amount': data[resource]}}
utilities.atomic_world(world.pk, action, target.pk)
htmldata = news.directaidcompletion(world, [['GEU', data[resource]]])
NewsItem.objects.create(target=target, content=htmldata)
#logs
tgtlog = ResourceLog.objects.create(owner=target, target=world)
Logresource.objects.create(resource="GEU", amount=data[resource], log=tgtlog)
message = "%s has recieved %s %s!" % (target.name, data[resource], 'GEU')
else:
if data.has_key(resource) and data[resource] > 0:
resources.append([resource, data[resource]])
required_capacity += v.freighter_capacity[resource] * data[resource]
actions.update({resource: {'action': 'subtract', 'amount': data[resource]}})
tgtactions.update({resource: {'action': 'add', 'amount': data[resource]}})
freighters = world.freighters - world.freightersinuse
required_freighters = (required_capacity / v.freighter_capacity['total'])+ 1
if len(resources) == 0: #pure budget aid
pass
elif freighters >= required_freighters:
#gots enough freighters
delay = (1 if world.sector == target.sector else 2)
outcometime = datetime=v.now() + time.timedelta(hours=delay)
actions.update({
'freightersinuse': {'action': 'add', 'amount': required_freighters},
})
utilities.atomic_world(world.pk, actions)
taskdetails = taskdata.directaidarrival(world, resources)
task = Task.objects.create(target=target,
content=taskdetails, datetime=outcometime)
newtask.directaid.apply_async(args=(world.pk, target.pk,
task.pk, resources, freighters), eta=outcometime)
if data['budget'] > 0:
resources = [['GEU', data['budget']]] + resources
#create logs!
log = ResourceLog.objects.create(owner=world, target=target, sent=True)
for resource in resources:
Logresource.objects.create(resource=resource[0], amount=resource[1], log=log)
hour = ('hours' if delay == 2 else 'hour')
if len(message):
message = message[:-1] + " and will recieve %s in %s %s!" % (
utilities.resource_text(resources), delay, hour)
else:
message = "%s will recieve %s in %s %s!" % (
target.name, utilities.resource_text(resources), delay, hour)
else: #not enough freighters
message = "We do not have enough freighters, we have %s and need %s" % (freighters, required_freighters)
if 'shipaid' in form:
form = Shipaidform(world, form)
if form.is_valid():
data = form.cleaned_data
ship = data['ship_choice']
amount = data['amount']
delay = (4 if target.sector == world.sector else 8)
outcometime = datetime=v.now() + time.timedelta(minutes=1)
if data['amount'] > data['fleet_choice'].__dict__[data['ship_choice']]:
message = "%s doesn't have that many %s!" % (data['fleet_choice'].name, ship)
else: #is all good
action = {'subtractships': {data['ship_choice']: amount}}
utilities.atomic_fleet(data['fleet_choice'].pk, action)
log = ResourceLog.objects.create(owner=world, target=target, sent=True)
shipname = (ship.replace('_', ' ') if amount > 1 else ship[:-1].replace('_', ' ')) #to plural or not plural
Logresource.objects.create(resource=shipname, amount=amount, log=log)
#more stuff
ref = fleet()
ref.__dict__[ship] = amount
training = data['fleet_choice'].maxtraining() * data['fleet_choice'].ratio()
taskdetails = taskdata.shipaidarrival(world, shipname, amount)
task = Task.objects.create(target=target,
content=taskdetails, datetime=outcometime)
newtask.shipaid.apply_async(args=(world.pk, target.pk,
task.pk, ship, amount, training), eta=outcometime)
message = "%s %s is en route to %s from %s" % (
amount, shipname, target.name, data['fleet_choice'].name)
if "infiltrate" in form:
form = SelectSpyForm(world, request.POST)
if form.is_valid():
data = form.cleaned_data
spyid = data['spyselect']
try:
spy = Spy.objects.get(pk=spyid)
except:
message = "There is no such spy!"
else:
if target.preferences.vacation:
message = 'This world is in vacation mode. You cannot infiltrate it.'
elif spy.owner != world:
message = "This spy does not belong to your intelligence services!"
elif Spy.objects.filter(owner=world, location=target).exists():
message = "You already have a spy in this world!"
elif spy.location != world:
message = "This spy is not at your home world!"
elif spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
else:
message = infiltrate(spy, target)
if "propaganda" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 250:
message = outcomes.nomoney()
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 250}}
utilities.atomic_world(world.pk, actions)
message = propaganda(spy, target)
if "gunrun" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.millevel < 1000:
message = outcomes.gunrun('NoTech')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
world.millevel = F('millevel') - 1000
world.save(update_fields=['millevel'])
message = gunrun(spy, target)
if "intel" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 200:
message = outcomes.nomoney()
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 200}}
utilities.atomic_world(world.pk, actions)
message = intel(spy, target)
if "sabyard" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.shipyards - target.shipyardsinuse == 0:
message = outcomes.sabotage('NoFreeYards')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabyard(spy, target)
if "sabfuel" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.warpfuelprod < 10:
message = outcomes.sabotage('NoFuelProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabfuel(spy, target)
if "sabdur" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.duraniumprod < 5:
message = outcomes.sabotage('NoDurProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabdur(spy, target)
if "sabtrit" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.tritaniumprod < 2:
message = outcomes.sabotage('NoTritProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabtrit(spy, target)
if "sabadam" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
elif target.adamantiumprod < 1:
message = outcomes.sabotage('NoAdamProd')
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabadam(spy, target)
if "sabhangars" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable for missions."
elif world.budget < 2000:
message = outcomes.nomoney()
else:
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.save(update_fields=['nextaction'])
actions = {'budget': {'action': 'subtract', 'amount': 2000}}
utilities.atomic_world(world.pk, actions)
message = sabhangars(spy, target)
if "withdraw" in form:
try:
spy = Spy.objects.get(owner=world, location=target)
except ObjectDoesNotExist:
message = "You have no spy in this world!"
else:
if spy.nextaction > v.now():
message = "This spy is currently recovering and unavailable."
else:
spy.inteltime = v.now()
spy.location = spy.owner
spy.nextaction = v.now() + time.timedelta(hours=8)
spy.timespent = 0
spy.save()
message = 'You have successfully withdrawn your spy from the enemy world!'
if world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True):
atwar = True
war = War.objects.get(attacker=world, defender=target)
if war.peaceofferbyatk is not None:
peaceoffer = True
elif world.pk in War.objects.filter(attacker=target).values_list('defender', flat=True):
atwar = True
war = War.objects.get(attacker=target, defender=world)
if war.peaceofferbydef is not None:
peaceoffer = True
spyform = SelectSpyForm(world)
# recalculate variables in case an action has changed them
if v.now() < world.warprotection:
warprotection = True
if target.gdp > 3 * world.gdp:
gdpprotection = True
if v.now() < target.warprotection:
targetprotection = True
if Spy.objects.filter(owner=world, location=world).count() == 0:
nospies = True
if Spy.objects.filter(owner=world).filter(location=target).count() == 1:
spyintarget = Spy.objects.filter(owner=world, location=target)[0]
if spyintarget.inteltime > v.now():
spyintel = True
timediff = spyintarget.inteltime - v.now()
hours, minutes, seconds = utilities.timedeltadivide(timediff)
timeforintel = 'You have %s:%s:%s of intel remaining.' % (hours, minutes, seconds)
#if the two worlds are at war
#calculate what fleets can attack where and what buttons to render
attackforms = []
if atwar:
worldfleets = world.controlled_fleets.all().exclude(sector='warping').exclude(sector='hangar')
targetfleets = target.controlled_fleets.all().exclude(sector='warping').exclude(sector='hangar')
sectors = {'amyntas': 0, 'bion': 0, 'cleon': 0, 'draco': 0}
for unit in worldfleets:
sectors[unit.sector] = 1
if unit.sector == target.sector:
sectors[unit.sector] = 2
for unit in targetfleets:
sectors[unit.sector] += 1
for sector in v.sectors: #organised list so it shows amyntas -> draco
if sectors[sector] >= 2: #both worlds has fleets in given sector
attackforms.append({'form': attackform(world, sector), 'sector': sector})
if len(attackforms) == 0:
attackforms = False #so we can display error message
milinfo = utilities.mildisplaylist(target, main=False)
mildisplay = display.fleet_display(milinfo[0], milinfo[1], main=False)
target.refresh_from_db()
if target.warattacker.count() > 0 or target.wardefender.count() > 0:
haswars = True
offlist = [wars.defender for wars in target.warattacker.all()]
deflist = [wars.attacker for wars in target.wardefender.all()]
initdata = {}
for resource in v.resources:
if world.__dict__[resource] > 0:
initdata.update({resource: 0})
return render(request, 'stats_ind.html', {'target': target, 'displayactions': displayactions, 'message':message, 'atwar':atwar,
'alliance':alliance, 'millevel': millevel, 'aidfleet': aidfleetform(world), 'aidform':Aidform(world, initial=initdata), 'haswars':haswars, 'offlist':offlist, 'deflist':deflist, 'warprotection':warprotection,
'peaceoffer':peaceoffer, 'gdpprotection':gdpprotection, 'warfuelcost':warfuelcost, 'costforgeuaid':costforgeuaid, 'indefwar':indefwar,
'nospies':nospies, 'spyintarget':spyintarget, 'mildisplay': mildisplay, 'spyform':spyform, 'spyintel':spyintel, 'timeforintel':timeforintel,
'defaultopen':defaultopen, 'lastonline': display.lastonline(target), 'attackforms': attackforms, 'shipaid': Shipaidform(world),
'receivetrade':receivetrade, 'lastseen': lastseen, 'raidcost':raidcost, 'targetprotection':targetprotection})
def battle(attacker, defender):
    # Placeholder for a generic battle-resolution entry point between two
    # worlds; currently unimplemented and does nothing.
    pass
def attack(request, world, target, fleets, war):
    '''Calculates consequences of a war attack.

    Resolves a fleet battle between ``world`` (the attacker) and ``target``
    in the sector the attacking ``fleets`` occupy: computes damage, per-fleet
    ship losses, salvage, flagship survival, and — when the defence is wiped
    out — end-of-war looting, war logs and news items.  Renders the war
    result page.
    '''
    # variable setup
    actions = {}
    targetactions = {}
    sector = fleets[0].sector
    flagworld = flagtarget = False
    defensefleets = target.controlled_fleets.all().filter(sector=sector)
    OOS = (sector != target.sector)  # out-of-sector combat gets no defence bonus
    warover = False
    surrender = False
    # attacker setup: aggregate base and modified power, spot the flagship
    baseworldpower = totalworldpower = 0
    for ships in fleets:
        totalworldpower += ships.powermodifiers()
        baseworldpower += ships.basepower()
        if ships.flagship:
            flagworld = True
    # defender setup
    basetargetpower = totaltargetpower = 0
    for ships in defensefleets:
        totaltargetpower += ships.powermodifiers()
        basetargetpower += ships.basepower()
        if ships.flagship:
            flagtarget = True
    # automatic victory when the defence is overwhelmed (never for OOS combat)
    if (0.1*totalworldpower > basetargetpower or basetargetpower == 0) and not OOS:
        warover = True
        battlevictory = True
        surrender = True
    else:
        # total damage per world; the defender bonus only applies at home
        if OOS:
            attackdamage = utilities.war_result(totalworldpower, totaltargetpower, basetargetpower)
        else:
            attackdamage = utilities.war_result(totalworldpower, totaltargetpower, basetargetpower, bonus=True)
        defensedamage = utilities.war_result(totaltargetpower, totalworldpower, baseworldpower)
        # damage each fleet sustains is proportional to its share of its
        # side's base power
        allfleets = {}
        for unit in fleets:
            ratio = float(unit.basepower()) / float(baseworldpower)
            allfleets[unit] = defensedamage * ratio
        for unit in defensefleets:
            ratio = float(unit.basepower()) / float(basetargetpower)
            allfleets[unit] = attackdamage * ratio
        # determine shiplosses per fleet, split by controller
        allfleetlosses = {}
        worldloss = fleet()
        targetloss = fleet()
        for fleetobj, damage in allfleets.iteritems():
            allfleetlosses[fleetobj.pk] = utilities.war_losses(damage, fleetobj)
            if fleetobj.controller == world:
                worldloss.merge(allfleetlosses[fleetobj.pk])
            else:
                targetloss.merge(allfleetlosses[fleetobj.pk])
        # apply the losses to the fleets
        totaldeath = fleet()
        for fleetpk, losses in allfleetlosses.iteritems():
            utilities.atomic_fleet(fleetpk, {'loss': losses})
            totaldeath.merge(losses)
        # resource salvage: OOS battles drop a salvage field in the sector,
        # home battles credit the defender directly
        salvdur, salvtrit, salvadam = utilities.salvage(totaldeath)
        if OOS:
            Salvage.objects.create(sector=sector, duranium=salvdur, tritanium=salvtrit, adamantium=salvadam)
        else:
            targetactions.update({
                'salvdur': {'action': 'add', 'amount': salvdur},
                'salvtrit': {'action': 'add', 'amount': salvtrit},
                'salvadam': {'action': 'add', 'amount': salvadam},
            })
        # damage results for assigning victory
        battlevictory = (attackdamage > defensedamage)
        # flagship interaction: 1/50 loss chance each when both flagships
        # are present, 1/80 when only one side fields one
        flagmeet = flagworldlose = flagtargetlose = False
        if flagworld and flagtarget:
            flagmeet = True
            flagworldlose = (random.randint(1, 50) == 1)
            flagtargetlose = (random.randint(1, 50) == 1)
        elif flagworld:
            flagworldlose = (random.randint(1, 80) == 1)
        elif flagtarget:
            flagtargetlose = (random.randint(1, 80) == 1)
        # doublecheck outcome: an obliterated fleet cannot keep its flagship
        if not flagtargetlose:
            for ships in defensefleets:
                if ships.flagship:
                    ships.refresh_from_db()
                    if ships == fleet():
                        flagtargetlose = True
        if flagworldlose or flagtargetlose:
            # contentment swings in favour of whoever kept their flagship
            if flagworldlose and not flagtargetlose:
                actions.update({'contentment': {'action': 'add', 'amount': utilities.attrchange(world.contentment, -10)}})
                targetactions.update({'contentment': {'action': 'add', 'amount': utilities.attrchange(target.contentment, 10)}})
            elif flagtargetlose and not flagworldlose:
                actions.update({'contentment': {'action': 'add', 'amount': utilities.attrchange(world.contentment, 10)}})
                targetactions.update({'contentment': {'action': 'add', 'amount': utilities.attrchange(target.contentment, -10)}})
            utilities.atomic_world(target.pk, targetactions)
            utilities.atomic_world(world.pk, actions)
            targetactions = {}
            actions = {}
            if flagworldlose:
                for f in fleets:
                    if f.flagship:
                        utilities.atomic_fleet(f.pk, {'set': {'flagship': False}})
            if flagtargetlose:
                for f in defensefleets:
                    if f.flagship:
                        utilities.atomic_fleet(f.pk, {'set': {'flagship': False}})
        # reload worlds
        world.refresh_from_db()
        target.refresh_from_db()
        # reload fleet data after the exchange
        defensefleets = target.controlled_fleets.all().filter(sector=sector)
        baseworldpower = totalworldpower = 0
        for ships in fleets:
            ships.refresh_from_db()
            totalworldpower += ships.powermodifiers()
            baseworldpower += ships.basepower()
        basetargetpower = totaltargetpower = 0
        for ships in defensefleets:
            totaltargetpower += ships.powermodifiers()
            basetargetpower += ships.basepower()
        # flagship data bundled for the news templates
        flag = {'world': flagworld, 'target': flagtarget, 'worldloss': flagworldlose, 'targetloss': flagtargetlose,
                'worldname': world.flagshipname, 'targetname': target.flagshipname, 'meet': flagmeet}
        # war end condition
        if OOS and basetargetpower == 0:
            # OOS war end: attacker captures the defenders' freighters
            surplusfreighters = 0
            for ships in defensefleets:
                surplusfreighters += ships.freighters
                utilities.atomic_fleet(ships.pk, {'set': {'freighters': 0}})
            # freighters are preferentially given to attacking fleets that
            # need them to function, then spread across the rest
            for ships in fleets:
                if ships.enoughfuel()[1] == 'freighters':  # fleet needs freighters to function
                    needed = (ships.fuelcost() * v.freighter_capacity['warpfuel'] / \
                        v.freighter_capacity['total']) + 1
                    if surplusfreighters >= needed:
                        surplusfreighters -= needed
                        # BUGFIX: was a bare atomic_fleet() call
                        utilities.atomic_fleet(ships.pk, {'add': {'freighters': needed}})
                    else:
                        # BUGFIX: the old code zeroed surplusfreighters
                        # *before* adding it, so the remainder vanished
                        utilities.atomic_fleet(ships.pk, {'add': {'freighters': surplusfreighters}})
                        surplusfreighters = 0
                        break
            stolenfreighters = surplusfreighters
            assignment = [0] * len(fleets)
            if surplusfreighters > len(fleets):
                # BUGFIX: the old loop incremented the loop variable, which
                # never touched the list; give each fleet one freighter first
                assignment = [share + 1 for share in assignment]
                surplusfreighters -= len(fleets)
            while surplusfreighters > 0:
                assignment[random.randint(0, len(assignment)-1)] += 1
                surplusfreighters -= 1
            for ships, freighters in zip(fleets, assignment):
                utilities.atomic_fleet(ships.pk, {'add': {'freighters': freighters}})
            resultdetails, htmldata = news.OOSfinalbattleresult(sector, world, target, worldloss,
                targetloss, stolenfreighters, fleets, defensefleets, flag)
            NewsItem.objects.create(target=target, content=htmldata)
        elif (0.1*totalworldpower > basetargetpower or basetargetpower == 0) and not OOS:
            # home sector victory battle; loot and news handled below
            warover = True
        else:
            # regular combat result
            resultdetails, htmldata = news.battleresult(sector, world, target, worldloss,
                targetloss, fleets, defensefleets, flag)
            NewsItem.objects.create(target=target, content=htmldata)
    if warover:
        losses = {  # maximum lootable amounts: half the loser's stock
            'warpfuel': (target.warpfuel / 2 if target.warpfuel > 0 else 0),
            'duranium': (target.duranium / 2 if target.duranium > 0 else 0),
            'tritanium': (target.tritanium / 2 if target.tritanium > 0 else 0),
            'adamantium': (target.adamantium / 2 if target.adamantium > 0 else 0),
        }
        # freighter capacity limits how much loot can be carried home
        capacity = 0
        for ships in fleets:
            capacity += ships.freighters * v.freighter_capacity['total']
        preferred = world.preferences.winresource
        actions = {
            'warpoints': {'action': 'add', 'amount': target.warpoints+1},
            'budget': {'action': 'add', 'amount': (target.budget / 2 if target.budget > 0 else 0)},
            'gdp': {'action': 'add', 'amount': (target.gdp / 6 if target.gdp > 0 else 0)},
            'growth': {'action': 'add', 'amount': (target.growth/2 if target.growth > 0 else 0)},
        }
        # the preferred resource is loaded first, up to capacity
        if capacity > losses[preferred] * v.freighter_capacity[preferred]:
            actions.update({preferred: {'action': 'add', 'amount': losses[preferred] * v.freighter_capacity[preferred]}})
            capacity -= losses[preferred] * v.freighter_capacity[preferred]
        else:
            actions.update({preferred: {'action': 'add', 'amount': 0}})
            while capacity > v.freighter_capacity[preferred]:
                actions[preferred]['amount'] += 1
                capacity -= v.freighter_capacity[preferred]
        losses.pop(preferred)
        while capacity > 0:  # distribute remaining war loot evenly
            nogo = 0
            for item in losses:
                if losses[item] == 0:
                    nogo += 1
                    continue
                if capacity >= v.freighter_capacity[item]:
                    if item in actions:
                        actions[item]['amount'] += 1
                    else:
                        actions.update({item: {'action': 'add', 'amount': 1}})
                    capacity -= v.freighter_capacity[item]
                    losses[item] -= 1
                else:
                    nogo += 1
            if sum(losses.values()) == 0 or nogo == len(losses):
                break
        resources = ['gdp', 'growth'] + v.resources  # ordered list for display
        for key in resources[:]:  # iterate a copy while removing entries
            if key not in actions:
                resources.remove(key)
                continue
            targetactions.update({key: {'action': 'subtract', 'amount': actions[key]['amount']}})
        targetactions.update({'warpoints': {'action': 'set', 'amount': 0}})
        # logs
        winlog = Warlog(owner=world, target=target, victory=True)
        winlog.set(actions, resources)
        winlog.save()
        loserlog = Warlog(owner=target, target=world, victory=False)
        loserlog.set(actions, resources, reverse=True)
        # BUGFIX: this previously called winlog.save() a second time, so the
        # loser's war log was never persisted.
        loserlog.save()
        if surrender:
            resultdetails, htmldata = news.warresult(sector, world, target, actions,
                ' no ships at all', 0, fleets, defensefleets)
        else:
            resultdetails, htmldata = news.finalbattleresult(sector, world, target, actions,
                resources, ' no ships at all', worldloss, targetloss, fleets, defensefleets)
        newsitem = NewsItem(target=target, content=htmldata)
        newsitem.save()
        if world.pk in War.objects.filter(defender=target).values_list('attacker', flat=True):  # if you're the attacker
            # rumsoddium transfer
            if target.rumsoddium >= 1:
                actions.update({'rumsoddium': {'action': 'add', 'amount': target.rumsoddium}})
                targetactions.update({'rumsoddium': {'action': 'set', 'amount': 0}})
                htmldata = news.rumsoddium(world)
                NewsItem.objects.create(target=target, content=htmldata)
                resultdetails += '<p class="halfline"> </p><span class="green">You have also taken their prized rumsoddium!</span>'
                data = GlobalData.objects.get(pk=1)
                data.rumsoddiumwars = F('rumsoddiumwars') + 1
                data.save(update_fields=['rumsoddiumwars'])
            # attribute change for the defeated defender
            targetactions.update({
                'contentment': {'action': 'add', 'amount': utilities.attrchange(target.contentment, -20)},
                'stability': {'action': 'add', 'amount': utilities.attrchange(target.stability, -10)}
            })
            # war protection unless it was deliberately broken
            if not v.now() < target.brokenwarprotect:
                targetactions.update({'warprotection': {'action': 'set', 'amount': v.now() + time.timedelta(days=5)}})
        # end of war
        war.delete()
    utilities.atomic_world(world.pk, actions)
    utilities.atomic_world(target.pk, targetactions)
    return render(request, 'warresult.html', {'resultdetails': resultdetails, 'battlevictory': battlevictory})
def raid(request, world, target, war):
    '''Calculates consequences of a war raid.

    Only fighters and corvettes take part.  The defender's staging fleet
    joins the defence when the raided region is the defender's home region.
    Renders the raid result page.
    '''
    # variable setup: zero out every ship class except fighters/corvettes
    raidlist = utilities.regionshiplist(world, war.region)[:2] + [0, 0, 0, 0, 0, 0, 0]  # only count fighters and corvettes
    totownpower = utilities.powerallmodifiers(world, war.region, raidlist, False)
    baseownpower = utilities.powerfromlist(raidlist, False)
    otherlist = utilities.regionshiplist(target, war.region)[:2] + [0, 0, 0, 0, 0, 0, 0]
    tototherpower = utilities.powerallmodifiers(target, war.region, otherlist, False)
    baseotherpower = utilities.powerfromlist(otherlist, False)
    staginglist = utilities.regionshiplist(target, 'S')[:2] + [0, 0, 0, 0, 0, 0, 0]
    totstagingpower = utilities.powerallmodifiers(target, 'S', staginglist, False)
    basestagingpower = utilities.powerfromlist(staginglist, False)
    otherfuelcost = utilities.warpfuelcost(utilities.regionshiplist(target, war.region))
    othersupply = utilities.freighterregion(target, war.region)*200
    owntraining = utilities.percenttraining(world, war.region)
    othertraining = utilities.percenttraining(target, war.region)
    stagingtraining = utilities.percenttraining(target, 'S')
    # BUGFIX: stagingactive was only assigned inside the branch below, so
    # raids outside the defender's home region raised NameError when it was
    # read further down.
    stagingactive = False
    # involve staging fleet when the home region is raided
    if war.region == target.region:
        stagingactive = True
        try:
            hsratio = (tototherpower/(tototherpower+totstagingpower))
        except ZeroDivisionError:  # neither fleet has any power
            hsratio = 1
        tototherpower += totstagingpower
        baseotherpower += basestagingpower
        otherfuelcost += utilities.warpfuelcost(utilities.regionshiplist(target, 'S'))
        othersupply += utilities.freighterregion(target, 'S')*200
        otherlist = [x+y for x, y in zip(otherlist, staginglist)]
        if hsratio != 1:
            # average home and staging training when both contributed power
            othertraining += stagingtraining
            othertraining /= 2
    # total losses (defender gets the home bonus)
    deflosses = utilities.war_result(totownpower, tototherpower, baseotherpower, list(otherlist), bonus=True)
    attlosses = utilities.war_result(tototherpower, totownpower, baseownpower, list(raidlist))
    if stagingactive:
        homelosses, staginglosses = utilities.staginglosssplit(deflosses, otherlist, staginglist, hsratio)
    else:
        homelosses = deflosses
        staginglosses = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    loss = utilities.raidloss(otherfuelcost, othersupply)
    # decide winner: worse-trained raiders get fewer auto-victories
    upperchance = (20 if owntraining <= othertraining else 10)
    if random.randint(1, upperchance) == 1:
        supersuccess = True  # victory without fighting
        success = True
    else:
        supersuccess = False
    # subtract ships
    utilities.warloss_byregion(target, war.region, homelosses)
    utilities.warloss_byregion(target, 'S', staginglosses)
    utilities.warloss_byregion(world, war.region, attlosses)
    damagedealt = utilities.powerfromlist(deflosses, False)
    damagesustained = utilities.powerfromlist(attlosses, False)
    # compare damage for winner
    # NOTE(review): this unconditionally overwrites the success flag set in
    # the supersuccess branch above — confirm that is intended.
    if damagedealt > damagesustained or damagesustained == 0:
        success = True
        loss = utilities.raidloss(otherfuelcost, othersupply)
        utilities.freighterloss(target, war.region, loss)
    else:
        success = None
        loss = 0
    resultdetails = news.raidresult(war, world, target, deflosses, attlosses, success, supersuccess, loss)
    htmldata = news.raidnotify(war, world, target, deflosses, attlosses, success, supersuccess, loss)
    NewsItem.objects.create(target=target, content=htmldata)
    return render(request, 'raidresult.html', {'resultdetails': resultdetails, 'success': success})
def infiltrate(spy, target):
    '''Attempt to slip a spy into the target world.

    Success (90% base chance plus the spy's infiltration skill) relocates
    the spy and raises the skill; failure reveals and deletes the spy and
    notifies the target.  Returns the message shown to the spy's owner.
    '''
    roll = random.randint(1, 100)
    if roll <= 90 + spy.infiltration:
        spy.location = target
        spy.infiltration += 1
        spy.save()
        return 'Your agent successfully bypassed your target\'s security and got to work creating a secret network.'
    # failure: spy is exposed, target is informed, spy is executed
    reveal, revmsg = utilities.reveal(spy)
    NewsItem.objects.create(target=target, content=news.spycaughtinfiltration(spy, reveal))
    spy.delete()
    return 'Your agent was caught and executed by your target\'s security forces.' + revmsg
def propaganda(spy, target):
    '''Run a propaganda mission against the target world.

    Success (75% base chance plus the spy's propaganda skill) lowers the
    target's contentment; failure may get the spy caught, revealed and
    executed.  Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 75 + spy.propaganda:
        spy.propaganda += 1
        spy.save(update_fields=['propaganda'])
        drop = utilities.attrchange(target.contentment, -20)
        utilities.atomic_world(target.pk, {'contentment': {'action': 'add', 'amount': drop}})
        result = 'Your propaganda campaign was successful in sowing discontent among the people.'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your propaganda campaign seems to have had no effect on the populace\'s opinion of their leader.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtpropaganda(spy, reveal))
            spy.delete()
    return result + caughtmsg + revmsg
def gunrun(spy, target):
    '''Smuggle military tech to the rebels on the target world.

    Success (65% base chance plus the spy's gunrunning skill) boosts the
    rebel count; failure may get the spy caught, revealed and executed.
    Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 65 + spy.gunrunning:
        spy.gunrunning += 1
        spy.save(update_fields=['gunrunning'])
        boost = utilities.attrchange(target.rebels, 10, zero=True)
        utilities.atomic_world(target.pk, {'rebels': {'action': 'add', 'amount': boost}})
        result = 'Your tech was passed on to the rebels, who successfully mount some resistance!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'The rebels make inefficient use of your tech, and there is no increase in their number.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtgunrun(spy, reveal))
            spy.delete()
    return result + caughtmsg + revmsg
def intel(spy, target):
    '''Set up electronic surveillance on the target world.

    Success (85% base chance plus the spy's intelligence skill) grants
    24 hours of intel; failure may get the spy caught, revealed and
    executed.  Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 85 + spy.intelligence:
        spy.intelligence += 1
        spy.inteltime = v.now() + time.timedelta(hours=24)
        spy.save(update_fields=['intelligence', 'inteltime'])
        result = 'You managed to set up your electronic surveillance system successfully!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Unfortunately your network was discovered before it could collect any important data.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtintel(spy, reveal))
            spy.delete()
    return result + caughtmsg + revmsg
def sabyard(spy, target):
    '''Results of spy sabotaging shipyards.

    Success (35% base chance plus the spy's sabotage skill) destroys one
    shipyard and scales production points down to match the remaining
    yards; failure may get the spy caught, revealed and executed.
    Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    chance = random.randint(1, 100)
    if 35 + spy.sabotage >= chance:
        spy.sabotage += 2
        spy.save(update_fields=['sabotage'])
        # BUGFIX: the old code assigned an unsaved F('shipyards') - 1
        # expression to target.shipyards, so the comparison and division
        # below operated on an unevaluated query expression instead of the
        # post-sabotage yard count.  Work with the plain integer instead.
        remaining = target.shipyards - 1
        actions = {'shipyards': {'action': 'subtract', 'amount': 1}}
        if remaining > 0:
            if target.productionpoints > remaining:
                # shed one yard's average share of production points
                reduction = target.productionpoints / remaining
                actions.update({'productionpoints': {'action': 'subtract', 'amount': reduction}})
        else:
            # last yard destroyed: no capacity left to hold production points
            actions.update({'productionpoints': {'action': 'subtract', 'amount': target.productionpoints}})
        utilities.atomic_world(target.pk, actions)
        htmldata = news.notifysab('yard')
        NewsItem.objects.create(target=target, content=htmldata)
        result = 'Your crack team successfully bypassed security and blew up an enemy shipyard!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your covert team was discovered by enemy security before they could destroy anything.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            htmldata = news.spycaughtsab(spy, reveal, 'yard')
            NewsItem.objects.create(target=target, content=htmldata)
            spy.delete()
    return result + caughtmsg + revmsg
def sabfuel(spy, target):
    '''Sabotage the target world's warpfuel refinery.

    Success (45% base chance plus the spy's sabotage skill) cuts warpfuel
    production by one refinery's worth; failure may get the spy caught,
    revealed and executed.  Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 45 + spy.sabotage:
        spy.sabotage += 2
        spy.save(update_fields=['sabotage'])
        utilities.atomic_world(target.pk, {'warpfuelprod': {'action': 'subtract', 'amount': v.production['warpfuelprod']}})
        NewsItem.objects.create(target=target, content=news.notifysab('fuel'))
        result = 'Your team managed to sneak onto the refinery and critically damage it!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your covert team was detected by the shipyard\'s automated defense systems before they could do any damage.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtsab(spy, reveal, 'fuel'))
            spy.delete()
    return result + caughtmsg + revmsg
def sabdur(spy, target):
    '''Sabotage the target world's duranium mine.

    Success (45% base chance plus the spy's sabotage skill) cuts duranium
    production by one mine's worth; failure may get the spy caught,
    revealed and executed.  Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 45 + spy.sabotage:
        spy.sabotage += 2
        spy.save(update_fields=['sabotage'])
        utilities.atomic_world(target.pk, {'duraniumprod': {'action': 'subtract', 'amount': v.production['duraniumprod']}})
        NewsItem.objects.create(target=target, content=news.notifysab('dur'))
        result = 'Your crack team successfully bypassed security and destroyed an enemy duranium mine!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your covert team was discovered by enemy security before they could destroy anything.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtsab(spy, reveal, 'dur'))
            spy.delete()
    return result + caughtmsg + revmsg
def sabtrit(spy, target):
    '''Sabotage the target world's tritanium mine.

    Success (45% base chance plus the spy's sabotage skill) cuts tritanium
    production by one mine's worth; failure may get the spy caught,
    revealed and executed.  Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 45 + spy.sabotage:
        spy.sabotage += 2
        spy.save(update_fields=['sabotage'])
        utilities.atomic_world(target.pk, {'tritaniumprod': {'action': 'subtract', 'amount': v.production['tritaniumprod']}})
        NewsItem.objects.create(target=target, content=news.notifysab('trit'))
        result = 'Your crack team successfully bypassed security and blew up an enemy tritanium mine!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your covert team was discovered by enemy security before they could destroy anything.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtsab(spy, reveal, 'trit'))
            spy.delete()
    return result + caughtmsg + revmsg
def sabadam(spy, target):
    '''Sabotage the target world's adamantium mine.

    Success (45% base chance plus the spy's sabotage skill) cuts adamantium
    production by one mine's worth; failure may get the spy caught,
    revealed and executed.  Returns the message shown to the spy's owner.
    '''
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 45 + spy.sabotage:
        spy.sabotage += 2
        spy.save(update_fields=['sabotage'])
        utilities.atomic_world(target.pk, {'adamantiumprod': {'action': 'subtract', 'amount': v.production['adamantiumprod']}})
        NewsItem.objects.create(target=target, content=news.notifysab('adam'))
        result = 'Your crack team successfully bypassed security and blew up an enemy adamantium mine!'
    else:
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your covert team was discovered by enemy security before they could destroy anything.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtsab(spy, reveal, 'adam'))
            spy.delete()
    return result + caughtmsg + revmsg
def sabhangars(spy, target):
    'Results of spy sabotaging hangar.'
    # A 1-100 roll against (45 + the spy's sabotage skill) decides success.
    caughtmsg = revmsg = ''
    roll = random.randint(1, 100)
    if roll <= 45 + spy.sabotage:
        spy.sabotage += 2
        spy.save(update_fields=['sabotage'])
        # Destroy ships worth 10% of the target's hangar ('H') military power.
        targetpower = utilities.militarypower(target, 'H')
        shiplist = utilities.regionshiplist(target, 'H')
        deflosses = utilities.war_losses(0.1*targetpower, shiplist)
        utilities.warloss_byregion(target, 'H', deflosses)
        NewsItem.objects.create(target=target, content=news.notifysabhangars(deflosses))
        hangarlosses = news.losses(deflosses)
        result = "You successfully managed to sabotage the enemy's orbital hangars, and destroyed%s as a result!" % hangarlosses
    else:
        # Failure: the team may be caught, and a caught spy may be revealed
        # (and is then lost).
        caught, caughtmsg = utilities.caught(spy)
        result = 'Your covert team was discovered by enemy security before they could destroy anything.'
        if caught:
            reveal, revmsg = utilities.reveal(spy)
            NewsItem.objects.create(target=target, content=news.spycaughtsabhangars(spy, reveal))
            spy.delete()
    return result + caughtmsg + revmsg
|
heidi666/WorldsAtWar
|
wawmembers/interactions.py
|
Python
|
mit
| 61,805
|
[
"Galaxy"
] |
ab3882264678b5fa9b4646c870db210343c66d7fe11d6425d2730b1cab1b8b55
|
# TODO: By PySCF-1.5 release
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. code style
# * Indent space: 3 -> 4
# * Function/method should be all lowercase
# * Line wrap around 80 columns
# * Use either double quote or single quote, not mix
#
# 2. Conventions required by PySCF
# * Use proper logger function for debug messages
# * Add attribute ._keys for sanity check
# * Class attributes should be all lowercase
# * Use .verbose to control print level
#
# 3. Use proper functions provided by PySCF
#
# This file is adapted with permission from the wmme program of Gerald Knizia.
# See http://sites.psu.edu/knizia/software/
#====================================================
from __future__ import print_function
"""Functions to read data from Molpro XML files generated via
...
{put,xml,FileName; keepspherical; nosort}
(after a orbital-generating command, e.g., "{df-rks,pbe}")
"""
import wmme
import numpy as np
import xml.etree.ElementTree as ET
class FOrbitalInfo(object):
    """One molecular orbital read from a Molpro XML file: coefficient
    vector, energy, occupation, symmetry block, and index within that
    symmetry block, together with the basis the coefficients refer to."""
    def __init__(self, Coeffs, fEnergy, fOcc, iSym, iOrbInSym, Basis):
        # Copy the coefficients into a numpy array; Basis must be the
        # wmme basis-set object the coefficients are expressed in.
        self.Coeffs = np.array(Coeffs)
        assert(isinstance(Basis, wmme.FBasisSet))
        self.Basis = Basis
        self.fOcc = fOcc
        self.fEnergy = fEnergy
        self.iSym = iSym
        self.iOrbInSym = iOrbInSym
    @property
    def Desc(self):
        """Human-readable label: 'n.sym [E=... O=...]'."""
        fmt = "%i.%i [E=%.4f O=%.4f]"
        return fmt % (self.iOrbInSym, self.iSym, self.fEnergy, self.fOcc)
    @property
    def Name(self):
        """Short Molpro-style orbital label 'n.sym'."""
        return "%i.%i" % (self.iOrbInSym, self.iSym)
class FMolproXmlData(object):
    """Aggregate of the data read from one Molpro XML file: the geometry
    (Atoms), the orbital basis (OrbBasis), the per-orbital records
    (Orbitals, list of FOrbitalInfo), and the assembled coefficient
    matrix (Orbs, shape nBf x nOrb in the spherical-harmonic basis)."""
    def __init__(self, Atoms, OrbBasis, Orbitals, FileName=None, Variables=None):
        self.Atoms = Atoms
        self.OrbBasis = OrbBasis
        self.Orbitals = Orbitals
        # make an orbital matrix by concatenating the individual
        # orbital coefficient arrays.
        nBf = self.OrbBasis.nFn
        nOrb = len(self.Orbitals)
        self.Variables = Variables  # dict of Molpro variables from the file, or None
        if self.Variables is not None:
            # NOTE(review): self.ToAng is only set when Variables were present
            # in the file; on the other path the attribute does not exist.
            # Confirm whether a wmme.ToAng fallback is wanted here.
            self.ToAng = float(self.Variables["_TOANG"])
        if nOrb == 0:
            Orbs = np.zeros((nBf, nOrb))
        else:
            # Size the matrix from the first coefficient vector, not nBf:
            # old Molpro files store cartesian-length vectors (see below).
            nBfOrb = len(Orbitals[0].Coeffs)
            Orbs = np.zeros((nBfOrb, nOrb))
            for i in range(nOrb):
                Orbs[:,i] = Orbitals[i].Coeffs
        # check if we need to convert from cartesian to spherical.
        nBfCa = self.OrbBasis.nFnCa
        if Orbs.shape[0] == nBf:
            # nope--already spherical.
            pass
        elif Orbs.shape[0] == nBfCa:
            # yes (old molpro version)
            ls = OrbBasis.GetAngmomList()
            Orbs = _Vec_Ca2Sh(Orbs, ls)
        else:
            raise Exception("MolproXml import: Import orbital matrix neither consistent with spherical nor cartesian basis functions.")
        self.Orbs = Orbs
        self.FileName = FileName  # path the data was read from, if known
def remove_namespace(doc, namespace):
    """Remove the given namespace from all tags in the passed document, in place.

    Tags of the form '{namespace}local' are rewritten to 'local'; tags in
    other namespaces (or none) are left untouched.
    """
    ns = u'{%s}' % namespace
    nsl = len(ns)
    # Element.getiterator() was removed in Python 3.9; Element.iter() is the
    # equivalent and has existed since Python 2.7/3.2.
    for elem in doc.iter():
        if elem.tag.startswith(ns):
            elem.tag = elem.tag[nsl:]
def remove_all_namespaces(doc):
    """Strip every namespace prefix from tags and attribute names, in place."""
    def remove_namespace1(s):
        # '{uri}local' -> 'local'; names without a namespace pass through.
        if s.startswith("{"):
            return s[s.find("}")+1:]
        else:
            return s
    # Element.getiterator() was removed in Python 3.9; Element.iter() is the
    # equivalent and has existed since Python 2.7/3.2.
    for elem in doc.iter():
        elem.tag = remove_namespace1(elem.tag)
        elem.attrib = dict([(remove_namespace1(k),v) for (k,v) in elem.attrib.items()])
def _ReadNodeArray(Node):
return np.array(Node.text.split()).astype(float)
def _ReadAtoms(AtomNode, XmlToAng):
    """Parse a cml atomArray node into a wmme.FAtomSet.

    XmlToAng is the angstrom-per-bohr conversion factor used to translate
    the XML coordinates (angstrom) into bohr.  Returns (AtomSet, IdToAtom)
    where IdToAtom maps each XML atom id string to the 0-based atom index.
    """
    Elements = []
    Positions = []
    IdToAtom = {}  # maps XML tag of atom to the index in the atom array.
    for XmlAtom in AtomNode:
        iAtom = len(Elements)
        Id = XmlAtom.attrib["id"]  # it's a string
        Element = XmlAtom.attrib["elementType"]
        # input is in angstroems. (not AUs)
        x = float(XmlAtom.attrib['x3'])
        y = float(XmlAtom.attrib['y3'])
        z = float(XmlAtom.attrib['z3'])
        IdToAtom[Id] = iAtom
        Elements.append(Element)
        # convert output to bohr units.
        Positions.append((1./XmlToAng) * np.array([x,y,z]))
        #Positions.append(np.array([x,y,z]))
    # FAtomSet expects positions as a 3 x nAtoms matrix, hence the .T.
    return wmme.FAtomSet(np.array(Positions).T, Elements), IdToAtom
def _ReadBasisSet(BasisNode, Atoms, IdToAtom):
    """Parse a Molpro XML basisSet node into a wmme.FBasisSet.

    Reads the shell definitions (basisGroup nodes), then the
    atom-to-shell association tables, and assembles the shells in
    AtomSet order.  Raises on generally-contracted (MinL != MaxL),
    cartesian, or internally inconsistent basis declarations.
    """
    IdToBf = {}  # maps XML shell id -> FBasisShell
    BasisShells = []
    for ShellNode in BasisNode.findall("basisGroup"):
        #print "shell: %s -> %s " % (ShellNode.tag, ShellNode.attrib)
        nCo = int(ShellNode.attrib['contractions'])
        nExp = int(ShellNode.attrib['primitives'])
        MinL = int(ShellNode.attrib['minL'])
        MaxL = int(ShellNode.attrib['maxL'])
        if (MinL != MaxL):
            raise Exception("Basis sets with MinL != MaxL are not supported.")
        l = MinL
        AngularType = ShellNode.attrib['angular']
        if AngularType != "spherical":
            raise Exception("Only spherical harmonic basis sets are supported.")
        Id = ShellNode.attrib['id']
        Exps = _ReadNodeArray(ShellNode.find('basisExponents'))
        Cos = []
        for CoNode in ShellNode.findall('basisContraction'):
            Cos.append(_ReadNodeArray(CoNode))
        # Sanity checks against the counts declared in the attributes.
        if (len(Exps) != nExp):
            raise Exception("Inconsistent basis declaration: nExp != len(Exp).")
        if (len(Cos) != nCo):
            raise Exception("Inconsistent basis declaration: nCo != len(Cos).")
        Bf = wmme.FBasisShell(l, np.array(Exps), np.array(Cos).T)
        BasisShells.append(Bf)
        IdToBf[Id] = Bf
        #print "Id = '%s' %s" % (Id, Bf)
    # Build the atom-id -> [shell-id, ...] association table from the
    # xlink href expressions in the association nodes.
    Associations = {}
    for AssocNode in BasisNode.findall("association"):
        def ReadLinks(Node, Type):
            # Extract the id list from an xpath-ish href like
            # "...Type[@id='1' or @id='2']" -> "1 2".
            #print "Node.attrib: %s" % Node.attrib
            xlink = Node.attrib["href"]
            s = Type + "["
            xlink = xlink[xlink.find(s)+len(s):xlink.rfind(']')]
            xlink = xlink.replace("@id=","").replace("'","").replace(" or ", " ")
            return xlink
        ShellLinks = ReadLinks(AssocNode.find('bases'), "basisGroup").split()
        AtomLinks = ReadLinks(AssocNode.find('atoms'), "atom").split()
        #print "atoms: %s\n -> shells: %s" % (AtomLinks, ShellLinks)
        for AtId in AtomLinks:
            for ShellId in ShellLinks:
                #print " link: %s -> %s" % (AtId, ShellId)
                L = Associations.get(AtId, [])
                L.append(ShellId)
                Associations[AtId] = L
    #print "Associations: %s" % Associations
    AtomToId = dict((v,k) for (k,v) in IdToAtom.items())
    def GetAtomAssociations(AtomId):
        # Molpro2012 apparently sometimes linked to atom ids with names like "1"/"2" etc
        # even if the atom ids were actually defined as "a1"/"a2" etc.
        if AtomId in Associations:
            return Associations[AtomId]
        elif AtomId.startswith("a"):
            return Associations[AtomId[1:]]
        else:
            raise Exception("something went wrong in the association of basis sets and atoms.")
    # assemble the basis set in AtomSet order.
    Shells1 = []
    for (iAt,At) in enumerate(Atoms):
        #ShellIds = Associations[AtomToId[iAt]]
        ShellIds = GetAtomAssociations(AtomToId[iAt])
        for ShellId in ShellIds:
            Shells1.append(wmme.FBasisShell1(At, IdToBf[ShellId]))
    BasisSet = wmme.FBasisSet(Shells1, Atoms)
    # Cross-check the assembled size against the file's declared length.
    if BasisSet.nFn != int(BasisNode.attrib["length"]):
        raise Exception("Expected size of basis and actual size of basis do not match.")
    return BasisSet
def _ReadOrbitals(OrbitalsNode, Atoms, OrbBasis, SkipVirtual):
    """Parse the <orbitals> node into a list of FOrbitalInfo.

    With SkipVirtual=True, orbitals with zero occupation are dropped.
    Per-symmetry counters assign each orbital its 1-based index within
    its symmetry block (assumes at most 8 irreps -- D2h and subgroups).
    The prints tagged ELVIRA are deliberate debug output listing the
    occupied orbitals.
    """
    nBf = OrbBasis.nFn
    Orbitals = []
    count =0 #ELVIRA
    print("# orb iSym iOrbInSym") #ELVIRA
    nOrbInSym = np.array(8*[0])  # per-irrep orbital counters
    for OrbNode in OrbitalsNode.findall("orbital"):
        fOcc = float(OrbNode.attrib["occupation"])
        if SkipVirtual and fOcc == 0.0:
            continue
        fEnergy = float(OrbNode.attrib["energy"])
        iSym = int(OrbNode.attrib["symmetryID"])
        iOrbInSym = nOrbInSym[iSym-1] + 1 # 1-based.
        nOrbInSym[iSym-1] += 1
        Coeffs = _ReadNodeArray(OrbNode)
        # Length check disabled: old files store cartesian-length vectors,
        # converted later in FMolproXmlData.__init__.
        #if len(Coeffs) != nBf:
        #raise Exception("Number of orbital coefficients differs from number of basis functions.")
        Orbitals.append(FOrbitalInfo(Coeffs, fEnergy, fOcc, iSym, iOrbInSym, OrbBasis))
        if fOcc != 0.0:
            print(count , " ", iSym, " ", iOrbInSym) #ELVIRA
        count +=1 #ElVIRA
    return Orbitals
def ReadMolproXml(FileName,SkipVirtual=False):
    """Read a Molpro XML file and return a FMolproXmlData object.

    Parses, in order: the Molpro variables table (if present), the atom
    declarations, the 'ORBITAL' basis set, and the orbitals.  With
    SkipVirtual=True, unoccupied orbitals are not read.
    """
    XmlTree = ET.parse(FileName)
    Root = XmlTree.getroot()
    # Namespaces only get in the way of the simple find() calls below.
    remove_all_namespaces(Root)
    Molecule = list(Root)[0]
    VariablesNode = Molecule.find("variables")
    if VariablesNode is None:
        Variables = None
    else:
        # Each variable holds one or more <value> children; collapse
        # single-value lists to a scalar string.
        Variables = {}
        for VariableNode in VariablesNode:
            #print VariableNode.tag, VariableNode.attrib
            L = []
            for v in VariableNode.findall("value"):
                L.append(v.text)
            if len(L) == 1:
                L = L[0]
            Variables[VariableNode.attrib['name']] = L
    # Prefer the file's own bohr->angstrom factor; fall back to wmme's.
    if Variables is not None and "_TOANG" in Variables:
        XmlToAng = float(Variables["_TOANG"])
    else:
        XmlToAng = wmme.ToAng
    # read atom declarations
    AtomArrayNode = Molecule.find("atomArray")
    if AtomArrayNode is None:
        # atomArray was put into cml:molecule at some point in time.
        CmlMoleculeNode = Molecule.find("molecule")
        AtomArrayNode = CmlMoleculeNode.find("atomArray")
    Atoms, IdToAtom = _ReadAtoms(AtomArrayNode, XmlToAng)
    # find the XML node describing the main orbital basis
    OrbBasisNode = None
    for Node in Molecule.findall("basisSet"):
        if Node.attrib["id"] == "ORBITAL":
            OrbBasisNode = Node
    assert(OrbBasisNode is not None)
    OrbBasis = _ReadBasisSet(OrbBasisNode, Atoms, IdToAtom)
    Orbitals = _ReadOrbitals(Molecule.find("orbitals"), Atoms, OrbBasis, SkipVirtual)
    #print "Number of orbitals read: %s" % len(Orbitals)
    return FMolproXmlData(Atoms, OrbBasis, Orbitals, FileName=FileName, Variables=Variables)
def _nCartY(l):
return ((l+1)*(l+2))/2
def _Vec_CaMolden2CaMolpro(Orbs, ls):
    # transform from Molden cartesian component order to Molpro cartesian component order
    # Orbs: matrix with one row per cartesian basis function (shells in the
    # order given by ls); ls: angular momentum per shell.  Builds a row
    # permutation and applies it.  Only f shells (l == 3) differ between
    # the two orderings; all other l pass through unpermuted.
    I = []
    iOffset = 0
    for l in ls:
        I0 = list(range(_nCartY(l)))
        if l == 3:
            # in IrImportTrafo.cpp:
            # double c3 = pOrb[4]; double c4 = pOrb[5]; double c5 = pOrb[3]; double c6 = pOrb[8]; double c7 = pOrb[6]; double c8 = pOrb[7];
            # pOrb[3] = c3; pOrb[4] = c4; pOrb[5] = c5; pOrb[6] = c6; pOrb[7] = c7; pOrb[8] = c8;
            # all others have equal cartesian component order.
            I0[3] = 4
            I0[4] = 5
            I0[5] = 3
            I0[6] = 8
            I0[7] = 6
            I0[8] = 7
        I += [(o + iOffset) for o in I0]
        iOffset += len(I0)
    I = np.array(I)
    return Orbs[I,:]
def _Vec_Ca2Sh(Ca, ls):
    # transformation orbital matrix from Molpro's obscure cartesian format to Molpro's equally obscure spherical format.
    # Input: Matrix nCartAo x nOrb
    # Returns: Matix nShAo x nOrb
    # note: Ported from IrImportTrafo.cpp.
    # ls gives the angular momentum of each shell in basis order; shells
    # up to l = 6 (i functions) are supported.  The sd* constants below
    # are the fixed transformation coefficients copied verbatim from
    # IrImportTrafo.cpp (presumably solid-harmonic normalization factors
    # -- not re-derived here; do not edit by hand).
    assert(len(Ca.shape) == 2)
    # Total cartesian ((l+1)(l+2)/2 per shell) and spherical (2l+1 per
    # shell) dimensions, used to size and validate the matrices.
    nCa = 0
    nSh = 0
    for l in ls:
        nCa += _nCartY(l)
        nSh += 2*l + 1
    if nCa != Ca.shape[0]:
        raise Exception("Expected first dimension of orbital matrix to have nCartAo dimension (which is %i), but it has %i rows" % (nCa, Ca.shape[0]))
    # allocate output matrix.
    Sh = np.zeros((nSh, Ca.shape[1]))
    sd0 = 5.e-01
    sd1 = 1.7320508075688772
    sd2 = 8.660254037844386e-01
    sd3 = 6.1237243569579458e-01
    sd4 = 2.4494897427831779
    sd5 = 1.5
    sd6 = 7.9056941504209488e-01
    sd7 = 2.3717082451262845
    sd8 = 3.872983346207417
    sd9 = 1.9364916731037085
    sda = 3.75e-01
    sdb = 7.5e-01
    sdc = 3.
    sdd = 1.1180339887498947
    sde = 6.7082039324993676
    sdf = 3.1622776601683791
    sd10 = 7.3950997288745202e-01
    sd11 = 4.4370598373247123
    sd12 = 5.5901699437494734e-01
    sd13 = 3.3541019662496838
    sd14 = 2.9580398915498081
    sd15 = 2.0916500663351889
    sd16 = 6.2749501990055672
    sd17 = 4.8412291827592718e-01
    sd18 = 9.6824583655185437e-01
    sd19 = 5.809475019311126
    sd1a = 2.5617376914898995
    sd1b = 5.1234753829797981
    sd1c = 5.2291251658379723e-01
    sd1d = 1.0458250331675947
    sd1e = 4.1833001326703778
    sd1f = 1.5687375497513918
    sd20 = 1.2549900398011134e+01
    sd21 = 8.8741196746494246
    sd22 = 2.2185299186623562
    sd23 = 1.3311179511974137e+01
    sd24 = 3.5078038001005702
    sd25 = 7.0156076002011396
    sd26 = 7.0156076002011403e-01
    sd27 = 1.8750000000000002
    sd28 = 3.7500000000000004
    sd29 = 5.
    sd2a = 1.0246950765959596e+01
    sd2b = 6.7169328938139616e-01
    sd2c = 1.0075399340720942e+01
    sd2d = 9.0571104663683977e-01
    sd2e = 1.8114220932736795
    sd2f = 1.4491376746189438e+01
    sd30 = 2.3268138086232857
    sd31 = 2.3268138086232856e+01
    sd32 = 1.1634069043116428e+01
    sd33 = 4.9607837082461076e-01
    sd34 = 2.4803918541230536
    sd35 = 4.9607837082461073
    sd36 = 2.9764702249476645e+01
    sd37 = 4.5285552331841988e-01
    sd38 = 7.245688373094719
    sd39 = 4.0301597362883772
    sd3a = 1.3433865787627923e+01
    sd3b = 2.7171331399105201
    sd3c = 5.434266279821041
    sd3d = 8.1513994197315611
    sd3e = 2.1737065119284161e+01
    sd3f = 1.984313483298443
    sd40 = 1.9843134832984429e+01
    sd41 = 3.125e-01
    sd42 = 9.375e-01
    sd43 = 5.625
    sd44 = 1.125e+01
    sd45 = 7.4999999999999991
    sd46 = 2.8641098093474002
    sd47 = 5.7282196186948005
    sd48 = 1.1456439237389599e+01
    sd49 = 4.5825756949558407
    # Walk shell by shell; iCa/iSh track the current cartesian/spherical
    # row offsets, advanced by _nCartY(l) and 2l+1 per shell.
    iSh = 0
    iCa = 0
    for l in ls:
        if l == 0:
            # s: 1 -> 1 (identity)
            Sh[iSh,:] = 1.0*Ca[iCa,:]
            iCa += 1
            iSh += 1
        elif l == 1:
            # p: 3 -> 3 (identity)
            Sh[iSh+0,:] = (1.0*Ca[iCa+0,:])
            Sh[iSh+1,:] = (1.0*Ca[iCa+1,:])
            Sh[iSh+2,:] = (1.0*Ca[iCa+2,:])
            iCa += 3
            iSh += 3
        elif l == 2:
            # d: 6 cartesians -> 5 sphericals
            Sh[iSh+0,:] = -(0.6666666666666666*Ca[iCa+0,:])*sd0 - (0.6666666666666666*Ca[iCa+1,:])*sd0 + (0.6666666666666666*Ca[iCa+2,:])
            Sh[iSh+1,:] = (0.5773502691896258*Ca[iCa+3,:])*sd1
            Sh[iSh+2,:] = (0.5773502691896258*Ca[iCa+4,:])*sd1
            Sh[iSh+3,:] = (0.6666666666666666*Ca[iCa+0,:])*sd2 - (0.6666666666666666*Ca[iCa+1,:])*sd2
            Sh[iSh+4,:] = (0.5773502691896258*Ca[iCa+5,:])*sd1
            iCa += 6
            iSh += 5
        elif l == 3:
            # f: 10 -> 7
            Sh[iSh+0,:] = -(0.29814239699997197*Ca[iCa+5,:])*sd3 + (0.29814239699997197*Ca[iCa+7,:])*sd4 - (0.4*Ca[iCa+0,:])*sd3
            Sh[iSh+1,:] = -(0.29814239699997197*Ca[iCa+3,:])*sd3 + (0.29814239699997197*Ca[iCa+8,:])*sd4 - (0.4*Ca[iCa+1,:])*sd3
            Sh[iSh+2,:] = -(0.29814239699997197*Ca[iCa+4,:])*sd5 - (0.29814239699997197*Ca[iCa+6,:])*sd5 + (0.4*Ca[iCa+2,:])
            Sh[iSh+3,:] = -(0.29814239699997197*Ca[iCa+5,:])*sd7 + (0.4*Ca[iCa+0,:])*sd6
            Sh[iSh+4,:] = (0.2581988897471611*Ca[iCa+9,:])*sd8
            Sh[iSh+5,:] = (0.29814239699997197*Ca[iCa+3,:])*sd7 - (0.4*Ca[iCa+1,:])*sd6
            Sh[iSh+6,:] = (0.29814239699997197*Ca[iCa+4,:])*sd9 - (0.29814239699997197*Ca[iCa+6,:])*sd9
            iCa += 10
            iSh += 7
        elif l == 4:
            # g: 15 -> 9
            Sh[iSh+0,:] = -(0.1301200097264711*Ca[iCa+10,:])*sdc - (0.1301200097264711*Ca[iCa+11,:])*sdc + (0.1301200097264711*Ca[iCa+9,:])*sdb + (0.22857142857142856*Ca[iCa+0,:])*sda + (0.22857142857142856*Ca[iCa+1,:])*sda + (0.22857142857142856*Ca[iCa+2,:])
            Sh[iSh+1,:] = (0.1126872339638022*Ca[iCa+14,:])*sde - (0.15118578920369088*Ca[iCa+3,:])*sdd - (0.15118578920369088*Ca[iCa+5,:])*sdd
            Sh[iSh+2,:] = -(0.1126872339638022*Ca[iCa+13,:])*sd7 - (0.15118578920369088*Ca[iCa+4,:])*sd7 + (0.15118578920369088*Ca[iCa+7,:])*sdf
            Sh[iSh+3,:] = -(0.1301200097264711*Ca[iCa+9,:])*sd11 + (0.22857142857142856*Ca[iCa+0,:])*sd10 + (0.22857142857142856*Ca[iCa+1,:])*sd10
            Sh[iSh+4,:] = -(0.1126872339638022*Ca[iCa+12,:])*sd7 - (0.15118578920369088*Ca[iCa+6,:])*sd7 + (0.15118578920369088*Ca[iCa+8,:])*sdf
            Sh[iSh+5,:] = (0.1301200097264711*Ca[iCa+10,:])*sd13 - (0.1301200097264711*Ca[iCa+11,:])*sd13 - (0.22857142857142856*Ca[iCa+0,:])*sd12 + (0.22857142857142856*Ca[iCa+1,:])*sd12
            Sh[iSh+6,:] = (0.15118578920369088*Ca[iCa+3,:])*sd14 - (0.15118578920369088*Ca[iCa+5,:])*sd14
            Sh[iSh+7,:] = -(0.1126872339638022*Ca[iCa+13,:])*sd16 + (0.15118578920369088*Ca[iCa+4,:])*sd15
            Sh[iSh+8,:] = (0.1126872339638022*Ca[iCa+12,:])*sd16 - (0.15118578920369088*Ca[iCa+6,:])*sd15
            iCa += 15
            iSh += 9
        elif l == 5:
            # h: 21 -> 11
            Sh[iSh+0,:] = -(0.04337333657549037*Ca[iCa+12,:])*sd19 + (0.05819143739626463*Ca[iCa+3,:])*sd18 - (0.05819143739626463*Ca[iCa+5,:])*sd19 + (0.0761904761904762*Ca[iCa+10,:])*sd17 + (0.0761904761904762*Ca[iCa+14,:])*sd8 + (0.12698412698412698*Ca[iCa+0,:])*sd17
            Sh[iSh+1,:] = -(0.04337333657549037*Ca[iCa+8,:])*sd19 - (0.05819143739626463*Ca[iCa+17,:])*sd19 + (0.05819143739626463*Ca[iCa+6,:])*sd18 + (0.0761904761904762*Ca[iCa+19,:])*sd8 + (0.0761904761904762*Ca[iCa+1,:])*sd17 + (0.12698412698412698*Ca[iCa+15,:])*sd17
            Sh[iSh+2,:] = -(0.05819143739626463*Ca[iCa+18,:])*sd1b + (0.05819143739626463*Ca[iCa+9,:])*sd1b + (0.0761904761904762*Ca[iCa+16,:])*sd1a - (0.0761904761904762*Ca[iCa+2,:])*sd1a
            Sh[iSh+3,:] = -(0.04337333657549037*Ca[iCa+12,:])*sd20 + (0.05819143739626463*Ca[iCa+3,:])*sd1d + (0.05819143739626463*Ca[iCa+5,:])*sd1e + (0.0761904761904762*Ca[iCa+10,:])*sd1f - (0.12698412698412698*Ca[iCa+0,:])*sd1c
            Sh[iSh+4,:] = -(0.05039526306789696*Ca[iCa+11,:])*sd21 + (0.05039526306789696*Ca[iCa+4,:])*sd21
            Sh[iSh+5,:] = (0.04337333657549037*Ca[iCa+8,:])*sd20 - (0.05819143739626463*Ca[iCa+17,:])*sd1e - (0.05819143739626463*Ca[iCa+6,:])*sd1d - (0.0761904761904762*Ca[iCa+1,:])*sd1f + (0.12698412698412698*Ca[iCa+15,:])*sd1c
            Sh[iSh+6,:] = -(0.04337333657549037*Ca[iCa+7,:])*sd23 + (0.0761904761904762*Ca[iCa+16,:])*sd22 + (0.0761904761904762*Ca[iCa+2,:])*sd22
            Sh[iSh+7,:] = -(0.05819143739626463*Ca[iCa+6,:])*sd25 + (0.0761904761904762*Ca[iCa+1,:])*sd24 + (0.12698412698412698*Ca[iCa+15,:])*sd26
            Sh[iSh+8,:] = (0.04337333657549037*Ca[iCa+7,:])*sd28 - (0.05819143739626463*Ca[iCa+18,:])*sd29 - (0.05819143739626463*Ca[iCa+9,:])*sd29 + (0.0761904761904762*Ca[iCa+16,:])*sd27 + (0.0761904761904762*Ca[iCa+2,:])*sd27 + (0.12698412698412698*Ca[iCa+20,:])
            Sh[iSh+9,:] = -(0.05819143739626463*Ca[iCa+3,:])*sd25 + (0.0761904761904762*Ca[iCa+10,:])*sd24 + (0.12698412698412698*Ca[iCa+0,:])*sd26
            Sh[iSh+10,:] = -(0.05039526306789696*Ca[iCa+11,:])*sd1b + (0.05039526306789696*Ca[iCa+13,:])*sd2a - (0.05039526306789696*Ca[iCa+4,:])*sd1b
            iCa += 21
            iSh += 11
        elif l == 6:
            # i: 28 -> 13
            Sh[iSh+0,:] = (0.026526119002773005*Ca[iCa+10,:])*sd2c - (0.026526119002773005*Ca[iCa+3,:])*sd2c + (0.06926406926406926*Ca[iCa+0,:])*sd2b - (0.06926406926406926*Ca[iCa+21,:])*sd2b
            Sh[iSh+1,:] = -(0.017545378532260507*Ca[iCa+17,:])*sd2f - (0.017545378532260507*Ca[iCa+8,:])*sd2f + (0.022972292920210562*Ca[iCa+19,:])*sd2f + (0.02353959545345999*Ca[iCa+6,:])*sd2e + (0.03828715486701761*Ca[iCa+15,:])*sd2d + (0.03828715486701761*Ca[iCa+1,:])*sd2d
            Sh[iSh+2,:] = -(0.017545378532260507*Ca[iCa+7,:])*sd31 + (0.022972292920210562*Ca[iCa+16,:])*sd32 + (0.03828715486701761*Ca[iCa+2,:])*sd30
            Sh[iSh+3,:] = -(0.015100657524077793*Ca[iCa+12,:])*sd36 + (0.026526119002773005*Ca[iCa+10,:])*sd34 + (0.026526119002773005*Ca[iCa+23,:])*sd35 + (0.026526119002773005*Ca[iCa+3,:])*sd34 + (0.026526119002773005*Ca[iCa+5,:])*sd35 - (0.06926406926406926*Ca[iCa+0,:])*sd33 - (0.06926406926406926*Ca[iCa+21,:])*sd33
            Sh[iSh+4,:] = -(0.017545378532260507*Ca[iCa+11,:])*sd31 + (0.022972292920210562*Ca[iCa+4,:])*sd32 + (0.03828715486701761*Ca[iCa+22,:])*sd30
            Sh[iSh+5,:] = -(0.026526119002773005*Ca[iCa+10,:])*sd37 + (0.026526119002773005*Ca[iCa+14,:])*sd38 + (0.026526119002773005*Ca[iCa+23,:])*sd38 - (0.026526119002773005*Ca[iCa+25,:])*sd38 + (0.026526119002773005*Ca[iCa+3,:])*sd37 - (0.026526119002773005*Ca[iCa+5,:])*sd38 + (0.06926406926406926*Ca[iCa+0,:])*sd37 - (0.06926406926406926*Ca[iCa+21,:])*sd37
            Sh[iSh+6,:] = -(0.02353959545345999*Ca[iCa+6,:])*sd3a + (0.03828715486701761*Ca[iCa+15,:])*sd39 + (0.03828715486701761*Ca[iCa+1,:])*sd39
            Sh[iSh+7,:] = -(0.017545378532260507*Ca[iCa+18,:])*sd3e + (0.017545378532260507*Ca[iCa+7,:])*sd3c + (0.022972292920210562*Ca[iCa+16,:])*sd3d + (0.02353959545345999*Ca[iCa+9,:])*sd38 - (0.03828715486701761*Ca[iCa+2,:])*sd3b
            Sh[iSh+8,:] = -(0.017545378532260507*Ca[iCa+17,:])*sd40 + (0.017545378532260507*Ca[iCa+8,:])*sd40 + (0.03828715486701761*Ca[iCa+15,:])*sd3f - (0.03828715486701761*Ca[iCa+1,:])*sd3f
            Sh[iSh+9,:] = (0.015100657524077793*Ca[iCa+12,:])*sd44 - (0.026526119002773005*Ca[iCa+10,:])*sd42 - (0.026526119002773005*Ca[iCa+14,:])*sd45 + (0.026526119002773005*Ca[iCa+23,:])*sd43 - (0.026526119002773005*Ca[iCa+25,:])*sd45 - (0.026526119002773005*Ca[iCa+3,:])*sd42 + (0.026526119002773005*Ca[iCa+5,:])*sd43 - (0.06926406926406926*Ca[iCa+0,:])*sd41 - (0.06926406926406926*Ca[iCa+21,:])*sd41 + (0.06926406926406926*Ca[iCa+27,:])
            Sh[iSh+10,:] = -(0.017545378532260507*Ca[iCa+11,:])*sd3c + (0.017545378532260507*Ca[iCa+13,:])*sd3e - (0.022972292920210562*Ca[iCa+4,:])*sd3d - (0.02353959545345999*Ca[iCa+24,:])*sd38 + (0.03828715486701761*Ca[iCa+22,:])*sd3b
            Sh[iSh+11,:] = (0.017545378532260507*Ca[iCa+11,:])*sd47 - (0.017545378532260507*Ca[iCa+13,:])*sd48 + (0.022972292920210562*Ca[iCa+4,:])*sd46 - (0.02353959545345999*Ca[iCa+24,:])*sd48 + (0.03828715486701761*Ca[iCa+22,:])*sd46 + (0.03828715486701761*Ca[iCa+26,:])*sd49
            Sh[iSh+12,:] = -(0.017545378532260507*Ca[iCa+18,:])*sd48 + (0.017545378532260507*Ca[iCa+7,:])*sd47 + (0.022972292920210562*Ca[iCa+16,:])*sd46 - (0.02353959545345999*Ca[iCa+9,:])*sd48 + (0.03828715486701761*Ca[iCa+20,:])*sd49 + (0.03828715486701761*Ca[iCa+2,:])*sd46
            iCa += 28
            iSh += 13
    return Sh
def _main():
    # read a file, including orbitals and basis sets, and test
    # if the output orbitals are orthogonal.
    # Smoke-test entry point: requires 'benzene.xml' in the working
    # directory and a functional wmme installation.
    def rmsd(a):
        # root-mean-square of all matrix elements
        return np.mean(a.flatten()**2)**.5
    FileName = "benzene.xml"
    #FileName = "/home/cgk/dev/xml-molpro/test1.xml"
    XmlData = ReadMolproXml(FileName,SkipVirtual=True)
    print("Atoms from file [a.u.]:\n{}".format(XmlData.Atoms.MakeXyz(NumFmt="%20.15f",Scale=1/wmme.ToAng)))
    OrbBasis = XmlData.OrbBasis
    #BasisLibs = ["def2-nzvpp-jkfit.libmol"]
    BasisLibs = []
    ic = wmme.FIntegralContext(XmlData.Atoms, XmlData.OrbBasis, FitBasis="univ-JKFIT", BasisLibs=BasisLibs)
    from wmme import mdot
    C = XmlData.Orbs
    S = ic.MakeOverlap()
    print("Orbital matrix shape: {} (loaded from '{}')".format(C.shape, FileName))
    print("Overlap matrix shape: {} (made via WMME)".format(S.shape))
    np.set_printoptions(precision=4,linewidth=10000,edgeitems=3,suppress=False)
    # C^T S C should be the identity if the imported orbitals are orthonormal.
    SMo = mdot(C.T, S, C)
    print("Read orbitals:")
    for OrbInfo in XmlData.Orbitals:
        print("{:30s}".format(OrbInfo.Desc))
    print("MO deviation from orthogonality: {:.2e}".format(rmsd(SMo - np.eye(SMo.shape[0]))))
    pass
if __name__ == "__main__":
_main()
# coding: utf-8
|
gkc1000/pyscf
|
pyscf/tools/Molpro2Pyscf/MolproXml.py
|
Python
|
apache-2.0
| 23,499
|
[
"Molpro",
"PySCF"
] |
f9d42d9c8bbcce9c4a1c52dae7e241eaacb3fb6ca3a79a2c6ea638eda7ddcc2a
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=True) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=True) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib.parse
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = [
"knownfiles", "inited", "MimeTypes",
"guess_type", "guess_all_extensions", "guess_extension",
"add_type", "init", "read_mime_types",
"suffix_map", "encodings_map", "types_map", "common_types"
]
# Candidate mime.types files probed in order by init() on non-Windows systems;
# missing files are silently skipped.
knownfiles = [
    "/etc/mime.types",
    "/etc/httpd/mime.types",                    # Mac OS X
    "/etc/httpd/conf/mime.types",               # Apache
    "/etc/apache/mime.types",                   # Apache 1
    "/etc/apache2/mime.types",                  # Apache 2
    "/usr/local/etc/httpd/conf/mime.types",
    "/usr/local/lib/netscape/mime.types",
    "/usr/local/etc/httpd/conf/mime.types",     # Apache 1.2
    "/usr/local/etc/mime.types",                # Apache 1.3
    ]

inited = False  # set to True once init() has run
_db = None      # shared MimeTypes instance, built lazily by init()
class MimeTypes:
    """MIME-types datastore.
    This datastore can handle information from mime.types-style files
    and supports basic determination of MIME type from a filename or
    URL, and can guess a reasonable extension given a MIME type.
    """

    def __init__(self, filenames=(), strict=True):
        if not inited:
            init()
        # Per-instance copies of the default tables, so an instance can be
        # customized without mutating module-level state.
        self.encodings_map = _encodings_map_default.copy()
        self.suffix_map = _suffix_map_default.copy()
        self.types_map = ({}, {}) # dict for (non-strict, strict)
        self.types_map_inv = ({}, {})  # inverse maps: type -> [extensions]
        for (ext, type) in _types_map_default.items():
            self.add_type(type, ext, True)
        for (ext, type) in _common_types_default.items():
            self.add_type(type, ext, False)
        for name in filenames:
            self.read(name, strict)

    def add_type(self, type, ext, strict=True):
        """Add a mapping between a type and an extension.
        When the extension is already known, the new
        type will replace the old one. When the type
        is already known the extension will be added
        to the list of known extensions.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        self.types_map[strict][ext] = type
        exts = self.types_map_inv[strict].setdefault(type, [])
        if ext not in exts:
            exts.append(ext)

    def guess_type(self, url, strict=True):
        """Guess the type of a file which is either a URL or a path-like object.
        Return value is a tuple (type, encoding) where type is None if
        the type can't be guessed (no or unknown suffix) or a string
        of the form type/subtype, usable for a MIME Content-type
        header; and encoding is None for no encoding or the name of
        the program used to encode (e.g. compress or gzip). The
        mappings are table driven. Encoding suffixes are case
        sensitive; type suffixes are first tried case sensitive, then
        case insensitive.
        The suffixes .tgz, .taz and .tz (case sensitive!) are all
        mapped to '.tar.gz'. (This is table-driven too, using the
        dictionary suffix_map.)
        Optional `strict' argument when False adds a bunch of commonly found,
        but non-standard types.
        """
        url = os.fspath(url)
        scheme, url = urllib.parse._splittype(url)
        if scheme == 'data':
            # syntax of data URLs:
            # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
            # mediatype := [ type "/" subtype ] *( ";" parameter )
            # data := *urlchar
            # parameter := attribute "=" value
            # type/subtype defaults to "text/plain"
            comma = url.find(',')
            if comma < 0:
                # bad data URL
                return None, None
            semi = url.find(';', 0, comma)
            if semi >= 0:
                type = url[:semi]
            else:
                type = url[:comma]
            if '=' in type or '/' not in type:
                type = 'text/plain'
            return type, None           # never compressed, so encoding is None
        # Peel off composite suffixes (e.g. '.tgz' -> '.tar.gz') before
        # looking for an encoding suffix and finally the type suffix.
        base, ext = posixpath.splitext(url)
        while ext in self.suffix_map:
            base, ext = posixpath.splitext(base + self.suffix_map[ext])
        if ext in self.encodings_map:
            encoding = self.encodings_map[ext]
            base, ext = posixpath.splitext(base)
        else:
            encoding = None
        types_map = self.types_map[True]
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        elif strict:
            return None, encoding
        # Non-strict: fall back to the table of common but non-standard types.
        types_map = self.types_map[False]
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        else:
            return None, encoding

    def guess_all_extensions(self, type, strict=True):
        """Guess the extensions for a file based on its MIME type.
        Return value is a list of strings giving the possible filename
        extensions, including the leading dot ('.'). The extension is not
        guaranteed to have been associated with any particular data stream,
        but would be mapped to the MIME type `type' by guess_type().
        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        type = type.lower()
        extensions = self.types_map_inv[True].get(type, [])
        if not strict:
            for ext in self.types_map_inv[False].get(type, []):
                if ext not in extensions:
                    extensions.append(ext)
        return extensions

    def guess_extension(self, type, strict=True):
        """Guess the extension for a file based on its MIME type.
        Return value is a string giving a filename extension,
        including the leading dot ('.'). The extension is not
        guaranteed to have been associated with any particular data
        stream, but would be mapped to the MIME type `type' by
        guess_type(). If no extension can be guessed for `type', None
        is returned.
        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        extensions = self.guess_all_extensions(type, strict)
        if not extensions:
            return None
        return extensions[0]

    def read(self, filename, strict=True):
        """
        Read a single mime.types-format file, specified by pathname.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        with open(filename, encoding='utf-8') as fp:
            self.readfp(fp, strict)

    def readfp(self, fp, strict=True):
        """
        Read a single mime.types-format file.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        while 1:
            line = fp.readline()
            if not line:
                break
            words = line.split()
            # Drop everything from the first '#'-starting word: comments.
            for i in range(len(words)):
                if words[i][0] == '#':
                    del words[i:]
                    break
            if not words:
                continue
            # Format: one type followed by its extensions (without dots).
            type, suffixes = words[0], words[1:]
            for suff in suffixes:
                self.add_type(type, '.' + suff, strict)

    def read_windows_registry(self, strict=True):
        """
        Load the MIME types database from Windows registry.
        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """

        # Windows only
        if not _winreg:
            return

        def enum_types(mimedb):
            # Yield subkey names, skipping entries with embedded NULs.
            i = 0
            while True:
                try:
                    ctype = _winreg.EnumKey(mimedb, i)
                except OSError:
                    break
                else:
                    if '\0' not in ctype:
                        yield ctype
                i += 1

        with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr:
            for subkeyname in enum_types(hkcr):
                try:
                    with _winreg.OpenKey(hkcr, subkeyname) as subkey:
                        # Only check file extensions
                        if not subkeyname.startswith("."):
                            continue
                        # raises OSError if no 'Content Type' value
                        mimetype, datatype = _winreg.QueryValueEx(
                            subkey, 'Content Type')
                        if datatype != _winreg.REG_SZ:
                            continue
                        self.add_type(mimetype, subkeyname, strict)
                except OSError:
                    continue
def guess_type(url, strict=True):
    """Guess the (type, encoding) of a file from its URL or path.

    Module-level convenience wrapper: builds the shared MimeTypes database
    lazily on first use and delegates to MimeTypes.guess_type().  type is a
    'maintype/subtype' string, or None when the suffix is unknown; encoding
    is None or the name of the program used to encode the file (e.g.
    compress or gzip).  Encoding suffixes are case sensitive; type suffixes
    are tried case sensitively first, then case insensitively.  Composite
    suffixes such as .tgz/.taz/.tz map to '.tar.gz' via suffix_map.

    With strict=False, commonly found but non-standard types are consulted
    as well.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
    """Return every known filename extension for the given MIME type.

    Module-level convenience wrapper over MimeTypes.guess_all_extensions():
    the shared database is built lazily on first use.  Each returned string
    includes the leading dot; an extension is not guaranteed to have been
    associated with any particular data stream, only to map back to `type'
    via guess_type().  With strict=False, commonly found but non-standard
    types are consulted as well.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
    """Return one filename extension (with leading dot) for a MIME type.

    Module-level convenience wrapper over MimeTypes.guess_extension():
    the shared database is built lazily on first use.  Returns None when
    no extension can be guessed for `type'.  With strict=False, commonly
    found but non-standard types are consulted as well.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
    """Register a type <-> extension mapping in the shared database.

    Module-level convenience wrapper over MimeTypes.add_type(): the shared
    database is built lazily on first use.  An already-known extension is
    remapped to the new type; an already-known type gains the extension.
    With strict=True the mapping goes into the standard table, otherwise
    into the non-standard one.
    """
    db = _db
    if db is None:
        init()
        db = _db
    return db.add_type(type, ext, strict)
def init(files=None):
    """(Re)initialize the module-level MIME tables, optionally from extra files."""
    global suffix_map, types_map, encodings_map, common_types
    global inited, _db
    inited = True  # so that MimeTypes.__init__() doesn't call us again

    if files is None or _db is None:
        db = MimeTypes()
        if _winreg:
            # On Windows, also merge content types recorded in the registry.
            db.read_windows_registry()
        if files is None:
            files = knownfiles
        else:
            files = knownfiles + list(files)
    else:
        # Repeated init() with explicit files extends the existing database.
        db = _db
    for file in files:
        if os.path.isfile(file):
            db.read(file)
    # Re-export the instance's tables through the module-level names.
    encodings_map = db.encodings_map
    suffix_map = db.suffix_map
    types_map = db.types_map[True]
    common_types = db.types_map[False]
    # Make the DB a global variable now that it is fully initialized
    _db = db
def read_mime_types(file):
    """Read a single mime.types-style file.

    Returns the strict types map parsed from `file`, or None when the
    file cannot be opened.
    """
    try:
        fp = open(file)
    except OSError:
        return None
    with fp:
        db = MimeTypes()
        db.readfp(fp, True)
    return db.types_map[True]
def _default_mime_types():
    """Populate the module-level default mapping tables.

    Sets suffix_map, encodings_map, types_map and common_types (plus the
    `_default` snapshots that MimeTypes instances copy from).
    """
    global suffix_map, _suffix_map_default
    global encodings_map, _encodings_map_default
    global types_map, _types_map_default
    global common_types, _common_types_default

    # Compound suffixes that expand into a multi-part extension.
    suffix_map = _suffix_map_default = {
        '.svgz': '.svg.gz',
        '.tgz': '.tar.gz',
        '.taz': '.tar.gz',
        '.tz': '.tar.gz',
        '.tbz2': '.tar.bz2',
        '.txz': '.tar.xz',
    }

    encodings_map = _encodings_map_default = {
        '.gz': 'gzip',
        '.Z': 'compress',
        '.bz2': 'bzip2',
        '.xz': 'xz',
    }

    # Before adding new types, make sure they are either registered with IANA,
    # at http://www.iana.org/assignments/media-types
    # or extensions, i.e. using the x- prefix
    # If you add to these, please keep them sorted by mime type.
    # Make sure the entry with the preferred file extension for a particular
    # mime type appears before any others of the same mimetype.
    types_map = _types_map_default = {
        '.js': 'application/javascript',
        '.mjs': 'application/javascript',
        '.json': 'application/json',
        '.webmanifest': 'application/manifest+json',
        '.doc': 'application/msword',
        '.dot': 'application/msword',
        '.wiz': 'application/msword',
        '.bin': 'application/octet-stream',
        '.a': 'application/octet-stream',
        '.dll': 'application/octet-stream',
        '.exe': 'application/octet-stream',
        '.o': 'application/octet-stream',
        '.obj': 'application/octet-stream',
        '.so': 'application/octet-stream',
        '.oda': 'application/oda',
        '.pdf': 'application/pdf',
        '.p7c': 'application/pkcs7-mime',
        '.ps': 'application/postscript',
        '.ai': 'application/postscript',
        '.eps': 'application/postscript',
        '.m3u': 'application/vnd.apple.mpegurl',
        '.m3u8': 'application/vnd.apple.mpegurl',
        '.xls': 'application/vnd.ms-excel',
        '.xlb': 'application/vnd.ms-excel',
        '.ppt': 'application/vnd.ms-powerpoint',
        '.pot': 'application/vnd.ms-powerpoint',
        '.ppa': 'application/vnd.ms-powerpoint',
        '.pps': 'application/vnd.ms-powerpoint',
        '.pwz': 'application/vnd.ms-powerpoint',
        '.wasm': 'application/wasm',
        '.bcpio': 'application/x-bcpio',
        '.cpio': 'application/x-cpio',
        '.csh': 'application/x-csh',
        '.dvi': 'application/x-dvi',
        '.gtar': 'application/x-gtar',
        '.hdf': 'application/x-hdf',
        '.latex': 'application/x-latex',
        '.mif': 'application/x-mif',
        '.cdf': 'application/x-netcdf',
        '.nc': 'application/x-netcdf',
        '.p12': 'application/x-pkcs12',
        '.pfx': 'application/x-pkcs12',
        '.ram': 'application/x-pn-realaudio',
        '.pyc': 'application/x-python-code',
        '.pyo': 'application/x-python-code',
        '.sh': 'application/x-sh',
        '.shar': 'application/x-shar',
        '.swf': 'application/x-shockwave-flash',
        '.sv4cpio': 'application/x-sv4cpio',
        '.sv4crc': 'application/x-sv4crc',
        '.tar': 'application/x-tar',
        '.tcl': 'application/x-tcl',
        '.tex': 'application/x-tex',
        '.texi': 'application/x-texinfo',
        '.texinfo': 'application/x-texinfo',
        '.roff': 'application/x-troff',
        '.t': 'application/x-troff',
        '.tr': 'application/x-troff',
        '.man': 'application/x-troff-man',
        '.me': 'application/x-troff-me',
        '.ms': 'application/x-troff-ms',
        '.ustar': 'application/x-ustar',
        '.src': 'application/x-wais-source',
        '.xsl': 'application/xml',
        '.rdf': 'application/xml',
        '.wsdl': 'application/xml',
        '.xpdl': 'application/xml',
        '.zip': 'application/zip',
        '.au': 'audio/basic',
        '.snd': 'audio/basic',
        '.mp3': 'audio/mpeg',
        '.mp2': 'audio/mpeg',
        '.aif': 'audio/x-aiff',
        '.aifc': 'audio/x-aiff',
        '.aiff': 'audio/x-aiff',
        '.ra': 'audio/x-pn-realaudio',
        '.wav': 'audio/x-wav',
        # BUGFIX: a duplicate '.bmp': 'image/bmp' entry used to appear here.
        # In a dict literal the later '.bmp': 'image/x-ms-bmp' key silently
        # wins, so that entry was dead code; it has been removed without
        # changing the effective mapping.
        '.gif': 'image/gif',
        '.ief': 'image/ief',
        '.jpg': 'image/jpeg',
        '.jpe': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.png': 'image/png',
        '.svg': 'image/svg+xml',
        '.tiff': 'image/tiff',
        '.tif': 'image/tiff',
        '.ico': 'image/vnd.microsoft.icon',
        '.ras': 'image/x-cmu-raster',
        '.bmp': 'image/x-ms-bmp',
        '.pnm': 'image/x-portable-anymap',
        '.pbm': 'image/x-portable-bitmap',
        '.pgm': 'image/x-portable-graymap',
        '.ppm': 'image/x-portable-pixmap',
        '.rgb': 'image/x-rgb',
        '.xbm': 'image/x-xbitmap',
        '.xpm': 'image/x-xpixmap',
        '.xwd': 'image/x-xwindowdump',
        '.eml': 'message/rfc822',
        '.mht': 'message/rfc822',
        '.mhtml': 'message/rfc822',
        '.nws': 'message/rfc822',
        '.css': 'text/css',
        '.csv': 'text/csv',
        '.html': 'text/html',
        '.htm': 'text/html',
        '.txt': 'text/plain',
        '.bat': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        '.ksh': 'text/plain',
        '.pl': 'text/plain',
        '.rtx': 'text/richtext',
        '.tsv': 'text/tab-separated-values',
        '.py': 'text/x-python',
        '.etx': 'text/x-setext',
        '.sgm': 'text/x-sgml',
        '.sgml': 'text/x-sgml',
        '.vcf': 'text/x-vcard',
        '.xml': 'text/xml',
        '.mp4': 'video/mp4',
        '.mpeg': 'video/mpeg',
        '.m1v': 'video/mpeg',
        '.mpa': 'video/mpeg',
        '.mpe': 'video/mpeg',
        '.mpg': 'video/mpeg',
        '.mov': 'video/quicktime',
        '.qt': 'video/quicktime',
        '.webm': 'video/webm',
        '.avi': 'video/x-msvideo',
        '.movie': 'video/x-sgi-movie',
    }

    # These are non-standard types, commonly found in the wild. They will
    # only match if strict=0 flag is given to the API methods.
    # Please sort these too
    common_types = _common_types_default = {
        '.rtf': 'application/rtf',
        '.midi': 'audio/midi',
        '.mid': 'audio/midi',
        '.jpg': 'image/jpg',
        '.pict': 'image/pict',
        '.pct': 'image/pict',
        '.pic': 'image/pict',
        '.xul': 'text/xul',
    }


_default_mime_types()
def _main():
    """Command-line driver: guess MIME types (or extensions) for each argument."""
    import getopt

    USAGE = """\
Usage: mimetypes.py [options] type

Options:
    --help / -h       -- print this message and exit
    --lenient / -l    -- additionally search of some common, but non-standard
                         types.
    --extension / -e  -- guess extension instead of type

More than one type argument may be given.
"""

    def usage(code, msg=''):
        # Print help text (and an optional error) then terminate.
        print(USAGE)
        if msg:
            print(msg)
        sys.exit(code)

    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'hle', ['help', 'lenient', 'extension'])
    except getopt.error as msg:
        usage(1, msg)

    strict = 1
    extension = 0
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-l', '--lenient'):
            strict = 0
        elif opt in ('-e', '--extension'):
            extension = 1

    for gtype in args:
        if extension:
            guess = guess_extension(gtype, strict)
            if not guess:
                print("I don't know anything about type", gtype)
            else:
                print(guess)
        else:
            guess, encoding = guess_type(gtype, strict)
            if not guess:
                print("I don't know anything about type", gtype)
            else:
                print('type:', guess, 'encoding:', encoding)


if __name__ == '__main__':
    _main()
|
xyuanmu/XX-Net
|
python3.8.2/Lib/mimetypes.py
|
Python
|
bsd-2-clause
| 21,604
|
[
"NetCDF"
] |
1119a0c02b0dd61b3aa0bcf918af877e7d841b3645dad8bdd8214ffc4e847d03
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Trivial type inference for simple functions.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import dis
import inspect
import pprint
import sys
import traceback
import types
from builtins import object
from builtins import zip
from functools import reduce
from apache_beam.typehints import Any
from apache_beam.typehints import typehints
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try: # Python 2
import __builtin__ as builtins
except ImportError: # Python 3
import builtins
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
class TypeInferenceError(ValueError):
  """Raised when type inference cannot determine a result."""
def instance_to_type(o):
  """Return the type hint corresponding to the Python object *o*."""
  obj_type = type(o)
  if o is None:
    return type(None)
  if obj_type not in typehints.DISALLOWED_PRIMITIVE_TYPES:
    # pylint: disable=deprecated-types-field
    if sys.version_info[0] == 2 and obj_type == types.InstanceType:
      # Python 2 old-style instance: use its class, not InstanceType.
      return o.__class__
    if obj_type == BoundMethod:
      return types.MethodType
    return obj_type
  if obj_type == tuple:
    # Tuples keep their per-position element types.
    return typehints.Tuple[[instance_to_type(item) for item in o]]
  if obj_type == list:
    return typehints.List[
        typehints.Union[[instance_to_type(item) for item in o]]]
  if obj_type == set:
    return typehints.Set[
        typehints.Union[[instance_to_type(item) for item in o]]]
  if obj_type == dict:
    return typehints.Dict[
        typehints.Union[[instance_to_type(key) for key in o.keys()]],
        typehints.Union[[instance_to_type(value) for value in o.values()]]]
  raise TypeInferenceError('Unknown forbidden type: %s' % obj_type)
def union_list(xs, ys):
  """Elementwise union of two equal-length lists of type constraints."""
  assert len(xs) == len(ys)
  return [union(left, right) for left, right in zip(xs, ys)]
class Const(object):
  """Wraps a concrete constant value together with its inferred type hint."""

  def __init__(self, value):
    self.value = value
    self.type = instance_to_type(value)

  def __eq__(self, other):
    return isinstance(other, Const) and self.value == other.value

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash(self.value)

  def __repr__(self):
    # Truncate very long constants to keep reprs readable.
    return 'Const[%s]' % str(self.value)[:100]

  @staticmethod
  def unwrap(x):
    """Return the wrapped type when *x* is a Const, else *x* unchanged."""
    return x.type if isinstance(x, Const) else x

  @staticmethod
  def unwrap_all(xs):
    return [Const.unwrap(x) for x in xs]
class FrameState(object):
  """Stores the state of the frame at a particular point of execution.

  `vars` mirrors the frame's local variable slots and `stack` the value
  stack; both hold type constraints or Const wrappers, not real values.
  """

  def __init__(self, f, local_vars=None, stack=()):
    """Args:
      f: the Python function whose frame is being modeled.
      local_vars: initial types of the local-variable slots (default: none).
      stack: initial contents of the value stack.
    """
    self.f = f
    self.co = f.__code__
    # BUGFIX: the original called list(local_vars) unconditionally, so the
    # documented default of None raised TypeError.  Treat None as "empty".
    self.vars = list(local_vars) if local_vars is not None else []
    self.stack = list(stack)

  def __eq__(self, other):
    return isinstance(other, FrameState) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # NOTE(review): self.vars/self.stack are lists, so this raises TypeError
    # for any non-trivial state -- looks unused in practice; confirm before
    # relying on hashability.
    return hash(tuple(sorted(self.__dict__.items())))

  def copy(self):
    return FrameState(self.f, self.vars, self.stack)

  def const_type(self, i):
    """Type of the i-th code constant, wrapped as a Const."""
    return Const(self.co.co_consts[i])

  def get_closure(self, i):
    # Cell variables come first, then free variables from the closure.
    num_cellvars = len(self.co.co_cellvars)
    if i < num_cellvars:
      return self.vars[i]
    else:
      return self.f.__closure__[i - num_cellvars].cell_contents

  def closure_type(self, i):
    """Returns a TypeConstraint or Const."""
    val = self.get_closure(i)
    if isinstance(val, typehints.TypeConstraint):
      return val
    else:
      return Const(val)

  def get_global(self, i):
    """Resolve a global (or builtin) name to a Const, else Any."""
    name = self.get_name(i)
    if name in self.f.__globals__:
      return Const(self.f.__globals__[name])
    if name in builtins.__dict__:
      return Const(builtins.__dict__[name])
    return Any

  def get_name(self, i):
    return self.co.co_names[i]

  def __repr__(self):
    return 'Stack: %s Vars: %s' % (self.stack, self.vars)

  def __or__(self, other):
    # Merging with an unreachable (None) state yields a copy of the other.
    if self is None:
      return other.copy()
    elif other is None:
      return self.copy()
    return FrameState(self.f, union_list(self.vars, other.vars),
                      union_list(self.stack, other.stack))

  def __ror__(self, left):
    # Supports `None | state` (defaultdict of None in the inference loop).
    return self | left
def union(a, b):
  """Return the union of two types or Const values."""
  if a == b:
    return a
  if not a:
    return b
  if not b:
    return a
  a = Const.unwrap(a)
  b = Const.unwrap(b)
  # TODO(robertwb): Work this into the Union code in a more generic way.
  if type(a) == type(b):
    # A container of the empty union contributes nothing to the merge.
    if element_type(a) == typehints.Union[()]:
      return b
    if element_type(b) == typehints.Union[()]:
      return a
  return typehints.Union[a, b]
def finalize_hints(type_hint):
  """Replace empty-union key/value hints in Dict constraints with Any."""

  def _fix_empty(tc, unused_arg):
    if isinstance(tc, typehints.DictConstraint):
      empty_union = typehints.Union[()]
      if tc.key_type == empty_union:
        tc.key_type = Any
      if tc.value_type == empty_union:
        tc.value_type = Any

  if isinstance(type_hint, typehints.TypeConstraint):
    type_hint.visit(_fix_empty, None)
def element_type(hint):
  """Return the element type of a composite type hint (Any if unknown)."""
  hint = Const.unwrap(hint)
  if isinstance(hint, typehints.SequenceTypeConstraint):
    return hint.inner_type
  if isinstance(hint, typehints.TupleHint.TupleConstraint):
    return typehints.Union[hint.tuple_types]
  return Any
def key_value_types(kv_type):
  """Return the (key, value) types of a KV (two-element tuple) type hint."""
  # TODO(robertwb): Unions of tuples, etc.
  # TODO(robertwb): Assert?
  if (isinstance(kv_type, typehints.TupleHint.TupleConstraint)
      and len(kv_type.tuple_types) == 2):
    return kv_type.tuple_types
  return Any, Any


# Builtins whose return type is known without inspecting bytecode.
known_return_types = {len: int, hash: int}
class BoundMethod(object):
  """Used to create a bound method when we only know the type of the instance."""

  def __init__(self, func, type):
    """Instantiates a bound method object.

    Args:
      func (types.FunctionType): The method's underlying function
      type (type): The class of the method.
    """
    self.func = func
    self.type = type
def hashable(c):
  """Return True iff *c* can be hashed (usable as a dict key)."""
  try:
    hash(c)
  except TypeError:
    return False
  return True
def infer_return_type(c, input_types, debug=False, depth=5):
  """Analyses a callable to deduce its return type.

  Args:
    c: A Python callable to infer the return type of.
    input_types: A sequence of inputs corresponding to the input types.
    debug: Whether to print verbose debugging information.
    depth: Maximum inspection depth during type inference.

  Returns:
    A TypeConstraint that that the return value of this function will (likely)
    satisfy given the specified inputs.
  """
  try:
    # Fast path: builtins with a known, fixed return type.
    if hashable(c) and c in known_return_types:
      return known_return_types[c]
    elif isinstance(c, types.FunctionType):
      return infer_return_type_func(c, input_types, debug, depth)
    elif isinstance(c, types.MethodType):
      # Bound method: prepend the receiver as the first argument type.
      if c.__self__ is not None:
        input_types = [Const(c.__self__)] + input_types
      return infer_return_type_func(c.__func__, input_types, debug, depth)
    elif isinstance(c, BoundMethod):
      # Synthetic bound method: only the receiver's *type* is known.
      input_types = [c.type] + input_types
      return infer_return_type_func(c.func, input_types, debug, depth)
    elif inspect.isclass(c):
      if c in typehints.DISALLOWED_PRIMITIVE_TYPES:
        # Map raw container classes to their parameterized Any forms.
        return {
            list: typehints.List[Any],
            set: typehints.Set[Any],
            tuple: typehints.Tuple[Any, ...],
            dict: typehints.Dict[Any, Any]
        }[c]
      return c
    else:
      # Unrecognized callable kind: give up gracefully.
      return Any
  except TypeInferenceError:
    if debug:
      traceback.print_exc()
    return Any
  except Exception:
    # Unexpected failure: in debug mode surface it, otherwise fall back
    # to Any so inference never crashes the caller.
    if debug:
      sys.stdout.flush()
      raise
    else:
      return Any
def infer_return_type_func(f, input_types, debug=False, depth=0):
  """Analyses a function to deduce its return type.

  Works by abstractly interpreting f's bytecode: each opcode is emulated
  over FrameState objects holding type constraints instead of values.

  Args:
    f: A Python function object to infer the return type of.
    input_types: A sequence of inputs corresponding to the input types.
    debug: Whether to print verbose debugging information.
    depth: Maximum inspection depth during type inference.

  Returns:
    A TypeConstraint that that the return value of this function will (likely)
    satisfy given the specified inputs.

  Raises:
    TypeInferenceError: if no type can be inferred.
  """
  if debug:
    print()
    print(f, id(f), input_types)
    dis.dis(f)
  from . import opcodes
  # Table of per-opcode emulation functions, keyed by opcode name.
  simple_ops = dict((k.upper(), v) for k, v in opcodes.__dict__.items())

  co = f.__code__
  code = co.co_code
  end = len(code)
  pc = 0
  extended_arg = 0  # Python 2 only.
  free = None

  # Every value seen at YIELD_VALUE / RETURN_VALUE, to be unioned at the end.
  yields = set()
  returns = set()
  # TODO(robertwb): Default args via inspect module.
  # Unbound local slots start as the empty union (bottom of the lattice).
  local_vars = list(input_types) + [typehints.Union[()]] * (
      len(co.co_varnames) - len(input_types))
  state = FrameState(f, local_vars)
  # Merged frame state at each jump target; None means "not reached yet".
  states = collections.defaultdict(lambda: None)
  # Count of backward-jump retakes per pc, to bound the fixpoint iteration.
  jumps = collections.defaultdict(int)

  # In Python 3, use dis library functions to disassemble bytecode and handle
  # EXTENDED_ARGs.
  is_py3 = sys.version_info[0] == 3
  if is_py3:
    ofs_table = {}  # offset -> instruction
    for instruction in dis.get_instructions(f):
      ofs_table[instruction.offset] = instruction

  # Python 2 - 3.5: 1 byte opcode + optional 2 byte arg (1 or 3 bytes).
  # Python 3.6+: 1 byte opcode + 1 byte arg (2 bytes, arg may be ignored).
  if sys.version_info >= (3, 6):
    inst_size = 2
    opt_arg_size = 0
  else:
    inst_size = 1
    opt_arg_size = 2

  last_pc = -1
  while pc < end:  # pylint: disable=too-many-nested-blocks
    start = pc
    if is_py3:
      instruction = ofs_table[pc]
      op = instruction.opcode
    else:
      op = ord(code[pc])
    if debug:
      print('-->' if pc == last_pc else ' ', end=' ')
      print(repr(pc).rjust(4), end=' ')
      print(dis.opname[op].ljust(20), end=' ')
    pc += inst_size
    if op >= dis.HAVE_ARGUMENT:
      if is_py3:
        arg = instruction.arg
      else:
        # Little-endian 16-bit argument, plus any pending EXTENDED_ARG.
        arg = ord(code[pc]) + ord(code[pc + 1]) * 256 + extended_arg
        extended_arg = 0
      pc += opt_arg_size
      if op == dis.EXTENDED_ARG:
        extended_arg = arg * 65536
      if debug:
        # Pretty-print the argument according to its opcode family.
        print(str(arg).rjust(5), end=' ')
        if op in dis.hasconst:
          print('(' + repr(co.co_consts[arg]) + ')', end=' ')
        elif op in dis.hasname:
          print('(' + co.co_names[arg] + ')', end=' ')
        elif op in dis.hasjrel:
          print('(to ' + repr(pc + arg) + ')', end=' ')
        elif op in dis.haslocal:
          print('(' + co.co_varnames[arg] + ')', end=' ')
        elif op in dis.hascompare:
          print('(' + dis.cmp_op[arg] + ')', end=' ')
        elif op in dis.hasfree:
          if free is None:
            free = co.co_cellvars + co.co_freevars
          print('(' + free[arg] + ')', end=' ')

    # Actually emulate the op.
    if state is None and states[start] is None:
      # No control reaches here (yet).
      if debug:
        print()
      continue
    # Merge the fall-through state with any state recorded for jumps here.
    state |= states[start]

    opname = dis.opname[op]
    jmp = jmp_state = None
    if opname.startswith('CALL_FUNCTION'):
      if sys.version_info < (3, 6):
        # Each keyword takes up two arguments on the stack (name and value).
        standard_args = (arg & 0xFF) + 2 * (arg >> 8)
        var_args = 'VAR' in opname
        kw_args = 'KW' in opname
        pop_count = standard_args + var_args + kw_args + 1
        if depth <= 0:
          return_type = Any
        elif arg >> 8:
          # TODO(robertwb): Handle this case.
          return_type = Any
        elif isinstance(state.stack[-pop_count], Const):
          # TODO(robertwb): Handle this better.
          if var_args or kw_args:
            state.stack[-1] = Any
            state.stack[-var_args - kw_args] = Any
          # Recurse into the callee with one less level of depth budget.
          return_type = infer_return_type(state.stack[-pop_count].value,
                                          state.stack[1 - pop_count:],
                                          debug=debug,
                                          depth=depth - 1)
        else:
          return_type = Any
        state.stack[-pop_count:] = [return_type]
      else:  # Python 3.6+
        if opname == 'CALL_FUNCTION':
          pop_count = arg + 1
          if depth <= 0:
            return_type = Any
          else:
            return_type = infer_return_type(state.stack[-pop_count].value,
                                            state.stack[1 - pop_count:],
                                            debug=debug,
                                            depth=depth - 1)
        elif opname == 'CALL_FUNCTION_KW':
          # TODO(udim): Handle keyword arguments. Requires passing them by name
          # to infer_return_type.
          pop_count = arg + 2
          return_type = Any
        elif opname == 'CALL_FUNCTION_EX':
          # stack[-has_kwargs]: Map of keyword args.
          # stack[-1 - has_kwargs]: Iterable of positional args.
          # stack[-2 - has_kwargs]: Function to call.
          has_kwargs = arg & 1  # type: int
          pop_count = has_kwargs + 2
          if has_kwargs:
            # TODO(udim): Unimplemented. Requires same functionality as a
            # CALL_FUNCTION_KW implementation.
            return_type = Any
          else:
            args = state.stack[-1]
            _callable = state.stack[-2]
            if isinstance(args, typehints.ListConstraint):
              # Case where there's a single var_arg argument.
              args = [args]
            elif isinstance(args, typehints.TupleConstraint):
              args = list(args._inner_types())
            return_type = infer_return_type(_callable.value,
                                            args,
                                            debug=debug,
                                            depth=depth - 1)
        else:
          raise TypeInferenceError('unable to handle %s' % opname)
        state.stack[-pop_count:] = [return_type]
    elif opname == 'CALL_METHOD':
      pop_count = 1 + arg
      # LOAD_METHOD will return a non-Const (Any) if loading from an Any.
      if isinstance(state.stack[-pop_count], Const) and depth > 0:
        return_type = infer_return_type(state.stack[-pop_count].value,
                                        state.stack[1 - pop_count:],
                                        debug=debug,
                                        depth=depth - 1)
      else:
        return_type = typehints.Any
      state.stack[-pop_count:] = [return_type]
    elif opname in simple_ops:
      if debug:
        print("Executing simple op " + opname)
      simple_ops[opname](state, arg)
    elif opname == 'RETURN_VALUE':
      returns.add(state.stack[-1])
      # Control does not continue past a return; state becomes unreachable.
      state = None
    elif opname == 'YIELD_VALUE':
      yields.add(state.stack[-1])
    elif opname == 'JUMP_FORWARD':
      jmp = pc + arg
      jmp_state = state
      state = None
    elif opname == 'JUMP_ABSOLUTE':
      jmp = arg
      jmp_state = state
      state = None
    elif opname in ('POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE'):
      # Condition is popped on both branches.
      state.stack.pop()
      jmp = arg
      jmp_state = state.copy()
    elif opname in ('JUMP_IF_TRUE_OR_POP', 'JUMP_IF_FALSE_OR_POP'):
      # Condition stays on the stack along the jump edge only.
      jmp = arg
      jmp_state = state.copy()
      state.stack.pop()
    elif opname == 'FOR_ITER':
      # Jump edge: loop exhausted, iterator popped.  Fall-through edge:
      # the iterator's element type is pushed for the loop body.
      jmp = pc + arg
      jmp_state = state.copy()
      jmp_state.stack.pop()
      state.stack.append(element_type(state.stack[-1]))
    else:
      raise TypeInferenceError('unable to handle %s' % opname)

    if jmp is not None:
      # TODO(robertwb): Is this guaranteed to converge?
      # Merge into the target state; retake backward jumps (at most 5 times
      # per pc) until the merged state stops changing.
      new_state = states[jmp] | jmp_state
      if jmp < pc and new_state != states[jmp] and jumps[pc] < 5:
        jumps[pc] += 1
        pc = jmp
      states[jmp] = new_state

    if debug:
      print()
      print(state)
      pprint.pprint(dict(item for item in states.items() if item[1]))

  if yields:
    # A generator: the result is an iterable of the union of yielded types.
    result = typehints.Iterable[reduce(union, Const.unwrap_all(yields))]
  else:
    result = reduce(union, Const.unwrap_all(returns))
  finalize_hints(result)

  if debug:
    print(f, id(f), input_types, '->', result)
  return result
|
markflyhigh/incubator-beam
|
sdks/python/apache_beam/typehints/trivial_inference.py
|
Python
|
apache-2.0
| 17,088
|
[
"VisIt"
] |
7a35f9adf1f0a555cba001a2b1764811df10967344dd0141fe9101d121f227a8
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases used by the generated respond() method below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Compile-time metadata recorded by the Cheetah compiler.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.357866
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/bouqueteditor/web/getprotectionsettings.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against an incompatible (older) Cheetah runtime.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.'%(
            __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getprotectionsettings(Template):
    """Cheetah-generated template: renders the bouquet-editor parental
    protection settings as an e2protection XML document."""

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(getprotectionsettings, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        _orig_filter_22082758 = _filter
        filterName = u'WebSafe'
        # BUGFIX: dict.has_key() was removed in Python 3; the membership
        # operator is equivalent on both Python 2 and Python 3.
        if "WebSafe" in self._CHEETAH__filters:
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2protection>
\t<e2serviceprotection>
\t\t<e2serviceprotectionconfigured>''')
        _v = VFFSL(SL,"ps.Configured",True) # u'$ps.Configured' on line 5, col 34
        if _v is not None: write(_filter(_v, rawExpr=u'$ps.Configured')) # from line 5, col 34.
        write(u'''</e2serviceprotectionconfigured>
\t\t<e2serviceprotectiontype>''')
        _v = VFFSL(SL,"ps.Type",True) # u'$ps.Type' on line 6, col 28
        if _v is not None: write(_filter(_v, rawExpr=u'$ps.Type')) # from line 6, col 28.
        write(u'''</e2serviceprotectiontype>
\t\t<e2serviceprotectionsetuppinactive>''')
        _v = VFFSL(SL,"ps.SetupPinActive",True) # u'$ps.SetupPinActive' on line 7, col 38
        if _v is not None: write(_filter(_v, rawExpr=u'$ps.SetupPinActive')) # from line 7, col 38.
        write(u'''</e2serviceprotectionsetuppinactive>
\t\t<e2serviceprotectionsetuppin>''')
        _v = VFFSL(SL,"ps.SetupPin",True) # u'$ps.SetupPin' on line 8, col 32
        if _v is not None: write(_filter(_v, rawExpr=u'$ps.SetupPin')) # from line 8, col 32.
        write(u'''</e2serviceprotectionsetuppin>
\t</e2serviceprotection>
</e2protection>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_22082758

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_getprotectionsettings= 'respond'
## END CLASS DEFINITION

# Attach Cheetah's runtime plumbing helpers to the generated class exactly
# once (guarded by the marker attribute they install).
if not hasattr(getprotectionsettings, '_initCheetahAttributes'):
    templateAPIClass = getattr(getprotectionsettings, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(getprotectionsettings)


# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=getprotectionsettings()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/bouqueteditor/web/getprotectionsettings.py
|
Python
|
gpl-2.0
| 6,010
|
[
"VisIt"
] |
7c259a82d047c4a033dd3128942844368f46df2e5da14cde496c3bcec68b6324
|
# coding: utf-8

# ### Operations on PLANETOPLOT requests: differences, ratios, etc...
#
# *Author: [Aymeric SPIGA](http://www.lmd.jussieu.fr/~aslmd)*

# In[45]:

# This line configures matplotlib to show figures embedded in the notebook.
get_ipython().magic(u'matplotlib inline')


# Operations on PLANETOPLOT objects `pp()` make it easy to show the difference
# or ratio between two PLANETOPLOT requests: this can be the difference between
# two simulations, the ratio between two variables, etc. The five operations --
# plus, minus, multiply, divide, power -- are coded in the `pp()` class. Below
# we give several examples.
#
# This is done by setting up a `python` script or using `ipython` for
# interactive use. First import the `pp()` class.

# In[46]:

from ppclass import pp


# Now perform two distinct requests and apply the `get()` method to load data
# from netCDF file(s). NB: we use the same data file as in the main tutorial.

# In[47]:

req1 = pp(file="diagfired.nc",var="tsurf",t=0.7).get()
req2 = pp(file="diagfired.nc",var="tsurf",t=0.9).get()


# Now create a new `pp()` object containing the difference between the two
# requests.

# In[48]:

diff = req2-req1


# It is then easy to plot the difference between the two requested fields!
# Simply call the `plot()` method for the `diff` object.

# In[49]:

diff.plot()


# Operations with actual numbers are also supported. For instance, show
# surface temperature in degrees Celsius instead of Kelvin.
#
# Note that the computed object (here, `cels`) gets all its attributes from
# the original `req2` object. Before the plotting command, you might want to
# provide settings more suitable for the result. For instance here we change
# units.

# In[54]:

cels = -273.15 + req1
cels.units = "$^{\circ}C$"
cels.plot()


# Computing a ratio is no more difficult than computing additions and
# differences. In the example below, we changed the formatting of values to
# get 2 decimals since the `fmt` attribute imported from the `req2` object is
# not suitable (float with no decimal).

# In[44]:

ratio = req2/req1
ratio.fmt = "%.2f"
ratio.plot()


# One last example: suppose you need to plot the horizontal wind modulus at a
# given altitude
# $$ U = \sqrt{u^2+v^2} $$

# In[30]:

u = pp(file="diagfired.nc",var="u",t=0.7,z=120.).get()
v = pp(file="diagfired.nc",var="v",t=0.7,z=120.).get()
wind = (u**2 + v**2)**0.5
wind.proj = "moll"
wind.plot()
|
aymeric-spiga/planetoplot
|
tutorial/operations.py
|
Python
|
gpl-2.0
| 2,378
|
[
"NetCDF"
] |
bc7b90b3127dcded5f7abfbe8a1af5f07d6f15d2da0c4649b5693acb39225f3b
|
#!/usr/bin/python
#1/usr/bin/env python
# __*__ coding: utf8 __*__
oneline = "Read different types of models"
import os
#=======================================================================
def dump2crd(file_in,file_out):
    """Convert a LAMMPS dump snapshot into a LAMMPS data ("crd") file.

    Skips the preamble of file_in up to (and including) the first line
    containing TIMESTEP, then writes the atom count, box bounds, and the
    Atoms / Velocities sections to file_out.
    Assumes dump rows are: id type x y z vx vy vz -- TODO confirm.
    """
    # BUGFIX: files were opened with bare open()/close() and leaked on any
    # exception; `with` guarantees closure.
    with open(file_in,'r') as f:
        while f.readline().count('TIMESTEP')==0:
            pass
        ln=[i.strip() for i in f.readlines()]
    l=[i.split() for i in ln]
    # Highest atom-type id present defines the number of types.
    ntypes=max(int(i[1]) for i in l[8:])
    dat = 'LAMMPS Description. time=0 \n \n'
    dat += '%11s atoms \n' %ln[2]
    dat +='%11i atom types \n \n' % ntypes
    dat +='%s xlo xhi \n' %ln[4]
    dat +='%s ylo yhi \n' %ln[5]
    dat +='%s zlo zhi \n \n' %ln[6]
    dat +='Atoms \n \n'
    for i in l[8:]:
        dat+='%s %s %s %s %s \n' % tuple(i[0:5])
    dat +='\n Velocities \n \n'
    for i in l[8:]:
        dat+='%s %s %s %s \n' % (i[0],i[5],i[6],i[7])
    with open(file_out,"w") as f:
        f.write(dat)
    return
#=========================================================================
def dump2pdb(file_in,file_out):
    """Convert a LAMMPS dump snapshot into a minimal PDB-style file.

    One ATOM record per dump row; the chain character cycles through
    printable codes 34..239 to keep records distinguishable.
    """
    # BUGFIX: files were opened with bare open()/close() and leaked on any
    # exception; `with` guarantees closure.
    with open(file_in,'r') as f:
        while f.readline().count('TIMESTEP')==0:
            pass
        ln=[i.strip() for i in f.readlines()]
    l=[i.split() for i in ln]
    dat ="""REMARK atoms as in
REMARK
REMARK< i5>< > X < > <x f8.3><y f8.3><z f8.3><f6.2><f6.2> \n"""
    s= 'ATOM %5i IS %c %4i %8.3f%8.3f%8.3f%6.2f%6.2f \n'
    j=33
    for i in l[8:]:
        j += 1
        if j==240:
            # Wrap the chain character back to the printable range.
            j=33
        dat+= s%(int(i[0]),chr(j),int(i[1]),float(i[2]),float(i[3]),float(i[4]),0.0,float(i[1]))
    with open(file_out,"w") as f:
        f.write(dat)
    return
#=========================================================================
def gen_find(fileparts,top=None):
    """Yield paths under *top* matching any of the glob patterns, (c) A.Vorontsov."""
    import fnmatch
    root = top if top is not None else os.getcwd()
    for path, dirlist, filelist in os.walk(root):
        for pattern in fileparts:
            for name in fnmatch.filter(filelist, pattern):
                yield os.path.join(path, name)
def gen_open(filenames): # open files with different types
    'Iterate on the file descriptors of different types, (c) A.Vorontsov'
    import gzip, bz2
    for name in filenames:
        if not os.path.isfile(name):
            # WARNING(review): a name that is not an existing file is executed
            # as a shell command via os.popen -- never feed untrusted input
            # through this generator.
            yield os.popen(name,'r')
        elif name.endswith(".gz"):
            # NOTE(review): mode 'r' on gzip.open is *binary* under Python 3;
            # this code appears to assume Python 2 text semantics -- confirm
            # before porting.
            yield gzip.open(name,'r')
        elif name.endswith(".bz2"):
            yield bz2.BZ2File(name,'r')
        else:
            yield open(name,'r')
def gen_cat(sources):
    """Chain several iterables into one flat stream of items.

    (c) A.Vorontsov
    """
    for stream in sources:
        # re-yield every element of the current stream before moving on
        for element in stream:
            yield element
def gen_blocks(lines, separator='TIMESTEP'):  # cut file with separator
    """Cut a stream of lines into blocks delimited by *separator* lines.

    Yields (istep, block) pairs, where istep counts separator lines seen.
    Two long-standing quirks are kept for caller compatibility: each block
    is yielded with the index of the *next* separator (the first yielded
    pair has istep == 1), and the trailing block after the last separator
    is never yielded.
    (c) A.Vorontsov
    """
    dat = []
    istep = -1
    for line in lines:
        # Fix: the Python-2-only '<>' operator is a syntax error in
        # Python 3; '!=' behaves identically on both.
        if line.count(separator) != 0:
            istep += 1
            if dat != []:
                yield istep, dat
            dat = []
        if line != '':
            dat += [line]
def time_filter(gn_block, time=None):  # filter of time
    """Filters blocks generator by time steps.

    Forwards (istep, block) pairs whose istep is listed in *time*;
    with time=None every pair passes through unchanged.
    """
    for istep, block in gn_block:
        if time is None or istep in time:
            yield istep, block
#-----------------------------------------------------------------
def format_default(s):
    """Map column names to default type codes: 'f' float, 'i' int, 's' str.

    Unknown column names fall back to 's'.
    """
    known = {
        'x': 'f', 'y': 'f', 'z': 'f',
        'id': 'i', 'type': 's',
        'vx': 'f', 'vy': 'f', 'vz': 'f',
        'kl': 'i', 'nkl': 'i', 'nel': 'i',
    }
    return [known.get(name, 's') for name in s]
#---------------------------------------------------------------
def format_data(dl, format='i s f f f f f f'.split()):
    """Parse whitespace-separated lines into typed rows.

    *format* lists one type code per column: 'i' int (parsed via float, so
    '1.0' is accepted), 'f' float, 's' str.  Bracket/brace tokens are
    silently dropped before conversion.
    """
    def to_int(token):
        return int(float(token))
    converters = {'i': to_int, 'f': float, 's': str}
    for line in dl:
        tokens = [tok for tok in line.split() if tok not in '(){}[]']
        yield [converters[code](tok) for tok, code in zip(tokens, format)]
#-----------------------------------------------------
def sst_ani_block(file):
    """Split an SST .ANI line stream into snapshot blocks.

    A line with exactly one whitespace-separated token (the atom count)
    starts a new block.  Unlike gen_blocks, the trailing block IS yielded.
    """
    dat = []
    for i in file:
        # Fix: the Python-2-only '<>' operator is a syntax error in
        # Python 3; '!=' behaves identically on both.
        if len(i.split()) == 1:
            if dat != []:
                yield dat
            dat = []
        if i != '':
            dat += [i]
    if dat != []:
        yield dat
#-------------------------------------------------
def sst_ani(file):
    """Yield (istep, atom_lines) for each snapshot of an SST .ANI stream.

    The first block line (atom count) and the second (comment/header line)
    are dropped from each yielded snapshot.
    """
    for istep, block in enumerate(sst_ani_block(file)):
        # Parse the atom count; kept so a malformed header still raises,
        # exactly as in the original implementation.
        nat = int(block[0])
        yield istep, block[2:]
#======================================================
class dump_shs():
    """Builds plain-dict dumps out of SHS Geometry and Evolution instances."""

    def __init__(self):
        pass

    def shs_geom(self, geom):
        """Return a dict dump {'atoms': ..., 'box': ...} of a Geom instance."""
        return {'atoms': geom.atoms,
                'box': geom.vc}

    def shs_evol(self, evol):
        """Return a list of geometry dicts, one per snapshot of an Evol instance."""
        return [self.shs_geom(snapshot) for snapshot in evol.geom]
#======================================================
class dump_sst:
def __init__(self, files=[], legend='type x y z'):
if not hasattr(files,'__iter__'):
files = [files]
self.files = files
self.legend = legend
self.format = ''
self.vc = [0,0,0]
def sst_xv(self,file):
it = open(file)
a = [float(k)*0.529 for k in it.next().split()][:3]
b = [float(k)*0.529 for k in it.next().split()][:3]
c = [float(k)*0.529 for k in it.next().split()][:3]
self.vc = [a, b, c]
return {'box': self.vc}
def sst_sph(self,file,leg_st='id type x y z rad charge spin'):
it=open(file)
self.data=it
def sst_ani(self, anif, xvf, step = None, leg_st='type x y z'):
ofiles=gen_open([anif])
f = gen_cat(ofiles)
itt = (i for istep, i in sst_ani(f) if istep == step)
box = self.sst_xv(xvf)
for it in itt:
leg_list=leg_st.split()
ft_list=format_default(leg_list)
atoms=[i for i in format_data(it,ft_list)]
ll=len(atoms[0])
ddd = {'atoms':atoms,'legend':leg_list[:ll],'format':ft_list[:ll],'time':step}
ddd.update(box)
return ddd
# TO DO: Dump from out files
def sst_out(self, outf, step = None, leg_st='type x y z'):
ofiles=gen_open([outf])
f = gen_cat(ofiles)
itt = (i for istep, i in sst_ani(f) if istep == step)
box = self.sst_xv(outf)
for it in itt:
leg_list=leg_st.split()
ft_list=format_default(leg_list)
atoms=[i for i in format_data(it,ft_list)]
ll=len(atoms[0])
ddd = {'atoms':atoms,'legend':leg_list[:ll],'format':ft_list[:ll],'time':step}
ddd.update(box)
return ddd
def __iter__(self):
ddd = {'format':self.format}
for file in self.files:
it=open(file)
n=int(it.next())
a=[float(k) for k in it.next().split()]
b=[float(k) for k in it.next().split()]
c=[float(k) for k in it.next().split()]
leg_list=self.legend.split()
ft_list=format_default(leg_list)
#{'atoms':atoms,'legend':leg_list[:ll],'format':ft_list[:ll],'box':[a,b,c]}
yield ddd
|
ansobolev/shs
|
shs/voronoi/dump.py
|
Python
|
mit
| 7,357
|
[
"LAMMPS"
] |
776aec146f250e53cc74f0883dc376ca247f4bfa1f01bc112aeecb7a6fc77b33
|
# -*- coding: utf-8 -*-
"""
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format
"""
from distutils.version import LooseVersion
import os
from docutils import nodes
from docutils.parsers.rst import directives, Directive, roles
from docutils.parsers.rst.directives import images
from docutils.parsers.rst.roles import set_classes
import sphinx
try:
from sphinx.builders.html import JSONHTMLBuilder
except ImportError:
from sphinxcontrib.serializinghtml import JSONHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.locale import _
from sphinx.writers.html import HTMLTranslator
def true_false(argument):
    """Directive-option validator: the value must be 'true' or 'false'."""
    allowed = ('true', 'false')
    return directives.choice(argument, allowed)
def static_dynamic(argument):
    """Directive-option validator: the value must be 'static' or 'dynamic'."""
    allowed = ('static', 'dynamic')
    return directives.choice(argument, allowed)
class TitlesCache(object):
    """Process-wide registry mapping documents (keyed by hash) to page titles."""
    titles = {}  # class attribute: shared by every user of the cache

    @staticmethod
    def _document_key(document):
        # Documents are keyed by their hash so the dict never holds the
        # (potentially large) document object itself.
        return hash(document)

    @classmethod
    def set_title(cls, document, title):
        """Remember *title* as the page title of *document*."""
        key = cls._document_key(document)
        cls.titles[key] = title

    @classmethod
    def get_title(cls, document):
        """Return the stored title of *document*, or None if unknown."""
        key = cls._document_key(document)
        return cls.titles.get(key, None)

    @classmethod
    def has_title(cls, document):
        """Return True when a title has been stored for *document*."""
        key = cls._document_key(document)
        return key in cls.titles
class JSONConfluenceBuilder(JSONHTMLBuilder):
    """For backward compatibility"""
    # Deprecated builder kept so projects configured with builder name
    # 'json_conf' keep working; it only swaps in the Confluence translator
    # (on Sphinx >= 1.4) and warns about its own deprecation.
    name = 'json_conf'

    def __init__(self, app):
        super(JSONConfluenceBuilder, self).__init__(app)
        # Sphinx >= 1.4 selects the translator via translator_class.
        if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
            self.translator_class = HTMLConfluenceTranslator
        self.warn('json_conf builder is deprecated and will be removed in future releases')
class HTMLConfluenceTranslator(HTMLTranslator):
    # Translates the docutils document tree into Confluence "storage format":
    # standard XHTML where possible, <ac:structured-macro>/<ac:image>/<ac:link>
    # macros where Confluence needs them.

    def unimplemented_visit(self, node):
        # Downgrade to a builder warning so one unsupported node type does
        # not abort the whole build.
        self.builder.warn('Unimplemented visit is not implemented for node: {}'.format(node))

    def unknown_visit(self, node):
        # Same policy as unimplemented_visit: warn, don't crash.
        self.builder.warn('Unknown visit is not implemented for node: {}'.format(node))

    def visit_admonition(self, node, name=''):
        """
        Info, Tip, Note, and Warning Macros
        https://confluence.atlassian.com/conf58/info-tip-note-and-warning-macros-771892344.html
        <ac:structured-macro ac:name="info">
            <ac:parameter ac:name="icon">false</ac:parameter>
            <ac:parameter ac:name="title">This is my title</ac:parameter>
            <ac:rich-text-body>
                <p>
                    This is important information.
                </p>
            </ac:rich-text-body>
        </ac:structured-macro>
        """
        # Map rst admonition names onto the four Confluence macro flavours;
        # anything not listed falls back to 'info'.
        confluence_admonition_map = {
            'note': 'info',
            'warning': 'note',
            'attention': 'note',
            'hint': 'tip',
            'tip': 'tip',
            'important': 'warning',
            'error': 'warning',
            'danger': 'warning',
        }
        admonition_type = confluence_admonition_map.get(name, 'info')
        # Opening half of the macro; depart_admonition() emits the closing half.
        macro = """\
        <ac:structured-macro ac:name="{admonition_type}">
            <ac:parameter ac:name="icon">true</ac:parameter>
            <ac:parameter ac:name="title"></ac:parameter>
            <ac:rich-text-body>
        """
        self.body.append(macro.format(admonition_type=admonition_type))

    def depart_admonition(self, node=None):
        # Close the rich-text body opened in visit_admonition().
        macro = """
            </ac:rich-text-body>
        </ac:structured-macro>\n
        """
        self.body.append(macro)

    def imgtag(self, filename, suffix='\n', **attributes):
        """
        Attached image
        https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Images
        <ac:image>
            <ri:attachment ri:filename="atlassian_logo.gif" />
        </ac:image>
        Supported image attributes (some of these attributes mirror the equivalent HTML 4 IMG element):
        Name            Description
        ----            -----------
        ac:align        image alignment
        ac:border       Set to "true" to set a border
        ac:class        css class attribute.
        ac:title        image tool tip.
        ac:style        css style
        ac:thumbnail    Set to "true" to designate this image as a thumbnail.
        ac:alt          alt text
        ac:height       image height
        ac:width        image width
        """
        prefix = []
        atts = {}
        # Normalise attribute names to lower case, then sort for a
        # deterministic serialisation order.
        for (name, value) in attributes.items():
            atts[name.lower()] = value
        attlist = atts.items()
        attlist = sorted(attlist)
        parts = []
        src_part = '<ri:attachment ri:filename="%s" />' % filename
        for name, value in attlist:
            # value=None was used for boolean attributes without
            # value, but this isn't supported by XHTML.
            assert value is not None
            if isinstance(value, list):
                value = u' '.join(map(unicode, value))
            else:
                # First assume Python 2
                try:
                    value = unicode(value)
                # Otherwise, do it the Python 3 way
                except NameError:
                    value = str(value)
            parts.append('ac:%s="%s"' % (name.lower(), self.attval(value)))
        infix = '</ac:image>'
        return ''.join(prefix) + '<ac:image %s>%s%s' % (' '.join(parts), src_part, infix) + suffix

    def visit_image(self, node):
        # Emit an <ac:image> thumbnail referencing the attachment named by
        # the URI's basename instead of a plain <img> tag.
        atts = {}
        uri = node['uri']
        filename = os.path.basename(uri)
        atts['alt'] = node.get('alt', uri)
        atts['thumbnail'] = 'true'
        if 'width' in node:
            atts['width'] = node['width']
        if 'name' in node:
            atts['title'] = node['name']
        if (isinstance(node.parent, nodes.TextElement) or
                (isinstance(node.parent, nodes.reference) and
                 not isinstance(node.parent.parent, nodes.TextElement))):
            # Inline context or surrounded by <a>...</a>.
            suffix = ''
        else:
            suffix = '\n'
        self.context.append('')
        self.body.append(self.imgtag(filename, suffix, **atts))

    def visit_title(self, node):
        # The first level-1 section title becomes the Confluence page title:
        # it is cached (for building internal links) and dropped from the body.
        if isinstance(node.parent, nodes.section) and not TitlesCache.has_title(self.document):
            h_level = self.section_level + self.initial_header_level - 1
            if h_level == 1:
                # Confluence take first title for page title from rst
                # It use for making internal links
                TitlesCache.set_title(self.document, node.children[0])
                # ignore first header; document must have title header
                raise nodes.SkipNode
        HTMLTranslator.visit_title(self, node)

    def visit_target(self, node):
        """
        Anchor Macro
        https://confluence.atlassian.com/display/DOC/Anchor+Macro
        <ac:structured-macro ac:name="anchor">
            <ac:parameter ac:name="">here</ac:parameter>
        </ac:structured-macro>
        """
        # Anchor confluence macros
        anchor_macros = """
            <ac:structured-macro ac:name="anchor">
                <ac:parameter ac:name="">%s</ac:parameter>
            </ac:structured-macro>
        """
        # refuri is preferred over refid over refname as the anchor name.
        if 'refid' in node or 'refname' in node:
            if 'refuri' in node:
                link = node['refuri']
            elif 'refid' in node:
                link = node['refid']
            else:
                link = node['refname']
            self.body.append(anchor_macros % link)

    def depart_target(self, node):
        # Nothing to close: the anchor macro is self-contained.
        pass

    def visit_literal_block(self, node):
        """
        Code Block Macro
        https://confluence.atlassian.com/display/DOC/Code+Block+Macro
        <ac:structured-macro ac:name="code">
            <ac:parameter ac:name="title">This is my title</ac:parameter>
            <ac:parameter ac:name="theme">FadeToGrey</ac:parameter>
            <ac:parameter ac:name="linenumbers">true</ac:parameter>
            <ac:parameter ac:name="language">xml</ac:parameter>
            <ac:parameter ac:name="firstline">0001</ac:parameter>
            <ac:parameter ac:name="collapse">true</ac:parameter>
            <ac:plain-text-body><![CDATA[<b>This is my code</b>]]></ac:plain-text-body>
        </ac:structured-macro>
        """
        parts = ['<ac:structured-macro ac:name="code">']
        if 'language' in node:
            # Collapsible argument
            if node['language'] == 'collapse':
                parts.append('<ac:parameter ac:name="collapse">true</ac:parameter>')
            # Languages the Confluence code macro accepts; anything else is
            # downgraded to 'none'.
            valid = ['actionscript3', 'bash', 'csharp', 'coldfusion', 'cpp', 'css', 'delphi', 'diff', 'erlang',
                     'groovy', 'html/xml', 'java', 'javafx', 'javascript', 'none', 'perl', 'php', 'powershell',
                     'python', 'ruby', 'scala', 'sql', 'vb']
            if node['language'] not in valid:
                node['language'] = 'none'
            parts.append('<ac:parameter ac:name="language">%s</ac:parameter>' % node['language'])
        if 'linenos' in node and node['linenos']:
            parts.append('<ac:parameter ac:name="linenumbers">true</ac:parameter>')
        if 'caption' in node and node['caption']:
            parts.append('<ac:parameter ac:name="title">%s</ac:parameter>' % node['caption'])
        parts.append('<ac:plain-text-body><![CDATA[%s]]></ac:plain-text-body>' % node.rawsource)
        parts.append('</ac:structured-macro>')
        self.body.append(''.join(parts))
        # rawsource already carries the whole block; skip the child text nodes.
        raise nodes.SkipNode

    def visit_download_reference(self, node):
        """
        Link to an attachment
        https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Links
        <ac:link>
            <ri:attachment ri:filename="atlassian_logo.gif" />
            <ac:plain-text-link-body><![CDATA[Link to a Confluence Attachment]]></ac:plain-text-link-body>
        </ac:link>
        """
        if 'filename' not in node:
            self.context.append('')
            return
        # Use the first grandchild text node (if any) as the link label.
        text = None
        if len(node.children) > 0 and len(node.children[0].children) > 0:
            text = node.children[0].children[0]
        parts = [
            '<ac:link>',
            '<ri:attachment ri:filename="%s" />' % node['filename'],
            '<ac:plain-text-link-body>',
            '<![CDATA[%s]]>' % text if text else '',
            '</ac:plain-text-link-body>',
            '</ac:link>',
        ]
        self.body.append(''.join(parts))
        raise nodes.SkipNode

    def visit_section(self, node):
        # removed section open tag
        self.section_level += 1

    def depart_section(self, node):
        # removed section close tag
        self.section_level -= 1

    def visit_reference(self, node):
        # Build the <a> start tag; internal links get a '#<PageTitle>-'
        # prefix because that is how Confluence names its anchors.
        atts = {'class': 'reference'}
        if node.get('internal') or 'refuri' not in node:
            atts['class'] += ' internal'
        else:
            atts['class'] += ' external'
        if 'refuri' in node:
            atts['href'] = ''
            # Confluence makes internal links with prefix from page title
            if node.get('internal') and TitlesCache.has_title(self.document):
                atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
            atts['href'] += node['refuri']
            if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
                atts['href'] = self.cloak_mailto(atts['href'])
                self.in_mailto = 1
        else:
            assert 'refid' in node, 'References must have "refuri" or "refid" attribute.'
            atts['href'] = ''
            # Confluence makes internal links with prefix from page title
            if node.get('internal') and TitlesCache.has_title(self.document):
                atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
            atts['href'] += node['refid']
        if not isinstance(node.parent, nodes.TextElement):
            assert len(node) == 1 and isinstance(node[0], nodes.image)
            atts['class'] += ' image-reference'
        if 'reftitle' in node:
            atts['title'] = node['reftitle']
        self.body.append(self.starttag(node, 'a', '', **atts))
        if node.get('secnumber'):
            self.body.append(('%s' + self.secnumber_suffix) % '.'.join(map(str, node['secnumber'])))

    def visit_desc(self, node):
        """ Replace <dl> """
        self.body.append(self.starttag(node, 'div', style="margin-top: 10px"))

    def depart_desc(self, node):
        self.body.append('</div>\n\n')

    def visit_desc_signature(self, node):
        """ Replace <dt> """
        # the id is set automatically
        self.body.append(self.starttag(
            node, 'div', style='margin-left: 20px; font-weight: bold;'))
        # anchor for per-desc interactive data
        if node.parent['objtype'] != 'describe' and node['ids'] and node['first']:
            self.body.append('<!--[%s]-->' % node['ids'][0])

    def depart_desc_signature(self, node):
        """ Copy-paste from original method """
        self.add_permalink_ref(node, _('Permalink to this definition'))
        self.body.append('</div>')

    def visit_desc_content(self, node):
        """ Replace <dd> """
        self.body.append(self.starttag(
            node, 'div', '', style='margin-left: 40px;'))

    def depart_desc_content(self, node):
        self.body.append('</div>')

    def visit_table(self, node):
        """ Fix ugly table border
        """
        self.context.append(self.compact_p)
        self.compact_p = True
        classes = ' '.join(['docutils', self.settings.table_style]).strip()
        self.body.append(
            self.starttag(node, 'table', CLASS=classes, border="0"))

    def write_colspecs(self):
        """ Fix ugly column width
        """
        # Intentionally emit no <col> specs: Confluence sizes columns itself.
        pass
class ImageConf(images.Image):
    """
    Image confluence directive
    """
    # Subclasses the stock docutils image directive but strips the options
    # the Confluence output cannot represent as plain image nodes.
    def run(self):
        # remove 'align' processing
        # remove 'target' processing
        self.options.pop('align', None)
        reference = directives.uri(self.arguments[0])
        self.options['uri'] = reference
        set_classes(self.options)
        image_node = nodes.image(self.block_text, **self.options)
        self.add_name(image_node)
        return [image_node]
class TocTree(Directive):
    """
    Replace sphinx "toctree" directive to confluence macro
    Table of Contents Macro
    https://confluence.atlassian.com/display/DOC/Table+of+Contents+Macro
    <ac:structured-macro ac:name="toc">
        <ac:parameter ac:name="style">square</ac:parameter>
        <ac:parameter ac:name="minLevel">1</ac:parameter>
        <ac:parameter ac:name="maxLevel">3</ac:parameter>
        <ac:parameter ac:name="type">list</ac:parameter>
    </ac:structured-macro>
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    # The standard toctree options are accepted for compatibility with
    # existing documents, but run() never reads them: the emitted macro
    # is static.
    option_spec = {
        'maxdepth': int,
        'name': directives.unchanged,
        'caption': directives.unchanged_required,
        'glob': directives.flag,
        'hidden': directives.flag,
        'includehidden': directives.flag,
        'titlesonly': directives.flag,
    }

    def run(self):
        # Fixed TOC macro; Confluence computes the entries itself.
        macro = """
            <ac:structured-macro ac:name="toc">
                <ac:parameter ac:name="style">square</ac:parameter>
                <ac:parameter ac:name="minLevel">1</ac:parameter>
                <ac:parameter ac:name="maxLevel">3</ac:parameter>
                <ac:parameter ac:name="type">list</ac:parameter>
            </ac:structured-macro>\n
        """
        attributes = {'format': 'html'}
        raw_node = nodes.raw('', macro, **attributes)
        return [raw_node]
class JiraIssuesDirective(Directive):
    """
    JIRA Issues Macro
    https://confluence.atlassian.com/doc/jira-issues-macro-139380.html
    <ac:structured-macro ac:name="jira" ac:schema-version="1" ac:macro-id="da6b6413-0b93-4052-af90-dbb252175860">
        <ac:parameter ac:name="server">Atlassian JIRA (JAC)</ac:parameter>
        <ac:parameter ac:name="columns">key,summary,created</ac:parameter>
        <ac:parameter ac:name="maximumIssues">20</ac:parameter>
        <ac:parameter ac:name="jqlQuery">project = CONF AND FixVersion=5.8 </ac:parameter>
        <ac:parameter ac:name="serverId">146780e9-1234-312f-1243-ed0555666fa</ac:parameter>
    </ac:structured-macro>
    """
    # The single required argument is the JQL query; every option becomes a
    # macro parameter (snake_case option names are converted to camelCase).
    required_arguments = 1
    has_content = False
    final_argument_whitespace = True
    option_spec = {
        "anonymous": true_false,
        "server_id": directives.unchanged,
        "baseurl": directives.unchanged,
        "columns": directives.unchanged,
        "count": true_false,
        "height": directives.positive_int,
        "title": directives.unchanged,
        "render_mode": static_dynamic,
        "url": directives.unchanged,
        "width": directives.unchanged,
        "maximum_issues": directives.positive_int
    }

    def run(self):
        # Serialise options plus the JQL query into <ac:parameter> elements.
        result = ['<ac:structured-macro ac:name="jira" ac:schema-version="1">']
        param_macro = '<ac:parameter ac:name="{name}">{value}</ac:parameter>'
        for name, value in self.options.items():
            result.append(param_macro.format(name=underscore_to_camelcase(name), value=value))
        jql_query = self.arguments[0]
        result.append(param_macro.format(name='jqlQuery', value=jql_query))
        result.append('</ac:structured-macro>')
        attributes = {'format': 'html'}
        raw_node = nodes.raw('', '\n'.join(result), **attributes)
        return [raw_node]
class JiraIssueRole(roles.GenericRole):
    # Inline role: :jira_issue:`KEY-123` renders the JIRA issue macro for
    # that issue key (summary hidden).
    def __call__(self, role, rawtext, text, *args, **kwargs):
        macro = """\
            <ac:structured-macro ac:name="jira" ac:schema-version="1">
                <ac:parameter ac:name="key">{key}</ac:parameter>
                <ac:parameter ac:name="showSummary">false</ac:parameter>
            </ac:structured-macro>
        """
        attributes = {'format': 'html'}
        # Second element is the (empty) list of system messages, per the
        # docutils role-function contract.
        return [nodes.raw('', macro.format(key=text), **attributes)], []
class JiraUserRole(roles.GenericRole):
    # Inline role: :jira_user:`jdoe` renders a Confluence user link.
    def __call__(self, role, rawtext, text, *args, **kwargs):
        macro = """\
            <ac:link>
                <ri:user ri:username="{username}"/>
            </ac:link>
        """
        attributes = {'format': 'html'}
        # Second element is the (empty) list of system messages.
        return [nodes.raw('', macro.format(username=text), **attributes)], []
class CaptionedCodeBlock(CodeBlock):
    # Wraps the Sphinx code-block directive: when a :caption: is given,
    # Sphinx wraps the literal block in a container with a caption node;
    # this moves the caption onto the literal node itself (where
    # visit_literal_block reads it) and drops the container.
    def run(self):
        ret = super(CaptionedCodeBlock, self).run()
        caption = self.options.get('caption')
        if caption and isinstance(ret[0], nodes.container):
            container_node = ret[0]
            if isinstance(container_node[0], nodes.caption):
                container_node[1]['caption'] = caption
                return [container_node[1]]
        return ret
def underscore_to_camelcase(text):
    """Convert snake_case to camelCase, e.g. 'maximum_issues' -> 'maximumIssues'."""
    words = text.split('_')
    head = words[0]  # first word keeps its original casing
    tail = [word.title() for word in words[1:]]
    return head + ''.join(tail)
def get_path():
    """Return the absolute path of the package's bundled 'themes' directory."""
    from os import path
    here = path.dirname(__file__)
    return path.join(path.abspath(here), 'themes')
def setup(app):
    """
    :type app: sphinx.application.Sphinx
    """
    # Sphinx extension entry point: force the bundled Confluence theme,
    # install the Confluence translator, and register the custom roles,
    # directives and the deprecated json_conf builder.
    app.config.html_theme_path = [get_path()]
    app.config.html_theme = 'confluence'
    app.config.html_scaled_image_link = False
    if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
        # Modern API: register the translator for both HTML-ish builders.
        app.set_translator("html", HTMLConfluenceTranslator)
        app.set_translator("json", HTMLConfluenceTranslator)
    else:
        # Pre-1.4 fallback: translator selected by dotted-path config value.
        app.config.html_translator_class = 'sphinx_confluence.HTMLConfluenceTranslator'
    app.config.html_add_permalinks = ''
    jira_issue = JiraIssueRole('jira_issue', nodes.Inline)
    app.add_role(jira_issue.name, jira_issue)
    jira_user = JiraUserRole('jira_user', nodes.Inline)
    app.add_role(jira_user.name, jira_user)
    # Override the stock image/toctree/code-block directives with the
    # Confluence-aware variants defined above.
    app.add_directive('image', ImageConf)
    app.add_directive('toctree', TocTree)
    app.add_directive('jira_issues', JiraIssuesDirective)
    app.add_directive('code-block', CaptionedCodeBlock)
    app.add_builder(JSONConfluenceBuilder)
|
Arello-Mobile/sphinx-confluence
|
sphinx_confluence/__init__.py
|
Python
|
mit
| 20,132
|
[
"VisIt"
] |
3cd69292de067ecb4f58605b38360c1313cedca376558847edddcfc997732250
|
# Copyright 2015-present Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import copy
import urlparse
import uuid
import os.path
from datetime import datetime, timedelta
import pytz
import lz4.frame
import lxml.etree
import yaml
import redis
import gevent
import gevent.event
import netaddr
import werkzeug.urls
from six import string_types
import libtaxii
import libtaxii.clients
import libtaxii.messages_11
from libtaxii.constants import MSG_STATUS_MESSAGE, ST_SUCCESS
import stix.core.stix_package
import stix.core.stix_header
import stix.indicator
import stix.common.vocabs
import stix.common.information_source
import stix.common.identity
import stix.extensions.marking.ais
import stix.data_marking
import stix.extensions.marking.tlp
import stix_edh
import cybox.core
import cybox.objects.address_object
import cybox.objects.domain_name_object
import cybox.objects.uri_object
import cybox.objects.file_object
import mixbox.idgen
import mixbox.namespaces
from . import basepoller
from . import base
from . import actorbase
from .utils import dt_to_millisec, interval_in_sec, utc_millisec
# stix_edh is imported to register the EDH data marking extensions, but it is not directly used.
# Delete the symbol to silence the warning about the import being unnecessary and prevent the
# PyCharm 'Optimize Imports' operation from removing the import.
del stix_edh
LOG = logging.getLogger(__name__)
_STIX_MINEMELD_HASHES = [
'ssdeep',
'md5',
'sha1',
'sha256',
'sha512'
]
def set_id_namespace(uri, name):
    """Register *uri*/*name* as the global mixbox ID namespace (used by maec and cybox)."""
    namespace = mixbox.namespaces.Namespace(uri, name)
    mixbox.idgen.set_id_namespace(namespace)
class TaxiiClient(basepoller.BasePollerFT):
def __init__(self, name, chassis, config):
self.poll_service = None
self.collection_mgmt_service = None
self.last_taxii_run = None
self.last_stix_package_ts = None
super(TaxiiClient, self).__init__(name, chassis, config)
def configure(self):
super(TaxiiClient, self).configure()
self.initial_interval = self.config.get('initial_interval', '1d')
self.initial_interval = interval_in_sec(self.initial_interval)
if self.initial_interval is None:
LOG.error(
'%s - wrong initial_interval format: %s',
self.name, self.initial_interval
)
self.initial_interval = 86400
self.max_poll_dt = self.config.get(
'max_poll_dt',
86400
)
# options for processing
self.ip_version_auto_detect = self.config.get('ip_version_auto_detect', True)
self.ignore_composition_operator = self.config.get('ignore_composition_operator', False)
self.create_fake_indicator = self.config.get('create_fake_indicator', False)
self.hash_priority = self.config.get('hash_priority', _STIX_MINEMELD_HASHES)
self.lower_timestamp_precision = self.config.get('lower_timestamp_precision', False)
self.discovery_service = self.config.get('discovery_service', None)
self.collection = self.config.get('collection', None)
# option for enabling client authentication
self.client_credentials_required = self.config.get(
'client_credentials_required',
True
)
self.username = self.config.get('username', None)
self.password = self.config.get('password', None)
if self.username is not None or self.password is not None:
self.client_credentials_required = False
# option for enabling client cert, default disabled
self.client_cert_required = self.config.get('client_cert_required', False)
self.key_file = self.config.get('key_file', None)
if self.key_file is None and self.client_cert_required:
self.key_file = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s.pem' % self.name
)
self.cert_file = self.config.get('cert_file', None)
if self.cert_file is None and self.client_cert_required:
self.cert_file = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s.crt' % self.name
)
self.subscription_id = None
self.subscription_id_required = self.config.get('subscription_id_required', False)
self.ca_file = self.config.get('ca_file', None)
if self.ca_file is None:
self.ca_file = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s-ca.crt' % self.name
)
self.side_config_path = self.config.get('side_config', None)
if self.side_config_path is None:
self.side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s_side_config.yml' % self.name
)
self.prefix = self.config.get('prefix', self.name)
self.confidence_map = self.config.get('confidence_map', {
'low': 40,
'medium': 60,
'high': 80
})
self._load_side_config()
def _load_side_config(self):
if not self.client_credentials_required and not self.subscription_id_required:
LOG.info('{} - side config not needed'.format(self.name))
return
try:
with open(self.side_config_path, 'r') as f:
sconfig = yaml.safe_load(f)
except Exception as e:
LOG.error('%s - Error loading side config: %s', self.name, str(e))
return
if self.client_credentials_required:
username = sconfig.get('username', None)
password = sconfig.get('password', None)
if username is not None and password is not None:
self.username = username
self.password = password
LOG.info('{} - Loaded credentials from side config'.format(self.name))
if self.subscription_id_required:
subscription_id = sconfig.get('subscription_id', None)
if subscription_id is not None:
self.subscription_id = subscription_id
LOG.info('{} - Loaded subscription id from side config'.format(self.name))
def _saved_state_restore(self, saved_state):
super(TaxiiClient, self)._saved_state_restore(saved_state)
self.last_taxii_run = saved_state.get('last_taxii_run', None)
LOG.info('last_taxii_run from sstate: %s', self.last_taxii_run)
def _saved_state_create(self):
sstate = super(TaxiiClient, self)._saved_state_create()
sstate['last_taxii_run'] = self.last_taxii_run
return sstate
def _saved_state_reset(self):
super(TaxiiClient, self)._saved_state_reset()
self.last_taxii_run = None
def _build_taxii_client(self):
result = libtaxii.clients.HttpClient()
up = urlparse.urlparse(self.discovery_service)
if up.scheme == 'https':
result.set_use_https(True)
if self.username and self.password:
if self.key_file and self.cert_file:
result.set_auth_type(
libtaxii.clients.HttpClient.AUTH_CERT_BASIC
)
result.set_auth_credentials({
'username': self.username,
'password': self.password,
'key_file': self.key_file,
'cert_file': self.cert_file
})
else:
result.set_auth_type(
libtaxii.clients.HttpClient.AUTH_BASIC
)
result.set_auth_credentials({
'username': self.username,
'password': self.password
})
else:
if self.key_file and self.cert_file:
result.set_auth_type(
libtaxii.clients.HttpClient.AUTH_CERT
)
result.set_auth_credentials({
'key_file': self.key_file,
'cert_file': self.cert_file
})
else:
result.set_auth_type(
libtaxii.clients.HttpClient.AUTH_NONE
)
if self.ca_file is not None and os.path.isfile(self.ca_file):
result.set_verify_server(
verify_server=True,
ca_file=self.ca_file
)
return result
def _call_taxii_service(self, service_url, tc, request):
up = urlparse.urlparse(service_url)
hostname = up.hostname
path = up.path
port = up.port
resp = tc.call_taxii_service2(
hostname,
path,
libtaxii.constants.VID_TAXII_XML_11,
request,
port=port
)
return resp
def _discover_services(self, tc):
msg_id = libtaxii.messages_11.generate_message_id()
request = libtaxii.messages_11.DiscoveryRequest(msg_id)
request = request.to_xml()
resp = self._call_taxii_service(self.discovery_service, tc, request)
tm = libtaxii.get_message_from_http_response(resp, msg_id)
LOG.debug('Discovery_Response {%s} %s',
type(tm), tm.to_xml(pretty_print=True))
if tm.message_type == MSG_STATUS_MESSAGE:
raise RuntimeError('{} - Error retrieving collections: {} - {}'.format(
self.name, tm.status_type, tm.message
))
self.collection_mgmt_service = None
for si in tm.service_instances:
if si.service_type != libtaxii.constants.SVC_COLLECTION_MANAGEMENT:
continue
self.collection_mgmt_service = si.service_address
break
if self.collection_mgmt_service is None:
raise RuntimeError('%s - collection management service not found' %
self.name)
def _check_collections(self, tc):
msg_id = libtaxii.messages_11.generate_message_id()
request = libtaxii.messages_11.CollectionInformationRequest(msg_id)
request = request.to_xml()
resp = self._call_taxii_service(self.collection_mgmt_service, tc, request)
tm = libtaxii.get_message_from_http_response(resp, msg_id)
LOG.debug('Collection_Information_Response {%s} %s',
type(tm), tm.to_xml(pretty_print=True))
if tm.message_type == MSG_STATUS_MESSAGE:
raise RuntimeError('{} - Error retrieving collections: {} - {}'.format(
self.name, tm.status_type, tm.message
))
tci = None
for ci in tm.collection_informations:
if ci.collection_name != self.collection:
continue
tci = ci
break
if tci is None:
raise RuntimeError('%s - collection %s not found' %
(self.name, self.collection))
if tci.polling_service_instances is None or \
len(tci.polling_service_instances) == 0:
raise RuntimeError('%s - collection %s doesn\'t support polling' %
(self.name, self.collection))
if tci.collection_type != libtaxii.constants.CT_DATA_FEED:
raise RuntimeError(
'%s - collection %s is not a data feed (%s)' %
(self.name, self.collection, tci.collection_type)
)
for pi in tci.polling_service_instances:
LOG.info('{} - message binding: {}'.format(
self.name, pi.poll_message_bindings
))
if pi.poll_message_bindings[0] == libtaxii.constants.VID_TAXII_XML_11:
self.poll_service = pi.poll_address
LOG.info('{} - poll service found'.format(self.name))
break
else:
raise RuntimeError(
'%s - collection %s does not support TAXII 1.1 message binding (%s)' %
(self.name, self.collection, tci.collection_type)
)
LOG.debug('%s - poll service: %s',
self.name, self.poll_service)
def _poll_fulfillment_request(self, tc, result_id, result_part_number):
msg_id = libtaxii.messages_11.generate_message_id()
request = libtaxii.messages_11.PollFulfillmentRequest(
message_id=msg_id,
result_id=result_id,
result_part_number=result_part_number,
collection_name=self.collection
)
request = request.to_xml()
resp = self._call_taxii_service(self.poll_service, tc, request)
return libtaxii.get_message_from_http_response(resp, msg_id)
def _poll_collection(self, tc, begin=None, end=None):
    """Poll the configured TAXII collection for the (begin, end] window.

    Sends a Poll Request (using a subscription id or inline poll
    parameters, depending on configuration), follows multi-part
    responses via Poll Fulfillment requests, and decodes the returned
    STIX content blocks.

    Returns a list of [indicator_id, indicator_value, params] entries,
    where params carries the decoded ttps/observables maps used later
    for idref resolution.
    """
    msg_id = libtaxii.messages_11.generate_message_id()
    prargs = dict(
        message_id=msg_id,
        collection_name=self.collection,
        exclusive_begin_timestamp_label=begin,
        inclusive_end_timestamp_label=end,
    )
    if self.subscription_id_required:
        prargs['subscription_id'] = self.subscription_id
    else:
        # no subscription: ask for full results, delivered synchronously
        pps = libtaxii.messages_11.PollParameters(
            response_type='FULL',
            allow_asynch=False
        )
        prargs['poll_parameters'] = pps
    request = libtaxii.messages_11.PollRequest(**prargs)
    LOG.debug('%s - first poll request %s',
              self.name, request.to_xml(pretty_print=True))
    request = request.to_xml()
    resp = self._call_taxii_service(self.poll_service, tc, request)
    tm = libtaxii.get_message_from_http_response(resp, msg_id)
    LOG.debug('%s - Poll_Response {%s} %s',
              self.name, type(tm), tm.to_xml(pretty_print=True))
    if tm.message_type == MSG_STATUS_MESSAGE:
        # a SUCCESS status message means "no new packages", not an error
        if tm.status_type == ST_SUCCESS:
            LOG.info('{} - TAXII Server returned success with no STIX packages'.format(
                self.name
            ))
            return []
        raise RuntimeError('{} - Error polling: {} - {}'.format(
            self.name, tm.status_type, tm.message
        ))
    # accumulator shared across all parts of this poll
    stix_objects = {
        'observables': {},
        'indicators': {},
        'ttps': {}
    }
    self._handle_content_blocks(
        tm.content_blocks,
        stix_objects
    )
    # fetch the remaining parts of a multi-part result set
    while tm.more:
        tm = self._poll_fulfillment_request(
            tc,
            result_id=tm.result_id,
            result_part_number=tm.result_part_number+1
        )
        LOG.debug('{} - Poll_Response {!r}'.format(
            self.name, tm.to_xml(pretty_print=True)
        ))
        if tm.message_type == MSG_STATUS_MESSAGE:
            if tm.status_type == ST_SUCCESS:
                break
            raise RuntimeError('{} - Error polling: {} - {}'.format(
                self.name, tm.status_type, tm.message
            ))
        self._handle_content_blocks(
            tm.content_blocks,
            stix_objects
        )
    LOG.debug('%s - stix_objects: %s', self.name, stix_objects)
    params = {
        'ttps': stix_objects['ttps'],
        'observables': stix_objects['observables']
    }
    if len(stix_objects['indicators']) == 0 and len(stix_objects['observables']) != 0:
        LOG.info('{} - TAXII Content contains observables but no indicators'.format(self.name))
        if self.create_fake_indicator:
            # wrap the loose observables into a synthetic indicator so
            # downstream processing still emits them
            stix_objects['indicators']['minemeld:00000000-0000-0000-0000-000000000000'] = {
                'observables': stix_objects['observables'].values(),
                'ttps': []
            }
    return [[iid, iv, params]
            for iid, iv in stix_objects['indicators'].iteritems()]
def _incremental_poll_collection(self, taxii_client, begin, end):
    """Poll [begin, end] in windows of at most max_poll_dt seconds.

    Yields decoded items as they are retrieved; after each window the
    checkpoint (last_taxii_run) is advanced to the newest STIX package
    timestamp observed, when one was seen.
    """
    window = timedelta(seconds=self.max_poll_dt)
    self.last_stix_package_ts = None

    current = begin
    while current < end:
        window_end = min(end, current + window)
        LOG.info('{} - polling {!r} to {!r}'.format(self.name, current, window_end))

        for item in self._poll_collection(taxii_client,
                                          begin=current,
                                          end=window_end):
            yield item

        # only advance the checkpoint when a package timestamp was seen
        if self.last_stix_package_ts is not None:
            self.last_taxii_run = self.last_stix_package_ts

        current = window_end
def _handle_content_blocks(self, content_blocks, objects):
    """Decode STIX XML 1.1.1 content blocks into the `objects` accumulator.

    `objects` is a dict with 'indicators', 'observables' and 'ttps'
    sub-dicts keyed by STIX id. Also tracks the newest package
    timestamp in self.last_stix_package_ts.
    """
    try:
        for cb in content_blocks:
            # only STIX XML 1.1.1 content bindings are supported
            if cb.content_binding.binding_id != \
               libtaxii.constants.CB_STIX_XML_111:
                LOG.error('%s - Unsupported content binding: %s',
                          self.name, cb.content_binding.binding_id)
                continue
            try:
                stixpackage = stix.core.stix_package.STIXPackage.from_xml(
                    lxml.etree.fromstring(cb.content)
                )
            except Exception:
                # a malformed block must not abort the whole poll
                LOG.exception(
                    '%s - Exception parsing content block',
                    self.name
                )
                continue
            if stixpackage.indicators:
                for i in stixpackage.indicators:
                    ci = {}
                    if i.timestamp is not None:
                        ci = {
                            'timestamp': dt_to_millisec(i.timestamp),
                        }
                    if i.description is not None and i.description.structuring_format is None:
                        # copy description only if there is no markup to avoid side-effects
                        ci['description'] = i.description.value
                    if i.confidence is not None:
                        confidence = str(i.confidence.value).lower()
                        if confidence in self.confidence_map:
                            ci['confidence'] = \
                                self.confidence_map[confidence]
                    # NOTE: `os` here shadows the os module inside this method
                    os = []
                    ttps = []
                    if i.observables:
                        for o in i.observables:
                            os.append(self._decode_observable(o))
                    # fall back to the single-observable attribute
                    if i.observable and len(os) == 0:
                        os.append(self._decode_observable(i.observable))
                    if i.indicated_ttps:
                        for t in i.indicated_ttps:
                            ttps.append(self._decode_ttp(t))
                    ci['observables'] = os
                    ci['ttps'] = ttps
                    objects['indicators'][i.id_] = ci
            if stixpackage.observables:
                for o in stixpackage.observables:
                    co = self._decode_observable(o)
                    objects['observables'][o.id_] = co
            if stixpackage.ttps:
                for t in stixpackage.ttps:
                    ct = self._decode_ttp(t)
                    objects['ttps'][t.id_] = ct
            # remember the newest package timestamp seen in this run
            timestamp = stixpackage.timestamp
            if isinstance(timestamp, datetime):
                timestamp = dt_to_millisec(timestamp)
            if self.last_stix_package_ts is None or timestamp > self.last_stix_package_ts:
                LOG.debug('{} - last STIX package timestamp set to {!r}'.format(self.name, timestamp))
                self.last_stix_package_ts = timestamp
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
        # it re-raises immediately, so only the logging is affected
        LOG.exception("%s - exception in _handle_content_blocks" %
                      self.name)
        raise
def _decode_observable(self, o):
    """Convert a cybox Observable into a plain dict, or None if unsupported.

    Handles three shapes: a pure idref reference, an observable
    composition (OR only, unless ignore_composition_operator is set),
    and a concrete object with properties.
    """
    LOG.debug('observable: %s', o.to_dict())

    # pure reference to another observable
    if o.idref:
        return {'idref': o.idref}

    odict = o.to_dict()

    composition = odict.get('observable_composition', None)
    if composition:
        operator = composition.get('operator', None)
        if operator != 'OR' and not self.ignore_composition_operator:
            LOG.error(
                '%s - Observable composition with %s not supported yet: %s',
                self.name, operator, odict
            )
            return None

        refs = []
        for nested in composition.get('observables', []):
            if 'idref' not in nested:
                LOG.error(
                    '%s - only Observable references are supported in Observable Composition: %s',
                    self.name, odict
                )
                return None
            refs.append(nested['idref'])
        return {'type': '_cyboxOR', 'observables': refs}

    obj = odict.get('object', None)
    if obj is None:
        LOG.error('%s - no object in observable', self.name)
        return None

    props = obj.get('properties', None)
    if props is None:
        LOG.error('%s - no properties in observable object', self.name)
        return None

    return self._decode_object_properties(props, odict=odict)
def _decode_object_properties(self, op, odict=None):
    """Translate cybox ObjectProperties (as a dict) into a flat indicator dict.

    op is the 'properties' dict of a cybox object; odict is the full
    observable dict (used by some object types for title/description).
    Returns a dict with at least 'type' and 'indicator' keys, or None
    when the object type is unsupported or malformed.

    Fixes vs previous revision:
    - the "no IP category" log call was missing its name argument;
    - the 'type' presence guard in AddressObjectType now runs before
      result['type'] is read (previously a latent KeyError).
    """
    result = {}
    ot = op.get('xsi:type', None)
    if ot is None:
        LOG.error('%s - no type in observable props', self.name)
        return None

    if ot == 'DomainNameObjectType':
        result['type'] = 'domain'
        ov = op.get('value', None)
        if ov is None:
            LOG.error('%s - no value in observable props', self.name)
            return None
        if not isinstance(ov, string_types):
            # value may be wrapped in a {'value': ...} dict
            ov = ov.get('value', None)
            if ov is None:
                LOG.error('%s - no value in observable value', self.name)
                return None

    elif ot == 'FileObjectType':
        ov = ''
        if 'file_name' in op.keys():
            file_name = op.get('file_name')
            if isinstance(file_name, dict):
                ov = op['file_name'].get('value', None)
                result['type'] = 'file.name'
            else:
                ov = op['file_name']
                result['type'] = 'file.name'
        hashes = op.get('hashes', [])
        if not isinstance(hashes, list) or len(hashes) == 0:
            LOG.error('{} - FileObjectType with unhandled structure: {!r}'.format(
                self.name, op
            ))
            return None
        # pick the best hash according to self.hash_priority
        indicator_type = None
        cprio = -1
        indicator_hashes = {}
        for h in hashes:
            hvalue = h.get('simple_hash_value', None)
            if hvalue is None:
                continue
            if not isinstance(hvalue, string_types):
                if not isinstance(hvalue, dict):
                    continue
                hvalue = hvalue.get('value', None)
                if hvalue is None:
                    continue
            htype = h.get('type', None)
            if htype is None:
                continue
            elif isinstance(htype, string_types):
                htype = htype.lower()
            elif isinstance(htype, dict):
                htype = htype.get('value', None)
                if htype is None or not isinstance(htype, string_types):
                    continue
                htype = htype.lower()
            if htype not in self.hash_priority:
                continue
            prio = self.hash_priority.index(htype)
            if prio > cprio:
                indicator_type = htype
                cprio = prio
            indicator_hashes[htype] = hvalue
        if indicator_type is None:
            LOG.error('{} - No valid hash found in FileObjectType: {!r}'.format(
                self.name, op
            ))
            return None
        if ov == '':
            # no file name: the highest-priority hash becomes the indicator
            ov = indicator_hashes[indicator_type]
            result['type'] = indicator_type
        # attach the remaining hashes as prefixed attributes
        for h, v in indicator_hashes.iteritems():
            if h == indicator_type:
                continue
            result['{}_{}'.format(self.prefix, h)] = v

    elif ot == 'SocketAddressObjectType':
        # unwrap and decode the nested ip_address object
        ip_address = op.get('ip_address', None)
        if ip_address is None:
            return None
        return self._decode_object_properties(ip_address)

    elif ot == 'AddressObjectType':
        ov = op.get('address_value', None)
        if ov is None:
            LOG.error('%s - no value in observable props', self.name)
            return None
        if not isinstance(ov, string_types):
            ov = ov.get('value', None)
            if ov is None:
                LOG.error('%s - no value in observable value', self.name)
                return None
        # set the IP Address type
        if not self.ip_version_auto_detect:
            addrcat = op.get('category', None)
            if addrcat == 'ipv6-addr':
                result['type'] = 'IPv6'
            elif addrcat == 'ipv4-addr':
                result['type'] = 'IPv4'
            elif addrcat == 'e-mail':
                result['type'] = 'email-addr'
            else:
                LOG.error('{} - unknown address category: {}'.format(self.name, addrcat))
                return None
        else:
            # some feeds do not set the IP Address type and it
            # defaults to ipv4-addr even if the IP is IPv6
            # this is to auto detect the type
            if type(ov) == list:
                address = ov[0]
            else:
                address = ov
            try:
                parsed = netaddr.IPNetwork(address)
            except (netaddr.AddrFormatError, ValueError):
                LOG.error('{} - Unknown IP version: {}'.format(self.name, address))
                return None
            if parsed.version == 4:
                result['type'] = 'IPv4'
            elif parsed.version == 6:
                result['type'] = 'IPv6'
        # guard must run before result['type'] is read below
        # (fixed latent KeyError + missing log argument)
        if 'type' not in result:
            LOG.error('%s - no IP category and unknown version', self.name)
            return None
        if result['type'] in ['IPv4', 'IPv6']:
            source = op.get('is_source', None)
            if source is True:
                result['direction'] = 'inbound'
            elif source is False:
                result['direction'] = 'outbound'

    elif ot == 'URIObjectType':
        result['type'] = 'URL'
        ov = op.get('value', None)
        if ov is None:
            LOG.error('%s - no value in observable props', self.name)
            return None
        if not isinstance(ov, string_types):
            ov = ov.get('value', None)
            if ov is None:
                LOG.error('%s - no value in observable value', self.name)
                return None

    elif ot == 'LinkObjectType':
        # only URL-typed links are supported
        if op.get('type', 'URL') != 'URL':
            LOG.error('{} - Unhandled LinkObjectType type: {!r}'.format(self.name, op))
            return None
        result['type'] = 'URL'
        ov = op.get('value', None)
        if ov is None:
            LOG.error('%s - no value in observable props', self.name)
            return None
        if not isinstance(ov, string_types):
            ov = ov.get('value', None)
            if ov is None:
                LOG.error('%s - no value in observable value', self.name)
                return None

    elif ot == 'EmailMessageObjectType':
        result['type'] = 'email-message'
        ov = ''
        LOG.debug('EmailMessageObjectType OP: {!r}'.format(op))
        body = op.get('raw_body', None)
        if body is not None:
            result['body'] = body
            LOG.debug('EmailMessage Body: {!r}'.format(body))
        header = op.get('header', None)
        if header is not None:
            result['header'] = header
            try:
                ov = header.get('from').get('address_value').get('value')
            except Exception:
                LOG.error('{} - no email address listed'.format(self.name))
        subject = op.get('subject', None)
        if subject is not None:
            result['subject'] = subject
            if ov == '':
                # fall back to the subject when no from-address was found
                ov = subject

    elif ot == 'ArtifactObjectType':
        # assumes odict is provided for artifact observables
        ov = ''
        result['type'] = 'artifact'
        LOG.debug('ArtifactObjectType OV: {!r}'.format(ov))
        title = odict.get('title', None)
        if title is not None:
            ov = title
            result['title'] = title
        description = odict.get('description', None)
        if description is not None:
            result['description'] = description
            if ov == '':
                ov = description
        artifact = op['raw_artifact']
        if artifact is not None:
            result['artifact'] = artifact

    elif ot == 'PDFFileObjectType':
        ov = ''
        result['type'] = 'pdf-file'
        if 'file_name' in op.keys():
            file_name = op.get('file_name')
            if type(file_name) == dict:
                if file_name.get('value', None) is not None:
                    ov = op['file_name'].get('value', None)
                else:
                    ov = op['file_name']
            else:
                ov = file_name
        LOG.debug('PDFObjectType OV: {!r}'.format(ov))
        if 'file_path' in op.keys():
            result['file_path'] = op['file_path'].get('value', None)
        if 'file_size' in op.keys():
            result['file_size'] = op['file_size'].get('value', None)
        if 'metadata' in op.keys():
            result['metadata'] = op['metadata']
        if 'file_format' in op.keys():
            result['file_format'] = op['file_format']
        hashes = op.get('hashes', None)
        if hashes is not None:
            for i in hashes:
                if 'type' in i.keys():
                    if isinstance(i['type'], string_types):
                        hash_type = i['type']
                    else:
                        hash_type = i['type'].get('value', None)
                    if 'simple_hash_value' in i.keys():
                        if isinstance(i['simple_hash_value'], string_types):
                            result[hash_type] = i['simple_hash_value']
                        else:
                            result[hash_type] = i['simple_hash_value'].get('value', None)

    elif ot == 'WhoisObjectType':
        ov = ''
        result['type'] = 'whois'
        LOG.debug('WhoisObjectType OV: {!r}'.format(ov))
        remarks = op.get('remarks', None)
        if remarks is not None:
            result['remarks'] = op['remarks']
            # first remarks line becomes the indicator value
            ov = remarks.split('\n')[0]

    elif ot == 'HTTPSessionObjectType':
        ov = ''
        result['type'] = 'http-session'
        if 'http_request_response' in op.keys():
            tmp = op['http_request_response']
            if len(tmp) == 1:
                item = tmp[0]
                LOG.debug('HTTPSessionObjectType item: {!r}'.format(item))
                http_client_request = item.get('http_client_request', None)
                if http_client_request is not None:
                    http_request_header = http_client_request.get('http_request_header', None)
                    if http_request_header is not None:
                        raw_header = http_request_header.get('raw_header', None)
                        if raw_header is not None:
                            result['header'] = raw_header
                            ov = raw_header.split('\n')[0]
            else:
                LOG.error('{} - multiple HTTPSessionObjectTypes not supported'.format(self.name))

    elif ot == 'PortObjectType':
        result['type'] = 'port'
        LOG.debug('PortObjectType OP: {!r}'.format(op))
        protocol = op.get('layer4_protocol', None)
        port = op.get('port_value', None)
        ov = '{}:{}'.format(protocol, port)

    elif ot == 'WindowsExecutableFileObjectType':
        ov = ''
        result['type'] = 'windows-executable'
        LOG.debug('WindowsExecutableFileObjectType OP: {!r}'.format(op))
        if 'file_name' in op.keys():
            if isinstance(op['file_name'], string_types):
                ov = op['file_name']
            else:
                ov = op['file_name'].get('value', None)
        if 'size_in_bytes' in op.keys():
            result['file_size'] = op['size_in_bytes']
        if 'file_format' in op.keys():
            result['file_format'] = op['file_format']
        hashes = op.get('hashes', None)
        if hashes is not None:
            for i in hashes:
                if 'type' in i.keys():
                    if isinstance(i['type'], string_types):
                        hash_type = i['type']
                    else:
                        hash_type = i['type'].get('value', None)
                    if 'simple_hash_value' in i.keys():
                        if isinstance(i['simple_hash_value'], string_types):
                            result[hash_type] = i['simple_hash_value']
                        else:
                            result[hash_type] = i['simple_hash_value'].get('value', None)

    elif ot == 'CISCP:IndicatorTypeVocab-0.0':
        result['type'] = op['xsi:type']
        LOG.debug('CISCP:IndicatorTypeVocab-0.0 OP: {!r}'.format(op))
        ov = None
        LOG.error('{} - CISCP:IndicatorTypeVocab-0.0 Type not currently supported'.format(self.name))
        return None

    elif ot == 'WindowsRegistryKeyObjectType':
        result['type'] = op['xsi:type']
        LOG.debug('WindowsRegistryKeyObjectType OP: {!r}'.format(op))
        ov = None
        LOG.error('{} - WindowsRegistryKeyObjectType Type not currently supported'.format(self.name))
        return None

    elif ot == 'stixVocabs:IndicatorTypeVocab-1.0':
        result['type'] = op['xsi:type']
        LOG.debug('stixVocabs:IndicatorTypeVocab-1.0 OP: {!r}'.format(op))
        ov = None
        LOG.error('{} - stixVocabs:IndicatorTypeVocab-1.0 Type not currently supported'.format(self.name))
        return None

    elif ot == 'NetworkConnectionObjectType':
        result['type'] = 'NetworkConnection'
        LOG.debug('NetworkConnectionObjectType OP: {!r}'.format(op))
        ov = None
        LOG.error('{} - NetworkConnectionObjectType Type not currently supported'.format(self.name))
        return None

    else:
        LOG.error('{} - unknown type {} {!r}'.format(self.name, ot, op))
        return None

    result['indicator'] = ov
    LOG.debug('{!r}'.format(result))
    return result
def _decode_ttp(self, t):
tdict = t.to_dict()
if 'ttp' in tdict:
tdict = tdict['ttp']
if 'idref' in tdict:
return {'idref': tdict['idref']}
if 'description' in tdict:
return {'description': tdict['description']}
if 'title' in tdict:
return {'description': tdict['title']}
return {'description': ''}
def _process_item(self, item):
result = []
value = {}
iid, iv, stix_objects = item
value['%s_indicator' % self.prefix] = iid
if 'description' in iv:
value['{}_indicator_description'.format(self.prefix)] = iv['description']
if 'confidence' in iv:
value['confidence'] = iv['confidence']
if len(iv['ttps']) != 0:
ttp = iv['ttps'][0]
if 'idref' in ttp:
ttp = stix_objects['ttps'].get(ttp['idref'])
if ttp is not None and 'description' in ttp:
value['%s_ttp' % self.prefix] = ttp['description']
composed_observables = []
for o in iv['observables']:
if o is None:
continue
v = copy.copy(value)
ob = o
if 'idref' in o:
ob = stix_objects['observables'].get(o['idref'], None)
v['%s_observable' % self.prefix] = o['idref']
if ob is None:
continue
if ob['type'] == '_cyboxOR':
for o in ob['observables']:
composed_observables.append(o)
continue
v['type'] = ob['type']
if type(ob['indicator']) == list:
indicator = ob['indicator']
else:
indicator = [ob['indicator']]
for i in indicator:
result.append([i, v])
for o in composed_observables:
v = copy.copy(value)
ob = stix_objects['observables'].get(o, None)
v['%s_observable' % self.prefix] = o
if ob is None:
continue
if ob['type'] == '_cyboxOR':
LOG.error(
'%s - Nested Observable Composition not supported',
self.name
)
continue
v['type'] = ob['type']
if type(ob['indicator']) == list:
indicator = ob['indicator']
else:
indicator = [ob['indicator']]
for i in indicator:
result.append([i, v])
return result
def _build_iterator(self, now):
    """Validate credentials/config, discover services and return the poll iterator.

    `now` is the current time in milliseconds since the epoch.
    Raises RuntimeError when a required credential or option is missing.
    """
    if self.client_credentials_required:
        if self.username is None or self.password is None:
            raise RuntimeError(
                '%s - username or password required and not set, poll not performed' % self.name
            )
    if self.cert_file is not None and not os.path.isfile(self.cert_file):
        raise RuntimeError(
            '%s - client cert required and not set, poll not performed' % self.name
        )
    if self.key_file is not None and not os.path.isfile(self.key_file):
        raise RuntimeError(
            '%s - client cert key required and not set, poll not performed' % self.name
        )
    if self.subscription_id_required and self.subscription_id is None:
        raise RuntimeError(
            '%s - subscription id required and not set, poll not performed' % self.name
        )
    tc = self._build_taxii_client()
    self._discover_services(tc)
    self._check_collections(tc)
    # never look back more than initial_interval seconds
    last_run = self.last_taxii_run
    max_back = now-(self.initial_interval*1000)
    if last_run is None or last_run < max_back:
        last_run = max_back
    begin = datetime.utcfromtimestamp(last_run/1000)
    begin = begin.replace(tzinfo=pytz.UTC)
    end = datetime.utcfromtimestamp(now/1000)
    end = end.replace(tzinfo=pytz.UTC)
    if self.lower_timestamp_precision:
        # some TAXII servers only honor minute precision in timestamp labels
        end = end.replace(second=0, microsecond=0)
        begin = begin.replace(second=0, microsecond=0)
    return self._incremental_poll_collection(
        taxii_client=tc,
        begin=begin,
        end=end
    )
def _flush(self):
    # forget the poll checkpoint so the next poll restarts from the
    # configured initial interval, then delegate to the base class
    self.last_taxii_run = None
    super(TaxiiClient, self)._flush()
def hup(self, source=None):
    """Handle a hup signal: reload the side config, then delegate to the base class."""
    LOG.info('%s - hup received, reload side config', self.name)
    self._load_side_config()
    super(TaxiiClient, self).hup(source)
@staticmethod
def gc(name, config=None):
    """Remove this node's on-disk state (side config, client cert and key).

    Paths are taken from config when provided, otherwise derived from the
    node name under MM_CONFIG_DIR. Deletion is best-effort.
    """
    basepoller.BasePollerFT.gc(name, config=config)

    def _remove_quietly(path):
        # best-effort delete; a missing file is not an error.
        # OSError (not a bare except) so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        try:
            os.remove(path)
        except OSError:
            pass

    side_config_path = None
    if config is not None:
        side_config_path = config.get('side_config', None)
    if side_config_path is None:
        side_config_path = os.path.join(
            os.environ['MM_CONFIG_DIR'],
            '{}_side_config.yml'.format(name)
        )
    _remove_quietly(side_config_path)

    client_cert_required = False
    if config is not None:
        client_cert_required = config.get('client_cert_required', False)

    cert_path = None
    if config is not None:
        cert_path = config.get('cert_file', None)
    if cert_path is None and client_cert_required:
        cert_path = os.path.join(
            os.environ['MM_CONFIG_DIR'],
            '{}.crt'.format(name)
        )
    if cert_path is not None:
        _remove_quietly(cert_path)

    key_path = None
    if config is not None:
        key_path = config.get('key_file', None)
    if key_path is None and client_cert_required:
        key_path = os.path.join(
            os.environ['MM_CONFIG_DIR'],
            '{}.pem'.format(name)
        )
    if key_path is not None:
        _remove_quietly(key_path)
def _stix_ip_observable(namespace, indicator, value):
    """Map an IPv4/IPv6 indicator (address or a-b range) to cybox Address observables."""
    if value['type'] == 'IPv6':
        category = cybox.objects.address_object.Address.CAT_IPV6
    else:
        category = cybox.objects.address_object.Address.CAT_IPV4

    if '-' not in indicator:
        indicators = [indicator]
    else:
        # looks like an IP Range, let's try to make it a CIDR
        a1, a2 = indicator.split('-', 1)
        if a1 == a2:
            # same IP
            indicators = [a1]
        else:
            # use netaddr builtin algo to summarize range into CIDR
            indicators = map(str, netaddr.IPRange(a1, a2).cidrs())

    observables = []
    for address in indicators:
        observable_id = '{}:observable-{}'.format(namespace, uuid.uuid4())
        address_object = cybox.objects.address_object.Address(
            address_value=address,
            category=category
        )
        observables.append(cybox.core.Observable(
            title='{}: {}'.format(value['type'], address),
            id_=observable_id,
            item=address_object
        ))
    return observables
def _stix_email_addr_observable(namespace, indicator, value):
    """Build a single cybox e-mail address observable for the indicator."""
    observable_id = '{}:observable-{}'.format(namespace, uuid.uuid4())
    address_object = cybox.objects.address_object.Address(
        address_value=indicator,
        category=cybox.objects.address_object.Address.CAT_EMAIL
    )
    return [cybox.core.Observable(
        title='{}: {}'.format(value['type'], indicator),
        id_=observable_id,
        item=address_object
    )]
def _stix_domain_observable(namespace, indicator, value):
    """Build a single cybox FQDN observable for the indicator."""
    domain_object = cybox.objects.domain_name_object.DomainName()
    domain_object.value = indicator
    domain_object.type_ = 'FQDN'
    return [cybox.core.Observable(
        title='FQDN: ' + indicator,
        id_='{}:observable-{}'.format(namespace, uuid.uuid4()),
        item=domain_object
    )]
def _stix_url_observable(namespace, indicator, value):
    """Build a single cybox URI observable (URL type) for the indicator."""
    uri_object = cybox.objects.uri_object.URI(
        value=indicator,
        type_=cybox.objects.uri_object.URI.TYPE_URL
    )
    return [cybox.core.Observable(
        title='URL: ' + indicator,
        id_='{}:observable-{}'.format(namespace, uuid.uuid4()),
        item=uri_object
    )]
def _stix_hash_observable(namespace, indicator, value):
    """Build a single cybox File observable carrying the hash indicator."""
    file_object = cybox.objects.file_object.File()
    file_object.add_hash(indicator)
    return [cybox.core.Observable(
        title='{}: {}'.format(value['type'], indicator),
        id_='{}:observable-{}'.format(namespace, uuid.uuid4()),
        item=file_object
    )]
# Dispatch table used by DataFeed._add_indicator: maps an indicator type
# to the STIX indicator type vocabulary term and the function that builds
# the corresponding cybox observable(s).
_TYPE_MAPPING = {
    'IPv4': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_IP_WATCHLIST,
        'mapper': _stix_ip_observable
    },
    'IPv6': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_IP_WATCHLIST,
        'mapper': _stix_ip_observable
    },
    'URL': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_URL_WATCHLIST,
        'mapper': _stix_url_observable
    },
    'domain': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_DOMAIN_WATCHLIST,
        'mapper': _stix_domain_observable
    },
    'sha256': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_FILE_HASH_WATCHLIST,
        'mapper': _stix_hash_observable
    },
    'sha1': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_FILE_HASH_WATCHLIST,
        'mapper': _stix_hash_observable
    },
    'md5': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_FILE_HASH_WATCHLIST,
        'mapper': _stix_hash_observable
    },
    'email-addr': {
        'indicator_type': stix.common.vocabs.IndicatorType.TERM_MALICIOUS_EMAIL,
        'mapper': _stix_email_addr_observable
    }
}
class DataFeed(actorbase.ActorBaseFT):
    """Output node that publishes indicators as STIX packages in Redis.

    Packages are stored in a sorted set (score = insertion time, ms) plus
    a hash of lz4-compressed JSON bodies, and aged out after
    age_out_interval.

    Fixes vs previous revision: several LOG calls never interpolated
    their arguments; the duplicated list-option validation and the
    duplicated "first present attribute" scans are factored into helpers;
    the loop-invariant URL encoding is hoisted out of the indicator loop.
    """

    def __init__(self, name, chassis, config):
        # redis keys derived from the node name
        self.redis_skey = name
        self.redis_skey_value = name+'.value'
        self.redis_skey_chkp = name+'.chkp'

        self.SR = None
        self.ageout_glet = None

        super(DataFeed, self).__init__(name, chassis, config)

    def configure(self):
        super(DataFeed, self).configure()

        self.redis_url = self.config.get('redis_url',
            os.environ.get('REDIS_URL', 'unix:///var/run/redis/redis.sock')
        )
        self.namespace = self.config.get('namespace', 'minemeld')
        self.namespaceuri = self.config.get(
            'namespaceuri',
            'https://go.paloaltonetworks.com/minemeld'
        )

        self.age_out_interval = self.config.get('age_out_interval', '24h')
        self.age_out_interval = interval_in_sec(self.age_out_interval)
        if self.age_out_interval < 60:
            # fixed: the name argument was missing from this log call
            LOG.info('%s - age out interval too small, forced to 60 seconds',
                     self.name)
            self.age_out_interval = 60

        self.max_entries = self.config.get('max_entries', 1000 * 1000)

        # attributes copied into the STIX package header, all list-valued
        self.attributes_package_title = \
            self._list_option('attributes_package_title')
        self.attributes_package_description = \
            self._list_option('attributes_package_description')
        self.attributes_package_sdescription = \
            self._list_option('attributes_package_short_description')
        self.attributes_package_information_source = \
            self._list_option('attributes_package_information_source')

    def _list_option(self, option):
        # config options used in configure() must be lists; anything else
        # is ignored (fixed: these log calls previously never interpolated
        # their arguments)
        value = self.config.get(option, [])
        if not isinstance(value, list):
            LOG.error('{} - {} should be a list - ignored'.format(
                self.name, option
            ))
            value = []
        return value

    def connect(self, inputs, output):
        # a data feed is a sink: never push downstream
        output = False
        super(DataFeed, self).connect(inputs, output)

    def read_checkpoint(self):
        self._connect_redis()
        self.last_checkpoint = self.SR.get(self.redis_skey_chkp)

    def create_checkpoint(self, value):
        self._connect_redis()
        self.SR.set(self.redis_skey_chkp, value)

    def remove_checkpoint(self):
        self._connect_redis()
        self.SR.delete(self.redis_skey_chkp)

    def _connect_redis(self):
        # lazily create a single Redis client
        if self.SR is not None:
            return
        self.SR = redis.StrictRedis.from_url(
            self.redis_url
        )

    def _read_oldest_indicator(self):
        """Return (score, package id) of the oldest entry, or (None, None)."""
        olist = self.SR.zrange(
            self.redis_skey, 0, 0,
            withscores=True
        )
        LOG.debug('%s - oldest: %s', self.name, olist)
        if len(olist) == 0:
            return None, None
        return int(olist[0][1]), olist[0][0]

    def initialize(self):
        self._connect_redis()

    def rebuild(self):
        self._connect_redis()
        self.SR.delete(self.redis_skey)
        self.SR.delete(self.redis_skey_value)

    def reset(self):
        self._connect_redis()
        self.SR.delete(self.redis_skey)
        self.SR.delete(self.redis_skey_value)

    @staticmethod
    def _first_attribute(value, attributes):
        # return the first configured attribute present in value (as a
        # string), or None when no configured attribute matches
        for attribute in attributes:
            if attribute in value:
                return '{}'.format(value[attribute])
        return None

    def _add_indicator(self, score, indicator, value):
        """Wrap `indicator` into a STIX package and store it in Redis."""
        if self.length() >= self.max_entries:
            LOG.info('dropped overflow')
            self.statistics['drop.overflow'] += 1
            return

        type_ = value['type']
        type_mapper = _TYPE_MAPPING.get(type_, None)
        if type_mapper is None:
            self.statistics['drop.unknown_type'] += 1
            LOG.error('%s - Unsupported indicator type: %s', self.name, type_)
            return

        set_id_namespace(self.namespaceuri, self.namespace)

        title = self._first_attribute(value, self.attributes_package_title)
        description = self._first_attribute(
            value, self.attributes_package_description)
        sdescription = self._first_attribute(
            value, self.attributes_package_sdescription)
        information_source = self._first_attribute(
            value, self.attributes_package_information_source)

        if information_source is not None:
            identity = stix.common.identity.Identity(name=information_source)
            information_source = stix.common.information_source.InformationSource(identity=identity)

        # map share_level to a TLP marking on the whole package
        handling = None
        share_level = value.get('share_level', None)
        if share_level in ['white', 'green', 'amber', 'red']:
            marking_specification = stix.data_marking.MarkingSpecification()
            marking_specification.controlled_structure = "//node() | //@*"
            tlp = stix.extensions.marking.tlp.TLPMarkingStructure()
            tlp.color = share_level.upper()
            marking_specification.marking_structures.append(tlp)
            handling = stix.data_marking.Marking()
            handling.add_marking(marking_specification)

        header = None
        if (title is not None or
            description is not None or
            handling is not None or
            sdescription is not None or
            information_source is not None):
            header = stix.core.STIXHeader(
                title=title,
                description=description,
                handling=handling,
                short_description=sdescription,
                information_source=information_source
            )

        spid = '{}:indicator-{}'.format(
            self.namespace,
            uuid.uuid4()
        )
        sp = stix.core.STIXPackage(id_=spid, stix_header=header)

        # loop-invariant: encode URLs once, not per observable
        if value['type'] == 'URL':
            eindicator = werkzeug.urls.iri_to_uri(indicator, safe_conversion=True)
        else:
            eindicator = indicator

        observables = type_mapper['mapper'](self.namespace, indicator, value)
        for o in observables:
            id_ = '{}:indicator-{}'.format(
                self.namespace,
                uuid.uuid4()
            )
            sindicator = stix.indicator.indicator.Indicator(
                id_=id_,
                title='{}: {}'.format(
                    value['type'],
                    eindicator
                ),
                description='{} indicator from {}'.format(
                    value['type'],
                    ', '.join(value['sources'])
                ),
                timestamp=datetime.utcnow().replace(tzinfo=pytz.utc)
            )

            confidence = value.get('confidence', None)
            if confidence is None:
                LOG.error('%s - indicator without confidence', self.name)
                sindicator.confidence = "Unknown"  # We shouldn't be here
            elif confidence < 50:
                sindicator.confidence = "Low"
            elif confidence < 75:
                sindicator.confidence = "Medium"
            else:
                sindicator.confidence = "High"

            sindicator.add_indicator_type(type_mapper['indicator_type'])
            sindicator.add_observable(o)
            sp.add_indicator(sindicator)

        # 'lz4' prefix marks the compression scheme of the stored blob
        spackage = 'lz4'+lz4.frame.compress(
            sp.to_json(),
            compression_level=lz4.frame.COMPRESSIONLEVEL_MINHC
        )
        with self.SR.pipeline() as p:
            p.multi()
            p.zadd(self.redis_skey, score, spid)
            p.hset(self.redis_skey_value, spid, spackage)
            result = p.execute()[0]
        self.statistics['added'] += result

    def _delete_indicator(self, indicator_id):
        # remove both the ordering entry and the package body atomically
        with self.SR.pipeline() as p:
            p.multi()
            p.zrem(self.redis_skey, indicator_id)
            p.hdel(self.redis_skey_value, indicator_id)
            result = p.execute()[0]
        self.statistics['removed'] += result

    def _age_out_run(self):
        """Greenlet loop: drop packages older than age_out_interval."""
        while True:
            now = utc_millisec()
            low_watermark = now - self.age_out_interval*1000
            otimestamp, oindicator = self._read_oldest_indicator()
            LOG.debug(
                '{} - low watermark: {} otimestamp: {}'.format(
                    self.name,
                    low_watermark,
                    otimestamp
                )
            )
            while otimestamp is not None and otimestamp < low_watermark:
                self._delete_indicator(oindicator)
                otimestamp, oindicator = self._read_oldest_indicator()

            # sleep until the next entry would expire (at least 30s)
            wait_time = 30
            if otimestamp is not None:
                next_expiration = (
                    (otimestamp + self.age_out_interval*1000) - now
                )
                wait_time = max(wait_time, next_expiration/1000 + 1)
            LOG.debug('%s - sleeping for %d secs', self.name, wait_time)
            gevent.sleep(wait_time)

    @base._counting('update.processed')
    def filtered_update(self, source=None, indicator=None, value=None):
        now = utc_millisec()
        self._add_indicator(now, indicator, value)

    @base._counting('withdraw.ignored')
    def filtered_withdraw(self, source=None, indicator=None, value=None):
        # this is a TAXII data feed, old indicators never expire
        pass

    def length(self, source=None):
        return self.SR.zcard(self.redis_skey)

    def start(self):
        super(DataFeed, self).start()
        self.ageout_glet = gevent.spawn(self._age_out_run)

    def stop(self):
        super(DataFeed, self).stop()
        self.ageout_glet.kill()
        LOG.info(
            "%s - # indicators: %d",
            self.name,
            self.SR.zcard(self.redis_skey)
        )

    @staticmethod
    def gc(name, config=None):
        """Remove this node's Redis keys (feed, bodies and checkpoint)."""
        actorbase.ActorBaseFT.gc(name, config=config)

        if config is None:
            config = {}

        redis_skey = name
        redis_skey_value = '{}.value'.format(name)
        redis_skey_chkp = '{}.chkp'.format(name)
        redis_url = config.get('redis_url',
            os.environ.get('REDIS_URL', 'unix:///var/run/redis/redis.sock')
        )

        cp = None
        try:
            cp = redis.ConnectionPool.from_url(
                redis_url
            )
            SR = redis.StrictRedis(connection_pool=cp)
            SR.delete(redis_skey)
            SR.delete(redis_skey_value)
            SR.delete(redis_skey_chkp)
        except Exception as e:
            raise RuntimeError(str(e))
        finally:
            if cp is not None:
                cp.disconnect()
|
PaloAltoNetworks/minemeld-core
|
minemeld/ft/taxii.py
|
Python
|
apache-2.0
| 58,146
|
[
"Amber"
] |
eb4b89a3e5160816fa7caf8135e99e288a9bdba789e2ed6fb92e79e4925e2c2d
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Represent information for graphical display.
Classes in this module are designed to hold information in a way that
makes it easy to draw graphical figures.
"""
# reportlab
from reportlab.lib import colors
# local stuff
from Bio.Graphics.BasicChromosome import ChromosomeSegment
from Bio.Graphics.BasicChromosome import TelomereSegment
# --- constants
# This is a default color scheme based on the light spectrum.
# Based on my vague recollections from biology, this is our friend ROY G. BIV
# Maps inclusive (low, high) count ranges to reportlab colors; counts of
# 7 through 20 all map to red.
RAINBOW_COLORS = {(1, 1): colors.violet,
                  (2, 2): colors.indigo,
                  (3, 3): colors.blue,
                  (4, 4): colors.green,
                  (5, 5): colors.yellow,
                  (6, 6): colors.orange,
                  (7, 20): colors.red}
class ChromosomeCounts(object):
    """Represent a chromosome with count information.

    This is used to display information about counts along a chromosome.
    The segments are expected to have different count information, which
    will be displayed using a color scheme.

    I envision using this class when you think that certain regions of
    the chromosome will be especially abundant in the counts, and you
    want to pick those out.
    """
    def __init__(self, segment_names, color_scheme=RAINBOW_COLORS):
        """Initialize a representation of chromosome counts.

        Arguments:

        o segment_names - An ordered list of all segment names along
        the chromosome. The count and other information will be added to
        these.

        o color_scheme - A coloring scheme to use in the counts. This should
        be a dictionary mapping count ranges to colors (specified in
        reportlab.lib.colors).
        """
        self._names = segment_names
        # Per-segment state, all keyed by segment name.
        self._count_info = {}  # name -> accumulated count
        self._label_info = {}  # name -> label text, or None for no label
        self._scale_info = {}  # name -> relative drawing scale
        for name in self._names:
            self._count_info[name] = 0
            self._label_info[name] = None
            self._scale_info[name] = 1
        self._color_scheme = color_scheme

    def _check_segment(self, segment_name):
        """Raise a KeyError if the given segment name is unknown (PRIVATE)."""
        if segment_name not in self._count_info:
            raise KeyError("Segment name %s not found." % segment_name)

    def add_count(self, segment_name, count=1):
        """Add counts to the given segment name.

        Arguments:

        o segment_name - The name of the segment we should add counts to.
        If the name is not present, a KeyError will be raised.

        o count - The counts to add to the current segment. This defaults to
        a single count.
        """
        self._check_segment(segment_name)
        self._count_info[segment_name] += count

    def scale_segment_value(self, segment_name, scale_value=None):
        """Divide the counts for a segment by some kind of scale value.

        This is useful if segments aren't represented by raw counts, but
        are instead counts divided by some number.

        Raises a KeyError if the specified segment name is not found.
        """
        self._check_segment(segment_name)
        self._count_info[segment_name] = \
            float(self._count_info[segment_name]) / float(scale_value)

    def add_label(self, segment_name, label):
        """Add a label to a specific segment.

        Raises a KeyError if the specified segment name is not found.
        """
        self._check_segment(segment_name)
        self._label_info[segment_name] = label

    def set_scale(self, segment_name, scale):
        """Set the scale for a specific chromosome segment.

        By default all segments have the same scale -- this allows scaling
        by the size of the segment.

        Raises a KeyError if the specified segment name is not found.
        """
        self._check_segment(segment_name)
        self._scale_info[segment_name] = scale

    def get_segment_info(self):
        """Retrieve the color and label info about the segments.

        Returns a list consisting of two tuples specifying the counts and
        label name for each segment. The list is ordered according to the
        original listing of names. Labels are set as None if no label
        was specified.
        """
        return [(self._count_info[name], self._label_info[name])
                for name in self._names]

    def fill_chromosome(self, chromosome):
        """Add the collected segment information to a chromosome for drawing.

        Arguments:

        o chromosome - A Chromosome graphics object that we can add
        chromosome segments to.

        This creates ChromosomeSegment (and TelomereSegment) objects to
        fill in the chromosome. The information is derived from the
        label and count information, with counts transformed to the
        specified color map.

        Returns the chromosome with all of the segments added.
        """
        for seg_num in range(len(self._names)):
            is_end_segment = 0
            # make the top and bottom telomeres
            if seg_num == 0:
                cur_segment = TelomereSegment()
                is_end_segment = 1
            elif seg_num == len(self._names) - 1:
                # the bottom telomere is drawn inverted
                cur_segment = TelomereSegment(1)
                is_end_segment = 1
            # otherwise, they are just regular segments
            else:
                cur_segment = ChromosomeSegment()

            seg_name = self._names[seg_num]
            if self._count_info[seg_name] > 0:
                cur_segment.fill_color = \
                    self._color_from_count(self._count_info[seg_name])

            if self._label_info[seg_name] is not None:
                cur_segment.label = self._label_info[seg_name]

            # give end segments extra size so they look right
            if is_end_segment:
                cur_segment.scale = 3
            else:
                cur_segment.scale = self._scale_info[seg_name]

            chromosome.add(cur_segment)

        return chromosome

    def _color_from_count(self, count):
        """Translate the given count into a color using the color scheme (PRIVATE).

        Raises a ValueError if the count falls outside every range of the
        color scheme.
        """
        for count_start, count_end in self._color_scheme:
            if count_start <= count <= count_end:
                return self._color_scheme[(count_start, count_end)]

        # if we got here we didn't find a color for the count
        raise ValueError("Count value %s was not found in the color scheme."
                         % count)
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Graphics/DisplayRepresentation.py
|
Python
|
gpl-2.0
| 6,817
|
[
"Biopython"
] |
dda91a9832426d998e365254b1903b715b3f185fc97d4464e821e1391d91042b
|
import numpy as _np
import ESMF as _ESMF
from brushcutter import lib_ioncdf as _ncdf
from brushcutter import lib_common as _lc
from brushcutter import fill_msg_grid as _fill
from brushcutter import mod_drown_sosie as _mod_drown_sosie
import matplotlib.pylab as _plt
import time as ptime
class obc_vectvariable():
    ''' A class describing an open boundary condition vector variable
    on an obc_segment '''

    def __init__(self, segment, variable_name_u, variable_name_v, use_locstream=False, **kwargs):
        ''' constructor of obc_vectvariable : import from segment and adds attributes
        specific to this variable

        *** args :

        * segment : existing obc_segment object

        * variable_name_u : name of the zonal component in output file

        * variable_name_v : name of the meridional component in output file

        *** kwargs (mandatory) :

        * geometry : shape of the output field (line, surface)

        * obctype : radiation, flather,...
        '''
        # marks this variable as a (u, v) vector pair
        self.vector = True
        # read args
        self.variable_name_u = variable_name_u
        self.variable_name_v = variable_name_v
        # 'items' records attribute names for later bookkeeping
        self.items = []
        self.items.append('variable_name')
        # iterate over all attributes of segment and copy them
        self.__dict__.update(segment.__dict__)
        # iterate over all kwargs and store them as attributes for the object
        if kwargs is not None:
            self.__dict__.update(kwargs)
            for key, value in kwargs.items():
                self.items.append(key)
        # boundary geometry : netcdf dimension names depend on line/surface
        if self.geometry == 'line':
            self.dimensions_name_u = ('time', 'ny_' + self.segment_name, 'nx_' + self.segment_name,)
            self.dimensions_name_v = ('time', 'ny_' + self.segment_name, 'nx_' + self.segment_name,)
        elif self.geometry == 'surface':
            self.dimensions_name_u = ('time', 'nz_' + self.segment_name + '_' + self.variable_name_u,
                                      'ny_' + self.segment_name, 'nx_' + self.segment_name,)
            self.dimensions_name_v = ('time', 'nz_' + self.segment_name + '_' + self.variable_name_v,
                                      'ny_' + self.segment_name, 'nx_' + self.segment_name,)
        # default parameters for land extrapolation
        # can be modified by changing the attribute of object
        self.drown_methods = ['sosie', 'ncl']
        self.xmsg = -99           # missing-value sentinel used by the drown routines
        self.guess = 1            # guess = 1 zonal mean
        self.gtype = 1            # cyclic or not
        self.nscan = 1500         # usually much less than this
        self.epsx = 1.e-4         # variable dependent / not with reduced var
        self.relc = 0.6           # relaxation coefficient
        # Create a field on the centers of the grid
        self.use_locstream = use_locstream
        if use_locstream:
            self.field_target = _ESMF.Field(self.locstream_target)
        else:
            self.field_target = _ESMF.Field(self.grid_target, staggerloc=_ESMF.StaggerLoc.CENTER)
        return None

    def allocate(self):
        ''' Allocate the output array sized by the segment geometry
        (nz,ny,nx for surface, ny,nx for line). '''
        if self.geometry == 'surface':
            data = _np.empty((self.nz, self.ny, self.nx))
        elif self.geometry == 'line':
            data = _np.empty((self.ny, self.nx))
        return data

    def set_constant_value(self, value_u, value_v, depth_vector=None):
        ''' Set constant value to both u and v output fields.
        If depth_vector is given, the vertical grid is (re)built from it. '''
        if depth_vector is not None:
            self.depth_dz_from_vector(depth_vector)
        self.data_u_out = self.allocate()
        self.data_v_out = self.allocate()
        self.data_u_out[:] = value_u
        self.data_v_out[:] = value_v
        return None

    def set_vertical_profile(self, top_value, bottom_value, shape='linear', depth_vector=None):
        ''' create a vertical profile (linear between top_value and bottom_value).

        NOTE(review): this writes self.data, not self.data_u_out/self.data_v_out,
        unlike set_constant_value — confirm whether that is intended for the
        vector variant of this class.
        '''
        if depth_vector is not None:
            self.depth_dz_from_vector(depth_vector)
        self.data = self.allocate()
        if shape == 'linear':
            slope = (top_value - bottom_value) / (depth_vector[0] - depth_vector[-1])
            for kz in _np.arange(self.nz):
                self.data[kz, :, :] = bottom_value + slope * (depth_vector[kz] - depth_vector[-1])
        return None

    def set_horizontal_shear(self, value_1, value_n, shape='linear', direction='x', depth_vector=None):
        ''' Fill the field with a linear shear from value_1 to value_n along
        direction 'x' or 'y', replicated vertically when depth_vector is given.

        NOTE(review): also writes self.data rather than data_u_out/data_v_out —
        verify against callers.
        '''
        if depth_vector is not None:
            self.depth_dz_from_vector(depth_vector)
        self.data = self.allocate()
        if shape == 'linear':
            if direction == 'x':
                dx = (value_n - value_1) / self.nx
                if depth_vector is not None:
                    for kz in _np.arange(self.nz):
                        for ky in _np.arange(self.ny):
                            self.data[kz, ky, :] = _np.arange(value_1, value_n, dx)
                else:
                    for ky in _np.arange(self.ny):
                        self.data[ky, :] = _np.arange(value_1, value_n, dx)
            if direction == 'y':
                dy = (value_n - value_1) / self.ny
                if depth_vector is not None:
                    for kz in _np.arange(self.nz):
                        for kx in _np.arange(self.nx):
                            self.data[kz, :, kx] = _np.arange(value_1, value_n, dy)
                else:
                    for kx in _np.arange(self.nx):
                        self.data[:, kx] = _np.arange(value_1, value_n, dy)
        return None

    # NOTE(review): coord_names_u/coord_names_v use mutable list defaults;
    # harmless here since they are only read, but worth confirming.
    def interpolate_from(self, filename, variable_u, variable_v, frame=None, drown='sosie', maskfile=None, maskvar=None,
                         missing_value=None, from_global=True, depthname='z',
                         timename='time', coord_names_u=['lon', 'lat'], coord_names_v=['lon', 'lat'],
                         x_coords_u=None, y_coords_u=None, x_coords_v=None, y_coords_v=None, method='bilinear',
                         interpolator_u=None, interpolator_v=None, autocrop=True):
        ''' interpolate_from performs a serie of operation :
        * read input data
        * perform extrapolation over land if desired
        * read or create mask if extrapolation
        * call ESMF for regridding

        Returns the (u, v) ESMF Regrid objects so they can be reused.

        Optional arguments (=default) :

        * frame=None : time record from input data (e.g. 1,2,..,12) when input file contains more than one record.
        * drown='sosie' : perform extrapolation of ocean values onto land ('sosie' or 'ncl'; anything else disables it)
        * maskfile=None : to read mask from a file (else uses missing value of variable)
        * maskvar=None : if maskfile is defined, we need to provide name of mask array in maskfile
        * missing_value=None : when missing value attribute not defined in input file, this allows to pass it
        * from_global=True : if input file is global leave to true. If input is regional, set to False.
          interpolating from a regional extraction can significantly speed up processing.
        '''
        # 1. Create ESMF source grids (separate u and v grids, e.g. for C-grids)
        if maskfile is not None:
            self.gridsrc_u, imin_src_u, imax_src_u, jmin_src_u, jmax_src_u = self.create_source_grid(maskfile,
                from_global, coord_names_u, x_coords=x_coords_u, y_coords=y_coords_u, autocrop=autocrop)
            self.gridsrc_v, imin_src_v, imax_src_v, jmin_src_v, jmax_src_v = self.create_source_grid(maskfile,
                from_global, coord_names_v, x_coords=x_coords_v, y_coords=y_coords_v, autocrop=autocrop)
        else:
            self.gridsrc_u, imin_src_u, imax_src_u, jmin_src_u, jmax_src_u = self.create_source_grid(filename,
                from_global, coord_names_u, x_coords=x_coords_u, y_coords=y_coords_u, autocrop=autocrop)
            self.gridsrc_v, imin_src_v, imax_src_v, jmin_src_v, jmax_src_v = self.create_source_grid(filename,
                from_global, coord_names_v, x_coords=x_coords_v, y_coords=y_coords_v, autocrop=autocrop)

        # 2. read the original field, cropped to the (autocropped) source window
        datasrc_u = _ncdf.read_field(filename, variable_u, frame=frame)
        datasrc_v = _ncdf.read_field(filename, variable_v, frame=frame)
        if self.geometry == 'surface':
            datasrc_u = datasrc_u[:, jmin_src_u:jmax_src_u, imin_src_u:imax_src_u]
            datasrc_v = datasrc_v[:, jmin_src_v:jmax_src_v, imin_src_v:imax_src_v]
            self.depth, self.nz, self.dz = _ncdf.read_vert_coord(filename, depthname, self.nx, self.ny)
        else:
            datasrc_u = datasrc_u[jmin_src_u:jmax_src_u, imin_src_u:imax_src_u]
            datasrc_v = datasrc_v[jmin_src_v:jmax_src_v, imin_src_v:imax_src_v]
            self.depth = 0.; self.nz = 1; self.dz = 0.

        # read time
        # NOTE(review): bare except silently ignores any failure reading time
        try:
            self.timesrc = _ncdf.read_time(filename, timename, frame=frame)
        except:
            print('input data time variable not read')

        # TODO !! make rotation to east,north from source grid.
        # important : if the grid is regular, we don't need to colocate u,v and
        # the interpolation will be more accurate.
        # Run colocation only if grid is non-regular.
        # angle_src_u = _lc.compute_angle_from_lon_lat(self.gridsrc_u.coords[0][0].T,\
        # self.gridsrc_u.coords[0][1].T)
        # angle_src_v = _lc.compute_angle_from_lon_lat(self.gridsrc_v.coords[0][0].T,\
        # self.gridsrc_v.coords[0][1].T)

        # 3. perform extrapolation over land
        print('drown')
        start = ptime.time()
        if drown in self.drown_methods:
            dataextrap_u = self.perform_extrapolation(datasrc_u, maskfile, maskvar, missing_value, drown)
            dataextrap_v = self.perform_extrapolation(datasrc_v, maskfile, maskvar, missing_value, drown)
        else:
            dataextrap_u = datasrc_u.copy()
            dataextrap_v = datasrc_v.copy()
        end = ptime.time()
        print('end drown', end - start)

        # 4. ESMF interpolation
        # Create a field on the centers of the grid
        field_src_u = _ESMF.Field(self.gridsrc_u, staggerloc=_ESMF.StaggerLoc.CENTER)
        field_src_v = _ESMF.Field(self.gridsrc_v, staggerloc=_ESMF.StaggerLoc.CENTER)

        # Set up a regridding object between source and destination
        # (reused from the caller via interpolator_u/v when provided)
        print('create regridding for u')
        if interpolator_u is None:
            if method == 'bilinear':
                regridme_u = _ESMF.Regrid(field_src_u, self.field_target,
                                          unmapped_action=_ESMF.UnmappedAction.IGNORE,
                                          regrid_method=_ESMF.RegridMethod.BILINEAR)
            elif method == 'patch':
                regridme_u = _ESMF.Regrid(field_src_u, self.field_target,
                                          unmapped_action=_ESMF.UnmappedAction.IGNORE,
                                          regrid_method=_ESMF.RegridMethod.PATCH)
        else:
            regridme_u = interpolator_u

        print('create regridding for v')
        if interpolator_v is None:
            if method == 'bilinear':
                regridme_v = _ESMF.Regrid(field_src_v, self.field_target,
                                          unmapped_action=_ESMF.UnmappedAction.IGNORE,
                                          regrid_method=_ESMF.RegridMethod.BILINEAR)
            elif method == 'patch':
                regridme_v = _ESMF.Regrid(field_src_v, self.field_target,
                                          unmapped_action=_ESMF.UnmappedAction.IGNORE,
                                          regrid_method=_ESMF.RegridMethod.PATCH)
        else:
            regridme_v = interpolator_v

        print('regridding u')
        self.data_u = self.perform_interpolation(dataextrap_u, regridme_u, field_src_u, self.field_target, self.use_locstream)
        print('regridding v')
        self.data_v = self.perform_interpolation(dataextrap_v, regridme_v, field_src_v, self.field_target, self.use_locstream)

        # vector rotation to output grid (angle_dx read from the segment/grid)
        self.data_u_out = self.data_u * _np.cos(self.angle_dx[self.jmin:self.jmax+1, self.imin:self.imax+1]) + \
            self.data_v * _np.sin(self.angle_dx[self.jmin:self.jmax+1, self.imin:self.imax+1])
        self.data_v_out = self.data_v * _np.cos(self.angle_dx[self.jmin:self.jmax+1, self.imin:self.imax+1]) - \
            self.data_u * _np.sin(self.angle_dx[self.jmin:self.jmax+1, self.imin:self.imax+1])

        # free memory (ESMPy has memory leak)
        self.gridsrc_u.destroy()
        self.gridsrc_v.destroy()
        field_src_u.destroy()
        field_src_v.destroy()
        return regridme_u, regridme_v

    def compute_mask_from_missing_value(self, data, missing_value=None):
        ''' compute mask from missing value :
        * first try to get the mask assuming our data is a np.ma.array.
        Well-written netcdf files with missing_value of _FillValue attributes
        are translated into a np.ma.array
        * else use provided missing value to create mask

        Returns an array of 1 (ocean) / 0 (masked/land). '''
        try:
            logicalmask = data.mask
            mask = _np.ones(logicalmask.shape)
            mask[_np.where(logicalmask == True)] = 0
        except:
            if missing_value is not None:
                mask = _np.ones(data.shape)
                mask[_np.where(data == missing_value)] = 0
            else:
                exit('Cannot create mask, please provide a missing_value, or maskfile')
        #if self.debug:
        # _plt.figure() ; _plt.contourf(mask[0,:,:],[0.99,1.01]) ; _plt.colorbar() ; _plt.title('land sea mask')
        return mask

    def perform_extrapolation(self, datasrc, maskfile, maskvar, missing_value, drown):
        ''' Extrapolate ocean values over land: mask, normalize to [0,1],
        drown, then restore the original value range. '''
        # 2.1 read mask or compute it
        if maskvar is not None:
            mask = _ncdf.read_field(maskfile, maskvar)
            # to do, needs imin/imax_src,...
        else:
            mask = self.compute_mask_from_missing_value(datasrc, missing_value=missing_value)
        # 2.2 mask the source data
        if _np.ma.is_masked(datasrc):
            datasrc = datasrc.data
        datasrc[_np.where(mask == 0)] = self.xmsg
        # min/max over ocean points only, used for normalization
        datamin = datasrc[_np.where(mask == 1)].min()
        datamax = datasrc[_np.where(mask == 1)].max()
        if self.debug:
            datasrc_plt = _np.ma.masked_values(datasrc, self.xmsg)
            _plt.figure() ; _plt.contourf(datasrc_plt[0,:,:],40) ; _plt.title('original') ; _plt.colorbar()
        # 2.3 perform land extrapolation on reduced variable
        datanorm = self.normalize(datasrc, datamin, datamax, mask)
        if self.debug:
            print(datanorm.min(), datanorm.max(), datamin, datamax)
        datanormextrap = self.drown_field(datanorm, mask, drown)
        dataextrap = self.unnormalize(datanormextrap, datamin, datamax)
        return dataextrap

    def drown_field(self, data, mask, drown):
        ''' drown_field is a wrapper around the fortran code fill_msg_grid.
        depending on the output geometry, applies land extrapolation on 1 or N levels'''
        if self.geometry == 'surface':
            # drown each vertical level independently
            for kz in _np.arange(self.nz):
                tmpin = data[kz,:,:].transpose()
                if self.debug and kz == 0:
                    tmpin_plt = _np.ma.masked_values(tmpin, self.xmsg)
                    _plt.figure() ; _plt.contourf(tmpin_plt.transpose(),40) ; _plt.colorbar() ;
                    _plt.title('normalized before drown')
                if drown == 'ncl':
                    tmpout = _fill.mod_poisson.poisxy1(tmpin, self.xmsg, self.guess, self.gtype, \
                        self.nscan, self.epsx, self.relc)
                elif drown == 'sosie':
                    tmpout = _mod_drown_sosie.mod_drown.drown(self.kew, tmpin, mask[kz,:,:].T, \
                        nb_inc=200, nb_smooth=40)
                data[kz,:,:] = tmpout.transpose()
                if self.debug and kz == 0:
                    _plt.figure() ; _plt.contourf(tmpout.transpose(),40) ; _plt.colorbar() ;
                    _plt.title('normalized after drown')
                    _plt.show()
        elif self.geometry == 'line':
            tmpin = data[:,:].transpose()
            if drown == 'ncl':
                tmpout = _fill.mod_poisson.poisxy1(tmpin, self.xmsg, self.guess, self.gtype, \
                    self.nscan, self.epsx, self.relc)
            elif drown == 'sosie':
                tmpout = _mod_drown_sosie.mod_drown.drown(self.kew, tmpin, mask[:,:].T, \
                    nb_inc=200, nb_smooth=40)
            data[:,:] = tmpout.transpose()
        return data

    def normalize(self, data, datamin, datamax, mask):
        ''' create a reduced variable to perform better drown '''
        datanorm = (data - datamin) / (datamax - datamin)
        # re-mark land points with the missing-value sentinel
        datanorm[_np.where(mask == 0)] = self.xmsg
        return datanorm

    def unnormalize(self, datanorm, datamin, datamax):
        ''' return back to original range of values '''
        data = datamin + datanorm * (datamax - datamin)
        return data

    def perform_interpolation(self, dataextrap, regridme, field_src, field_target, use_locstream):
        ''' Run the ESMF regridding for one component and copy the result
        into the segment window (jmin:jmax, imin:imax). '''
        data = self.allocate()
        if self.geometry == 'surface':
            for kz in _np.arange(self.nz):
                field_src.data[:] = dataextrap[kz,:,:].transpose()
                field_target = regridme(field_src, field_target)
                if use_locstream:
                    # locstream output is 1-d; map it back onto the degenerate axis
                    if self.nx == 1:
                        data[kz,:,0] = field_target.data.copy()
                    elif self.ny == 1:
                        data[kz,0,:] = field_target.data.copy()
                else:
                    data[kz,:,:] = field_target.data.transpose()[self.jmin:self.jmax+1, \
                        self.imin:self.imax+1]
                if self.debug and kz == 0:
                    data_target_plt = _np.ma.masked_values(data[kz,:,:], self.xmsg)
                    #data_target_plt = _np.ma.masked_values(field_target.data,self.xmsg)
                    _plt.figure() ; _plt.contourf(data_target_plt[:,:],40) ; _plt.colorbar() ;
                    _plt.title('regridded') ; _plt.show()
        elif self.geometry == 'line':
            field_src.data[:] = dataextrap[:,:].transpose()
            field_target = regridme(field_src, field_target)
            if use_locstream:
                data[:,:] = _np.reshape(field_target.data.transpose(), (self.ny, self.nx))
            else:
                data[:,:] = field_target.data.transpose()[self.jmin:self.jmax+1, self.imin:self.imax+1]
        return data

    def depth_dz_from_vector(self, depth_vector):
        ''' Build 3-d depth and layer-thickness arrays by replicating the
        1-d depth_vector at every horizontal point. '''
        self.nz = depth_vector.shape[0]
        self.depth = _np.empty((self.nz, self.ny, self.nx))
        for ky in _np.arange(self.ny):
            for kx in _np.arange(self.nx):
                self.depth[:, ky, kx] = depth_vector
        # compute layer thickness
        self.dz = _np.empty((self.nz, self.ny, self.nx))
        self.dz[:-1,:,:] = self.depth[1:,:,:] - self.depth[:-1,:,:]
        # test if bounds exist first (to do), else
        # duplicate the last thickness for the bottom layer
        self.dz[-1,:,:] = self.dz[-2,:,:]
        return None

    def extract_subset_into(self, dst_obc_variable):
        ''' extract subset of data from source obc variable into dest'''
        if self.geometry == 'surface':
            dst_obc_variable.data_u_out = self.data_u_out[:, dst_obc_variable.jmin:dst_obc_variable.jmax+1, \
                dst_obc_variable.imin:dst_obc_variable.imax+1]
            dst_obc_variable.data_v_out = self.data_v_out[:, dst_obc_variable.jmin:dst_obc_variable.jmax+1, \
                dst_obc_variable.imin:dst_obc_variable.imax+1]
            dst_obc_variable.depth = self.depth[:, dst_obc_variable.jmin:dst_obc_variable.jmax+1, \
                dst_obc_variable.imin:dst_obc_variable.imax+1]
            dst_obc_variable.dz = self.dz[:, dst_obc_variable.jmin:dst_obc_variable.jmax+1, \
                dst_obc_variable.imin:dst_obc_variable.imax+1]
            dst_obc_variable.nz = self.nz
        elif self.geometry == 'line':
            dst_obc_variable.data_u_out = self.data_u_out[dst_obc_variable.jmin:dst_obc_variable.jmax+1, \
                dst_obc_variable.imin:dst_obc_variable.imax+1]
            dst_obc_variable.data_v_out = self.data_v_out[dst_obc_variable.jmin:dst_obc_variable.jmax+1, \
                dst_obc_variable.imin:dst_obc_variable.imax+1]
        dst_obc_variable.timesrc = self.timesrc
        return None

    def create_source_grid(self, filename, from_global, coord_names, x_coords=None, y_coords=None, autocrop=True):
        ''' create ESMF grid object for source grid '''
        # new way to create source grid
        # TO DO : move into separate function, has to be called before drown
        # so that we know the periodicity

        # Allow to provide lon/lat from existing array
        if x_coords is not None and y_coords is not None:
            lon_src = x_coords
            lat_src = y_coords
        else:
            lons = _ncdf.read_field(filename, coord_names[0])
            lats = _ncdf.read_field(filename, coord_names[1])
            if len(lons.shape) == 1:
                lon_src, lat_src = _np.meshgrid(lons, lats)
            else:
                lon_src = lons ; lat_src = lats

        # autocrop : restrict the source grid to the target-grid footprint
        if autocrop:
            imin_src, imax_src, jmin_src, jmax_src = \
                _lc.find_subset(self.grid_target, lon_src, lat_src)
            lon_src = lon_src[jmin_src:jmax_src, imin_src:imax_src]
            lat_src = lat_src[jmin_src:jmax_src, imin_src:imax_src]
        ny_src, nx_src = lon_src.shape
        if not autocrop:
            imin_src = 0 ; imax_src = nx_src
            jmin_src = 0 ; jmax_src = ny_src

        # periodic grid only when the input is global and uncropped
        if from_global and not autocrop:
            gridsrc = _ESMF.Grid(_np.array([nx_src, ny_src]), num_peri_dims=1)
            self.gtype = 1  # 1 = periodic for drown NCL
            self.kew = 0    # 0 = periodic for drown sosie
        else:
            gridsrc = _ESMF.Grid(_np.array([nx_src, ny_src]))
            self.gtype = 0  # 0 = non periodic for drown NCL
            self.kew = -1   # -1 = non periodic for drown sosie
        gridsrc.add_coords(staggerloc=[_ESMF.StaggerLoc.CENTER])
        gridsrc.coords[_ESMF.StaggerLoc.CENTER][0][:] = lon_src.T
        gridsrc.coords[_ESMF.StaggerLoc.CENTER][1][:] = lat_src.T
        # original from RD
        #self.gridsrc = _ESMF.Grid(filename=filename,filetype=_ESMF.FileFormat.GRIDSPEC,\
        #is_sphere=from_global,coord_names=coord_names)
        return gridsrc, imin_src, imax_src, jmin_src, jmax_src
|
raphaeldussin/brushcutter
|
brushcutter/lib_obc_vectvariable.py
|
Python
|
gpl-3.0
| 19,520
|
[
"NetCDF"
] |
a4393547b4d8cac494d66f2598205368f7c04da7a64c6e7aee6003a2aa5d2b62
|
"""Module image.
Instances of class Image are a primary data-holders for all PyBDSF
operations. They store the image itself together with some meta-information
(such as headers), options for processing modules and all data generated during
processing. A few convenience methods are also defined here for interactive
use: to allow viewing and output of the most important data, to allow listing
and setting of options, and to allow re-processing of Images (these methods are
used by the interactive IPython shell made by pybdsf).
This module also defines class Op, which is used as a base class for all PyBDSF
operations.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as N
from .opts import *
class Image(object):
    """Image is a primary data container for PyBDSF.

    All the run-time data (such as image data, mask, etc.)
    is stored here. A number of type-checked properties
    are defined for the most basic image attributes, such
    as image data, mask, header, user options.

    To allow transparent caching of large image data to disk,
    the image data must be stored in attributes ending in
    "_arr". Additionally, setting subarrays does not work
    using the attributes directly (e.g., img.ch0_arr[0:100,0:100]
    = 0.0 will not work). Instead, set the subarray values then set
    the attribute (e.g., ch0[0:100,0:100] = 0.0; img.ch0_arr = ch0).

    There is little sense in declaring all possible attributes
    right here as it will introduce unneeded dependencies
    between modules, thus most other attributes (like island lists,
    gaussian lists, etc) are inserted at run-time by the specific
    PyBDSF modules.
    """
    def __init__(self, opts):
        """Set run-state defaults and wrap the user options in an Opts object."""
        self._prev_opts = None          # options used on the previous processing run
        self.extraparams = {}
        self.masked = False
        self.completed_Ops = []         # names of operations already applied
        self.waveletimage = False
        self._pi = False
        self.do_cache = False           # when True, "_arr" attributes live on disk
        self.bbspatchnum = 0
        self.blankpix = 0
        self.use_io = ''
        self.j = 0
        self.freq_pars = [0.0, 0.0, 0.0]
        self.filename = ''
        self.resid_gaus_arr = None
        self._is_interactive_shell = False  # toggles printed vs raised errors
        self.opts = Opts(opts)

    def __setstate__(self, state):
        """Needed for multiprocessing"""
        # Only the three values exported by __getstate__ are restored.
        self.thresh_pix = state['thresh_pix']
        self.minpix_isl = state['minpix_isl']
        self.clipped_mean = state['clipped_mean']

    def __getstate__(self):
        """Needed for multiprocessing"""
        # Keep the pickled payload minimal: just what worker processes need.
        state = {}
        state['thresh_pix'] = self.thresh_pix
        state['minpix_isl'] = self.minpix_isl
        state['clipped_mean'] = self.clipped_mean
        return state

    def __getattribute__(self, name):
        """Retrieve "_arr" attributes from the disk cache when caching is on;
        fall back to normal attribute lookup otherwise."""
        from . import functions as func
        if name.endswith("_arr"):
            if self.do_cache:
                map_data = func.retrieve_map(self, name)
                if map_data is not None:
                    return map_data
                else:
                    # not cached yet: use the in-memory value
                    return object.__getattribute__(self, name)
            else:
                return object.__getattribute__(self, name)
        else:
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        """Divert "_arr" ndarray assignments to the disk cache when caching is on."""
        from . import functions as func
        # hasattr guard: during early __init__, do_cache does not exist yet
        if hasattr(self, 'do_cache'):
            if self.do_cache and name.endswith("_arr") and isinstance(value, N.ndarray):
                func.store_map(self, name, value)
            else:
                super(Image, self).__setattr__(name, value)
        else:
            super(Image, self).__setattr__(name, value)

    def __delattr__(self, name):
        """Delete a cached "_arr" map from disk, or the attribute itself."""
        from . import functions as func
        if self.do_cache and name.endswith("_arr"):
            func.del_map(self, name)
        else:
            super(Image, self).__delattr__(name)

    def get_map(self, map_name):
        """Returns requested map."""
        from . import functions as func
        if self.do_cache:
            map_data = func.retrieve_map(self, map_name)
        else:
            map_data = getattr(self, map_name)
        return map_data

    def put_map(self, map_name, map_data):
        """Stores requested map."""
        from . import functions as func
        if self.do_cache:
            func.store_map(self, map_name, map_data)
        else:
            setattr(self, map_name, map_data)

    def list_pars(self):
        """List parameter values."""
        from . import interface
        interface.list_pars(self)

    def set_pars(self, **kwargs):
        """Set parameter values."""
        from . import interface
        interface.set_pars(self, **kwargs)

    def process(self, **kwargs):
        """Process Image object"""
        from . import interface
        success = interface.process(self, **kwargs)
        return success

    def save_pars(self, savefile=None):
        """Save parameter values."""
        from . import interface
        interface.save_pars(self, savefile)

    def load_pars(self, loadfile=None):
        """Load parameter values from a save file.

        Defaults to '<filename>.pybdsf.sav'. In an interactive shell errors
        are printed; otherwise a RuntimeError is raised.
        """
        from . import interface
        import os
        if loadfile is None or loadfile == '':
            loadfile = self.opts.filename + '.pybdsf.sav'
        if os.path.exists(loadfile):
            timg, err = interface.load_pars(loadfile)
            if timg is not None:
                orig_filename = self.opts.filename
                self.opts = timg.opts
                self.opts.filename = orig_filename # reset filename to original
            else:
                if self._is_interactive_shell:
                    print("\n\033[31;1mERROR\033[0m: '"+\
                        loadfile+"' is not a valid parameter save file.")
                else:
                    raise RuntimeError(str(err))
        else:
            if self._is_interactive_shell:
                print("\n\033[31;1mERROR\033[0m: File '"+\
                    loadfile+"' not found.")
            else:
                raise RuntimeError('File not found')

    def show_fit(self, **kwargs):
        """Show results of the fit.

        Returns False (with a message) if the image has not been processed yet.
        """
        from . import plotresults
        if not hasattr(self, 'nisl'):
            print('Image has not been processed. Please run process_image first.')
            return False
        plotresults.plotresults(self, **kwargs)
        return True

    def export_image(self, **kwargs):
        """Export an internal image to a file."""
        from . import interface
        try:
            result = interface.export_image(self, **kwargs)
            return result
        except RuntimeError as err:
            # print in interactive mode, re-raise otherwise
            if self._is_interactive_shell:
                print("\n\033[31;1mERROR\033[0m: " + str(err))
            else:
                raise RuntimeError(str(err))

    def write_catalog(self, **kwargs):
        """Write the Gaussian, source, or shapelet list to a file"""
        from . import interface
        try:
            result = interface.write_catalog(self, **kwargs)
            return result
        except RuntimeError as err:
            # print in interactive mode, re-raise otherwise
            if self._is_interactive_shell:
                print("\n\033[31;1mERROR\033[0m: " + str(err))
            else:
                raise RuntimeError(str(err))
class Op(object):
    """Common base class for all PyBDSF operations.

    At the moment this class is empty and only defines placeholder
    for method __call__, which should be redefined in all derived
    classes.
    """
    def __call__(self, img):
        # Subclasses perform their operation on the given Image here.
        raise NotImplementedError("This method should be redefined")
|
lofar-astron/PyBDSF
|
bdsf/image.py
|
Python
|
gpl-3.0
| 7,512
|
[
"Gaussian"
] |
6ebc03236bb035b823c43f137591c5ff3dc773f962b29594401124523a09400b
|
#!/usr/bin/env python
########################################
#Globale Karte fuer tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 charactors
DIR='/Users/tang/climate/CMIP5/monthly/tas/'
VARIABLE='tas'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
AbsTemp=273.15
#AbsTemp=0
RefTemp=5
CRUmean=8.148 #1900-2100 land
TargetModel=[\
#'CanESM2',\
#'BCC-CSM1.1',\
#'CCSM4',\
#'CNRM-CM5',\
#'CSIRO-Mk3.6.0',\
#'EC-EARTH',\
#'GFDL-ESM2G',\
'GFDL-ESM2M',\
#'GISS-E2-H',\
#'GISS-E2-R',\
#'HadGEM2-CC',\
'HadGEM2-ES',\
#'INM-CM4',\
'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
#'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MRI-CGCM3',\
#'NorESM1-M',\
#'MPI-ESM-LR',\
]
COLORtar=['darkred','blue','deeppink','orange',\
'orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal',\
'blue','purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
'red','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
#================================================ CMIP5 models
# for rcp8.5
# ls -l | awk '{printf "999%s998,\\\n",$NF}' | sort -n
modelist2=[\
'ACCESS1-0',\
'ACCESS1-3',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CMCC-CM',\
'CMCC-CMS',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'FIO-ESM',\
'GFDL-CM3',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-R',\
'HadGEM2-AO',\
'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'IPSL-CM5B-LR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MRI-CGCM3',\
'NorESM1-M',\
'NorESM1-ME',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
# ---- Figure and axes setup (Python 2 script; plt/np imported above) ----
print "==============================================="
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
#plt.ylabel('SWIO Surface Downwelling Solar Radiation Change (W/m2)',fontsize=16)
plt.ylabel('SWIO Surface Temperature Changes ($^\circ$C)',fontsize=16)
# NOTE(review): "Tempereture" is a typo in the displayed title; it is a
# runtime string, so it is only flagged here, not changed.
plt.title("SWIO Surface Tempereture Changes simulated by CMIP5 models",fontsize=18)
#plt.title('Global Surface Downwelling Solar Radiation Changes simulated by CMIP5 models (W/m2)',fontsize=18)
plt.ylim(-2,5)
plt.xlim(1961,2099)
plt.grid()
# x ticks every 20 years; upper bound 2093+10 keeps the last tick <= 2103
plt.xticks(np.arange(1961, 2093+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
#=================================================== 3 windows
# Shade the three analysis windows: reference (1996-2005), mid-century
# (2046-2055) and end-of-century (2090-2099).
plt.axvspan(1996, 2005, alpha=0.3, color='red')
plt.axvspan(2046, 2055, alpha=0.3, color='red')
plt.axvspan(2090, 2099, alpha=0.3, color='red')
#=================================================== 3 windows
# Vertical line at the historical/rcp85 transition; horizontal zero line.
plt.axvline(x=2005,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
# ---- Per-model loop: read each CMIP5 SWIO-mean file, compute annual-mean
# ---- temperature anomalies relative to 1996-2005, plot target models,
# ---- and accumulate sums for the ensemble mean / spread.
# NOTE(review): indentation was lost in this copy of the file, so the exact
# extent of the `for Model in modelist2:` body cannot be confirmed here.
########################## for historical
########################## for historical
print "========== for rcp85 ==============="
EXPERIMENT='historical-rcp85'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
TIME='196101-209912'
filetag="swiomean"
YEAR=range(1961,2100)
# 139 years * 12 months
Nmonth=1668
SumTemp=np.zeros(Nmonth/12)
K=0
for Model in modelist2:
#define the K-th model input file:
K=K+1 # for average
infile1=DIR+'rcp8.5'+'/'+Model+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+ENSEMBLE+'_'+TIME+'.'+filetag+'.nc'
#an example: tas_Amon_CanESM2_historical-rcp85_r1i1p1_200601-210012.globalmean.nc & \
#this file was copied locally for tests in this book
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
print 'the variable tas ===============: '
print TAS
# calculate the annual mean temp:
TEMP=range(0,Nmonth,12)
for j in range(0,Nmonth,12):
# j/12 relies on Python 2 integer division; TAS[j:j+11] averages 11 of the
# 12 months of the year -- presumably j:j+12 was intended; TODO confirm.
TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
print TEMP
# reference temp: mean of 1996-2005
RefTemp=np.mean(TEMP[len(TEMP)-94-10+1:len(TEMP)-94])
# keep the HadGEM2-ES reference for the downscaling overlay further below
if Model=='HadGEM2-ES':
HadRefTemp=RefTemp
print HadRefTemp
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
# NOTE(review): the string below says 1986-2005 but RefTemp above is the
# 1996-2005 window -- one of the two is stale.
print " temp ======================== relative to mean of 1986-2005"
print TEMP
##quit()
# for std
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
#quit()
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
label=Model,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],\
linewidth=2)
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
# Ensemble mean and 5-95% spread across the K models read above.
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print modelist2
plt.plot(YEAR,AveTemp,label='ensemble mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(2015,2,str(K)+' models',size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
# ---- Overlay regional-downscaling (RegCM) results for the three analysis
# ---- windows on top of the CMIP5 curves, then show the figure.
# NOTE(review): this section is visibly unfinished -- it contains two
# syntax errors (flagged below) and an apparently abandoned loop over
# DownModellist/TimeTag; only the HadGEM2-driven files are actually plotted.
#=================================================== put downscaling data:
# for Had
DownModellist=['Had.G71E0001', 'GFDL.G71E0001']
# BUG: empty right-hand side -- this line is a SyntaxError as written;
# the intended value (probably a list of display tags) is unknown.
DownModeltag=
TimeTag=['1996-2005','2046-2055','2090-2099']
YEAR1=range(1996,2006)
YEAR2=range(2046,2056)
YEAR3=range(2090,2100)
outputDir=['output/pprcmdata/monthly/',\
'output.RCP85.2044-2055/pprcmdata/monthly/',\
'output.RCP85.2088-2099/pprcmdata/monthly/']
DownModelDir="/Users/tang/climate/Modeling/333/"
Had_dir1="/Users/tang/climate/Modeling/333/Had.G71E0001/output/pprcmdata/monthly/"
Had_dir2="/Users/tang/climate/Modeling/333/Had.G71E0001/output.RCP85.2044-2055/pprcmdata/monthly/"
Had_dir3="/Users/tang/climate/Modeling/333/Had.G71E0001/output.RCP85.2088-2100/pprcmdata/monthly/"
infile1=Had_dir1+'Had_hist.SRF.all.year.fldmean.1996-2005.nc'
infile2=Had_dir2+'Had_rcp85.SRF.all.year.fldmean.2046-2055.nc'
infile3=Had_dir3+'Had_rcp85.SRF.all.year.fldmean.2090-2099.nc'
K=0
for Model in DownModellist:
Q=0
for window in TimeTag:
Q=Q+1
# BUG: trailing '+' with nothing after it -- SyntaxError; the filename
# part of this path was never written, and infile1 here would clobber
# the Had file path assigned above.
infile1=DownModelDir+Model+'/'+outputDir[1]+
infile=IO.NetCDFFile(infile1,'r')
#K=K+1 # for average
#open input files
infile01=IO.NetCDFFile(infile1,'r')
infile02=IO.NetCDFFile(infile2,'r')
infile03=IO.NetCDFFile(infile3,'r')
print infile01.variables.keys()
print infile02.variables.keys()
print infile03.variables.keys()
# read the variable tas
TAS1=infile01.variables[VARIABLE][:].copy()
TAS2=infile02.variables[VARIABLE][:].copy()
TAS3=infile03.variables[VARIABLE][:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
# the three range(0,9) placeholders are dead -- immediately overwritten
TEMP1=range(0,9)
TEMP2=range(0,9)
TEMP3=range(0,9)
# anomalies relative to the HadGEM2-ES reference computed in the loop above
TEMP1=TAS1[:,0,0] - AbsTemp - HadRefTemp
TEMP2=TAS2[:,0,0] - AbsTemp - HadRefTemp
TEMP3=TAS3[:,0,0] - AbsTemp - HadRefTemp
#print " temp ======================== absolut"
#print TEMP
plt.plot(YEAR1,TEMP1,color="blue",linewidth=4)
plt.plot(YEAR2,TEMP2,color="blue",linewidth=4)
plt.plot(YEAR3,TEMP3,color="blue",linewidth=4)
print "==============================================="
plt.legend(loc=2)
plt.show()
quit()
|
CopyChat/Plotting
|
Python/climate_change/backup.py
|
Python
|
gpl-3.0
| 10,051
|
[
"NetCDF"
] |
f001fdb41457b500556dbdc0fcdcce25bd0780c9bfee2e2776f49dd26820fccf
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
# The purpose of this test is to detect a change in the _metric_json of MetricsBase objects. Many of the metric
# accessors require _metric_json to have a particular form.
def metric_json_check():
    """Detect accidental changes to the ``_metric_json`` of MetricsBase objects.

    Many metric accessors require ``_metric_json`` to have a particular form,
    so a model of each category (regression, binomial, multinomial and
    clustering, plus the GLM variants) is scored and the keys of its metric
    json are compared against a hard-coded expected list.

    Raises:
        AssertionError: if any metric json carries keys not in its expected
            list.
    """

    def _assert_keys(kind, have, desired):
        # Shared compare-and-assert step (was copy-pasted six times).
        # NOTE(review): intentionally one-directional, matching the original:
        # only *extra* keys in `have` are reported; a key *removed* from
        # _metric_json would pass unnoticed -- confirm that is intended.
        diff = list(set(have) - set(desired))
        assert not diff, \
            "There's a difference between the current ({0}) and the desired ({1}) {2} " \
            "metric json. The difference is {3}".format(have, desired, kind, diff)

    df = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))

    # Regression metric json (GBM)
    reg_mod = h2o.gbm(y=df["CAPSULE"], x=df[3:], training_frame=df, distribution="gaussian")
    _assert_keys("regression", reg_mod.model_performance()._metric_json.keys(),
                 [u'model_category', u'description', u'r2', u'frame',
                  u'model_checksum', u'MSE', u'__meta', u'scoring_time',
                  u'predictions', u'model', u'duration_in_ms',
                  u'frame_checksum', u'mean_residual_deviance'])

    # Regression metric json (GLM adds deviance / AIC keys)
    reg_mod = h2o.glm(y=df["CAPSULE"], x=df[3:], training_frame=df, family="gaussian")
    _assert_keys("glm-regression", reg_mod.model_performance()._metric_json.keys(),
                 [u'model_category', u'description', u'r2',
                  u'residual_degrees_of_freedom', u'frame', u'model_checksum',
                  u'MSE', u'__meta', u'null_deviance', u'scoring_time',
                  u'null_degrees_of_freedom', u'predictions', u'AIC',
                  u'model', u'duration_in_ms', u'frame_checksum',
                  u'residual_deviance', u'mean_residual_deviance'])

    # Binomial metric json (GBM)
    bin_mod = h2o.gbm(y=df["CAPSULE"].asfactor(), x=df[3:], training_frame=df, distribution="bernoulli")
    _assert_keys("binomial", bin_mod.model_performance()._metric_json.keys(),
                 [u'AUC', u'Gini', u'model_category', u'description', u'r2',
                  u'frame', u'model_checksum', u'MSE', u'__meta', u'logloss',
                  u'scoring_time', u'thresholds_and_metric_scores',
                  u'predictions', u'max_criteria_and_metric_scores',
                  u'model', u'duration_in_ms', u'frame_checksum', u'domain'])

    # Binomial metric json (GLM)
    bin_mod = h2o.glm(y=df["CAPSULE"].asfactor(), x=df[3:], training_frame=df, family="binomial")
    _assert_keys("glm-binomial", bin_mod.model_performance()._metric_json.keys(),
                 [u'frame', u'residual_deviance',
                  u'max_criteria_and_metric_scores', u'MSE',
                  u'frame_checksum', u'AIC', u'logloss', u'Gini',
                  u'predictions', u'AUC', u'description', u'model_checksum',
                  u'duration_in_ms', u'model_category', u'r2',
                  u'residual_degrees_of_freedom', u'__meta',
                  u'null_deviance', u'scoring_time',
                  u'null_degrees_of_freedom', u'model',
                  u'thresholds_and_metric_scores', u'domain'])

    # Multinomial metric json (GBM on the airlines data)
    df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
    myX = ["Origin", "Dest", "IsDepDelayed", "UniqueCarrier", "Distance", "fDayofMonth", "fDayOfWeek"]
    myY = "fYear"
    mul_mod = h2o.gbm(x=df[myX], y=df[myY], training_frame=df, distribution="multinomial")
    _assert_keys("multinomial", mul_mod.model_performance()._metric_json.keys(),
                 [u'cm', u'model_category', u'description', u'r2', u'frame',
                  u'model_checksum', u'MSE', u'__meta', u'logloss',
                  u'scoring_time', u'predictions', u'hit_ratio_table',
                  u'model', u'duration_in_ms', u'frame_checksum'])

    # Clustering metric json (k-means on iris)
    df = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
    clus_mod = h2o.kmeans(x=df[0:4], k=3, standardize=False)
    _assert_keys("clustering", clus_mod.model_performance()._metric_json.keys(),
                 [u'tot_withinss', u'model_category', u'description',
                  u'frame', u'model_checksum', u'MSE', u'__meta',
                  u'scoring_time', u'betweenss', u'predictions', u'totss',
                  u'model', u'duration_in_ms', u'frame_checksum',
                  u'centroid_stats'])
# Standard h2o pyunit entry point: run under the test harness when executed
# directly, otherwise (imported by the suite runner) call the check directly.
_run = pyunit_utils.standalone_test if __name__ == "__main__" else (lambda f: f())
_run(metric_json_check)
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_metric_json_check.py
|
Python
|
apache-2.0
| 10,502
|
[
"Gaussian"
] |
d48acc2b1ddd4795bd4ecab75d38989c38aa57a0e8222312d68be1611e07b183
|
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
_adjust_contrast_outputs = ["output"]


def adjust_contrast(images, contrast_factor, min_value, max_value, name=None):
  r"""Deprecated. Disallowed in GraphDef version >= 2.

  Args:
    images: A `Tensor` of type `uint8`, `int8`, `int16`, `int32`, `int64`,
      `float32`, or `float64`.
    contrast_factor: A `Tensor` of type `float32`.
    min_value: A `Tensor` of type `float32`.
    max_value: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  return _op_def_lib.apply_op("AdjustContrast",
                              images=images,
                              contrast_factor=contrast_factor,
                              min_value=min_value,
                              max_value=max_value,
                              name=name)
__adjust_contrastv2_outputs = ["output"]


def _adjust_contrastv2(images, contrast_factor, name=None):
  r"""Adjust the contrast of one or more images.

  `images` is a tensor of at least 3 dimensions whose last three dimensions
  are interpreted as `[height, width, channels]`; any leading dimensions
  index a collection of images. Contrast is adjusted independently per
  channel: the op computes the channel mean and maps each pixel component
  to `(x - mean) * contrast_factor + mean`.

  Args:
    images: A `Tensor` of type `float32`. Images to adjust. At least 3-D.
    contrast_factor: A `Tensor` of type `float32`.
      A float multiplier for adjusting contrast.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`. The contrast-adjusted image or images.
  """
  return _op_def_lib.apply_op("AdjustContrastv2",
                              images=images,
                              contrast_factor=contrast_factor,
                              name=name)
_adjust_hue_outputs = ["output"]


def adjust_hue(images, delta, name=None):
  r"""Adjust the hue of one or more images.

  `images` is a tensor of at least 3 dimensions whose last dimension is
  interpreted as channels and must be three. The input is treated as RGB:
  conceptually the colors are mapped to HSV, `delta` is added to every hue
  value, and the result is mapped back to RGB.

  Args:
    images: A `Tensor` of type `float32`. Images to adjust. At least 3-D.
    delta: A `Tensor` of type `float32`. A float delta to add to the hue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`. The hue-adjusted image or images.
  """
  return _op_def_lib.apply_op("AdjustHue",
                              images=images,
                              delta=delta,
                              name=name)
_adjust_saturation_outputs = ["output"]


def adjust_saturation(images, scale, name=None):
  r"""Adjust the saturation of one or more images.

  `images` is a tensor of at least 3 dimensions whose last dimension is
  interpreted as channels and must be three. The input is treated as RGB:
  conceptually the colors are mapped to HSV, every saturation value is
  multiplied by `scale`, and the result is mapped back to RGB.

  Args:
    images: A `Tensor` of type `float32`. Images to adjust. At least 3-D.
    scale: A `Tensor` of type `float32`.
      A float scale to apply to the saturation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`. The saturation-adjusted image or images.
  """
  return _op_def_lib.apply_op("AdjustSaturation",
                              images=images,
                              scale=scale,
                              name=name)
_crop_and_resize_outputs = ["crops"]


def crop_and_resize(image, boxes, box_ind, crop_size, method=None,
                    extrapolation_value=None, name=None):
  r"""Extract crops from `image` and bilinearly resize them to `crop_size`.

  More general than `crop_to_bounding_box`: each row of `boxes` selects a
  normalized box `[y1, x1, y2, x2]` in the image named by the matching
  `box_ind` entry, and every crop is resized (aspect-ratio change allowed)
  to the common `[crop_height, crop_width]` output size. A normalized `y`
  maps to image coordinate `y * (image_height - 1)`; `y1 > y2` produces an
  up-down flipped crop (width is treated similarly), and coordinates
  outside `[0, 1]` are sampled with `extrapolation_value`.

  Args:
    image: A `Tensor` of type `uint8`, `int8`, `int16`, `int32`, `int64`,
      `half`, `float32`, or `float64`; 4-D of shape
      `[batch, image_height, image_width, depth]` with positive
      height and width.
    boxes: A `Tensor` of type `float32`, 2-D of shape `[num_boxes, 4]`
      holding normalized `[y1, x1, y2, x2]` box coordinates.
    box_ind: A `Tensor` of type `int32`, 1-D of shape `[num_boxes]` with
      values in `[0, batch)`; `box_ind[i]` names the image box `i` refers to.
    crop_size: A `Tensor` of type `int32`; 1-D with 2 elements,
      `[crop_height, crop_width]`, both positive.
    method: An optional `string` from: `"bilinear"`. Defaults to
      `"bilinear"`. Interpolation method; only 'bilinear' is supported.
    extrapolation_value: An optional `float`. Defaults to `0`.
      Value used for extrapolation, when applicable.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`, 4-D of shape
    `[num_boxes, crop_height, crop_width, depth]`.
  """
  return _op_def_lib.apply_op("CropAndResize",
                              image=image,
                              boxes=boxes,
                              box_ind=box_ind,
                              crop_size=crop_size,
                              method=method,
                              extrapolation_value=extrapolation_value,
                              name=name)
_crop_and_resize_grad_boxes_outputs = ["output"]


def crop_and_resize_grad_boxes(grads, image, boxes, box_ind, method=None,
                               name=None):
  r"""Compute the gradient of the crop_and_resize op wrt its `boxes` input.

  Args:
    grads: A `Tensor` of type `float32`, 4-D of shape
      `[num_boxes, crop_height, crop_width, depth]`.
    image: A `Tensor` of type `uint8`, `int8`, `int16`, `int32`, `int64`,
      `half`, `float32`, or `float64`; 4-D of shape
      `[batch, image_height, image_width, depth]` with positive
      height and width.
    boxes: A `Tensor` of type `float32`, 2-D of shape `[num_boxes, 4]`
      holding normalized `[y1, x1, y2, x2]` box coordinates (values outside
      `[0, 1]` are allowed, and `y1 > y2` means a flipped crop).
    box_ind: A `Tensor` of type `int32`, 1-D of shape `[num_boxes]` with
      values in `[0, batch)`; `box_ind[i]` names the image box `i` refers to.
    method: An optional `string` from: `"bilinear"`. Defaults to
      `"bilinear"`. Interpolation method; only 'bilinear' is supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`. A 2-D tensor of shape `[num_boxes, 4]`.
  """
  return _op_def_lib.apply_op("CropAndResizeGradBoxes",
                              grads=grads,
                              image=image,
                              boxes=boxes,
                              box_ind=box_ind,
                              method=method,
                              name=name)
_crop_and_resize_grad_image_outputs = ["output"]


def crop_and_resize_grad_image(grads, boxes, box_ind, image_size, T,
                               method=None, name=None):
  r"""Compute the gradient of the crop_and_resize op wrt its `image` input.

  Args:
    grads: A `Tensor` of type `float32`, 4-D of shape
      `[num_boxes, crop_height, crop_width, depth]`.
    boxes: A `Tensor` of type `float32`, 2-D of shape `[num_boxes, 4]`
      holding normalized `[y1, x1, y2, x2]` box coordinates (values outside
      `[0, 1]` are allowed, and `y1 > y2` means a flipped crop).
    box_ind: A `Tensor` of type `int32`, 1-D of shape `[num_boxes]` with
      values in `[0, batch)`; `box_ind[i]` names the image box `i` refers to.
    image_size: A `Tensor` of type `int32`; 1-D with value
      `[batch, image_height, image_width, depth]` giving the original image
      size (height and width positive).
    T: A `tf.DType` from: `tf.float32, tf.half, tf.float64`.
    method: An optional `string` from: `"bilinear"`. Defaults to
      `"bilinear"`. Interpolation method; only 'bilinear' is supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`, 4-D of shape
    `[batch, image_height, image_width, depth]`.
  """
  return _op_def_lib.apply_op("CropAndResizeGradImage",
                              grads=grads,
                              boxes=boxes,
                              box_ind=box_ind,
                              image_size=image_size,
                              T=T,
                              method=method,
                              name=name)
_decode_gif_outputs = ["image"]


def decode_gif(contents, name=None):
  r"""Decode the first frame of a GIF-encoded image to a uint8 tensor.

  GIFs with frame or transparency compression are not supported; convert an
  animated GIF from compressed to uncompressed first, e.g.
  `convert $src.gif -coalesce $dst.gif`.

  Args:
    contents: A `Tensor` of type `string`. 0-D. The GIF-encoded image.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `uint8`, 4-D with shape
    `[num_frames, height, width, 3]` in RGB order.
  """
  return _op_def_lib.apply_op("DecodeGif", contents=contents, name=name)
_decode_jpeg_outputs = ["image"]


def decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None,
                try_recover_truncated=None, acceptable_fraction=None,
                dct_method=None, name=None):
  r"""Decode a JPEG-encoded image to a uint8 tensor.

  The `channels` attr selects the number of color channels in the decoded
  image: 0 keeps the channel count of the encoded image, 1 yields grayscale
  and 3 yields RGB; the decoded image is converted if needed. The `ratio`
  attr downscales by an integer factor (1, 2, 4 or 8) during decoding,
  which is much faster than downscaling afterwards.

  Args:
    contents: A `Tensor` of type `string`. 0-D. The JPEG-encoded image.
    channels: An optional `int`. Defaults to `0`.
      Number of color channels for the decoded image.
    ratio: An optional `int`. Defaults to `1`. Downscaling ratio.
    fancy_upscaling: An optional `bool`. Defaults to `True`.
      If true use a slower but nicer upscaling of the chroma planes
      (yuv420/422 only).
    try_recover_truncated: An optional `bool`. Defaults to `False`.
      If true try to recover an image from truncated input.
    acceptable_fraction: An optional `float`. Defaults to `1`.
      The minimum required fraction of lines before a truncated input is
      accepted.
    dct_method: An optional `string`. Defaults to `""`.
      Hint about the decompression algorithm; "" maps to a system-specific
      default. Currently valid values are ["INTEGER_FAST",
      "INTEGER_ACCURATE"]. The hint may be ignored.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `uint8`. 3-D with shape `[height, width, channels]`.
  """
  return _op_def_lib.apply_op("DecodeJpeg",
                              contents=contents,
                              channels=channels,
                              ratio=ratio,
                              fancy_upscaling=fancy_upscaling,
                              try_recover_truncated=try_recover_truncated,
                              acceptable_fraction=acceptable_fraction,
                              dct_method=dct_method,
                              name=name)
_decode_png_outputs = ["image"]


def decode_png(contents, channels=None, dtype=None, name=None):
  r"""Decode a PNG-encoded image to a uint8 or uint16 tensor.

  The `channels` attr selects the number of color channels in the decoded
  image: 0 keeps the channel count of the encoded image, 1 yields
  grayscale, 3 yields RGB and 4 yields RGBA; the decoded image is
  converted if needed.

  Args:
    contents: A `Tensor` of type `string`. 0-D. The PNG-encoded image.
    channels: An optional `int`. Defaults to `0`.
      Number of color channels for the decoded image.
    dtype: An optional `tf.DType` from: `tf.uint8, tf.uint16`.
      Defaults to `tf.uint8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`. 3-D with shape `[height, width, channels]`.
  """
  return _op_def_lib.apply_op("DecodePng",
                              contents=contents,
                              channels=channels,
                              dtype=dtype,
                              name=name)
_draw_bounding_boxes_outputs = ["output"]


def draw_bounding_boxes(images, boxes, name=None):
  r"""Draw bounding boxes on a batch of images.

  Outputs a copy of `images` with zero or more bounding boxes drawn on top
  of the pixels. Each box is encoded as `[y_min, x_min, y_max, x_max]` with
  float coordinates in `[0.0, 1.0]` relative to image height and width; for
  a 100 x 200 image, `[0.1, 0.2, 0.5, 0.9]` spans `(10, 40)` to
  `(50, 180)`. Parts of a box may fall outside the image.

  Args:
    images: A `Tensor` of type `float32` or `half`; 4-D of shape
      `[batch, height, width, depth]`. A batch of images.
    boxes: A `Tensor` of type `float32`; 3-D of shape
      `[batch, num_bounding_boxes, 4]` containing bounding boxes.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type and shape as `images`: the input batch
    with the boxes drawn on.
  """
  return _op_def_lib.apply_op("DrawBoundingBoxes",
                              images=images,
                              boxes=boxes,
                              name=name)
_encode_jpeg_outputs = ["contents"]


def encode_jpeg(image, format=None, quality=None, progressive=None,
                optimize_size=None, chroma_downsampling=None,
                density_unit=None, x_density=None, y_density=None,
                xmp_metadata=None, name=None):
  r"""JPEG-encode an image.

  `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
  The `format` attr overrides the color format of the encoded output:
  `''` picks a default from the channel count (1 -> grayscale, 3 -> RGB),
  `grayscale` requires 1 channel and `rgb` requires 3.

  Args:
    image: A `Tensor` of type `uint8`.
      3-D with shape `[height, width, channels]`.
    format: An optional `string` from: `"", "grayscale", "rgb"`.
      Defaults to `""`. Per pixel image format.
    quality: An optional `int`. Defaults to `95`.
      Compression quality from 0 to 100 (higher is better and slower).
    progressive: An optional `bool`. Defaults to `False`.
      If True, create a JPEG that loads progressively (coarse to fine).
    optimize_size: An optional `bool`. Defaults to `False`.
      If True, spend CPU/RAM to reduce size with no quality change.
    chroma_downsampling: An optional `bool`. Defaults to `True`.
      See http://en.wikipedia.org/wiki/Chroma_subsampling.
    density_unit: An optional `string` from: `"in", "cm"`.
      Defaults to `"in"`. Unit for `x_density` and `y_density`: pixels per
      inch (`'in'`) or centimeter (`'cm'`).
    x_density: An optional `int`. Defaults to `300`.
      Horizontal pixels per density unit.
    y_density: An optional `int`. Defaults to `300`.
      Vertical pixels per density unit.
    xmp_metadata: An optional `string`. Defaults to `""`.
      If not empty, embed this XMP metadata in the image header.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. 0-D. JPEG-encoded image.
  """
  return _op_def_lib.apply_op("EncodeJpeg",
                              image=image,
                              format=format,
                              quality=quality,
                              progressive=progressive,
                              optimize_size=optimize_size,
                              chroma_downsampling=chroma_downsampling,
                              density_unit=density_unit,
                              x_density=x_density,
                              y_density=y_density,
                              xmp_metadata=xmp_metadata,
                              name=name)
_encode_png_outputs = ["contents"]


def encode_png(image, compression=None, name=None):
  r"""PNG-encode an image.

  `image` is a 3-D uint8 or uint16 Tensor of shape
  `[height, width, channels]` where `channels` is 1 (grayscale),
  2 (grayscale + alpha), 3 (RGB) or 4 (RGBA). The ZLIB compression level
  `compression` is -1 for the encoder default, or 0-9 where 9 gives the
  smallest (and slowest) output.

  Args:
    image: A `Tensor` of type `uint8` or `uint16`;
      3-D with shape `[height, width, channels]`.
    compression: An optional `int`. Defaults to `-1`. Compression level.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. 0-D. PNG-encoded image.
  """
  return _op_def_lib.apply_op("EncodePng",
                              image=image,
                              compression=compression,
                              name=name)
_extract_glimpse_outputs = ["glimpse"]


def extract_glimpse(input, size, offsets, centered=None, normalized=None,
                    uniform_noise=None, name=None):
  r"""Extracts a glimpse from the input tensor.

  Returns a set of windows ("glimpses") taken at the locations `offsets`
  from the input tensor.  Where a window only partially overlaps the
  input, the non-overlapping region is filled with random noise.

  The result is a 4-D tensor of shape `[batch_size, glimpse_height,
  glimpse_width, channels]`; batch and channel dimensions match the
  input, and the output window height/width come from `size`.

  How the windows are placed depends on `centered` and `normalized`:

  * normalized, not centered: 0.0 and 1.0 map to the minimum and maximum
    of each height/width dimension.
  * normalized and centered: coordinates range over [-1.0, 1.0], with
    (-1.0, -1.0) at the upper left, (1.0, 1.0) at the lower right and
    (0, 0) at the center.
  * not normalized: coordinates are interpreted as pixel counts.

  Args:
    input: A `float32` `Tensor` of shape `[batch_size, height, width,
      channels]`.
    size: A 1-D `int32` `Tensor` of 2 elements: glimpse height first,
      then glimpse width.
    offsets: A `float32` `Tensor` of shape `[batch_size, 2]` holding the
      x, y center of each window.
    centered: An optional `bool` (default `True`); if true, (0, 0) is
      relative to the center of the input images, otherwise to their
      upper-left corner.
    normalized: An optional `bool` (default `True`); whether the offset
      coordinates are normalized.
    uniform_noise: An optional `bool` (default `True`); whether fill
      noise is drawn from a uniform (vs. Gaussian) distribution.
    name: An optional name for the operation.

  Returns:
    A `float32` `Tensor` of shape `[batch_size, glimpse_height,
    glimpse_width, channels]` holding the glimpses.
  """
  return _op_def_lib.apply_op(
      "ExtractGlimpse",
      input=input,
      size=size,
      offsets=offsets,
      centered=centered,
      normalized=normalized,
      uniform_noise=uniform_noise,
      name=name)
_hsv_to_rgb_outputs = ["output"]


def hsv_to_rgb(images, name=None):
  r"""Convert one or more images from HSV colorspace to RGB.

  The output tensor has the same shape as `images` and holds the RGB
  values of the pixels; results are only well defined when the input
  values lie in `[0, 1]`.  See `rgb_to_hsv` for the HSV encoding used.

  Args:
    images: A `Tensor` of type `float32` or `float64`; 1-D or higher,
      with a last dimension of size 3 holding HSV data.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same type as `images`, converted to RGB.
  """
  return _op_def_lib.apply_op("HSVToRGB", images=images, name=name)
_non_max_suppression_outputs = ["selected_indices"]


def non_max_suppression(boxes, scores, max_output_size, iou_threshold=None,
                        name=None):
  r"""Greedily select boxes in descending score order, suppressing overlaps.

  Boxes with high intersection-over-union (IOU) overlap with an already
  selected box are pruned.  Bounding boxes are supplied as
  `[y1, x1, y2, x2]` -- any diagonal pair of corners, in either
  normalized ([0, 1]) or absolute coordinates.  The algorithm is
  agnostic to the origin and invariant to orthogonal transformations and
  translations of the coordinate system, so translating or reflecting it
  selects the same boxes.

  The output is a set of integers indexing into the input boxes; the
  corresponding box coordinates can be recovered with `tf.gather`:

      selected_indices = tf.image.non_max_suppression(
          boxes, scores, max_output_size, iou_threshold)
      selected_boxes = tf.gather(boxes, selected_indices)

  Args:
    boxes: A `float32` `Tensor` of shape `[num_boxes, 4]`.
    scores: A `float32` `Tensor` of shape `[num_boxes]`; one score per
      box (per row of `boxes`).
    max_output_size: A scalar `int32` `Tensor`; the maximum number of
      boxes non-max suppression may select.
    iou_threshold: An optional `float` (default `0.5`); the IOU above
      which two boxes are considered to overlap too much.
    name: An optional name for the operation.

  Returns:
    A 1-D `int32` `Tensor` of shape `[M]`, `M <= max_output_size`,
    holding the selected indices into `boxes`.
  """
  op_kwargs = {
      "boxes": boxes,
      "scores": scores,
      "max_output_size": max_output_size,
      "iou_threshold": iou_threshold,
      "name": name,
  }
  return _op_def_lib.apply_op("NonMaxSuppression", **op_kwargs)
_rgb_to_hsv_outputs = ["output"]


def rgb_to_hsv(images, name=None):
  r"""Convert one or more images from RGB colorspace to HSV.

  The output tensor has the same shape as `images` and holds the HSV
  values of the pixels; results are only well defined when the input
  values lie in `[0, 1]`.

  `output[..., 0]` is hue, `output[..., 1]` is saturation and
  `output[..., 2]` is value, all in `[0, 1]`.  A hue of 0 is pure red,
  1/3 is pure green and 2/3 is pure blue.

  Args:
    images: A `Tensor` of type `float32` or `float64`; 1-D or higher,
      with a last dimension of size 3 holding RGB data.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same type as `images`, converted to HSV.
  """
  return _op_def_lib.apply_op("RGBToHSV", images=images, name=name)
__random_crop_outputs = ["output"]


def _random_crop(image, size, seed=None, seed2=None, name=None):
  r"""Randomly crop `image` to a `size[0]` by `size[1]` window.

  A random location is picked so that the cropped rectangle fits
  entirely inside the original image.

  Args:
    image: A 3-D `Tensor` of shape `[height, width, channels]`; one of
      `uint8`, `int8`, `int16`, `int32`, `int64`, `float32`, `float64`.
    size: A 1-D `int64` `Tensor` of length 2: `crop_height, crop_width`.
      Values must be non-negative.
    seed: An optional `int` (default `0`).  If either `seed` or `seed2`
      is non-zero the random generator is seeded with the given seed;
      otherwise a random seed is used.
    seed2: An optional `int` (default `0`); a second seed to avoid seed
      collisions.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same type as `image`, with shape
    `[crop_height, crop_width, channels]`.
  """
  return _op_def_lib.apply_op(
      "RandomCrop", image=image, size=size, seed=seed, seed2=seed2,
      name=name)
_resize_area_outputs = ["resized_images"]


def resize_area(images, size, align_corners=None, name=None):
  r"""Resize `images` to `size` using area interpolation.

  Input images may be of any supported numeric type; the output images
  are always `float32`.

  Args:
    images: A 4-D `Tensor` of shape `[batch, height, width, channels]`;
      one of `uint8`, `int8`, `int16`, `int32`, `int64`, `half`,
      `float32`, `float64`.
    size: A 1-D `int32` `Tensor` of 2 elements: `new_height, new_width`.
    align_corners: An optional `bool` (default `False`).  If true,
      rescale the input by `(new_height - 1) / (height - 1)`, which
      exactly aligns the four corners of the input and resized images;
      otherwise rescale by `new_height / height`.  The width dimension
      is treated likewise.
    name: An optional name for the operation.

  Returns:
    A `float32` `Tensor` of shape
    `[batch, new_height, new_width, channels]`.
  """
  return _op_def_lib.apply_op(
      "ResizeArea", images=images, size=size,
      align_corners=align_corners, name=name)
_resize_bicubic_outputs = ["resized_images"]


def resize_bicubic(images, size, align_corners=None, name=None):
  r"""Resize `images` to `size` using bicubic interpolation.

  Input images may be of any supported numeric type; the output images
  are always `float32`.

  Args:
    images: A 4-D `Tensor` of shape `[batch, height, width, channels]`;
      one of `uint8`, `int8`, `int16`, `int32`, `int64`, `half`,
      `float32`, `float64`.
    size: A 1-D `int32` `Tensor` of 2 elements: `new_height, new_width`.
    align_corners: An optional `bool` (default `False`).  If true,
      rescale the input by `(new_height - 1) / (height - 1)`, which
      exactly aligns the four corners of the input and resized images;
      otherwise rescale by `new_height / height`.  The width dimension
      is treated likewise.
    name: An optional name for the operation.

  Returns:
    A `float32` `Tensor` of shape
    `[batch, new_height, new_width, channels]`.
  """
  return _op_def_lib.apply_op(
      "ResizeBicubic", images=images, size=size,
      align_corners=align_corners, name=name)
_resize_bilinear_outputs = ["resized_images"]


def resize_bilinear(images, size, align_corners=None, name=None):
  r"""Resize `images` to `size` using bilinear interpolation.

  Input images may be of any supported numeric type; the output images
  are always `float32`.

  Args:
    images: A 4-D `Tensor` of shape `[batch, height, width, channels]`;
      one of `uint8`, `int8`, `int16`, `int32`, `int64`, `half`,
      `float32`, `float64`.
    size: A 1-D `int32` `Tensor` of 2 elements: `new_height, new_width`.
    align_corners: An optional `bool` (default `False`).  If true,
      rescale the input by `(new_height - 1) / (height - 1)`, which
      exactly aligns the four corners of the input and resized images;
      otherwise rescale by `new_height / height`.  The width dimension
      is treated likewise.
    name: An optional name for the operation.

  Returns:
    A `float32` `Tensor` of shape
    `[batch, new_height, new_width, channels]`.
  """
  return _op_def_lib.apply_op(
      "ResizeBilinear", images=images, size=size,
      align_corners=align_corners, name=name)
__resize_bilinear_grad_outputs = ["output"]


def _resize_bilinear_grad(grads, original_image, align_corners=None,
                          name=None):
  r"""Compute the gradient of bilinear image interpolation.

  Args:
    grads: A `float32` `Tensor` of shape
      `[batch, height, width, channels]`.
    original_image: A `Tensor` of type `float32`, `half` or `float64`,
      shaped `[batch, orig_height, orig_width, channels]`: the image
      tensor that was resized.
    align_corners: An optional `bool` (default `False`).  If true,
      rescale grads by `(orig_height - 1) / (height - 1)`, which exactly
      aligns the four corners of `grads` and `original_image`; otherwise
      rescale by `orig_height / height`.  The width dimension is treated
      likewise.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same type as `original_image`, shaped
    `[batch, orig_height, orig_width, channels]`: gradients with respect
    to the input image, which must have been float or double.
  """
  op_kwargs = {
      "grads": grads,
      "original_image": original_image,
      "align_corners": align_corners,
      "name": name,
  }
  return _op_def_lib.apply_op("ResizeBilinearGrad", **op_kwargs)
_resize_nearest_neighbor_outputs = ["resized_images"]


def resize_nearest_neighbor(images, size, align_corners=None, name=None):
  r"""Resize `images` to `size` using nearest-neighbor interpolation.

  Args:
    images: A 4-D `Tensor` of shape `[batch, height, width, channels]`;
      one of `uint8`, `int8`, `int16`, `int32`, `int64`, `half`,
      `float32`, `float64`.
    size: A 1-D `int32` `Tensor` of 2 elements: `new_height, new_width`.
    align_corners: An optional `bool` (default `False`).  If true,
      rescale the input by `(new_height - 1) / (height - 1)`, which
      exactly aligns the four corners of the input and resized images;
      otherwise rescale by `new_height / height`.  The width dimension
      is treated likewise.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same type as `images`, shaped
    `[batch, new_height, new_width, channels]`.
  """
  return _op_def_lib.apply_op(
      "ResizeNearestNeighbor", images=images, size=size,
      align_corners=align_corners, name=name)
__resize_nearest_neighbor_grad_outputs = ["output"]


def _resize_nearest_neighbor_grad(grads, size, align_corners=None, name=None):
  r"""Compute the gradient of nearest-neighbor interpolation.

  Args:
    grads: A 4-D `Tensor` of shape `[batch, height, width, channels]`;
      one of `uint8`, `int8`, `int32`, `half`, `float32`, `float64`.
    size: A 1-D `int32` `Tensor` of 2 elements: `orig_height,
      orig_width` -- the original input size.
    align_corners: An optional `bool` (default `False`).  If true,
      rescale grads by `(orig_height - 1) / (height - 1)`, which exactly
      aligns the four corners of grads and the original image; otherwise
      rescale by `orig_height / height`.  The width dimension is treated
      likewise.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same type as `grads`, shaped
    `[batch, orig_height, orig_width, channels]`: gradients with respect
    to the input image.
  """
  op_kwargs = {
      "grads": grads,
      "size": size,
      "align_corners": align_corners,
      "name": name,
  }
  return _op_def_lib.apply_op("ResizeNearestNeighborGrad", **op_kwargs)
_sample_distorted_bounding_box_outputs = ["begin", "size", "bboxes"]
_SampleDistortedBoundingBoxOutput = _collections.namedtuple(
    "SampleDistortedBoundingBox", _sample_distorted_bounding_box_outputs)


def sample_distorted_bounding_box(image_size, bounding_boxes, seed=None,
                                  seed2=None, min_object_covered=None,
                                  aspect_ratio_range=None, area_range=None,
                                  max_attempts=None,
                                  use_image_if_no_bounding_boxes=None,
                                  name=None):
  r"""Generate a single randomly distorted bounding box for an image.

  Bounding box annotations often accompany ground-truth labels in image
  recognition or object localization tasks.  A common training technique
  is to randomly distort an image while preserving its content, i.e.
  *data augmentation*.  This op outputs a randomly distorted localization
  of an object -- a bounding box -- given an `image_size`,
  `bounding_boxes` and a series of constraints.

  The result is a single bounding box usable to crop the original image,
  returned as three tensors: `begin`, `size` and `bboxes`.  The first two
  feed directly into `tf.slice`; the last may be passed to
  `tf.image.draw_bounding_boxes` to visualize the box.

  Bounding boxes are supplied and returned as
  `[y_min, x_min, y_max, x_max]`, with coordinates as floats in
  `[0.0, 1.0]` relative to the image width and height.

  For example,

  ```python
  # Generate a single distorted bounding box.
  begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
      tf.shape(image),
      bounding_boxes=bounding_boxes)

  # Draw the bounding box in an image summary.
  image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                bbox_for_draw)
  tf.image_summary('images_with_box', image_with_box)

  # Employ the bounding box to distort the image.
  distorted_image = tf.slice(image, begin, size)
  ```

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = true` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  Args:
    image_size: A 1-D `Tensor` of `uint8`, `int8`, `int16`, `int32` or
      `int64`, holding `[height, width, channels]`.
    bounding_boxes: A `float32` `Tensor` of shape `[batch, N, 4]`
      describing the N bounding boxes associated with the image.
    seed: An optional `int` (default `0`).  If either `seed` or `seed2`
      is non-zero the random generator is seeded with the given seed;
      otherwise a random seed is used.
    seed2: An optional `int` (default `0`); a second seed to avoid seed
      collisions.
    min_object_covered: An optional `float` (default `0.1`).  The cropped
      area must contain at least this fraction of any supplied bounding
      box; must be non-negative.  With 0, the crop need not overlap any
      supplied box.
    aspect_ratio_range: An optional list of `floats` (default
      `[0.75, 1.33]`); the crop's width/height aspect ratio must fall in
      this range.
    area_range: An optional list of `floats` (default `[0.05, 1]`); the
      crop must cover a fraction of the image within this range.
    max_attempts: An optional `int` (default `100`); number of attempts
      at generating a crop meeting the constraints before returning the
      entire image.
    use_image_if_no_bounding_boxes: An optional `bool` (default
      `False`); behavior when no boxes are supplied (see above).
    name: An optional name for the operation.

  Returns:
    A named tuple of `Tensor` objects (begin, size, bboxes):

    begin: Same type as `image_size`; 1-D
      `[offset_height, offset_width, 0]`, for `tf.slice`.
    size: Same type as `image_size`; 1-D
      `[target_height, target_width, -1]`, for `tf.slice`.
    bboxes: `float32`, shape `[1, 1, 4]` -- the distorted bounding box,
      for `tf.image.draw_bounding_boxes`.
  """
  op_kwargs = {
      "image_size": image_size,
      "bounding_boxes": bounding_boxes,
      "seed": seed,
      "seed2": seed2,
      "min_object_covered": min_object_covered,
      "aspect_ratio_range": aspect_ratio_range,
      "area_range": area_range,
      "max_attempts": max_attempts,
      "use_image_if_no_bounding_boxes": use_image_if_no_bounding_boxes,
      "name": name,
  }
  raw = _op_def_lib.apply_op("SampleDistortedBoundingBox", **op_kwargs)
  return _SampleDistortedBoundingBoxOutput._make(raw)
def _InitOpDefLibrary():
  """Parse the embedded op-definition text proto, register the ops with
  the global registry, and return an `OpDefLibrary` wrapping them."""
  op_list = _op_def_pb2.OpList()
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
  _op_def_registry.register_op_list(op_list)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(op_list)
  return library
_InitOpDefLibrary.op_list_ascii = """op {
name: "AdjustContrast"
input_arg {
name: "images"
type_attr: "T"
}
input_arg {
name: "contrast_factor"
type: DT_FLOAT
}
input_arg {
name: "min_value"
type: DT_FLOAT
}
input_arg {
name: "max_value"
type: DT_FLOAT
}
output_arg {
name: "output"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
deprecation {
version: 2
explanation: "Use AdjustContrastv2 instead"
}
}
op {
name: "AdjustContrastv2"
input_arg {
name: "images"
type: DT_FLOAT
}
input_arg {
name: "contrast_factor"
type: DT_FLOAT
}
output_arg {
name: "output"
type: DT_FLOAT
}
}
op {
name: "AdjustHue"
input_arg {
name: "images"
type: DT_FLOAT
}
input_arg {
name: "delta"
type: DT_FLOAT
}
output_arg {
name: "output"
type: DT_FLOAT
}
}
op {
name: "AdjustSaturation"
input_arg {
name: "images"
type: DT_FLOAT
}
input_arg {
name: "scale"
type: DT_FLOAT
}
output_arg {
name: "output"
type: DT_FLOAT
}
}
op {
name: "CropAndResize"
input_arg {
name: "image"
type_attr: "T"
}
input_arg {
name: "boxes"
type: DT_FLOAT
}
input_arg {
name: "box_ind"
type: DT_INT32
}
input_arg {
name: "crop_size"
type: DT_INT32
}
output_arg {
name: "crops"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "method"
type: "string"
default_value {
s: "bilinear"
}
allowed_values {
list {
s: "bilinear"
}
}
}
attr {
name: "extrapolation_value"
type: "float"
default_value {
f: 0
}
}
}
op {
name: "CropAndResizeGradBoxes"
input_arg {
name: "grads"
type: DT_FLOAT
}
input_arg {
name: "image"
type_attr: "T"
}
input_arg {
name: "boxes"
type: DT_FLOAT
}
input_arg {
name: "box_ind"
type: DT_INT32
}
output_arg {
name: "output"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "method"
type: "string"
default_value {
s: "bilinear"
}
allowed_values {
list {
s: "bilinear"
}
}
}
}
op {
name: "CropAndResizeGradImage"
input_arg {
name: "grads"
type: DT_FLOAT
}
input_arg {
name: "boxes"
type: DT_FLOAT
}
input_arg {
name: "box_ind"
type: DT_INT32
}
input_arg {
name: "image_size"
type: DT_INT32
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_HALF
type: DT_DOUBLE
}
}
}
attr {
name: "method"
type: "string"
default_value {
s: "bilinear"
}
allowed_values {
list {
s: "bilinear"
}
}
}
}
op {
name: "DecodeGif"
input_arg {
name: "contents"
type: DT_STRING
}
output_arg {
name: "image"
type: DT_UINT8
}
}
op {
name: "DecodeJpeg"
input_arg {
name: "contents"
type: DT_STRING
}
output_arg {
name: "image"
type: DT_UINT8
}
attr {
name: "channels"
type: "int"
default_value {
i: 0
}
}
attr {
name: "ratio"
type: "int"
default_value {
i: 1
}
}
attr {
name: "fancy_upscaling"
type: "bool"
default_value {
b: true
}
}
attr {
name: "try_recover_truncated"
type: "bool"
default_value {
b: false
}
}
attr {
name: "acceptable_fraction"
type: "float"
default_value {
f: 1
}
}
attr {
name: "dct_method"
type: "string"
default_value {
s: ""
}
}
}
op {
name: "DecodePng"
input_arg {
name: "contents"
type: DT_STRING
}
output_arg {
name: "image"
type_attr: "dtype"
}
attr {
name: "channels"
type: "int"
default_value {
i: 0
}
}
attr {
name: "dtype"
type: "type"
default_value {
type: DT_UINT8
}
allowed_values {
list {
type: DT_UINT8
type: DT_UINT16
}
}
}
}
op {
name: "DrawBoundingBoxes"
input_arg {
name: "images"
type_attr: "T"
}
input_arg {
name: "boxes"
type: DT_FLOAT
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
default_value {
type: DT_FLOAT
}
allowed_values {
list {
type: DT_FLOAT
type: DT_HALF
}
}
}
}
op {
name: "EncodeJpeg"
input_arg {
name: "image"
type: DT_UINT8
}
output_arg {
name: "contents"
type: DT_STRING
}
attr {
name: "format"
type: "string"
default_value {
s: ""
}
allowed_values {
list {
s: ""
s: "grayscale"
s: "rgb"
}
}
}
attr {
name: "quality"
type: "int"
default_value {
i: 95
}
}
attr {
name: "progressive"
type: "bool"
default_value {
b: false
}
}
attr {
name: "optimize_size"
type: "bool"
default_value {
b: false
}
}
attr {
name: "chroma_downsampling"
type: "bool"
default_value {
b: true
}
}
attr {
name: "density_unit"
type: "string"
default_value {
s: "in"
}
allowed_values {
list {
s: "in"
s: "cm"
}
}
}
attr {
name: "x_density"
type: "int"
default_value {
i: 300
}
}
attr {
name: "y_density"
type: "int"
default_value {
i: 300
}
}
attr {
name: "xmp_metadata"
type: "string"
default_value {
s: ""
}
}
}
op {
name: "EncodePng"
input_arg {
name: "image"
type_attr: "T"
}
output_arg {
name: "contents"
type: DT_STRING
}
attr {
name: "compression"
type: "int"
default_value {
i: -1
}
}
attr {
name: "T"
type: "type"
default_value {
type: DT_UINT8
}
allowed_values {
list {
type: DT_UINT8
type: DT_UINT16
}
}
}
}
op {
name: "ExtractGlimpse"
input_arg {
name: "input"
type: DT_FLOAT
}
input_arg {
name: "size"
type: DT_INT32
}
input_arg {
name: "offsets"
type: DT_FLOAT
}
output_arg {
name: "glimpse"
type: DT_FLOAT
}
attr {
name: "centered"
type: "bool"
default_value {
b: true
}
}
attr {
name: "normalized"
type: "bool"
default_value {
b: true
}
}
attr {
name: "uniform_noise"
type: "bool"
default_value {
b: true
}
}
}
op {
name: "HSVToRGB"
input_arg {
name: "images"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
default_value {
type: DT_FLOAT
}
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
}
op {
name: "NonMaxSuppression"
input_arg {
name: "boxes"
type: DT_FLOAT
}
input_arg {
name: "scores"
type: DT_FLOAT
}
input_arg {
name: "max_output_size"
type: DT_INT32
}
output_arg {
name: "selected_indices"
type: DT_INT32
}
attr {
name: "iou_threshold"
type: "float"
default_value {
f: 0.5
}
}
}
op {
name: "RGBToHSV"
input_arg {
name: "images"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
default_value {
type: DT_FLOAT
}
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
}
op {
name: "RandomCrop"
input_arg {
name: "image"
type_attr: "T"
}
input_arg {
name: "size"
type: DT_INT64
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "seed"
type: "int"
default_value {
i: 0
}
}
attr {
name: "seed2"
type: "int"
default_value {
i: 0
}
}
deprecation {
version: 8
explanation: "Random crop is now pure Python"
}
is_stateful: true
}
op {
name: "ResizeArea"
input_arg {
name: "images"
type_attr: "T"
}
input_arg {
name: "size"
type: DT_INT32
}
output_arg {
name: "resized_images"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "align_corners"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResizeBicubic"
input_arg {
name: "images"
type_attr: "T"
}
input_arg {
name: "size"
type: DT_INT32
}
output_arg {
name: "resized_images"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "align_corners"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResizeBilinear"
input_arg {
name: "images"
type_attr: "T"
}
input_arg {
name: "size"
type: DT_INT32
}
output_arg {
name: "resized_images"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "align_corners"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResizeBilinearGrad"
input_arg {
name: "grads"
type: DT_FLOAT
}
input_arg {
name: "original_image"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_HALF
type: DT_DOUBLE
}
}
}
attr {
name: "align_corners"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResizeNearestNeighbor"
input_arg {
name: "images"
type_attr: "T"
}
input_arg {
name: "size"
type: DT_INT32
}
output_arg {
name: "resized_images"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "align_corners"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResizeNearestNeighborGrad"
input_arg {
name: "grads"
type_attr: "T"
}
input_arg {
name: "size"
type: DT_INT32
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT32
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "align_corners"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SampleDistortedBoundingBox"
input_arg {
name: "image_size"
type_attr: "T"
}
input_arg {
name: "bounding_boxes"
type: DT_FLOAT
}
output_arg {
name: "begin"
type_attr: "T"
}
output_arg {
name: "size"
type_attr: "T"
}
output_arg {
name: "bboxes"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT16
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "seed"
type: "int"
default_value {
i: 0
}
}
attr {
name: "seed2"
type: "int"
default_value {
i: 0
}
}
attr {
name: "min_object_covered"
type: "float"
default_value {
f: 0.1
}
}
attr {
name: "aspect_ratio_range"
type: "list(float)"
default_value {
list {
f: 0.75
f: 1.33
}
}
}
attr {
name: "area_range"
type: "list(float)"
default_value {
list {
f: 0.05
f: 1
}
}
}
attr {
name: "max_attempts"
type: "int"
default_value {
i: 100
}
}
attr {
name: "use_image_if_no_bounding_boxes"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
"""
_op_def_lib = _InitOpDefLibrary()
|
jjas0nn/solvem
|
tensorflow/lib/python2.7/site-packages/tensorflow/python/ops/gen_image_ops.py
|
Python
|
mit
| 56,542
|
[
"Gaussian"
] |
dfd86fdddd731c0a5333e23a40079575a46c8b28d34c9ff5232498b3831c6dee
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Brian Meeker
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Brian Meeker <meeker.brian@gmail.com>
from __future__ import with_statement
import re
from datetime import datetime
from genshi.builder import tag
from trac.core import *
from trac.ticket import TicketSystem, Ticket
from trac.ticket.notification import BatchTicketNotifyEmail
from trac.util.datefmt import utc
from trac.util.text import exception_to_unicode, to_unicode
from trac.util.translation import _, tag_
from trac.web import IRequestHandler
from trac.web.chrome import add_warning, add_script_data
class BatchModifyModule(Component):
"""Ticket batch modification module.
This component allows multiple tickets to be modified in one request from
the custom query page. For users with the TICKET_BATCH_MODIFY permission
it will add a [TracBatchModify batch modify] section underneath custom
query results. Users can choose which tickets and fields they wish to
modify.
"""
implements(IRequestHandler)
fields_as_list = ['keywords', 'cc']
list_separator_re = re.compile(r'[;\s,]+')
list_connector_string = ', '
# IRequestHandler methods
def match_request(self, req):
    """IRequestHandler: claim only the dedicated /batchmodify URL."""
    path = req.path_info
    return path == '/batchmodify'
def process_request(self, req):
    """IRequestHandler: apply the submitted batch changes to the
    selected tickets, then redirect back to the originating query page.

    Requires the TICKET_BATCH_MODIFY permission.
    """
    req.perm.assert_permission('TICKET_BATCH_MODIFY')

    action = req.args.get('action')
    comment = req.args.get('batchmod_value_comment', '')
    tickets = self._get_selected_tickets(req)
    values = self._get_new_ticket_values(req)
    self._save_ticket_changes(req, tickets, values, comment, action)

    # Always redirect back to the query page we came from.
    req.redirect(req.session['query_href'])
def _get_new_ticket_values(self, req):
    """Collect the submitted ``batchmod_value_*`` fields into a dict.

    Fields that must not be batch-edited (ids, workflow fields, long
    text areas, ...) are skipped; only arguments actually present in the
    POST data are returned.
    """
    # Fields excluded from batch modification.
    skipped = ('id', 'resolution', 'status', 'owner', 'time',
               'changetime', 'summary', 'reporter', 'description')
    values = {}
    for field in TicketSystem(self.env).get_ticket_fields():
        name = field['name']
        if name in skipped or field['type'] == 'textarea':
            continue
        value = req.args.get('batchmod_value_' + name)
        if value is not None:
            values[name] = value
    return values
def _get_selected_tickets(self, req):
    """Return the ids of the tickets selected for batch modification.

    The query page submits the selection as a comma-separated string in
    the ``selected_tickets`` request argument.

    Args:
        req: the current request object.

    Returns:
        A list of ticket-id strings; empty when the selection is empty
        or the argument is missing entirely.
    """
    selected_tickets = req.args.get('selected_tickets')
    # Guard against a missing argument (None) as well as an empty
    # submission ('').  The original `== ''` test let None fall through
    # to None.split(',') and raise AttributeError.
    if not selected_tickets:
        return []
    return selected_tickets.split(',')
def add_template_data(self, req, data, tickets):
data['batch_modify'] = True
data['query_href'] = req.session['query_href'] or req.href.query()
data['action_controls'] = self._get_action_controls(req, tickets)
batch_list_modes = [
{'name': _("add"), 'value': "+"},
{'name': _("remove"), 'value': "-"},
{'name': _("add / remove"), 'value': "+-"},
{'name': _("set to"), 'value': "="},
]
add_script_data(req, batch_list_modes=batch_list_modes,
batch_list_properties=self.fields_as_list)
def _get_action_controls(self, req, tickets):
action_controls = []
ts = TicketSystem(self.env)
tickets_by_action = {}
for t in tickets:
ticket = Ticket(self.env, t['id'])
actions = ts.get_available_actions(req, ticket)
for action in actions:
tickets_by_action.setdefault(action, []).append(ticket)
sorted_actions = sorted(set(tickets_by_action.keys()))
for action in sorted_actions:
first_label = None
hints = []
widgets = []
ticket = tickets_by_action[action][0]
for controller in self._get_action_controllers(req, ticket,
action):
label, widget, hint = controller.render_ticket_action_control(
req, ticket, action)
if not first_label:
first_label = label
widgets.append(widget)
hints.append(hint)
action_controls.append((action, first_label, tag(widgets), hints))
return action_controls
def _get_action_controllers(self, req, ticket, action):
"""Generator yielding the controllers handling the given `action`"""
for controller in TicketSystem(self.env).action_controllers:
actions = [a for w, a in
controller.get_ticket_actions(req, ticket) or []]
if action in actions:
yield controller
def _save_ticket_changes(self, req, selected_tickets,
new_values, comment, action):
"""Save all of the changes to tickets."""
when = datetime.now(utc)
with self.env.db_transaction as db:
for id in selected_tickets:
t = Ticket(self.env, int(id))
_values = new_values.copy()
for field in self.fields_as_list:
if field in new_values:
old = t.values[field] if field in t.values else ''
new = new_values[field]
mode = req.args.get('batchmod_value_' + field +
'_mode')
new2 = req.args.get('batchmod_value_' + field +
'_secondary', '')
_values[field] = self._change_list(old, new, new2,
mode)
controllers = list(self._get_action_controllers(req, t,
action))
for controller in controllers:
_values.update(controller.get_ticket_changes(req, t,
action))
t.populate(_values)
t.save_changes(req.authname, comment, when=when)
for controller in controllers:
controller.apply_action_side_effects(req, t, action)
try:
tn = BatchTicketNotifyEmail(self.env)
tn.notify(selected_tickets, new_values, comment, action,
req.authname)
except Exception, e:
self.log.error("Failure sending notification on ticket batch"
"change: %s", exception_to_unicode(e))
add_warning(req, tag_("The changes have been saved, but an "
"error occurred while sending "
"notifications: %(message)s",
message=to_unicode(e)))
def _change_list(self, old_list, new_list, new_list2, mode):
changed_list = [k.strip()
for k in self.list_separator_re.split(old_list)
if k]
new_list = [k.strip()
for k in self.list_separator_re.split(new_list)
if k]
new_list2 = [k.strip()
for k in self.list_separator_re.split(new_list2)
if k]
if mode == '=':
changed_list = new_list
elif mode == '+':
for entry in new_list:
if entry not in changed_list:
changed_list.append(entry)
elif mode == '-':
for entry in new_list:
while entry in changed_list:
changed_list.remove(entry)
elif mode == '+-':
for entry in new_list:
if entry not in changed_list:
changed_list.append(entry)
for entry in new_list2:
while entry in changed_list:
changed_list.remove(entry)
return self.list_connector_string.join(changed_list)
|
i-rabot/tractogithub
|
tracformatter/trac/ticket/batch.py
|
Python
|
bsd-3-clause
| 8,661
|
[
"Brian"
] |
a30b289426e3090bb8b4567d147680b6d7f3eb6c9a2b18807ff023b02084971b
|
# $Id$
#
# Copyright (c) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" utility functionality for molecular similarity
includes a command line app for screening databases
Sample Usage:
python MolSimilarity.py -d data.gdb -t daylight_sig --idName="Mol_ID" \
--topN=100 --smiles='c1(C=O)ccc(Oc2ccccc2)cc1' --smilesTable=raw_dop_data \
--smilesName="structure" -o results.csv
"""
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
from rdkit.DataStructs.TopNContainer import TopNContainer
import sys,types
from rdkit.six.moves import cPickle
from rdkit.Chem.Fingerprints import FingerprintMols,DbFpSupplier
try:
from rdkit.VLib.NodeLib.DbPickleSupplier import _lazyDataSeq as _dataSeq
except ImportError:
_dataSeq=None
from rdkit import DataStructs
_cvsVersion="$Id$"
idx1 = _cvsVersion.find(':')+1
idx2 = _cvsVersion.rfind('$')
__VERSION_STRING="%s"%(_cvsVersion[idx1:idx2])
def _ConstructSQL(details,extraFields=''):
fields = '%s.%s'%(details.tableName,details.idName)
join = ''
if details.smilesTableName:
if details.smilesName:
fields = fields + ',%s'%(details.smilesName)
join='join %s smi on smi.%s=%s.%s'%(details.smilesTableName,
details.idName,
details.tableName,
details.idName)
if details.actTableName:
if details.actName:
fields = fields + ',%s'%(details.actName)
join = join + 'join %s act on act.%s=%s.%s'%(details.actTableName,
details.idName,
details.tableName,
details.idName)
#data = conn.GetData(fields=fields,join=join)
if extraFields:
fields += ','+extraFields
cmd = 'select %s from %s %s'%(fields,details.tableName,join)
return cmd
def ScreenInDb(details, mol):
  """ Screen `mol` against the fingerprint database described by `details`.

    If the similarity metric has an in-database implementation
    (Tanimoto/Dice/Cosine) the work is pushed into SQL; otherwise the
    fingerprints are pulled and screened in Python.

    Returns a list of result rows (id[, smiles][, act], similarity).

    NOTE(review): Python 2 module — uses apply() and bare except clauses.
  """
  try:
    # Fingerprint the probe with the same parameters stored on `details`.
    probeFp = apply(FingerprintMols.FingerprintMol, (mol,), details.__dict__)
  except:
    import traceback
    FingerprintMols.error('Error: problems fingerprinting molecule.\n')
    traceback.print_exc()
    return []
  if details.dbName and details.tableName:
    try:
      conn = DbConnect(details.dbName, details.tableName)
      if hasattr(details, 'dbUser'):
        conn.user = details.dbUser
      if hasattr(details, 'dbPassword'):
        conn.password = details.dbPassword
    except:
      import traceback
      FingerprintMols.error('Error: Problems establishing connection to database: %s|%s\n' % (details.dbName,
                                                                                              details.tableName))
      traceback.print_exc()
      # NOTE(review): execution continues with `conn` unbound on failure;
      # the later conn.GetCursor() would raise NameError.
  # Metrics without SQL support fall back to a Python-side screen.
  if details.metric not in (DataStructs.TanimotoSimilarity,
                            DataStructs.DiceSimilarity,
                            DataStructs.CosineSimilarity):
    data = GetFingerprints(details)
    res = ScreenFingerprints(details, data, mol)
  else:
    res = []
    # Map the metric to the in-database similarity function name.
    if details.metric == DataStructs.TanimotoSimilarity:
      func = 'rd_tanimoto'
      pkl = probeFp.ToBitString()
    elif details.metric == DataStructs.DiceSimilarity:
      func = 'rd_dice'
      pkl = probeFp.ToBitString()
    elif details.metric == DataStructs.CosineSimilarity:
      func = 'rd_cosine'
      pkl = probeFp.ToBitString()
    extraFields = "%s(%s,%s) as tani" % (func, DbModule.placeHolder, details.fpColName)
    cmd = _ConstructSQL(details, extraFields=extraFields)
    if details.doThreshold:
      # we need to do a subquery here:
      cmd = "select * from (%s) tmp where tani>%f" % (cmd, details.screenThresh)
    cmd += " order by tani desc"
    # topN only applies when not thresholding.
    if not details.doThreshold and details.topN > 0:
      cmd += " limit %d" % details.topN
    curs = conn.GetCursor()
    curs.execute(cmd, (pkl,))
    res = curs.fetchall()
  return res
def GetFingerprints(details):
  """ returns an iterable sequence of fingerprints

    each fingerprint will have a _fieldsFromDb member whose first entry is
    the id.

    Sources, in priority order: the database named in `details`, then the
    pickle file `details.inFileName`, else None.

    Bug fix: the file-reading branch initialized `supple` (typo) and then
    appended to the never-bound name `suppl`, raising NameError on the first
    record read.
  """
  if details.dbName and details.tableName:
    try:
      conn = DbConnect(details.dbName, details.tableName)
      if hasattr(details, 'dbUser'):
        conn.user = details.dbUser
      if hasattr(details, 'dbPassword'):
        conn.password = details.dbPassword
    except:
      import traceback
      FingerprintMols.error('Error: Problems establishing connection to database: %s|%s\n' % (details.dbName,
                                                                                              details.tableName))
      traceback.print_exc()
    cmd = _ConstructSQL(details, extraFields=details.fpColName)
    curs = conn.GetCursor()
    if _dataSeq:
      # Lazy sequence that depickles rows on demand.
      suppl = _dataSeq(curs, cmd, depickle=not details.noPickle, klass=DataStructs.ExplicitBitVect)
      _dataSeq._conn = conn
    else:
      # FIXME(review): `data` is not defined in this branch (NameError at
      # runtime); the intended row source cannot be reconstructed safely
      # here, so the original call is preserved and flagged.
      suppl = DbFpSupplier.ForwardDbFpSupplier(data, fpColName=details.fpColName)
  elif details.inFileName:
    conn = None
    try:
      inF = open(details.inFileName, 'r')
    except IOError:
      import traceback
      FingerprintMols.error('Error: Problems reading from file %s\n' % (details.inFileName))
      traceback.print_exc()
    # was: supple = []  (typo) -- `suppl` must be bound before the loop below
    suppl = []
    done = 0
    # Read (id, fingerprint) pickles until EOF (signalled by an exception).
    while not done:
      try:
        id, fp = cPickle.load(inF)
      except:
        done = 1
      else:
        fp._fieldsFromDb = [id]
        suppl.append(fp)
  else:
    suppl = None
  return suppl
def ScreenFingerprints(details, data, mol=None, probeFp=None):
  """ Returns a list of results

    Screens the probe (either `probeFp` or a fingerprint computed from
    `mol`) against the fingerprints in `data` using `details.metric`.
    Results are (id, score) tuples; either the top N or everything passing
    the threshold, depending on `details`.

    NOTE(review): Python 2 module — uses apply() and types.TupleType.
  """
  if probeFp is None:
    try:
      probeFp = apply(FingerprintMols.FingerprintMol, (mol,), details.__dict__)
    except:
      import traceback
      FingerprintMols.error('Error: problems fingerprinting molecule.\n')
      traceback.print_exc()
      return []
  if not probeFp:
    return []

  res = []
  # Keep only the best topN when not thresholding; otherwise collect all.
  if not details.doThreshold and details.topN > 0:
    topN = TopNContainer(details.topN)
  else:
    topN = []
  res = []
  count = 0
  for pt in data:
    fp1 = probeFp
    if not details.noPickle:
      # `pt` is either an (id, fp) pair or a fingerprint carrying its id.
      if type(pt) in (types.TupleType, types.ListType):
        id, fp = pt
      else:
        fp = pt
        id = pt._fieldsFromDb[0]
      score = DataStructs.FingerprintSimilarity(fp1, fp, details.metric)
    else:
      # Raw pickles: the metric is applied to the pickled string directly.
      id, pkl = pt
      score = details.metric(fp1, str(pkl))
    if topN:
      topN.Insert(score, id)
    elif not details.doThreshold or \
        (details.doThreshold and score >= details.screenThresh):
      res.append((id, score))
    count += 1
    # Optional early-exit cap on the number of fingerprints examined.
    if hasattr(details, 'stopAfter') and count >= details.stopAfter:
      break
  # Drain the top-N container (yields score, id pairs) into the result list.
  for score, id in topN:
    res.append((id, score))
  return res
def ScreenFromDetails(details, mol=None):
  """ Returns a list of results

    Top-level driver: resolves the probe molecule (argument, details.probeMol
    or details.probeSmiles), dispatches to the in-database or in-Python
    screen, and optionally writes CSV rows to details.outFileName.
  """
  if not mol:
    if not details.probeMol:
      smi = details.probeSmiles
      try:
        mol = Chem.MolFromSmiles(smi)
      except:
        import traceback
        FingerprintMols.error('Error: problems generating molecule for smiles: %s\n' % (smi))
        traceback.print_exc()
        return
    else:
      mol = details.probeMol
  if not mol:
    return

  if details.outFileName:
    try:
      outF = open(details.outFileName, 'w+')
    except IOError:
      FingerprintMols.error("Error: could not open output file %s for writing\n" % (details.outFileName))
      return None
  else:
    outF = None
  # useDbSimilarity pushes the similarity computation into the database.
  if not hasattr(details, 'useDbSimilarity') or not details.useDbSimilarity:
    data = GetFingerprints(details)
    res = ScreenFingerprints(details, data, mol)
  else:
    res = ScreenInDb(details, mol)
  if outF:
    # NOTE(review): outF is never closed/flushed explicitly here.
    for pt in res:
      outF.write(','.join([str(x) for x in pt]))
      outF.write('\n')
  return res
_usageDoc="""
Usage: MolSimilarity.py [args] <fName>
If <fName> is provided and no tableName is specified (see below),
data will be read from the pickled file <fName>. This file should
contain a series of pickled (id,fingerprint) tuples.
NOTE: at the moment the user is responsible for ensuring that the
fingerprint parameters given at run time (used to fingerprint the
probe molecule) match those used to generate the input fingerprints.
Command line arguments are:
- --smiles=val: sets the SMILES for the input molecule. This is
a required argument.
- -d _dbName_: set the name of the database from which
to pull input fingerprint information.
- -t _tableName_: set the name of the database table
from which to pull input fingerprint information
- --smilesTable=val: sets the name of the database table
which contains SMILES for the input fingerprints. If this
information is provided along with smilesName (see below),
the output file will contain SMILES data
- --smilesName=val: sets the name of the SMILES column
in the input database. Default is *SMILES*.
- --topN=val: sets the number of results to return.
Default is *10*.
- --thresh=val: sets the similarity threshold.
- --idName=val: sets the name of the id column in the input
database. Default is *ID*.
- -o _outFileName_: name of the output file (output will
be a CSV file with one line for each of the output molecules
- --dice: use the DICE similarity metric instead of Tanimoto
- --cosine: use the cosine similarity metric instead of Tanimoto
- --fpColName=val: name to use for the column which stores
fingerprints (in pickled format) in the output db table.
Default is *AutoFragmentFP*
- --minPath=val: minimum path length to be included in
fragment-based fingerprints. Default is *1*.
- --maxPath=val: maximum path length to be included in
fragment-based fingerprints. Default is *7*.
- --nBitsPerHash: number of bits to be set in the output
fingerprint for each fragment. Default is *4*.
- --discrim: use of path-based discriminators to hash bits.
Default is *false*.
- -V: include valence information in the fingerprints
Default is *false*.
- -H: include Hs in the fingerprint
Default is *false*.
- --useMACCS: use the public MACCS keys to do the fingerprinting
(instead of a daylight-type fingerprint)
"""
if __name__ == '__main__':
  # Command-line entry point: argument parsing (and its usage text) is
  # delegated to FingerprintMols.
  FingerprintMols.message("This is MolSimilarity version %s\n\n" % (__VERSION_STRING))
  FingerprintMols._usageDoc = _usageDoc
  details = FingerprintMols.ParseArgs()
  ScreenFromDetails(details)
|
soerendip42/rdkit
|
rdkit/Chem/Fingerprints/MolSimilarity.py
|
Python
|
bsd-3-clause
| 10,705
|
[
"RDKit"
] |
c8b44f6e8589a318aea8bf2a769ea2d29f8f6f8d8a13c51bb2310c06adac2c0d
|
import numpy as np
from numpy import random
import glob, os
class TrainingManager:
    """Builds a NeuralNetwork from class folders and drives its training.

    NOTE(review): this file is unfinished and will not run as-is — it mixes
    Python 2 print statements with Python 3 print() calls, references
    undefined names (``self.layers.len``, ``calculateTotalError``,
    ``outputErrorDerivates``) and uses class-level mutable attributes that
    are shared across instances.  Comments below flag the issues in place.
    """
    # NOTE(review): class-level placeholder; __init__ below binds a *local*
    # `neuralNetwork` instead of `self.neuralNetwork`, so this is never set.
    neuralNetwork = ''

    class NeuralNetwork:

        class Layer:

            class Neuron:
                # NOTE(review): these are class attributes; `weights` and
                # `weightsAfterErrorSubtracted` are mutable and shared by
                # every Neuron instance unless shadowed in __init__.
                netValue = 0.
                activationValue = 0.
                weights = [] # each neuron should have weights for each of the neurons in the layer before it
                weightsAfterErrorSubtracted = []
                incomingError = 0.

                def __init__(self, numberOfWeights): #note - an input layer neuron will have no weights
                    # numpy.random.random(n): uniform weights in [0, 1).
                    self.weights = random.random(int(numberOfWeights))
                    #print(random.random(int(numberOfWeights)))

            neurons = [] #1xN matrix, where N = number of neurons in this layer. Each neuron is a float value between 0 and 1.
            #This holds the "activation" values, which are the sum of activations in the previous layer times the weights.
            totalLayerError = 0. #used for the backprop, this is the sum of (actual - expected) * sigmoidDerivative(actual) for all outputs.
            #or for hidden layers, it's the sum of the above * each of the neuron activations (outputs).

            def __init__(self, numberOfNeurons, numberOfWeightsPerNeuron):
                # NOTE(review): appends to the class-level `neurons` list, so
                # all Layer instances share one neuron list — likely a bug.
                for neuron in range(0, numberOfNeurons):
                    print('In Layer(): number of neurons: ' + str(numberOfNeurons) + ', number of weights per neuron: ' + str(numberOfWeightsPerNeuron))
                    self.neurons.append(self.Neuron(numberOfWeightsPerNeuron))

        # NOTE(review): class-level mutable state, shared across instances.
        layers = []
        totalError = 999. #some big number when we first init
        outputErrorDerivatives = []
        learningRate = 0.5
        classNames = [] #These will be strings which correspond to neurons in the output layer. Each neuron will represent an object type.
        #The strings will be the names of those object types. The values in those output layer neurons will be the level
        #of certainty the neural net has assigned to the input belonging to one of those classes.

        def __init__(self, numberOfLayers, numberOfNeuronsPerLayer, totalClasses):
            #empty - none of the properties are set yet in this constructor.
            # Taper the layer width linearly from the input width down to
            # one neuron per output class.
            differenceInputToOutput = numberOfNeuronsPerLayer - totalClasses #I want one neuron per class in the output layer.
            numberOfNeuronsToDropPerLayer = differenceInputToOutput / numberOfLayers
            for layer in range(numberOfLayers): #Ultimately, I want the number of neurons per layer to steadily decrease each layer until the output
                #matches the total classes I want potentially recognized.
                # NOTE(review): float division above makes this a float count.
                numberOfNeuronsPerLayer -= numberOfNeuronsToDropPerLayer
                print('Number of neurons per layer: ' + str(numberOfNeuronsPerLayer))
                self.addLayer(numberOfNeuronsPerLayer) #This method can be made more complex by making layers have different numbers of neurons.
                #This method is already setup to account for different numbers of neurons in different layers.

        def addLayer(self, numberOfNeuronsInLayer):
            """Append a Layer fully connected to the current last layer."""
            numberOfNeuronsInLastLayer = 0.
            lastLayerIndex = len(self.layers) - 1
            print('Last layer index: ' + str(lastLayerIndex))
            #Each neuron in this layer should have a weight to every neuron in the layer prior:
            # NOTE(review): `> 0` skips the second layer too; the first
            # hidden layer ends up with zero weights (should be `>= 0`?).
            if lastLayerIndex > 0:
                numberOfNeuronsInLastLayer = len(self.layers[lastLayerIndex].neurons)
            layer = self.Layer(numberOfNeuronsInLayer, numberOfNeuronsInLastLayer)
            print('appending' + str(layer))
            self.layers.append(layer)

        def doTrainEpoch(self, input, expectedOutput):
            # NOTE(review): all four calls below lack `self.` and will raise
            # NameError; `calculateTotalError` does not exist (calculateError
            # does), and forwardPropagateNetwork takes no input argument.
            actualOutput = forwardPropagateNetwork(input)
            calculateTotalError(expectedOutput, actualOutput)
            backpropagateNetwork()
            applyBackproppedWeightChanges()

        #I found a good reference for doing propagation with numpy.dot multiplication here:
        #https://databoys.github.io/Feedforward/
        def forwardPropagateNetwork(self):
            # NOTE(review): `self.layers.len` is not valid Python — lists
            # have no `.len` attribute (use len(self.layers)); `sigmoid` and
            # `layers` below are also referenced without `self.`.
            for layer in range(0, self.layers.len - 2): #not going to forward prop from the output layer, so we stop at one before it.
                activations = []
                for neuron in self.layers[layer].neurons:
                    activations.append(neuron.activationValue)
                for neuron in self.layers[layer + 1].neurons:
                    neuron.netValue = np.dot(activations, neuron.weights) #spiffy optimized function that multiplies
                    #them all together in order and sums them up.
                    neuron.activationValue = sigmoid(neuron, False) #run the activation function on the neuron for the
                    #final value - second arg is false because we aren't backpropping
            return self.layers[layers.len - 1]

        def calculateError(self, expectedOuput, actualOutput):
            # NOTE(review): `.len` attribute again; the Python 2 print
            # statement below is a syntax error under Python 3; the loop
            # subtracts whole sequences rather than indexed elements and
            # drops the last element (range stop is len-1);
            # `outputErrorDerivates` is a typo for `outputErrorDerivatives`.
            if (expectedOuput.len != actualOutput.len):
                print "Expected and Actual Output lengths do not match."
                return
            self.totalError = 0.
            self.outputErrorDerivatives = []
            for output in range(0, len(expectedOuput)-1):
                self.totalError += .5*(actualOutput - expectedOutput)**2
                self.outputErrorDerivatives.append(actualOutput - expectedOutput)
            #Setup the error on the output neurons for the backprop:
            outputLayer = self.layers[self.layers.len - 1]
            for output in range(0, self.outputErrorDerivatives.len - 1):
                error = self.outputErrorDerivates[output]
                outputLayer.neurons[output].incomingError = error * sigmoid(error, True)

        def backpropagateNetwork(self):
            # Walk the layers from output towards input.
            for layer in range(self.layers.len - 1, 1, -1): #We don't backprop all the way to the input layer because that layer doesn't have its
                #own weights.
                #I visualize weights as belonging to the layer to the right, with the input layer all
                #the way to the left.
                nextLayer = layer - 1
                # NOTE(review): missing `self.`; also passes indices where
                # the callee expects layer objects.
                backpropFromThisLayerToNextLayer(layer, nextLayer)

        def backpropFromThisLayerToNextLayer(self, thisLayer, nextLayer):
            newWeightsInNextLayer = []
            #for neuron in thisLayer.neurons:
            for outNeuron in nextLayer.neurons:
                incomingErrorSum = 0.
                # NOTE(review): `thisLayer.neuron` should be `.neurons`;
                # `learningRate` lacks `self.`.
                for inNeuron in thisLayer.neuron:
                    incomingErrorSum += learningRate * inNeuron.incomingError * outNeuron.activationValue
                outNeuron.incomingError = incomingErrorSum
                #we'll subtract these values from the weights in the next layer once we
                #are completely done with the backprop all the way through all the layers.

        def applyBackproppedWeightChanges(self):
            # NOTE(review): `range(1, self.layers - 1)` uses the list itself
            # as a bound, and rebinding the loop variable `weight` does not
            # modify the stored weights.
            for layer in range(1, self.layers - 1):
                for neuron in layer.neurons:
                    for weight in neuron.weights:
                        weight = weight - neuron.incomingError

        def printTop5Results(self):
            # NOTE(review): Python 2 print statement — syntax error on py3.
            print "Implement your print function, mister!"

        n = 1000 #How accurate do we want to take the exponential function (below) - we use the exponential function in the activation function.
        e = (1.0 + 1.0/n)**n #note: at this link: https://databoys.github.io/Feedforward/
        #It actually does an activation function: sigmoid(x) = 1/(1 + np.exp(-x)) --> the np.exp is the e function to the left.
        #and activation derivative = sigmoid(y) - (1 - sigmoid(y))

        #straight from: https://databoys.github.io/Feedforward/
        #also from: https://databoys.github.io/Feedforward/
        #Using one function for both feedforward and backprop was inspired by Siraj Raval videos (thanks Siraj!)
        # NOTE(review): defined without `self`; callers pass a Neuron object,
        # not a number, so np.exp(-x) would fail at runtime.
        def sigmoid(x, derivative):
            sigmoid = 1 / (1 + np.exp(-x))
            if (derivative == True):
                return sigmoid * (1.0 - sigmoid)
            return sigmoid

        #Rectified Linear Unit activation function.
        #It outputs zero as long as x is zero, and outputs x when x is >= 0.
        #What the heck is the derivative of this thing? Maybe I won't use it yet.
        def reluActivationFunction(x):
            # NOTE(review): `numpy` is not imported under that name (the file
            # imports it as `np`), so this raises NameError if called.
            return numpy.maximum(x, 0)

    def __init__(self, folderNames):
        '''This creates a neural network and puts input into it, runs training, and saves the memory/weight matrix'''
        #Constructor for neural network has these parameters: numberOfLayers, numberOfNeuronsPerLayer, totalClasses - in that order.
        #Each layer will have progressively less neurons from the input layer to the output layer. The output layer will have only one
        #neuron per type of object it needs to recognize (the class).
        totalClasses = len(folderNames)
        numberOfNeuronsPerLayer = 100
        # NOTE(review): bound to a local, not self.neuralNetwork, so the
        # network is discarded when __init__ returns.
        neuralNetwork = self.NeuralNetwork(10, numberOfNeuronsPerLayer, totalClasses)
        #TO DO - here is the continuance point...
        #This method will create a new network and train it - something to couple training examples with their proper output.
        #I've already gotten the images arranged in folders by classes - the class comes from the Google image search term.
        #So whatever the folder name is, that should be the expected output. However many folders I have, I should have that many neurons in the
        #output layer.
        #The training manager then would just grab images at random from the various folders, and in calculating the error, it would expect that neuron
        #representing that folder's class to be 1, and the other neurons to be 0.

    # NOTE(review): missing `self`; `inputLayer` and `NeuralNetwork` are
    # undefined in this scope — this method cannot run as written.
    def trainToConvergence():
        network = NeuralNetwork()
        network.addLayer(inputLayer)
        for x in range(inputLayer.len, 2, 100):
            network.addLayer(x)
        #Loop until error is under a given threshold or time spent is over a given threshold:
        #2. Run imageConvert.py in order to get an image (or batch) from a random folder.
        #3. Set the expected output to be the output neuron for that folder/class = 1, and all other output neurons 0.
        #4. Move the image into the input layer (imageConvert.py should have it ready to go).
        #5. Forward propagate.
        #6. Calculate error (easy, considering #3).
        #7. Print error.
        #8. Backpropagate.
        #When finished looping, save the weights.

    #Once I have a network trained and the weights saved to file, I can grab the weights and do
    #classifying on an arbitrary image to see if it matches any of my trained classes:
    # NOTE(review): missing `self`; `layers` is undefined here.
    def runNeuralNetwork(inputLayer):
        #Load the saved weights from file and initialize the network
        #set layers to the data gotten from file.
        network = NeuralNetwork(layers);
        classes = ["dog","cat","monkey","etc"] #need to automate setting these based on input from command line and chromedriver
        network.forwardPropagateNetwork(inputLayer)
        network.printTop5Results()
#Idea for later: some kind of metadata analysis on the Google images (file names - or resubmit the image to Google image search and
#make sure that the file names returned have the same class name)
def main():
    # Demo entry point: builds a TrainingManager for a single class folder
    # named "test".  NOTE(review): the instance is discarded immediately.
    mgr = TrainingManager(["test"])

main()
|
DiginessForever/machineLearning
|
pythonReluNeuralNet.py
|
Python
|
mit
| 10,087
|
[
"NEURON"
] |
281a0909a27b98bab649281b8bddd6b2576c35fefa81c3067bb026f786fc3c83
|
from __future__ import division
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
# Fancy plotting
try:
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
color_names = ["windows blue",
"red",
"amber",
"faded green",
"dusty purple",
"crimson",
"greyish"]
colors = sns.xkcd_palette(color_names)
except:
colors = ['b' ,'r', 'y', 'g']
from pybasicbayes.distributions import Regression
from pybasicbayes.util.text import progprint_xrange
from pypolyagamma.distributions import BernoulliRegression
from pylds.models import ZeroInflatedCountLDS, LDS
npr.seed(0)
# Parameters
rho = 0.5 # Sparsity (1-probability of deterministic zero)
D_obs = 10
D_latent = 2
D_input = 0
T = 2000
### True LDS Parameters
mu_init = np.array([0.,1.])
sigma_init = 0.01*np.eye(2)
A = 0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
[np.sin(np.pi/24), np.cos(np.pi/24)]])
B = np.ones((D_latent, D_input))
sigma_states = 0.01*np.eye(2)
C = np.random.randn(D_obs, D_latent)
D = np.zeros((D_obs, D_input))
b = -2.0 * np.ones((D_obs, 1))
### Simulate from a Bernoulli LDS
truemodel = ZeroInflatedCountLDS(
rho=rho,
dynamics_distn=Regression(A=np.hstack((A,B)), sigma=sigma_states),
emission_distn=BernoulliRegression(D_out=D_obs, D_in=D_latent + D_input,
A=np.hstack((C,D)), b=b))
inputs = np.random.randn(T, D_input)
data, stateseq = truemodel.generate(T, inputs=inputs)
dense_data = data.toarray()
true_rate = rho * truemodel.emission_distn.mean(np.hstack((stateseq, inputs)))
### First fit a zero inflated model
zi_model = ZeroInflatedCountLDS(
rho=rho,
dynamics_distn=Regression(nu_0=D_latent + 2,
S_0=D_latent * np.eye(D_latent),
M_0=np.zeros((D_latent, D_latent + D_input)),
K_0=(D_latent + D_input) * np.eye(D_latent + D_input)),
emission_distn=BernoulliRegression(D_out=D_obs, D_in=D_latent + D_input))
zi_model.add_data(data, inputs=inputs)
# Run a Gibbs sampler
N_samples = 500
def gibbs_update(model):
    # One Gibbs sweep: resample all latent variables/parameters, then return
    # (log likelihood, latent Gaussian states, smoothed observations) for the
    # single data sequence attached to the model.
    model.resample_model()
    return model.log_likelihood(), \
        model.states_list[0].gaussian_states, \
        model.states_list[0].smooth()
zi_lls, zi_x_smpls, zi_smoothed_obss = \
zip(*[gibbs_update(zi_model) for _ in progprint_xrange(N_samples)])
### Now fit a standard model
std_model = LDS(
dynamics_distn=Regression(nu_0=D_latent + 2,
S_0=D_latent * np.eye(D_latent),
M_0=np.zeros((D_latent, D_latent + D_input)),
K_0=(D_latent + D_input) * np.eye(D_latent + D_input)),
emission_distn=BernoulliRegression(D_out=D_obs, D_in=D_latent + D_input))
std_model.add_data(dense_data, inputs=inputs)
# Run a Gibbs sampler
std_lls, std_x_smpls, std_smoothed_obss = \
zip(*[gibbs_update(std_model) for _ in progprint_xrange(N_samples)])
# Plot the log likelihood over iterations
# plt.figure(figsize=(10,6))
# plt.plot(lls,'-b')
# plt.plot([0,N_samples], truemodel.log_likelihood() * np.ones(2), '-k')
# plt.xlabel('iteration')
# plt.ylabel('log likelihood')
# Plot the smoothed observations
fig = plt.figure(figsize=(10,10))
N_subplots = min(D_obs, 6)
ylims = (-0.1, 1.1)
xlims = (0, min(T,1000))
n_to_plot = np.arange(min(N_subplots, D_obs))
for i,j in enumerate(n_to_plot):
ax = fig.add_subplot(N_subplots,1,i+1)
# Plot spike counts
given_ts = np.where(dense_data[:,j]==1)[0]
ax.plot(given_ts, np.ones_like(given_ts), 'ko', markersize=5)
# Plot the inferred rate
ax.plot([-10], [0], 'ko', lw=2, label="obs.")
ax.plot(zi_smoothed_obss[-1][:, j], '-', color=colors[0], label="zero inflated")
ax.plot(std_smoothed_obss[-1][:, j], '-', color=colors[1], label="standard")
ax.plot(true_rate[:, j], '--k', lw=2, label="true rate")
if i == 0:
plt.legend(loc="upper center", ncol=4, bbox_to_anchor=(0.5, 1.5))
if i == N_subplots - 1:
plt.xlabel('time index')
else:
ax.set_xticklabels([])
ax.set_xlim(xlims)
ax.set_ylim(0, 1.1)
ax.set_ylabel("$x_%d(t)$" % (j+1))
plt.savefig("aux/zeroinflation.png")
plt.show()
|
mattjj/pylds
|
examples/zeroinflated_bernoulli_lds.py
|
Python
|
mit
| 4,376
|
[
"Amber"
] |
8e6f58848192ce0bf837123322d140db98884a176cdd9964c18bf2e1726077fa
|
from django.shortcuts import render_to_response
def docs_view(request):
    # Documentation landing page.
    # NOTE(review): render_to_response was removed in Django 2.0; these views
    # should migrate to render(request, template).
    return render_to_response("documentation/index.html", {})
def structures_docs(request):
    # Structures documentation page.
    return render_to_response("documentation/structures.html", {})
def vasp_docs(request):
    # VASP calculations documentation page.
    return render_to_response("documentation/vasp.html", {})
def pots_docs(request):
    # Potentials documentation page.
    return render_to_response("documentation/pots.html", {})
def overview_docs(request):
    # Project overview documentation page.
    return render_to_response("documentation/overview.html", {})
def pubs_docs(request):
    # Publications documentation page.
    return render_to_response("documentation/pubs.html", {})
|
wolverton-research-group/qmpy
|
qmpy/web/views/documentation.py
|
Python
|
mit
| 591
|
[
"VASP"
] |
3a19ec945d10cc57023f76f32f2352db16c3de2ea9083e704b24c6c63404409e
|
import numpy as np
import re
import numba
import time
import tempfile
import sys
# Random number generator compatible with C++ and FORTRAN versions
_IA=16807
_IM=2147483647
_IQ=127773
_IR=2836
_NTAB=32
_NDIV=(1+(_IM-1)//_NTAB)
_EPS=3.0e-16
_AM=1.0/_IM
_RNMX=(1.0-_EPS)
@numba.jit(cache=True,fastmath=True)
def _U01(idum,iy,iv):
    # One step of a Park-Miller minimal-standard generator with a
    # Bays-Durham shuffle (the constants above match Numerical Recipes'
    # ran1), kept bit-compatible with the C++/FORTRAN simplemd versions.
    # State is threaded explicitly (idum, iy, iv) because numba-jitted code
    # here avoids mutable object state; returns (idum, iy, uniform in (0,1)).
    if idum <= 0 or iy == 0:
        # (Re)initialize: force a positive seed and warm up the shuffle
        # table iv with _NTAB+8 draws.
        if (-idum < 1):
            idum=1
        else:
            idum=-idum
        for j in range(_NTAB+7,-1,-1):
            k=idum//_IQ
            # Schrage's trick: compute (_IA*idum) % _IM without overflow.
            idum=_IA*(idum-k*_IQ)-_IR*k
            if idum<0:
                idum += _IM
            if j < _NTAB:
                iv[j] = idum
        iy=iv[0]
    k=idum//_IQ
    idum=_IA*(idum-k*_IQ)-_IR*k
    if idum<0:
        idum += _IM
    # Pick a slot from the shuffle table and refill it.
    j=iy//_NDIV
    iy=iv[j]
    iv[j]=idum
    temp=_AM*iy
    # Clamp away from 1.0 so the result is strictly inside (0,1).
    if temp > _RNMX:
        temp = _RNMX
    return idum,iy,temp
@numba.jit(cache=True,fastmath=True)
def _U01_loop(idum,iy,iv,numbers):
    # Fill `numbers` in place with consecutive uniform deviates; returns the
    # advanced generator state (idum, iy).  iv is mutated in place.
    for i in range(len(numbers)):
        idum,iy,numbers[i] = _U01(idum,iy,iv)
    return idum,iy
@numba.jit(cache=True,fastmath=True)
def _Gaussian(idum,iy,iv,switchGaussian,saveGaussian):
    # Box-Muller (polar/Marsaglia form): each pass produces two independent
    # normal deviates; the second is cached in saveGaussian and returned on
    # the next call (switchGaussian flags a cached value being available).
    # Returns (idum, iy, switchGaussian, deviate, saveGaussian).
    if(switchGaussian):
        return idum,iy,False,0.0,saveGaussian
        # NOTE(review): the cached deviate is returned in the saveGaussian
        # slot here and swapped into place by the caller (see Random.Gaussian
        # / _Gaussian_loop unpacking).
    else:
        # Rejection-sample a point uniformly inside the unit disk.
        while True:
            idum,iy,r1=_U01(idum,iy,iv)
            idum,iy,r2=_U01(idum,iy,iv)
            v1=2.0*r1-1.0
            v2=2.0*r2-1.0
            rsq=v1*v1+v2*v2
            if(rsq<1.0 and rsq>0.0):
                break
        fac=np.sqrt(-2.0*np.log(rsq)/rsq)
        # v1*fac is consumed now; v2*fac is cached for the next call.
        return idum,iy,True,v1*fac,v2*fac
@numba.jit(cache=True,fastmath=True)
def _Gaussian_loop(idum,iy,iv,switchGaussian,saveGaussian,numbers):
    # Fill `numbers` in place with normal deviates, threading the generator
    # state and the cached-second-deviate pair through each call.
    for i in range(len(numbers)):
        idum,iy,switchGaussian,saveGaussian,numbers[i] = _Gaussian(
            idum,iy,iv,switchGaussian,saveGaussian
        )
    return idum,iy,switchGaussian,saveGaussian
class Random():
    """Stateful wrapper around the jitted ran1-style generator.

    Reproduces the random streams of the C++/FORTRAN simplemd codes for a
    given integer seed.  Holds the generator state (idum, iy, iv) plus the
    Box-Muller second-deviate cache (switchGaussian, saveGaussian).
    """

    def __init__(self, seed=0):
        self.switchGaussian = False      # is a cached Gaussian deviate pending?
        self.saveGaussian = 0.0          # the cached deviate itself
        self.iy = 0                      # ran1 shuffle output
        self.iv = np.zeros(_NTAB, dtype=int)  # ran1 shuffle table
        self.idum = 0
        self.idum = seed                 # seed <= 0 triggers (re)initialization

    def U01(self, shape=None):
        """Uniform deviate(s) in (0,1): a scalar, or an array of `shape`."""
        if shape is None:
            self.idum, self.iy, value = _U01(self.idum, self.iy, self.iv)
            return value
        out = np.zeros(np.prod(shape))
        self.idum, self.iy = _U01_loop(self.idum, self.iy, self.iv, out)
        return out.reshape(shape)

    def Gaussian(self, shape=None):
        """Standard-normal deviate(s): a scalar, or an array of `shape`."""
        if shape is None:
            (self.idum, self.iy, self.switchGaussian,
             self.saveGaussian, value) = _Gaussian(
                self.idum, self.iy, self.iv,
                self.switchGaussian, self.saveGaussian
            )
            return value
        out = np.zeros(np.prod(shape))
        (self.idum, self.iy,
         self.switchGaussian, self.saveGaussian) = _Gaussian_loop(
            self.idum, self.iy, self.iv,
            self.switchGaussian, self.saveGaussian, out
        )
        return out.reshape(shape)
@numba.jit(cache=True,fastmath=True)
def _compute_forces(cell, positions, forcecutoff, neighbors, point, forces):
    # Lennard-Jones forces/energy over a precomputed Verlet neighbor list
    # (see _compute_list).  `forces` is overwritten in place; the potential
    # energy (shifted so it is zero at the cutoff) is returned.
    # Reduced LJ units: epsilon = sigma = 1.
    engconf=0.0
    forces.fill(0.0)
    forcecutoff2=forcecutoff*forcecutoff
    # Energy shift making the pair potential continuous at the cutoff.
    engcorrection=4.0*(1.0/forcecutoff2**6-1.0/forcecutoff2**3)
    for i in range(len(positions)):
        # Neighbors of atom i live in neighbors[point[i]:point[i+1]].
        for j in range(point[i],point[i+1]):
            ja=neighbors[j]
            distancex=positions[i,0]-positions[ja,0]
            distancey=positions[i,1]-positions[ja,1]
            distancez=positions[i,2]-positions[ja,2]
            # Minimum-image convention per component.
            distancex-=np.floor(distancex/cell[0]+0.5)*cell[0]
            distancey-=np.floor(distancey/cell[1]+0.5)*cell[1]
            distancez-=np.floor(distancez/cell[2]+0.5)*cell[2]
            distance2=distancex**2+distancey**2+distancez**2
            if distance2 <= forcecutoff2:
                invdistance2=1.0/distance2
                invdistance6=invdistance2*invdistance2*invdistance2
                e=4.0*invdistance6*invdistance6-4.0*invdistance6-engcorrection
                engconf+=e
                # |F|/r for the LJ pair force; multiply by the displacement
                # components to get the Cartesian force.
                fmod=2.0*4.0*(6.0*invdistance6*invdistance6-3.0*invdistance6)*invdistance2
                fx=fmod*distancex
                fy=fmod*distancey
                fz=fmod*distancez
                # Newton's third law: accumulate on both atoms.
                forces[i,0]+=fx
                forces[i,1]+=fy
                forces[i,2]+=fz
                forces[ja,0]-=fx
                forces[ja,1]-=fy
                forces[ja,2]-=fz
    return engconf
@numba.jit(cache=True,fastmath=True)
def _compute_list(cell,positions,listcutoff,nlist,point):
    # Build a half (j > i) Verlet neighbor list: neighbors of atom i are
    # stored in nlist[point[i]:point[i+1]].  nlist and point are filled in
    # place; nlist must be pre-sized (maxneighbors).
    listcutoff2=listcutoff**2
    point[0]=0
    for i in range(len(positions)):
        point[i+1]=point[i]
        for j in range(i+1,len(positions)):
            distancex=positions[i,0]-positions[j,0]
            distancey=positions[i,1]-positions[j,1]
            distancez=positions[i,2]-positions[j,2]
            # Minimum-image convention per component.
            distancex-=np.floor(distancex/cell[0]+0.5)*cell[0]
            distancey-=np.floor(distancey/cell[1]+0.5)*cell[1]
            distancez-=np.floor(distancez/cell[2]+0.5)*cell[2]
            distance2=distancex**2+distancey**2+distancez**2
            if distance2 <= listcutoff2:
                if point[i+1]>=len(nlist):
                    raise Exception("Verlet list size exceeded\nIncrease maxneighbours")
                nlist[point[i+1]]=j
                point[i+1]+=1
class SimpleMD:
    def __init__(self):
        """Initialize all run parameters to their defaults.

        Values are subsequently overridden by read_input()."""
        # Legacy RNG state slots (the actual stream lives in Random).
        self.iv=np.zeros(32,dtype=int)
        self.iy=0
        self.iset=0
        self.gset=0.0
        # Output bookkeeping flags/handles.
        self.write_positions_first=True
        self.write_statistics_first=True
        self.write_statistics_last_time_reopened=0
        self.write_statistics_fp=None
        # Simulation parameters (reduced units).
        self.temperature=1.0
        self.maxneighbors=1000       # Verlet list capacity per atom pool
        self.tstep=0.005             # integration time step
        self.friction=0.0            # Langevin friction (0 = NVE)
        self.forcecutoff=2.5
        self.listcutoff=3.0          # must exceed forcecutoff (Verlet skin)
        self.nstep=1
        self.nconfig=10              # trajectory output stride
        self.nstat=1                 # statistics output stride
        self.idum=0                  # RNG seed
        self.wrapatoms=False
        # File names / handles, filled in by read_input().
        self.statfile=""
        self.trajfile=""
        self.outputfile=""
        self.inputfile=""
        self.statfile_f=None
def read_input(self,file):
with open(file,"r") as f:
for line in f:
line=re.sub("#.*$","",line)
line=re.sub(" *$","",line)
words=line.split()
if len(words)==0:
continue
key=words[0]
if key=="temperature":
self.temperature=float(words[1])
elif key=="tstep":
self.tstep=float(words[1])
elif key=="friction":
self.friction=float(words[1])
elif key=="forcecutoff":
self.forcecutoff=float(words[1])
elif key=="listcutoff":
self.listcutoff=float(words[1])
elif key=="nstep":
self.nstep=int(words[1])
elif key=="nconfig":
self.nconfig=int(words[1])
self.trajfile=words[2]
elif key=="nstat":
self.nstat=int(words[1])
self.statfile=words[2]
elif key=="wrapatoms":
if re.match("[Tt].*",words[1]):
self.wrapatoms=True
elif key=="maxneighbours":
self.maxneighbors=int(words[1])
elif key=="inputfile":
self.inputfile=words[1]
elif key=="outputfile":
self.outputfile=words[1]
elif key=="idum":
self.idum=int(words[1])
else:
raise Exception("Unknown keyword: "+key)
if len(self.inputfile)==0:
raise Exception("Specify input file")
if len(self.outputfile)==0:
raise Exception("Specify output file")
if len(self.trajfile)==0:
raise Exception("Specify traj file")
if len(self.statfile)==0:
raise Exception("Specify stat file")
def read_positions(self,file):
with open(file,"r") as f:
natoms=int(f.readline())
cell=[float(x) for x in f.readline().split()]
positions=np.loadtxt(f,usecols=(1,2,3))
assert(len(positions)==natoms)
return np.array(cell),np.array(positions)
def randomize_velocities(self,temperature,masses,random):
return np.sqrt(temperature/masses)[:,np.newaxis]*random.Gaussian(shape=(len(masses),3))
# note: this can act on a vector of vectors
def pbc(self,cell,vector):
return vector-np.floor(vector/cell+0.5)*cell
def check_list(self,positions,positions0,listcutoff,forcecutoff):
delta2=(0.5*(listcutoff-forcecutoff))*(0.5*(listcutoff-forcecutoff))
disp2=np.sum((positions-positions0)**2,axis=1)
return np.any(disp2>delta2)
def compute_engkin(self,masses,velocities):
return 0.5*np.sum(masses*np.sum(velocities**2,axis=1))
def thermostat(self,masses,dt,friction,temperature,velocities,engint,random):
c1=np.exp(-friction*dt)
c2=np.sqrt((1.0-c1*c1)*temperature)/np.sqrt(masses)
engint+=0.5*np.sum(masses*np.sum(velocities**2,axis=1))
velocities=c1*velocities+c2[:,np.newaxis]*random.Gaussian(shape=velocities.shape)
engint-=0.5*np.sum(masses*np.sum(velocities**2,axis=1))
return velocities,engint
def write_positions(self,cell,positions,wrapatoms=False):
mode="w"
if self.write_positions_first:
self.write_positions_first = False
mode="w"
else:
mode="a"
with open(self.trajfile,mode) as f:
print("%d" % len(positions), file=f)
print("%f %f %f" % (cell[0], cell[1], cell[2]), file=f)
if wrapatoms:
positions = self.pbc(cell,positions)
np.savetxt(f,positions,fmt="Ar %10.7f %10.7f %10.7f")
def write_final_positions(self,cell,positions,wrapatoms=False):
with open(self.outputfile,"w") as f:
print("%d" % len(positions), file=f)
print("%f %f %f" % (cell[0], cell[1], cell[2]), file=f)
if wrapatoms:
positions = self.pbc(cell,positions)
np.savetxt(f,positions,fmt="Ar %10.7f %10.7f %10.7f")
def write_statistics(self,istep,tstep,natoms,engkin,engconf,engint):
if self.write_statistics_fp is None:
self.write_statistics_fp = open(self.statfile, "w")
if istep-self.write_statistics_last_time_reopened>100:
self.write_statistics_fp.close()
self.write_statistics_fp = open(self.statfile, "a")
self.write_statistics_last_time_reopened=istep
print("%d %f %f %f %f %f" %
(istep,istep*tstep,2.0*engkin/(3.0*natoms),engconf,engkin+engconf,engkin+engconf+engint),
file=self.write_statistics_fp)
def run(self,parameters):
self.read_input(parameters)
cell,positions=self.read_positions(self.inputfile)
random=Random(self.idum)
# masses are hardcoded to 1
masses=np.ones(len(positions))
# energy integral initialized to 0
engint=0.0
# velocities are randomized according to temperature
velocities=self.randomize_velocities(self.temperature,masses,random)
# allocate space for neighbor lists
nlist=np.zeros(self.maxneighbors*len(positions), dtype=int)
point=np.zeros(len(positions)+1, dtype=int)
# neighbour list are computed
_compute_list(cell, positions, self.listcutoff, nlist, point)
print("Neighbour list recomputed at step ",0)
print("List size: ",len(nlist))
# reference positions are saved
positions0=+positions
forces=np.zeros(shape=positions.shape)
# forces are computed before starting md
engconf= _compute_forces(cell, positions, self.forcecutoff, nlist, point, forces)
# here is the main md loop
# Langevin thermostat is applied before and after a velocity-Verlet integrator
# the overall structure is:
# thermostat
# update velocities
# update positions
# (eventually recompute neighbour list)
# compute forces
# update velocities
# thermostat
# (eventually dump output informations)
now=time.time()
for istep in range(self.nstep):
if self.friction>0:
velocities,engint = self.thermostat(
masses,0.5*self.tstep,self.friction,self.temperature,velocities,engint,random)
velocities+=forces*0.5*self.tstep/masses[:,np.newaxis]
positions+=velocities*self.tstep
check_list=self.check_list(positions,positions0,self.listcutoff,self.forcecutoff)
if check_list:
_compute_list(cell, positions, self.listcutoff, nlist, point)
positions0=+positions
print("Neighbour list recomputed at step ",istep)
print("List size: ",len(nlist))
engconf = _compute_forces(cell, positions, self.forcecutoff, nlist, point, forces)
velocities+=forces*0.5*self.tstep/masses[:,np.newaxis]
if self.friction>0.0:
velocities,engint = self.thermostat(
masses,0.5*self.tstep,self.friction,self.temperature,velocities,engint,random)
if (istep+1)%self.nconfig==0:
self.write_positions(cell,positions,self.wrapatoms)
if (istep+1)%self.nstat==0:
engkin = self.compute_engkin(masses,velocities)
self.write_statistics(istep+1,self.tstep,len(positions),engkin,engconf,engint)
self.write_final_positions(cell,positions,self.wrapatoms)
if self.write_statistics_fp is not None:
self.write_statistics_fp.close()
print(time.time()-now)
if __name__ == "__main__":
    # Buffer stdin into a named temporary file so SimpleMD.run() can
    # re-read the parameters by filename.
    stdin_text = sys.stdin.read()
    with tempfile.NamedTemporaryFile("w+t") as param_file:
        param_file.write(stdin_text)
        param_file.flush()
        SimpleMD().run(param_file.name)
|
GiovanniBussi/simplemd
|
python/simplemd.py
|
Python
|
gpl-3.0
| 14,204
|
[
"Gaussian"
] |
16f54fa4c051d7fdc3f27203f1faea222886e64d11df16e0c12910e6a6eba4c3
|
"""Module symbol-table generator"""
from compiler import ast
from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL, SC_UNKNOWN
from compiler.misc import mangle
import types
import sys
MANGLE_LEN = 256
class Scope:
    """Symbol table for a single lexical scope (Python 2 compiler package).

    Tracks, per (mangled) name: definitions, uses, ``global``
    declarations, parameters, free variables (resolved in an enclosing
    function scope) and cells (supplied to nested scopes).
    """
    # XXX how much information do I need about each name?
    def __init__(self, name, module, klass=None):
        self.name = name
        self.module = module   # the enclosing ModuleScope
        self.defs = {}         # names bound in this scope
        self.uses = {}         # names referenced in this scope
        self.globals = {}      # names declared ``global``
        self.params = {}       # subset of defs that are formal parameters
        self.frees = {}        # names resolved in an enclosing function
        self.cells = {}        # names this scope supplies to nested scopes
        self.children = []
        # nested is true if the class could contain free variables,
        # i.e. if it is nested within another function.
        self.nested = None
        self.generator = None
        self.klass = None
        # Strip leading underscores from the class name used for private
        # name mangling, mirroring CPython's mangling rules.
        if klass is not None:
            for i in range(len(klass)):
                if klass[i] != '_':
                    self.klass = klass[i:]
                    break
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.name)
    def mangle(self, name):
        # Apply class-private name mangling when inside a class.
        if self.klass is None:
            return name
        return mangle(name, self.klass)
    def add_def(self, name):
        self.defs[self.mangle(name)] = 1
    def add_use(self, name):
        self.uses[self.mangle(name)] = 1
    def add_global(self, name):
        name = self.mangle(name)
        if self.uses.has_key(name) or self.defs.has_key(name):
            pass # XXX warn about global following def/use
        if self.params.has_key(name):
            raise SyntaxError, "%s in %s is global and parameter" % \
                  (name, self.name)
        self.globals[name] = 1
        self.module.add_def(name)
    def add_param(self, name):
        # Parameters are both defs and params.
        name = self.mangle(name)
        self.defs[name] = 1
        self.params[name] = 1
    def get_names(self):
        d = {}
        d.update(self.defs)
        d.update(self.uses)
        d.update(self.globals)
        return d.keys()
    def add_child(self, child):
        self.children.append(child)
    def get_children(self):
        return self.children
    def DEBUG(self):
        # Dump the scope's bookkeeping to stderr (Python 2 print syntax).
        print >> sys.stderr, self.name, self.nested and "nested" or ""
        print >> sys.stderr, "\tglobals: ", self.globals
        print >> sys.stderr, "\tcells: ", self.cells
        print >> sys.stderr, "\tdefs: ", self.defs
        print >> sys.stderr, "\tuses: ", self.uses
        print >> sys.stderr, "\tfrees:", self.frees
    def check_name(self, name):
        """Return scope of name.

        The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
        """
        if self.globals.has_key(name):
            return SC_GLOBAL
        if self.cells.has_key(name):
            return SC_CELL
        if self.defs.has_key(name):
            return SC_LOCAL
        if self.nested and (self.frees.has_key(name) or
                            self.uses.has_key(name)):
            return SC_FREE
        if self.nested:
            return SC_UNKNOWN
        else:
            return SC_GLOBAL
    def get_free_vars(self):
        # Names used but neither defined locally nor declared global are
        # free; only meaningful for nested scopes.
        if not self.nested:
            return ()
        free = {}
        free.update(self.frees)
        for name in self.uses.keys():
            if not (self.defs.has_key(name) or
                    self.globals.has_key(name)):
                free[name] = 1
        return free.keys()
    def handle_children(self):
        # Resolve each child's free variables against this scope.
        for child in self.children:
            frees = child.get_free_vars()
            globals = self.add_frees(frees)
            for name in globals:
                child.force_global(name)
    def force_global(self, name):
        """Force name to be global in scope.

        Some child of the current node had a free reference to name.
        When the child was processed, it was labelled a free
        variable.  Now that all its enclosing scope have been
        processed, the name is known to be a global or builtin.  So
        walk back down the child chain and set the name to be global
        rather than free.

        Be careful to stop if a child does not think the name is
        free.
        """
        self.globals[name] = 1
        if self.frees.has_key(name):
            del self.frees[name]
        for child in self.children:
            if child.check_name(name) == SC_FREE:
                child.force_global(name)
    def add_frees(self, names):
        """Process list of free vars from nested scope.

        Returns a list of names that are either 1) declared global in the
        parent or 2) undefined in a top-level parent.  In either case,
        the nested scope should treat them as globals.
        """
        child_globals = []
        for name in names:
            sc = self.check_name(name)
            if self.nested:
                if sc == SC_UNKNOWN or sc == SC_FREE \
                   or isinstance(self, ClassScope):
                    self.frees[name] = 1
                elif sc == SC_GLOBAL:
                    child_globals.append(name)
                elif isinstance(self, FunctionScope) and sc == SC_LOCAL:
                    self.cells[name] = 1
                elif sc != SC_CELL:
                    child_globals.append(name)
            else:
                if sc == SC_LOCAL:
                    self.cells[name] = 1
                elif sc != SC_CELL:
                    child_globals.append(name)
        return child_globals
    def get_cell_vars(self):
        return self.cells.keys()
class ModuleScope(Scope):
    """Top-level (module/global) scope; it is its own ``module`` owner."""
    __super_init = Scope.__init__

    def __init__(self):
        self.__super_init("global", self)
class FunctionScope(Scope):
    # Identical to the generic Scope; exists so isinstance checks can
    # single out function scopes (see Scope.add_frees / SymbolVisitor).
    pass
class GenExprScope(Scope):
    """Scope for a generator expression.

    Carries the implicit '.0' parameter (the outermost iterable, passed
    in by the caller) and a per-class counter used to give each genexpr
    scope a distinct name.
    """
    __super_init = Scope.__init__

    __counter = 1

    def __init__(self, module, klass=None):
        i = self.__counter
        # Bump the *class* attribute.  The original code did
        # ``self.__counter += 1``, which reads the class value but writes
        # an instance attribute, so the counter never advanced and every
        # genexpr scope was numbered <1>.
        self.__class__.__counter += 1
        self.__super_init("generator expression<%d>"%i, module, klass)
        self.add_param('.0')

    def get_names(self):
        keys = Scope.get_names(self)
        return keys
class LambdaScope(FunctionScope):
    """Function scope for a lambda, named ``lambda.<n>``."""
    __super_init = Scope.__init__

    __counter = 1

    def __init__(self, module, klass=None):
        i = self.__counter
        # Bump the *class* attribute; the original ``self.__counter += 1``
        # wrote an instance attribute, leaving the class counter stuck at
        # 1 so every lambda scope was named "lambda.1".
        self.__class__.__counter += 1
        self.__super_init("lambda.%d" % i, module, klass)
class ClassScope(Scope):
    """Scope for a class body; the class's own name drives name mangling."""
    __super_init = Scope.__init__

    def __init__(self, name, module):
        self.__super_init(name, module, name)
class SymbolVisitor:
    """AST walker that builds a Scope tree for a ``compiler.ast`` module."""
    def __init__(self):
        self.scopes = {}   # maps AST node -> Scope
        self.klass = None  # name of enclosing class, for name mangling

    # node that define new scopes

    def visitModule(self, node):
        scope = self.module = self.scopes[node] = ModuleScope()
        self.visit(node.node, scope)

    visitExpression = visitModule

    def visitFunction(self, node, parent):
        # Decorators and default expressions are evaluated in the
        # *enclosing* scope, not in the function's own scope.
        if node.decorators:
            self.visit(node.decorators, parent)
        parent.add_def(node.name)
        for n in node.defaults:
            self.visit(n, parent)
        scope = FunctionScope(node.name, self.module, self.klass)
        if parent.nested or isinstance(parent, FunctionScope):
            scope.nested = 1
        self.scopes[node] = scope
        self._do_args(scope, node.argnames)
        self.visit(node.code, scope)
        self.handle_free_vars(scope, parent)

    def visitGenExpr(self, node, parent):
        scope = GenExprScope(self.module, self.klass);
        if parent.nested or isinstance(parent, FunctionScope) \
           or isinstance(parent, GenExprScope):
            scope.nested = 1
        self.scopes[node] = scope
        self.visit(node.code, scope)
        self.handle_free_vars(scope, parent)

    def visitGenExprInner(self, node, scope):
        for genfor in node.quals:
            self.visit(genfor, scope)
        self.visit(node.expr, scope)

    def visitGenExprFor(self, node, scope):
        # The ``for`` target binds names (assign flag = 1).
        self.visit(node.assign, scope, 1)
        self.visit(node.iter, scope)
        for if_ in node.ifs:
            self.visit(if_, scope)

    def visitGenExprIf(self, node, scope):
        self.visit(node.test, scope)

    def visitLambda(self, node, parent, assign=0):
        # Lambda is an expression, so it could appear in an expression
        # context where assign is passed.  The transformer should catch
        # any code that has a lambda on the left-hand side.
        assert not assign
        for n in node.defaults:
            self.visit(n, parent)
        scope = LambdaScope(self.module, self.klass)
        if parent.nested or isinstance(parent, FunctionScope):
            scope.nested = 1
        self.scopes[node] = scope
        self._do_args(scope, node.argnames)
        self.visit(node.code, scope)
        self.handle_free_vars(scope, parent)

    def _do_args(self, scope, args):
        # Python 2 tuple parameters recurse; plain names become params.
        for name in args:
            if type(name) == types.TupleType:
                self._do_args(scope, name)
            else:
                scope.add_param(name)

    def handle_free_vars(self, scope, parent):
        parent.add_child(scope)
        scope.handle_children()

    def visitClass(self, node, parent):
        parent.add_def(node.name)
        for n in node.bases:
            self.visit(n, parent)
        scope = ClassScope(node.name, self.module)
        if parent.nested or isinstance(parent, FunctionScope):
            scope.nested = 1
        if node.doc is not None:
            scope.add_def('__doc__')
        scope.add_def('__module__')
        self.scopes[node] = scope
        # Remember the enclosing class name while visiting the body so
        # private names get mangled against the innermost class.
        prev = self.klass
        self.klass = node.name
        self.visit(node.code, scope)
        self.klass = prev
        self.handle_free_vars(scope, parent)

    # name can be a def or a use
    # XXX a few calls and nodes expect a third "assign" arg that is
    # true if the name is being used as an assignment.  only
    # expressions contained within statements may have the assign arg.

    def visitName(self, node, scope, assign=0):
        if assign:
            scope.add_def(node.name)
        else:
            scope.add_use(node.name)

    # operations that bind new names

    def visitFor(self, node, scope):
        self.visit(node.assign, scope, 1)
        self.visit(node.list, scope)
        self.visit(node.body, scope)
        if node.else_:
            self.visit(node.else_, scope)

    def visitFrom(self, node, scope):
        for name, asname in node.names:
            if name == "*":
                continue
            scope.add_def(asname or name)

    def visitImport(self, node, scope):
        for name, asname in node.names:
            # "import a.b" binds only the top-level package name "a"
            i = name.find(".")
            if i > -1:
                name = name[:i]
            scope.add_def(asname or name)

    def visitGlobal(self, node, scope):
        for name in node.names:
            scope.add_global(name)

    def visitAssign(self, node, scope):
        """Propagate assignment flag down to child nodes.

        The Assign node doesn't itself contains the variables being
        assigned to.  Instead, the children in node.nodes are visited
        with the assign flag set to true.  When the names occur in
        those nodes, they are marked as defs.

        Some names that occur in an assignment target are not bound by
        the assignment, e.g. a name occurring inside a slice.  The
        visitor handles these nodes specially; they do not propagate
        the assign flag to their children.
        """
        for n in node.nodes:
            self.visit(n, scope, 1)
        self.visit(node.expr, scope)

    def visitAssName(self, node, scope, assign=1):
        scope.add_def(node.name)

    def visitAssAttr(self, node, scope, assign=0):
        # "x.attr = v" uses x rather than binding it.
        self.visit(node.expr, scope, 0)

    def visitSubscript(self, node, scope, assign=0):
        # "x[i] = v" uses x and i rather than binding them.
        self.visit(node.expr, scope, 0)
        for n in node.subs:
            self.visit(n, scope, 0)

    def visitSlice(self, node, scope, assign=0):
        self.visit(node.expr, scope, 0)
        if node.lower:
            self.visit(node.lower, scope, 0)
        if node.upper:
            self.visit(node.upper, scope, 0)

    def visitAugAssign(self, node, scope):
        # If the LHS is a name, then this counts as assignment.
        # Otherwise, it's just use.
        self.visit(node.node, scope)
        if isinstance(node.node, ast.Name):
            self.visit(node.node, scope, 1) # XXX worry about this
        self.visit(node.expr, scope)

    # prune if statements if tests are false

    _const_types = types.StringType, types.IntType, types.FloatType

    def visitIf(self, node, scope):
        for test, body in node.tests:
            if isinstance(test, ast.Const):
                if type(test.value) in self._const_types:
                    if not test.value:
                        continue
            self.visit(test, scope)
            self.visit(body, scope)
        if node.else_:
            self.visit(node.else_, scope)

    # a yield statement signals a generator

    def visitYield(self, node, scope):
        scope.generator = 1
        self.visit(node.value, scope)
def list_eq(l1, l2):
    """True when both sequences hold the same elements, ignoring order."""
    left = sorted(l1)
    right = sorted(l2)
    return left == right
if __name__ == "__main__":
    # Self-test: compare this module's scope analysis against the
    # built-in symtable module for each file given on the command line
    # (Python 2 print-statement syntax throughout).
    import sys
    from compiler import parseFile, walk
    import symtable

    def get_names(syms):
        # Drop compiler-internal names like '_[1]' and '.0'.
        return [s for s in [s.get_name() for s in syms.get_symbols()]
                if not (s.startswith('_[') or s.startswith('.'))]

    for file in sys.argv[1:]:
        print file
        f = open(file)
        buf = f.read()
        f.close()
        syms = symtable.symtable(buf, file, "exec")
        mod_names = get_names(syms)
        tree = parseFile(file)
        s = SymbolVisitor()
        walk(tree, s)

        # compare module-level symbols
        names2 = s.scopes[tree].get_names()

        if not list_eq(mod_names, names2):
            print
            print "oops", file
            print sorted(mod_names)
            print sorted(names2)
            sys.exit(-1)

        d = {}
        d.update(s.scopes)
        del d[tree]
        scopes = d.values()
        del d

        # For every nested namespace symtable knows about, find the
        # uniquely-named matching Scope and compare its names too.
        for s in syms.get_symbols():
            if s.is_namespace():
                l = [sc for sc in scopes
                     if sc.name == s.get_name()]
                if len(l) > 1:
                    print "skipping", s.get_name()
                else:
                    if not list_eq(get_names(s.get_namespace()),
                                   l[0].get_names()):
                        print s.get_name()
                        print sorted(get_names(s.get_namespace()))
                        print sorted(l[0].get_names())
                        sys.exit(-1)
|
zephyrplugins/zephyr
|
zephyr.plugin.jython/jython2.5.2rc3/Lib/compiler/symbols.py
|
Python
|
epl-1.0
| 14,538
|
[
"VisIt"
] |
9b83ef2b9ba1e609224e128f0227376c1186a7b283c3e65b339d52539e25bea6
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver.p4util import temp_circular_import_blocker
from psi4.driver import qcdb
from psi4.driver.p4util.exceptions import *
def molecule_set_attr(self, name, value):
    """Replacement ``__setattr__`` for the molecule class.

    Names the molecule recognizes as variables (per ``is_variable``) are
    routed through ``set_variable``; everything else becomes an ordinary
    attribute.  Uses ``object.__getattribute__`` to avoid re-entering the
    overridden attribute hooks.
    """
    is_variable = object.__getattribute__(self, "is_variable")
    if is_variable(name):
        setter = object.__getattribute__(self, "set_variable")
        setter(name, value)
    else:
        object.__setattr__(self, name, value)
def molecule_get_attr(self, name):
    """Replacement ``__getattr__`` for the molecule class.

    Molecule variables (per ``is_variable``) are fetched through
    ``get_variable``; all other names fall back to normal attribute
    lookup via ``object.__getattribute__``.
    """
    is_variable = object.__getattribute__(self, "is_variable")
    if is_variable(name):
        getter = object.__getattribute__(self, "get_variable")
        return getter(name)
    return object.__getattribute__(self, name)
@classmethod
def molecule_from_string(cls,
                         molstr,
                         dtype=None,
                         name=None,
                         fix_com=None,
                         fix_orientation=None,
                         fix_symmetry=None,
                         return_dict=False,
                         enable_qm=True,
                         enable_efp=True,
                         missing_enabled_return_qm='none',
                         missing_enabled_return_efp='none',
                         verbose=1):
    """Construct a Molecule (and optionally its molrec dict) from a string.

    Thin wrapper around :py:func:`qcelemental.molparse.from_string`; the
    'qm' portion of the parsed molrec becomes the core.Molecule.
    Becomes a working classmethod once bound onto core.Molecule in
    ``dynamic_variable_bind`` below.
    """
    molrec = qcel.molparse.from_string(
        molstr=molstr,
        dtype=dtype,
        name=name,
        fix_com=fix_com,
        fix_orientation=fix_orientation,
        fix_symmetry=fix_symmetry,
        return_processed=False,
        enable_qm=enable_qm,
        enable_efp=enable_efp,
        missing_enabled_return_qm=missing_enabled_return_qm,
        missing_enabled_return_efp=missing_enabled_return_efp,
        verbose=verbose)
    if return_dict:
        return core.Molecule.from_dict(molrec['qm']), molrec
    else:
        return core.Molecule.from_dict(molrec['qm'])
@classmethod
def molecule_from_arrays(cls,
                         geom=None,
                         elea=None,
                         elez=None,
                         elem=None,
                         mass=None,
                         real=None,
                         elbl=None,
                         name=None,
                         units='Angstrom',
                         input_units_to_au=None,
                         fix_com=None,
                         fix_orientation=None,
                         fix_symmetry=None,
                         fragment_separators=None,
                         fragment_charges=None,
                         fragment_multiplicities=None,
                         molecular_charge=None,
                         molecular_multiplicity=None,
                         comment=None,
                         provenance=None,
                         connectivity=None,
                         missing_enabled_return='error',
                         tooclose=0.1,
                         zero_ghost_fragments=False,
                         nonphysical=False,
                         mtol=1.e-3,
                         verbose=1,
                         return_dict=False):
    """Construct Molecule from unvalidated arrays and variables.

    Light wrapper around :py:func:`~qcelemental.molparse.from_arrays`
    that is a full-featured constructor to dictionary representation
    of Molecule.  This follows one step further to return a Molecule
    instance.

    Parameters
    ----------
    See :py:func:`~qcelemental.molparse.from_arrays`.

    Returns
    -------
    :py:class:`psi4.core.Molecule`
    """
    molrec = qcel.molparse.from_arrays(
        geom=geom,
        elea=elea,
        elez=elez,
        elem=elem,
        mass=mass,
        real=real,
        elbl=elbl,
        name=name,
        units=units,
        input_units_to_au=input_units_to_au,
        fix_com=fix_com,
        fix_orientation=fix_orientation,
        fix_symmetry=fix_symmetry,
        fragment_separators=fragment_separators,
        fragment_charges=fragment_charges,
        fragment_multiplicities=fragment_multiplicities,
        molecular_charge=molecular_charge,
        molecular_multiplicity=molecular_multiplicity,
        comment=comment,
        provenance=provenance,
        connectivity=connectivity,
        domain='qm',
        missing_enabled_return=missing_enabled_return,
        tooclose=tooclose,
        zero_ghost_fragments=zero_ghost_fragments,
        nonphysical=nonphysical,
        mtol=mtol,
        verbose=verbose)
    if return_dict:
        return core.Molecule.from_dict(molrec), molrec
    else:
        return core.Molecule.from_dict(molrec)
@classmethod
def molecule_from_schema(cls, molschema, return_dict=False, nonphysical=False, verbose=1):
    """Construct Molecule from non-Psi4 schema.

    Light wrapper around :py:func:`~psi4.core.Molecule.from_arrays`.

    Parameters
    ----------
    molschema : dict
        Dictionary form of Molecule following known schema.
    return_dict : bool, optional
        Additionally return Molecule dictionary intermediate.
    nonphysical : bool, optional
        Do allow masses outside an element's natural range to pass validation?
    verbose : int, optional
        Amount of printing.

    Returns
    -------
    mol : :py:class:`psi4.core.Molecule`
    molrec : dict, optional
        Dictionary representation of instance.
        Only provided if `return_dict` is True.
    """
    molrec = qcel.molparse.from_schema(molschema, nonphysical=nonphysical, verbose=verbose)
    mol = core.Molecule.from_dict(molrec)
    return (mol, molrec) if return_dict else mol
def dynamic_variable_bind(cls):
    """Function to dynamically add extra members to
    the core.Molecule class.
    """
    # Attribute hooks so molecule variables read/write like attributes.
    cls.__setattr__ = molecule_set_attr
    cls.__getattr__ = molecule_get_attr

    # Conversion/analysis helpers borrowed from qcdb.Molecule.
    cls.to_arrays = qcdb.Molecule.to_arrays
    cls.to_dict = qcdb.Molecule.to_dict
    cls.BFS = qcdb.Molecule.BFS
    cls.B787 = qcdb.Molecule.B787
    cls.scramble = qcdb.Molecule.scramble
    # Alternate constructors defined above in this module.
    cls.from_arrays = molecule_from_arrays
    cls.from_string = molecule_from_string
    cls.to_string = qcdb.Molecule.to_string
    cls.from_schema = molecule_from_schema
    cls.to_schema = qcdb.Molecule.to_schema
    cls.run_dftd3 = qcdb.Molecule.run_dftd3
    cls.format_molecule_for_mol = qcdb.Molecule.format_molecule_for_mol
# Bind the helpers above onto psi4.core.Molecule at import time so every
# Molecule instance gains the from_*/to_* conveniences.
dynamic_variable_bind(core.Molecule)  # pass class type, not class instance


#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
#   O  1.0 0.0 0.0
#   H  0.0 1.0 0.0
#   H  0.0 0.0 0.0
#
def geometry(geom, name="default"):
    """Create a molecule object named *name* from the
    geometry in string *geom*.  Permitted for user use but deprecated
    in driver in favor of explicit molecule-passing.  Comments within
    the string are filtered.
    """
    molrec = qcel.molparse.from_string(
        geom, enable_qm=True, missing_enabled_return_qm='minimal', enable_efp=True, missing_enabled_return_efp='none')
    molecule = core.Molecule.from_dict(molrec['qm'])
    molecule.set_name(name)

    if 'efp' in molrec:
        try:
            import pylibefp
        except ImportError as e:  # py36 ModuleNotFoundError
            raise ImportError("""Install pylibefp to use EFP functionality. `conda install pylibefp -c psi4` Or build with `-DENABLE_libefp=ON`""") from e
        #print('Using pylibefp: {} (version {})'.format(pylibefp.__file__, pylibefp.__version__))
        efpobj = pylibefp.from_dict(molrec['efp'])
        # pylibefp.core.efp rides along on molecule
        molecule.EFP = efpobj

    # Attempt to go ahead and construct the molecule
    try:
        molecule.update_geometry()
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception while keeping the
        # deliberate best-effort behavior (geometry completed later).
        core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
                       " once all variables are set.\n")

    activate(molecule)

    return molecule
def activate(mol):
    """Function to set molecule object *mol* as the current active molecule.

    Permitted for user use but deprecated in driver in favor of explicit
    molecule-passing.
    """
    core.set_active_molecule(mol)
|
dgasmith/psi4
|
psi4/driver/molutil.py
|
Python
|
lgpl-3.0
| 9,365
|
[
"Psi4"
] |
85ec3b5ddb5567b7bd63316475c9a3e6b2d4b16140bb1d4dbb650f22b794912d
|
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
# Maya registration constants: user-visible node name, shading-node
# classification string, and the node's unique MTypeId.
kPluginNodeName = "MitsubaRoughDielectricShader"
kPluginNodeClassify = "/shader/surface"
kPluginNodeId = OpenMaya.MTypeId(0x87009)
class roughdielectric(OpenMayaMPx.MPxNode):
    """Maya shading node carrying parameters for Mitsuba's rough
    dielectric BSDF.

    The attributes exist so an exporter can read them; compute() only
    supplies a flat viewport color/transparency.
    """
    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)

    # Attribute handles; populated by nodeInitializer() at plug-in load.
    mIntIOR = OpenMaya.MObject()
    mExtIOR = OpenMaya.MObject()
    mInteriorMaterial = OpenMaya.MObject()
    mExteriorMaterial = OpenMaya.MObject()
    mAlpha = OpenMaya.MObject()
    mAlpaUV = OpenMaya.MObject()  # NOTE(review): "Alpa" spelling kept; renaming would break references
    mDistribution = OpenMaya.MObject()
    mReflectance = OpenMaya.MObject()
    mTransmittance = OpenMaya.MObject()
    mOutColor = OpenMaya.MObject()
    mOutTransparency = OpenMaya.MObject()

    def compute(self, plug, block):
        # Viewport-only evaluation: black color, constant 0.75 transparency.
        if plug == roughdielectric.mOutColor:
            resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
            outColorHandle = block.outputValue( roughdielectric.mOutColor )
            outColorHandle.setMFloatVector(resultColor)
            outColorHandle.setClean()
        elif plug == roughdielectric.mOutTransparency:
            outTransHandle = block.outputValue( roughdielectric.mOutTransparency )
            outTransHandle.setMFloatVector(OpenMaya.MFloatVector(0.75,0.75,0.75))
            outTransHandle.setClean()
        else:
            return OpenMaya.kUnknownParameter
def nodeCreator():
    # Factory passed to registerNode; Maya calls it to instantiate the node.
    return roughdielectric()
def nodeInitializer():
    """Create and register all attributes of the rough-dielectric node.

    Called once by Maya at plug-in load.  Builds the enum/numeric/color
    attributes, adds them to the node class, and declares that the
    transmittance drives the output transparency.
    """
    nAttr = OpenMaya.MFnNumericAttribute()
    eAttr = OpenMaya.MFnEnumAttribute()

    try:
        # Microfacet distribution selector.
        roughdielectric.mDistribution = eAttr.create("distribution", "dist")
        eAttr.setKeyable(1)
        eAttr.setStorable(1)
        eAttr.setReadable(1)
        eAttr.setWritable(1)
        eAttr.addField("Beckmann", 0)
        eAttr.addField("GGX", 1)
        eAttr.addField("Phong", 2)
        eAttr.addField("Ashikhmin Shirley", 3)

        # Isotropic roughness.
        roughdielectric.mAlpha = nAttr.create("alpha","a", OpenMaya.MFnNumericData.kFloat, 0.1)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)

        # Anisotropic roughness (U, V).
        roughdielectric.mAlpaUV = nAttr.create("alphaUV","uv", OpenMaya.MFnNumericData.k2Float)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(0.1,0.1)

        # Interior medium preset; index 0 means "use the numeric IOR below".
        roughdielectric.mInteriorMaterial = eAttr.create("interiorMaterial", "intmat")
        eAttr.setKeyable(1)
        eAttr.setStorable(1)
        eAttr.setReadable(1)
        eAttr.setWritable(1)
        eAttr.addField("Use Value", 0)
        eAttr.addField("Vacuum - 1.0", 1)
        eAttr.addField("Helum - 1.00004", 2)
        eAttr.addField("Hydrogen - 1.00013", 3)
        eAttr.addField("Air - 1.00028", 4)
        eAttr.addField("Carbon Dioxide - 1.00045", 5)
        eAttr.addField("Water - 1.3330", 6)
        eAttr.addField("Acetone - 1.36", 7)
        eAttr.addField("Ethanol - 1.361", 8)
        eAttr.addField("Carbon Tetrachloride - 1.461", 9)
        eAttr.addField("Glycerol - 1.4729", 10)
        eAttr.addField("Benzene - 1.501", 11)
        eAttr.addField("Silicone Oil - 1.52045", 12)
        eAttr.addField("Bromine - 1.661", 13)
        eAttr.addField("Water Ice - 1.31", 14)
        eAttr.addField("Fused Quartz - 1.458", 15)
        eAttr.addField("Pyrex - 1.470", 16)
        eAttr.addField("Acrylic Glass - 1.49", 17)
        eAttr.addField("Polypropylene - 1.49", 18)
        eAttr.addField("BK7 - 1.5046", 19)
        eAttr.addField("Sodium Chloride - 1.544", 20)
        eAttr.addField("Amber - 1.55", 21)
        eAttr.addField("Pet - 1.575", 22)
        eAttr.addField("Diamond - 2.419", 23)
        # Default to
        eAttr.setDefault(0)

        # Numeric interior IOR (used when interiorMaterial == "Use Value").
        roughdielectric.mIntIOR = nAttr.create("interiorIOR","intior", OpenMaya.MFnNumericData.kFloat, 1.5046)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)

        # Exterior medium preset; same field list as the interior one.
        roughdielectric.mExteriorMaterial = eAttr.create("exteriorMaterial", "extmat")
        eAttr.setKeyable(1)
        eAttr.setStorable(1)
        eAttr.setReadable(1)
        eAttr.setWritable(1)
        eAttr.addField("Use Value", 0)
        eAttr.addField("Vacuum - 1.0", 1)
        eAttr.addField("Helum - 1.00004", 2)
        eAttr.addField("Hydrogen - 1.00013", 3)
        eAttr.addField("Air - 1.00028", 4)
        eAttr.addField("Carbon Dioxide - 1.00045", 5)
        eAttr.addField("Water - 1.3330", 6)
        eAttr.addField("Acetone - 1.36", 7)
        eAttr.addField("Ethanol - 1.361", 8)
        eAttr.addField("Carbon Tetrachloride - 1.461", 9)
        eAttr.addField("Glycerol - 1.4729", 10)
        eAttr.addField("Benzene - 1.501", 11)
        eAttr.addField("Silicone Oil - 1.52045", 12)
        eAttr.addField("Bromine - 1.661", 13)
        eAttr.addField("Water Ice - 1.31", 14)
        eAttr.addField("Fused Quartz - 1.458", 15)
        eAttr.addField("Pyrex - 1.470", 16)
        eAttr.addField("Acrylic Glass - 1.49", 17)
        eAttr.addField("Polypropylene - 1.49", 18)
        eAttr.addField("BK7 - 1.5046", 19)
        eAttr.addField("Sodium Chloride - 1.544", 20)
        eAttr.addField("Amber - 1.55", 21)
        eAttr.addField("Pet - 1.575", 22)
        eAttr.addField("Diamond - 2.419", 23)
        # Default to
        eAttr.setDefault(0)

        # Numeric exterior IOR (used when exteriorMaterial == "Use Value").
        roughdielectric.mExtIOR = nAttr.create("exteriorIOR","extior", OpenMaya.MFnNumericData.kFloat, 1.0)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)

        roughdielectric.mReflectance = nAttr.createColor("specularReflectance", "sr")
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(1.0,1.0,1.0)

        roughdielectric.mTransmittance = nAttr.createColor("specularTransmittance","st")
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(1.0,1.0,1.0)

        # Output-only plugs for the viewport.
        roughdielectric.mOutColor = nAttr.createColor("outColor", "oc")
        nAttr.setStorable(0)
        nAttr.setHidden(0)
        nAttr.setReadable(1)
        nAttr.setWritable(0)

        roughdielectric.mOutTransparency = nAttr.createColor("outTransparency", "op")
        nAttr.setStorable(0)
        nAttr.setHidden(0)
        nAttr.setReadable(1)
        nAttr.setWritable(0)
    except:
        sys.stderr.write("Failed to create attributes\n")
        raise

    try:
        roughdielectric.addAttribute(roughdielectric.mDistribution)
        roughdielectric.addAttribute(roughdielectric.mAlpha)
        roughdielectric.addAttribute(roughdielectric.mAlpaUV)
        roughdielectric.addAttribute(roughdielectric.mReflectance)
        roughdielectric.addAttribute(roughdielectric.mTransmittance)
        roughdielectric.addAttribute(roughdielectric.mInteriorMaterial)
        roughdielectric.addAttribute(roughdielectric.mIntIOR)
        roughdielectric.addAttribute(roughdielectric.mExteriorMaterial)
        roughdielectric.addAttribute(roughdielectric.mExtIOR)
        roughdielectric.addAttribute(roughdielectric.mOutColor)
        roughdielectric.addAttribute(roughdielectric.mOutTransparency)
    except:
        sys.stderr.write("Failed to add attributes\n")
        raise

    try:
        # Transmittance drives the viewport transparency output.
        roughdielectric.attributeAffects (roughdielectric.mTransmittance, roughdielectric.mOutTransparency)
    except:
        sys.stderr.write("Failed in setting attributeAffects\n")
        raise
# initialize the script plug-in
# initialize the script plug-in
def initializePlugin(mobject):
    # Called by Maya when the plug-in loads: register the shader node.
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator,
                              nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kPluginNodeClassify )
    except:
        sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
        raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Deregister the roughdielectric node when the plug-in is unloaded."""
    plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin.deregisterNode(kPluginNodeId)
    except:
        sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
        raise
|
hpd/MitsubaForMaya
|
plug-ins/mitsuba/materials/roughdielectric.py
|
Python
|
mit
| 8,299
|
[
"Amber"
] |
6376c5c951aa1eb7289a5910f0195dab51c1fdeb108d23386876dd470a3da86a
|
"""
"""
import os
from lwr.lwr_client import action_mapper
from lwr.lwr_client import staging
from lwr.lwr_client.staging import LwrOutputs
from lwr.lwr_client.staging.down import ResultsCollector
import logging
log = logging.getLogger(__name__)
def postprocess(job_directory):
    """Collect job outputs described by the directory's staging config.

    Returns True iff outputs were collected. A ``postprocessed`` marker
    file is always written, even if collection fails or raises.
    """
    try:
        staging_config = job_directory.load_metadata("staging_config", None)
        # Only attempt collection when a staging config was recorded.
        return bool(staging_config) and \
            __collect_outputs(job_directory, staging_config)
    finally:
        # Record that postprocessing ran, regardless of the outcome.
        job_directory.write_file("postprocessed", "")
def __collect_outputs(job_directory, staging_config):
    # Returns True iff every output was collected without error.
    collected = True
    if "action_mapper" in staging_config:
        # Rebuild the client-side staging description from the persisted config.
        file_action_mapper = action_mapper.FileActionMapper(config=staging_config["action_mapper"])
        client_outputs = staging.ClientOutputs.from_dict(staging_config["client_outputs"])
        lwr_outputs = __lwr_outputs(job_directory)
        output_collector = LwrServerOutputCollector(job_directory)
        results_collector = ResultsCollector(output_collector, file_action_mapper, client_outputs, lwr_outputs)
        collection_failure_exceptions = results_collector.collect()
        if collection_failure_exceptions:
            # Best-effort: log the failures and report False rather than raise.
            log.warn("Failures collecting results %s" % collection_failure_exceptions)
            collected = False
    return collected
class LwrServerOutputCollector(object):
    """Resolves and writes remote-transfer outputs on the LWR server side."""

    def __init__(self, job_directory):
        self.job_directory = job_directory

    def collect_output(self, results_collector, output_type, action, name):
        # The action object knows its own client-side path, so only the
        # LWR-side source path needs to be computed here.
        if action.staging_action_local:
            return  # Galaxy (client) will collect output.
        # TODO: Would not work on Windows. Any use in allowing
        # remote_transfer action for Windows?
        output_name = name or os.path.basename(action.path)
        lwr_path = self.job_directory.calculate_path(output_name, output_type)
        action.write_from_path(lwr_path)
def __lwr_outputs(job_directory):
    """Describe the job directory's working and output contents as LwrOutputs."""
    return LwrOutputs(
        job_directory.working_directory_contents(),
        job_directory.outputs_directory_contents(),
    )
# __all__ entries must be the *names* of exported objects (strings), not the
# objects themselves; the original listed the function object, which breaks
# "from ... import *".
__all__ = ['postprocess']
|
jmchilton/lwr
|
lwr/managers/staging/postprocess.py
|
Python
|
apache-2.0
| 2,393
|
[
"Galaxy"
] |
6eeda44628be6221d280968d526919479d5be1e621d6d18f766c8619f07f901f
|
"""
@name: PyHouse/src/Modules/Computer/_test/test_Nodes.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2018 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 29, 2015
@Summary:
"""
__updated__ = '2018-02-12'
from twisted.trial import unittest, reporter, runner
from Modules.Computer.Nodes import test as I_test
class Z_Suite(unittest.TestCase):
    """Runs the whole Modules.Computer.Nodes test package as one trial case."""
    def setUp(self):
        # Loader created here; the package itself is loaded per-test below.
        self.m_test = runner.TestLoader()
    def test_Nodes(self):
        # Load every test in the Nodes package and run it against a plain
        # Reporter, then print the aggregated result summary.
        l_package = runner.TestLoader().loadPackage(I_test)
        l_ret = reporter.Reporter()
        l_package.run(l_ret)
        l_ret.done()
        #
        print('\n====================\n*** test_Nodes ***\n{}\n'.format(l_ret))
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/_test/test_Nodes.py
|
Python
|
mit
| 759
|
[
"Brian"
] |
0eabae4fc643d6ac480974814d0aeed75b1a75599172db4d53800dab64bbf3bd
|
from __future__ import division
from builtins import object
import numpy as np
from sporco import signal
def fn(prm):
    """Scalar quadratic test objective with its minimum at x = 0.1."""
    value = prm[0]
    return (value - 0.1) ** 2
def fnv(prm):
    """Vector variant of fn: two quadratics with minima at 0.1 and 0.5."""
    value = prm[0]
    return ((value - 0.1) ** 2, (value - 0.5) ** 2)
class TestSet01(object):
    """Unit tests for the sporco.signal helper functions.

    Bug fix: the class previously defined ``test_11`` twice; the second
    definition (zero-padded gradient adjoint test) silently shadowed the
    first (local contrast normalisation test), so the latter never ran.
    The second occurrence is renamed to ``test_13``.
    """

    def setup_method(self, method):
        # Fixed seed so the random fixtures are reproducible per test.
        np.random.seed(12345)

    def test_01(self):
        # Salt & pepper noise preserves the image shape.
        img = np.random.randn(64, 64)
        imgn = signal.spnoise(img, 0.5)
        assert imgn.shape == img.shape

    def test_02(self):
        msk = signal.rndmask((16, 17), 0.25)
        assert msk.shape == (16, 17)

    def test_03(self):
        msk = signal.rndmask((16, 17), 0.25, dtype=np.float32)
        assert msk.dtype == np.float32

    def test_04(self):
        # RGB to greyscale drops the channel axis.
        rgb = np.random.randn(64, 64, 3)
        gry = signal.rgb2gray(rgb)
        assert gry.shape == rgb.shape[0:2]

    def test_05(self):
        img = np.random.randn(64, 64)
        iml, imh = signal.tikhonov_filter(img, 5.0)
        assert np.isrealobj(iml) and np.isrealobj(imh)

    def test_06(self):
        # Complex input must yield complex low/high frequency components.
        img = np.random.randn(64, 64) + 1j * np.random.randn(64, 64)
        iml, imh = signal.tikhonov_filter(img, 5.0)
        assert np.iscomplexobj(iml) and np.iscomplexobj(imh)

    def test_07(self):
        img = np.random.randn(64, 64)
        iml, imh = signal.tikhonov_filter(img, 5.0)
        assert np.isrealobj(iml) and np.isrealobj(imh)

    def test_08(self):
        img = np.random.randn(16, 16, 16)
        iml, imh = signal.tikhonov_filter(img, 2.0, npd=8)
        assert iml.shape == img.shape and imh.shape == img.shape

    def test_10(self):
        shape = (7, 5, 6)
        g = signal.gaussian(shape)
        assert g.shape == shape

    def test_11(self):
        # Normalisation must be exactly invertible: snrm * scn + smn == s.
        s = np.random.rand(16, 17, 3)
        scn, smn, snrm = signal.local_contrast_normalise(s)
        assert np.linalg.norm(snrm * scn + smn - s) < 1e-7

    def test_12(self):
        # grad and gradT are adjoint: <x, gradT(y)> == <y, grad(x)>.
        x = np.random.randn(9, 10)
        y = np.random.randn(9, 10)
        u = signal.grad(x, 0)
        v = signal.gradT(y, 0)
        err = np.dot(x.ravel(), v.ravel()) - np.dot(y.ravel(), u.ravel())
        assert np.abs(err) < 5e-14
        u = signal.grad(x, 1)
        v = signal.gradT(y, 1)
        err = np.dot(x.ravel(), v.ravel()) - np.dot(y.ravel(), u.ravel())
        assert np.abs(err) < 5e-14

    def test_13(self):
        # Adjoint identity also holds with zero padding (was a duplicate
        # ``test_11`` that shadowed the normalisation test above).
        x = np.random.randn(9, 10)
        y = np.random.randn(10, 10)
        u = signal.grad(x, 0, zero_pad=True)
        v = signal.gradT(y, 0, zero_pad=True)
        err = np.dot(x.ravel(), v.ravel()) - np.dot(y.ravel(), u.ravel())
        assert np.abs(err) < 1e-14
        y = np.random.randn(9, 11)
        u = signal.grad(x, 1, zero_pad=True)
        v = signal.gradT(y, 1, zero_pad=True)
        err = np.dot(x.ravel(), v.ravel()) - np.dot(y.ravel(), u.ravel())
        assert np.abs(err) < 1e-14
|
bwohlberg/sporco
|
tests/test_signal.py
|
Python
|
bsd-3-clause
| 2,850
|
[
"Gaussian"
] |
e433583cc0170715c1cb2ba408009e1bba89d9ae979b6b6a45dac600f3ae55f4
|
input_name = '../examples/acoustics/acoustics.py'  # problem description file exercised by this test
output_name = 'test_acoustics.vtk'  # VTK file the solver run is expected to produce
from testsBasic import TestInput
class Test( TestInput ):
    # TestInput provides the generic "solve this input file" machinery;
    # no acoustics-specific checks are added here.
    pass
|
olivierverdier/sfepy
|
tests/test_input_acoustics.py
|
Python
|
bsd-3-clause
| 153
|
[
"VTK"
] |
43d4478528d3573c1384c4b426df5d9cd95b2557b383d11efaa41334e98696c1
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
import warnings
from operator import itemgetter
from tabulate import tabulate
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen import Structure, Lattice, Element, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
"""
This module defines classes for reading/manipulating/writing the main sections
of FEFF input file(feff.inp), namely HEADER, ATOMS, POTENTIAL and the program
control tags.
XANES and EXAFS input files, are available, for non-spin case at this time.
"""
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
"REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
"SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
"CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
"LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
"RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
"XMCD", "XNCDCONTROL", "END", "KMESH", "PRINT", "EGRID",
"DIMS", "AFOLP", "EDGE", "COMPTON", "DANES",
"FPRIME" "MDFF", "HOLE", "COREHOLE", "S02", "CHBROAD",
"EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
"UNFREEZEF", "CHSHIFT", "DEBYE",
"INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
"ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
"PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
"DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
"STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
"SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
"CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
"SELF", "SFSE", "MAGIC", "TARGET", "STRFAC")
class Header(MSONable):
"""
Creates Header for the FEFF input file.
Has the following format::
* This feff.inp file generated by pymatgen, www.materialsproject.org
TITLE comment:
TITLE Source: CoO19128.cif
TITLE Structure Summary: (Co2 O2)
TITLE Reduced formula: CoO
TITLE space group: P1, space number: 1
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.0 90.0 120.0
TITLE sites: 4
* 1 Co 0.666666 0.333332 0.496324
* 2 Co 0.333333 0.666667 0.996324
* 3 O 0.666666 0.333332 0.878676
* 4 O 0.333333 0.666667 0.378675
Args:
struct: Structure object, See pymatgen.core.structure.Structure.
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: Comment for first header line
"""
def __init__(self, struct, source='', comment=''):
if struct.is_ordered:
self.struct = struct
self.source = source
sym = SpacegroupAnalyzer(struct)
data = sym.get_symmetry_dataset()
self.space_number = data["number"]
self.space_group = data["international"]
self.comment = comment or "None given"
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
@staticmethod
def from_cif_file(cif_file, source='', comment=''):
"""
Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object
"""
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment)
@property
def structure_symmetry(self):
"""
Returns space number and space group
Returns:
Space number and space group list
"""
return self.space_group, self.space_number
@property
def formula(self):
"""
Formula of structure
"""
return self.struct.composition.formula
@staticmethod
def from_file(filename):
"""
Returns Header object from file
"""
hs = Header.header_string_from_file(filename)
return Header.from_string(hs)
@staticmethod
def header_string_from_file(filename='feff.inp'):
"""
Reads Header string from either a HEADER file or feff.inp file
Will also read a header from a non-pymatgen generated feff.inp file
Args:
filename: File name containing the Header data.
Returns:
Reads header string.
"""
with zopen(filename, "r") as fobject:
f = fobject.readlines()
feff_header_str = []
ln = 0
# Checks to see if generated by pymatgen
try:
feffpmg = f[0].find("pymatgen")
except IndexError:
feffpmg = False
# Reads pymatgen generated header or feff.inp file
if feffpmg:
nsites = int(f[8].split()[2])
for line in f:
ln += 1
if ln <= nsites + 9:
feff_header_str.append(line)
else:
# Reads header from header from feff.inp file from unknown
# source
end = 0
for line in f:
if (line[0] == "*" or line[0] == "T") and end == 0:
feff_header_str.append(line.replace("\r", ""))
else:
end = 1
return ''.join(feff_header_str)
@staticmethod
def from_string(header_str):
"""
Reads Header string and returns Header object if header was
generated by pymatgen.
Note: Checks to see if generated by pymatgen, if not it is impossible
to generate structure object so it is not possible to generate
header object and routine ends
Args:
header_str: pymatgen generated feff.inp header
Returns:
Structure object.
"""
lines = tuple(clean_lines(header_str.split("\n"), False))
comment1 = lines[0]
feffpmg = comment1.find("pymatgen")
if feffpmg:
comment2 = ' '.join(lines[1].split()[2:])
source = ' '.join(lines[2].split()[2:])
basis_vec = lines[6].split(":")[-1].split()
# a, b, c
a = float(basis_vec[0])
b = float(basis_vec[1])
c = float(basis_vec[2])
lengths = [a, b, c]
# alpha, beta, gamma
basis_ang = lines[7].split(":")[-1].split()
alpha = float(basis_ang[0])
beta = float(basis_ang[1])
gamma = float(basis_ang[2])
angles = [alpha, beta, gamma]
lattice = Lattice.from_lengths_and_angles(lengths, angles)
natoms = int(lines[8].split(":")[-1].split()[0])
atomic_symbols = []
for i in range(9, 9 + natoms):
atomic_symbols.append(lines[i].split()[2])
# read the atomic coordinates
coords = []
for i in range(natoms):
toks = lines[i + 9].split()
coords.append([float(s) for s in toks[3:]])
struct = Structure(lattice, atomic_symbols, coords, False,
False, False)
h = Header(struct, source, comment2)
return h
else:
return "Header not generated by pymatgen, cannot return header object"
def __str__(self):
"""
String representation of Header.
"""
to_s = lambda x: "%0.6f" % x
output = ["* This FEFF.inp file generated by pymatgen",
''.join(["TITLE comment: ", self.comment]),
''.join(["TITLE Source: ", self.source]),
"TITLE Structure Summary: {}"
.format(self.struct.composition.formula),
"TITLE Reduced formula: {}"
.format(self.struct.composition.reduced_formula),
"TITLE space group: ({}), space number: ({})"
.format(self.space_group, self.space_number),
"TITLE abc:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.abc])),
"TITLE angles:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.angles])),
"TITLE sites: {}".format(self.struct.num_sites)]
for i, site in enumerate(self.struct):
output.append(" ".join(["*", str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(output)
def write_file(self, filename='HEADER'):
"""
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
"""
with open(filename, "w") as f:
f.write(str(self) + "\n")
class Atoms(MSONable):
"""
Atomic cluster centered around the absorbing atom.
"""
def __init__(self, struct, absorbing_atom, radius):
"""
Args:
struct (Structure): input structure
absorbing_atom (str/int): Symbol for absorbing atom or site index
radius (float): radius of the atom cluster in Angstroms.
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, self.center_index = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
self.radius = radius
self._cluster = self._set_cluster()
def _set_cluster(self):
"""
Compute and set the cluster of atoms as a Molecule object. The siteato
coordinates are translated such that the absorbing atom(aka central
atom) is at the origin.
Returns:
Molecule
"""
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords)
@property
def cluster(self):
"""
Returns the atomic cluster as a Molecule object.
"""
return self._cluster
@staticmethod
def atoms_string_from_file(filename):
"""
Reads atomic shells from file such as feff.inp or ATOMS file
The lines are arranged as follows:
x y z ipot Atom Symbol Distance Number
with distance being the shell radius and ipot an integer identifying
the potential used.
Args:
filename: File name containing atomic coord data.
Returns:
Atoms string.
"""
with zopen(filename, "rt") as fobject:
f = fobject.readlines()
coords = 0
atoms_str = []
for line in f:
if coords == 0:
find_atoms = line.find("ATOMS")
if find_atoms >= 0:
coords = 1
if coords == 1 and not ("END" in line):
atoms_str.append(line.replace("\r", ""))
return ''.join(atoms_str)
@staticmethod
def cluster_from_file(filename):
"""
Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin.
"""
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords)
def get_lines(self):
"""
Returns a list of string representations of the atomic configuration
information(x, y, z, ipot, atom_symbol, distance, id).
Returns:
list: list of strings, sorted by the distance from the absorbing
atom.
"""
lines = [["{:f}".format(self._cluster[0].x),
"{:f}".format(self._cluster[0].y),
"{:f}".format(self._cluster[0].z),
0, self.absorbing_atom, "0.0", 0]]
for i, site in enumerate(self._cluster[1:]):
site_symbol = re.sub(r"[^aA-zZ]+", "", site.species_string)
ipot = self.pot_dict[site_symbol]
lines.append(["{:f}".format(site.x), "{:f}".format(site.y),
"{:f}".format(site.z), ipot, site_symbol,
"{:f}".format(self._cluster.get_distance(0, i + 1)), i + 1])
return sorted(lines, key=itemgetter(5))
def __str__(self):
"""
String representation of Atoms file.
"""
lines_sorted = self.get_lines()
# TODO: remove the formatting and update the unittests
lines_formatted = str(tabulate(lines_sorted,
headers=["* x", "y", "z", "ipot",
"Atom", "Distance", "Number"]))
atom_list = lines_formatted.replace("--", "**")
return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
def write_file(self, filename='ATOMS'):
"""
Write Atoms list to file.
Args:
filename: path for file to be written
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Tags(dict):
"""
FEFF control parameters.
"""
def __init__(self, params=None):
"""
Args:
params: A set of input parameters as a dictionary.
"""
super().__init__()
if params:
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair. Warns if parameter is not in list of valid
Feff tags. Also cleans the parameter and val by stripping leading and
trailing white spaces.
Arg:
key: dict key value
value: value associated with key in dictionary
"""
if key.strip().upper() not in VALID_FEFF_TAGS:
warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
super().__setitem__(key.strip(),
Tags.proc_val(key.strip(), val.strip())
if isinstance(val, str) else val)
def as_dict(self):
"""
Dict representation.
Returns:
Dictionary of parameters from fefftags object
"""
tags_dict = dict(self)
tags_dict['@module'] = self.__class__.__module__
tags_dict['@class'] = self.__class__.__name__
return tags_dict
@staticmethod
def from_dict(d):
"""
Creates Tags object from a dictionary.
Args:
d: Dict of feff parameters and values.
Returns:
Tags object
"""
i = Tags()
for k, v in d.items():
if k not in ("@module", "@class"):
i[k] = v
return i
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the Tags. The reason why this
method is different from the __str__ method is to provide options
for pretty printing.
Args:
sort_keys: Set to True to sort the Feff parameters alphabetically.
Defaults to False.
pretty: Set to True for pretty aligned output. Defaults to False.
Returns:
String representation of Tags.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if isinstance(self[k], dict):
if k in ["ELNES", "EXELFS"]:
lines.append([k, self._stringify_val(self[k]["ENERGY"])])
beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
beam_energy_list = beam_energy.split()
if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction
lines.append([beam_energy])
lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
else:
# no cross terms for orientation averaged spectrum
beam_energy_list[2] = str(0)
lines.append([self._stringify_val(beam_energy_list)])
lines.append([self._stringify_val(self[k]["ANGLES"])])
lines.append([self._stringify_val(self[k]["MESH"])])
lines.append([self._stringify_val(self[k]["POSITION"])])
else:
lines.append([k, self._stringify_val(self[k])])
if pretty:
return tabulate(lines)
else:
return str_delimited(lines, None, " ")
@staticmethod
def _stringify_val(val):
"""
Convert the given value to string.
"""
if isinstance(val, list):
return " ".join([str(i) for i in val])
else:
return str(val)
def __str__(self):
return self.get_string()
def write_file(self, filename='PARAMETERS'):
"""
Write Tags to a Feff parameter tag file.
Args:
filename: filename and path to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__() + "\n")
@staticmethod
def from_file(filename="feff.inp"):
"""
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
Args:
filename: Filename for either PARAMETER or feff.inp file
Returns:
Feff_tag object
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
params = {}
eels_params = []
ieels = -1
ieels_max = -1
for i, line in enumerate(lines):
m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Tags.proc_val(key, val)
if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
if key in ["ELNES", "EXELFS"]:
ieels = i
ieels_max = ieels + 5
else:
params[key] = val
if ieels >= 0:
if i >= ieels and i <= ieels_max:
if i == ieels + 1:
if int(line.split()[1]) == 1:
ieels_max -= 1
eels_params.append(line)
if eels_params:
if len(eels_params) == 6:
eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
else:
eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
for k, v in zip(eels_keys, eels_params[1:]):
eels_dict[k] = str(v)
params[str(eels_params[0].split()[0])] = eels_dict
return Tags(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert Feff parameters to proper types, e.g.
integers, floats, lists, etc.
Args:
key: Feff parameter key
val: Actual value of Feff parameter.
"""
list_type_keys = list(VALID_FEFF_TAGS)
del list_type_keys[list_type_keys.index("ELNES")]
del list_type_keys[list_type_keys.index("EXELFS")]
boolean_type_keys = ()
float_type_keys = ("S02", "EXAFS", "RPATH")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key.lower() == 'cif':
m = re.search(r"\w+.cif", val)
return m.group(0)
if key in list_type_keys:
output = list()
toks = re.split(r"\s+", val)
for tok in toks:
m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
if m:
output.extend([smart_int_or_float(m.group(2))] *
int(m.group(1)))
else:
output.append(smart_int_or_float(tok))
return output
if key in boolean_type_keys:
m = re.search(r"^\W+([TtFf])", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_type_keys:
return float(val)
except ValueError:
return val.capitalize()
return val.capitalize()
def diff(self, other):
"""
Diff function. Compares two PARAMETER files and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other: The other PARAMETER dictionary to compare to.
Returns:
Dict of the format {"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different} Note that the
parameters are return as full dictionaries of values.
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": "Default"}
elif v1 != other[k1]:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"FEFF_TAGS1": "Default",
"FEFF_TAGS2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another Tags object to this object
Facilitates the use of "standard" Tags
"""
params = dict(self)
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Tags have conflicting values!")
else:
params[k] = v
return Tags(params)
class Potential(MSONable):
"""
FEFF atomic potential.
"""
def __init__(self, struct, absorbing_atom):
"""
Args:
struct (Structure): Structure object.
absorbing_atom (str/int): Absorbing atom symbol or site index
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, _ = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
@staticmethod
def pot_string_from_file(filename='feff.inp'):
"""
Reads Potential parameters from a feff.inp or FEFFPOT file.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichometry spinph
Args:
filename: file name containing potential data.
Returns:
FEFFPOT string.
"""
with zopen(filename, "rt") as f_object:
f = f_object.readlines()
ln = -1
pot_str = ["POTENTIALS\n"]
pot_tag = -1
pot_data = 0
pot_data_over = 1
sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),
re.compile('^[*]+.*[*]+$')]
for line in f:
if pot_data_over == 1:
ln += 1
if pot_tag == -1:
pot_tag = line.find("POTENTIALS")
ln = 0
if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
try:
if len(sep_line_pattern[0].findall(line)) > 0 or \
len(sep_line_pattern[1].findall(line)) > 0:
pot_str.append(line)
elif int(line.split()[0]) == pot_data:
pot_data += 1
pot_str.append(line.replace("\r", ""))
except (ValueError, IndexError):
if pot_data > 0:
pot_data_over = 0
return ''.join(pot_str).rstrip('\n')
@staticmethod
def pot_dict_from_string(pot_data):
"""
Creates atomic symbol/potential number dictionary
forward and reverse
Arg:
pot_data: potential data in string format
Returns:
forward and reverse atom symbol and potential number dictionaries.
"""
pot_dict = {}
pot_dict_reverse = {}
begin = 0
ln = -1
for line in pot_data.split("\n"):
try:
if begin == 0 and line.split()[0] == "0":
begin += 1
ln = 0
if begin == 1:
ln += 1
if ln > 0:
atom = line.split()[2]
index = int(line.split()[0])
pot_dict[atom] = index
pot_dict_reverse[index] = atom
except (ValueError, IndexError):
pass
return pot_dict, pot_dict_reverse
def __str__(self):
"""
Returns a string representation of potential parameters to be used in
the feff.inp file,
determined from structure object.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Returns:
String representation of Atomic Coordinate Shells.
"""
central_element = Element(self.absorbing_atom)
ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]
for el, amt in self.struct.composition.items():
ipot = self.pot_dict[el.symbol]
ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
ipot_sorted = sorted(ipotrow, key=itemgetter(0))
ipotrow = str(tabulate(ipot_sorted,
headers=["*ipot", "Z", "tag", "lmax1",
"lmax2", "xnatph(stoichometry)",
"spinph"]))
ipotlist = ipotrow.replace("--", "**")
ipotlist = ''.join(["POTENTIALS\n", ipotlist])
return ipotlist
def write_file(self, filename='POTENTIALS'):
"""
Write to file.
Args:
filename: filename and path to write potential file to.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Paths(MSONable):
    """
    Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).
    """
    def __init__(self, atoms, paths, degeneracies=None):
        """
        Args:
            atoms (Atoms): Atoms object
            paths (list(list)): list of paths. Each path is a list of atom indices in the atomic
                cluster(the molecular cluster created by Atoms class).
                e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.
            degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.
        """
        self.atoms = atoms
        self.paths = paths
        # Default: every path is non-degenerate.
        self.degeneracies = degeneracies or [1] * len(paths)
        assert len(self.degeneracies) == len(self.paths)
    def __str__(self):
        # Render the paths.dat body: one header line per path followed by
        # one "x y z ipot label" row per leg of the path.
        lines = ["PATH", "---------------"]
        # max possible, to avoid name collision count down from max value.
        path_index = 9999
        for i, legs in enumerate(self.paths):
            lines.append("{} {} {}".format(path_index, len(legs), self.degeneracies[i]))
            lines.append("x y z ipot label")
            for l in legs:
                coords = self.atoms.cluster[l].coords.tolist()
                tmp = "{:.6f} {:.6f} {:.6f}".format(*tuple(coords))
                element = str(self.atoms.cluster[l].specie.name)
                # the potential index for the absorbing atom(the one at the cluster origin) is 0
                potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]
                tmp = "{} {} {}".format(tmp, potential, element)
                lines.append(tmp)
            path_index -= 1
        return "\n".join(lines)
    def write_file(self, filename="paths.dat"):
        """
        Write paths.dat.
        """
        with zopen(filename, "wt") as f:
            f.write(str(self) + "\n")
class FeffParserError(Exception):
    """
    Exception class for Structure.
    Raised when the structure has problems, e.g., atoms that are too close.
    """

    def __init__(self, msg):
        """
        Args:
            msg (str): human-readable description of the parse problem.
        """
        # Forward the message to Exception so that args, repr() and
        # pickling behave like a normal exception; the original __init__
        # bypassed the base class entirely. The legacy .msg attribute is
        # kept for backward compatibility.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return "FeffParserError : " + self.msg
def get_atom_map(structure):
    """
    Returns a dict that maps each atomic symbol to a unique integer starting
    from 1, numbered in order of first appearance in the structure.

    Args:
        structure (Structure)

    Returns:
        dict
    """
    syms = (site.specie.symbol for site in structure)
    # dict.fromkeys preserves first-appearance order and deduplicates in a
    # single pass, replacing the previous O(n^2) list.count() scan.
    return {sym: i for i, sym in enumerate(dict.fromkeys(syms), start=1)}
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
    """
    Return the absorbing atom symbol and site index in the given structure.

    Args:
        absorbing_atom (str/int): symbol or site index
        structure (Structure)

    Returns:
        str, int: symbol and site index

    Raises:
        ValueError: if absorbing_atom is neither a str nor an int.
    """
    if isinstance(absorbing_atom, str):
        # Symbol given: use the first matching site in the structure.
        site_index = structure.indices_from_symbol(absorbing_atom)[0]
        return absorbing_atom, site_index
    if isinstance(absorbing_atom, int):
        # Site index given: look up the specie symbol at that site.
        return str(structure[absorbing_atom].specie), absorbing_atom
    raise ValueError("absorbing_atom must be either specie symbol or site index")
|
dongsenfo/pymatgen
|
pymatgen/io/feff/inputs.py
|
Python
|
mit
| 32,731
|
[
"FEFF",
"pymatgen"
] |
5a53e29c83b9fbcd67a9390bd5cc93ef93809073a7f2ba38ad3fd44bb63cd5c0
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This script compares the diffusion coefficient of a single thermalized particle obtained from the particle's mean square displacement and the auto correlation function of its velocity to the expected value. It uses the Observables/Correlators framework.
"""
from __future__ import division, print_function
import espressomd
from espressomd.accumulators import Correlator
from espressomd.observables import ParticlePositions, ParticleVelocities
import numpy as np
# Langevin friction, temperature and integration time step.
gamma = 2.4
kT = 1.37
dt = 0.05
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
# Single thermalized particle whose diffusion is measured.
p = system.part.add(pos=(0, 0, 0), id=0)
system.time_step = dt
system.thermostat.set_langevin(kT=kT, gamma=gamma)
system.cell_system.skin = 0.4
# Short warm-up before sampling starts.
system.integrator.run(1000)
pos_obs = ParticlePositions(ids=(0,))
vel_obs = ParticleVelocities(ids=(0,))
# MSD correlator (componentwise square distance) and VACF correlator
# (scalar product of velocities), updated automatically during integration.
c_pos = Correlator(obs1=pos_obs, tau_lin=16, tau_max=100., delta_N=10,
                   corr_operation="square_distance_componentwise", compress1="discard1")
c_vel = Correlator(obs1=vel_obs, tau_lin=16, tau_max=20., delta_N=1,
                   corr_operation="scalar_product", compress1="discard1")
system.auto_update_accumulators.add(c_pos)
system.auto_update_accumulators.add(c_vel)
# Production run.
system.integrator.run(1000000)
c_pos.finalize()
c_vel.finalize()
np.savetxt("msd.dat", c_pos.result())
np.savetxt("vacf.dat", c_vel.result())
# Integral of vacf via Green-Kubo
#D= 1/3 int_0^infty <v(t_0)v(t_0+t)> dt
vacf = c_vel.result()
#Integrate with trapezoidal rule
I = np.trapz(vacf[:, 2], vacf[:, 0])
# Expected diffusion coefficient of Langevin dynamics is kT/gamma.
print("Ratio of measured and expected diffusion coefficients from Green-Kubo:",
      1. / 3. * I / (kT / gamma))
# Check MSD
msd = c_pos.result()
# Einstein relation per component: <x^2(t)> = 2 D t with D = kT/gamma.
expected_msd = lambda x: 2. * kT / gamma * x
print("Ratio of expected and measured msd")
print("#time ratio_x ratio_y ratio_z")
for i in range(4, msd.shape[0], 4):
    print(msd[i, 0], msd[i, 2:5] / expected_msd(msd[i, 0]))
|
hmenke/espresso
|
samples/diffusion_coefficient.py
|
Python
|
gpl-3.0
| 2,720
|
[
"ESPResSo"
] |
d7780600ae56ea5924b1ccdb9a49d93279e6a683ac49382ab43f2b79206169eb
|
#!/usr/bin/env python
import sys, os, collections, subprocess, sniper_lib, sniper_config
def ex_ret(cmd):
    """Run *cmd* (an argv list) and return everything it wrote to stdout."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, _ = proc.communicate()
    return output
def cppfilt(name):
    """Demangle a C++ symbol *name* by piping it through the c++filt tool."""
    command = ['c++filt', name]
    return ex_ret(command)
class Function:
    """One static function record from the routine trace (a ':'-prefixed line)."""
    def __init__(self, eip, name, location):
        self.eip = eip  # runtime instruction pointer of the function entry (hex string)
        # Demangle the C++ symbol via c++filt
        self.name = cppfilt(name).strip()
        # location fields: [0] binary image, [1] load offset,
        # [2] source file (used by Profile.writeCallgrind)
        self.location = location.split(':')
        self.img = self.location[0]
        self.offset = long(self.location[1])
        # link-time address
        self.ieip = str(long(eip, 16) - self.offset)
    def __str__(self):
        return self.name
        #return '[%8s] %s' % (self.eip, self.name)
class Call:
    """One dynamic call-stack node, keyed by its full ':'-joined stack string."""
    def __init__(self, name, eip, stack, data):
        self.name = name    # demangled name of the function on top of this stack
        self.eip = eip      # entry eip of that function
        self.stack = stack  # ':'-joined stack of (link-time) eips identifying this node
        self.data = data    # counters exclusive to this node (no children)
    def add(self, data):
        # Accumulate another sample's counters into this node's exclusive data.
        for k, v in data.items():
            self.data[k] = self.data.get(k, 0) + v
    def buildTotal(self, prof):
        # Compute self.total = own counters + all children's totals, folding
        # children flagged by prof.foldCall() into this node.
        # Assumes all children have already been visited!
        self.children = prof.children[self.stack]
        # Add self to global total
        for k, v in self.data.items():
            prof.totals[k] = prof.totals.get(k, 0) + v
        # Add all children to our total
        self.total = dict(self.data)
        for stack in self.children.copy():
            for k, v in prof.calls[stack].total.items():
                self.total[k] += v
            # Child is to be folded: add it to self, remove from list of children
            if prof.calls[stack].folded:
                for k, v in prof.calls[stack].data.items():
                    if k != 'calls':
                        self.data[k] += v
                self.children.remove(stack)
                # Adopt the folded child's children as our own.
                for grandchild in prof.calls[stack].children:
                    self.children.add(grandchild)
        # Fold into parents?
        self.folded = prof.foldCall(self)
class Category(Call):
    """A Call-like accumulator with no stack of its own, used for per-category
    and per-static-function summaries."""
    def __init__(self, name):
        self.name = name
        self.stack = ''
        self.data = {}
    def printLine(self, prof, obj):
        # Columns: %time, %icount, IPC, L2 misses per kilo-instruction, name.
        # The 'or 1' fallbacks guard against division by zero for empty
        # counters, matching CallPrinterDefault.printLine.
        print >> obj, '%6.2f%%\t' % (100 * self.data['nonidle_elapsed_time'] / float(prof.totals['nonidle_elapsed_time'] or 1)) + \
            '%6.2f%%\t' % (100 * self.data['instruction_count'] / float(prof.totals['instruction_count'] or 1)) + \
            '%7.2f\t' % (self.data['instruction_count'] / (prof.fs_to_cycles * float(self.data['nonidle_elapsed_time'] or 1))) + \
            '%7.2f\t' % (1000 * self.data['l2miss'] / float(self.data['instruction_count'] or 1)) + \
            self.name
class CallPrinter:
    """Base class for rendering a Profile's call tree as indented text.

    Subclasses supply printHeader() and printLine(); this class walks the
    tree depth-first, most-expensive child first, pruning subtrees whose
    share of total non-idle time falls below the cutoff.
    """
    def __init__(self, prof, obj, opt_cutoff):
        self.prof = prof              # Profile being rendered
        self.obj = obj                # output file object
        self.opt_cutoff = opt_cutoff  # minimum time fraction for a subtree to be shown
    def printTree(self, stack, offset = 0):
        node = self.prof.calls[stack]
        self.printLine(node, offset = offset)
        total_time = float(self.prof.totals['nonidle_elapsed_time'])
        time_of = lambda s: self.prof.calls[s].total['nonidle_elapsed_time']
        for child in sorted(node.children, key = time_of, reverse = True):
            # Children are sorted by descending time, so once one falls below
            # the cutoff all remaining ones do too.
            if time_of(child) / total_time < self.opt_cutoff:
                break
            self.printTree(child, offset = offset + 1)
class CallPrinterDefault(CallPrinter):
    """Call-tree printer using relative (percentage) time/instruction columns."""
    def printHeader(self):
        print >> self.obj, '%7s\t%7s\t%7s\t%7s\t%7s\t%7s\t%s' % ('calls', 'time', 't.self', 'icount', 'ipc', 'l2.mpki', 'name')
    def printLine(self, call, offset):
        # Columns: calls, %time (inclusive), %time (self only), %icount
        # (inclusive), IPC, L2 misses per kilo-instruction, indented name.
        # The 'or 1' fallbacks guard against division by zero.
        print >> self.obj, '%7d\t' % call.data['calls'] + \
            '%6.2f%%\t' % (100 * call.total['nonidle_elapsed_time'] / float(self.prof.totals['nonidle_elapsed_time'] or 1)) + \
            '%6.2f%%\t' % (100 * call.data['nonidle_elapsed_time'] / float(self.prof.totals['nonidle_elapsed_time'] or 1)) + \
            '%6.2f%%\t' % (100 * call.total['instruction_count'] / float(self.prof.totals['instruction_count'] or 1)) + \
            '%7.2f\t' % (call.total['instruction_count'] / (self.prof.fs_to_cycles * float(call.total['nonidle_elapsed_time'] or 1))) + \
            '%7.2f\t' % (1000 * call.total['l2miss'] / float(call.total['instruction_count'] or 1)) + \
            ' ' * offset + call.name
class CallPrinterAbsolute(CallPrinter):
    """Call-tree printer using absolute counts (cycles, instructions, misses)."""
    def printHeader(self):
        print >> self.obj, '%7s\t%9s\t%9s\t%9s\t%9s\t%9s\t%s' % ('calls', 'cycles', 'c.self', 'icount', 'i.self', 'l2miss', 'name')
    def printLine(self, call, offset):
        # Columns: calls, cycles (inclusive), cycles (self), icount (inclusive),
        # icount (self), L2 misses (inclusive), indented name.  Times are stored
        # in femtoseconds and converted to cycles via prof.fs_to_cycles.
        print >> self.obj, '%7d\t' % call.data['calls'] + \
            '%9d\t' % (self.prof.fs_to_cycles * float(call.total['nonidle_elapsed_time'])) + \
            '%9d\t' % (self.prof.fs_to_cycles * float(call.data['nonidle_elapsed_time'])) + \
            '%9d\t' % call.total['instruction_count'] + \
            '%9d\t' % call.data['instruction_count'] + \
            '%9d\t' % call.total['l2miss'] + \
            ' ' * offset + call.name
class Profile:
    """Parses a Sniper sim.rtntracefull routine trace and aggregates
    per-call-stack statistics (elapsed time, instruction count, L2 misses)."""
    def __init__(self, resultsdir = '.'):
        filename = os.path.join(resultsdir, 'sim.rtntracefull')
        if not os.path.exists(filename):
            raise IOError('Cannot find trace file %s' % filename)
        config = sniper_lib.get_config(resultsdir = resultsdir)
        # Core frequency in Hz; used to convert femtoseconds into core cycles.
        freq = 1e9 * float(sniper_config.get_config(config, 'perf_model/core/frequency'))
        self.fs_to_cycles = freq / 1e15
        self.functions = {}  # eip (hex string) -> Function
        self.calls = {}      # stack string -> Call
        self.children = collections.defaultdict(set)  # stack -> set of child stacks
        self.roots = set()
        self.totals = {}     # global per-counter totals
        fp = open(filename)
        # First line holds the tab-separated counter names.
        self.headers = fp.readline().strip().split('\t')
        for line in fp:
            if line.startswith(':'):
                # Function definition record: ":<eip>\t<name>\t<location>"
                eip, name, location = line.strip().split('\t')
                eip = eip[1:]
                self.functions[eip] = Function(eip, name, location)
            else:
                # Call-stack record: "<eip>:<eip>:...\t<counter values>..."
                line = line.strip().split('\t')
                stack = line[0].split(':')
                eip = stack[-1]
                # Normalize runtime eips to link-time addresses so stacks are
                # comparable across address-space layouts.
                stack = ':'.join(map(self.translateEip, stack))
                data = dict(zip(self.headers[1:], map(long, line[1:])))
                if stack in self.calls:
                    self.calls[stack].add(data)
                else:
                    self.calls[stack] = Call(str(self.functions[eip]), eip, stack, data)
                    parent = stack.rpartition(':')[0]
                    self.children[parent].add(stack)
        # Roots are the stacks that are nobody's child.
        self.roots = set(self.calls.keys())
        for parent in self.calls:
            for child in self.children[parent]:
                self.roots.remove(child)
        # Construct a list of calls where each child is ordered before its parent.
        calls_ordered = collections.deque()
        calls_tovisit = collections.deque(self.roots)
        while calls_tovisit:
            stack = calls_tovisit.pop()
            calls_ordered.appendleft(stack)
            calls_tovisit.extend(self.children[stack])
        # Now implement a non-recursive version of buildTotal, which requires that each
        # function's children have been visited before processing the parent,
        # by visiting calls_ordered in left-to-right order.
        for stack in calls_ordered:
            self.calls[stack].buildTotal(self)
    def translateEip(self, eip):
        # Map a runtime eip to its link-time address when the function is known.
        if eip in self.functions:
            return self.functions[eip].ieip
        else:
            return eip
    def foldCall(self, call):
        # PLT stubs get folded into their callers.
        if call.name == '.plt':
            return True
        else:
            return False
    def write(self, obj = sys.stdout, opt_absolute = False, opt_cutoff = .001):
        """Print the call tree, with relative or absolute columns, pruning
        subtrees below *opt_cutoff* of the total non-idle time."""
        if opt_absolute:
            printer = CallPrinterAbsolute(self, obj, opt_cutoff = opt_cutoff)
        else:
            printer = CallPrinterDefault(self, obj, opt_cutoff = opt_cutoff)
        printer.printHeader()
        for stack in sorted(self.roots, key = lambda stack: self.calls[stack].total['nonidle_elapsed_time'], reverse = True):
            printer.printTree(stack)
    def writeCallgrind(self, obj):
        """Write the profile in callgrind format (consumable by kcachegrind
        and gprof2dot)."""
        # Aggregate dynamic call nodes per static function (link-time eip).
        bystatic = dict([ (fn.ieip, Category(fn.eip)) for fn in self.functions.values() ])
        for stack in self.calls:
            fn = self.functions[self.calls[stack].eip]
            bystatic[fn.ieip].add(self.calls[stack].data)
            # Collect this node's callees, also keyed by static function.
            children = {}
            for _stack in self.children[stack]:
                _ieip = self.functions[self.calls[_stack].eip].ieip
                if _ieip not in children:
                    children[_ieip] = Category(self.calls[_stack].eip)
                children[_ieip].add(self.calls[_stack].total)
                children[_ieip].calls = self.calls[_stack].data['calls']
            bystatic[fn.ieip].children = children
        # Event definitions: (short name, description, extractor).
        costs = (
            ('Cycles', 'Cycles', lambda data: long(self.fs_to_cycles * data['nonidle_elapsed_time'])),
            ('Calls', 'Calls', lambda data: data['calls']),
            ('Icount', 'Instruction count', lambda data: data['instruction_count']),
            ('L2', 'L2 load misses', lambda data: data['l2miss']),
        )
        def formatData(data):
            # One space-separated value per event, in 'costs' order.
            return ' '.join(map(str, [ fn(data) for _, _, fn in costs ]))
        print >> obj, 'cmd: Sniper run'
        print >> obj, 'positions: instr'
        print >> obj, 'events:', ' '.join([ cost for cost, _, _ in costs ])
        for cost, desc, _ in costs:
            print >> obj, 'event: %s : %s' % (cost, desc)
        print >> obj, 'summary:', formatData(self.totals)
        print >> obj
        for site in sorted(bystatic.values(), key = lambda v: v.data.get('instruction_count',0), reverse=True):
            if not site.data:
                continue
            fn = self.functions[site.name]
            # ob/fl/fn: binary image, source file, function name.
            print >> obj, 'ob=%s' % fn.location[0]
            print >> obj, 'fl=%s' % fn.location[2]
            print >> obj, 'fn=%s' % fn.name
            print >> obj, '0x%x' % long(fn.ieip), formatData(site.data)
            # cob/cfi/cfn/calls: one entry per callee.
            for _site in site.children.values():
                _fn = self.functions[_site.name]
                print >> obj, 'cob=%s' % _fn.location[0]
                print >> obj, 'cfi=%s' % _fn.location[2]
                print >> obj, 'cfn=%s' % _fn.name
                print >> obj, 'calls=%s 0x%x' % (_site.calls, long(_fn.ieip))
                print >> obj, '0x%x' % long(_fn.ieip), formatData(_site.data)
            print >> obj
    def summarize(self, catnames, catfilters, obj = sys.stdout):
        """Aggregate all unfolded calls into named categories (first matching
        filter in *catfilters* wins) and print one summary line per category."""
        def get_catname(func):
            stack = func.stack
            while stack:
                has_parent = (':' in stack)
                # Find category for this function by trying a match against all filters in catfilters
                for catname, catfilter in catfilters:
                    if catfilter(self.calls[stack], self):
                        if catname:
                            return catname
                        elif has_parent:
                            # catname == None means fold into the parent
                            # break out of this for loop, and visit parent function
                            break
                        else:
                            # Ignore fold matches for root functions, try to match with another category
                            continue
                # Visit parent function
                stack = stack.rpartition(':')[0]
        bytype = dict([ (name, Category(name)) for name in catnames ])
        for func in self.calls.values():
            if not func.folded:
                catname = get_catname(func)
                bytype[catname].add(func.data)
        print >> obj, '%7s\t%7s\t%7s\t%7s' % ('time', 'icount', 'ipc', 'l2.mpki')
        for name in catnames:
            if bytype[name].data:
                bytype[name].printLine(self, obj = obj)
if __name__ == '__main__':
    # Command-line driver: parse options, print the profile, and optionally
    # emit callgrind output plus dot/png/svg renderings into an output dir.
    import getopt
    def usage():
        print '%s [-d <resultsdir (.)> | -o <outputdir>] [--abs]' % sys.argv[0]
        sys.exit(1)
    HOME = os.path.dirname(__file__)
    resultsdir = '.'
    outputdir = None
    opt_absolute = False
    try:
        opts, cmdline = getopt.getopt(sys.argv[1:], "hd:o:", ['abs'])
    except getopt.GetoptError, e:
        # print help information and exit:
        print >> sys.stderr, e
        usage()
    for o, a in opts:
        if o == '-h':
            usage()
            sys.exit()
        if o == '-d':
            resultsdir = a
        if o == '-o':
            outputdir = a
        if o == '--abs':
            # Show absolute cycle/instruction counts instead of percentages.
            opt_absolute = True
    prof = Profile(resultsdir)
    # Text profile goes to <outputdir>/sim.profile, or stdout when no -o given.
    prof.write(file(os.path.join(outputdir, 'sim.profile'), 'w') if outputdir else sys.stdout, opt_absolute = opt_absolute)
    if outputdir:
        callgrindfile = os.path.join(outputdir, 'callgrind.out.sniper')
        prof.writeCallgrind(file(callgrindfile, 'w'))
        # Render a call graph with the bundled gprof2dot.py, then rasterize
        # it when graphviz' dot is available.
        gprof2dot_py = os.path.join(HOME, 'gprof2dot.py')
        dotbasefile = os.path.join(outputdir, 'sim.profile')
        os.system('%s --format=callgrind --output=%s.dot %s' % (gprof2dot_py, dotbasefile, callgrindfile))
        import distutils.spawn
        if distutils.spawn.find_executable('dot'):
            os.system('dot -Tpng %s.dot -o %s.png' % (dotbasefile, dotbasefile))
            os.system('dot -Tsvg %s.dot -o %s.svg' % (dotbasefile, dotbasefile))
|
yonggang985/Sniper
|
tools/gen_profile.py
|
Python
|
mit
| 12,074
|
[
"VisIt"
] |
d04dfbe0b7ad01b54fd1969e39a5a1adc95c1ceb801fd46f81738025d41cba24
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from six.moves import cPickle
import os
from MDAnalysisTests.tempdir import TempDir
import numpy as np
from numpy.testing import (
dec,
assert_,
assert_allclose,
assert_almost_equal,
assert_equal,
assert_raises,
)
from nose.plugins.attrib import attr
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import (
PSF, DCD,
PSF_BAD,
PDB_small,
PDB_chainidrepeat,
GRO, TRR,
two_water_gro, two_water_gro_nonames,
TRZ, TRZ_psf,
)
from MDAnalysisTests import parser_not_found
import MDAnalysis as mda
import MDAnalysis.coordinates
from MDAnalysis.topology.base import TopologyReaderBase
class IOErrorParser(TopologyReaderBase):
    """Stub topology parser whose parse() always raises IOError; used to test
    error propagation during Universe creation."""
    def parse(self):
        raise IOError("Useful information")
class TestUniverseCreation(object):
    """Tests concerning Universe creation and the errors encountered on
    bad/missing/unreadable input files."""

    @staticmethod
    def test_load():
        # Universe(top, trj)
        u = mda.Universe(PSF, PDB_small)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")

    @staticmethod
    def test_make_universe_no_args():
        # universe creation without args should work
        u = mda.Universe()
        assert_(isinstance(u, mda.Universe))
        assert_(u.atoms == None)

    @staticmethod
    def test_Universe_no_trajectory_AE():
        # querying trajectory without a trajectory loaded (only topology)
        u = make_Universe()
        assert_raises(AttributeError, getattr, u, 'trajectory')

    @staticmethod
    def test_Universe_topology_unrecognizedformat_VE():
        assert_raises(ValueError, mda.Universe, 'some.weird.not.pdb.but.converted.xtc')

    @staticmethod
    def test_Universe_topology_unrecognizedformat_VE_msg():
        # same as above, but checks the error message wording
        try:
            mda.Universe('some.weird.not.pdb.but.converted.xtc')
        except ValueError as e:
            assert_('isn\'t a valid topology format' in e.args[0])
        else:
            raise AssertionError

    @staticmethod
    def test_Universe_topology_IE():
        assert_raises(IOError,
                      mda.Universe, 'thisfile', topology_format=IOErrorParser)

    @staticmethod
    def test_Universe_topology_IE_msg():
        # should get the original error, as well as Universe error
        try:
            mda.Universe('thisfile', topology_format=IOErrorParser)
        except IOError as e:
            assert_('Failed to load from the topology file' in e.args[0])
            assert_('Useful information' in e.args[0])
        else:
            raise AssertionError

    @staticmethod
    def test_Universe_filename_IE_msg():
        # check for non existent file
        try:
            mda.Universe('thisfile.xml')
        except IOError as e:
            assert_equal('No such file or directory', e.strerror)
        else:
            raise AssertionError

    @staticmethod
    def test_Universe_invalidfile_IE_msg():
        # check for invalid file (something with the wrong content)
        temp_dir = TempDir()
        with open(os.path.join(temp_dir.name, 'invalid.file.tpr'), 'w') as temp_file:
            temp_file.write('plop')
        try:
            mda.Universe(os.path.join(temp_dir.name, 'invalid.file.tpr'))
        except IOError as e:
            assert_('file or cannot be recognized' in e.args[0])
        else:
            raise AssertionError
        finally:
            temp_dir.dissolve()

    @staticmethod
    def test_Universe_invalidpermissionfile_IE_msg():
        # check for file with invalid permissions (eg. no read access)
        temp_dir = TempDir()
        temp_file = os.path.join(temp_dir.name, 'permission.denied.tpr')
        with open(temp_file, 'w'):
            pass
        # write-only: reading it back must fail with 'Permission denied'
        os.chmod(temp_file, 0o200)
        try:
            mda.Universe(os.path.join(temp_dir.name, 'permission.denied.tpr'))
        except IOError as e:
            assert_('Permission denied' in e.strerror)
        else:
            raise AssertionError
        finally:
            temp_dir.dissolve()

    @staticmethod
    def test_load_new_VE():
        u = mda.Universe()
        assert_raises(TypeError,
                      u.load_new, 'thisfile', format='soup')

    @staticmethod
    def test_universe_kwargs():
        # unknown kwargs are kept around in u.kwargs for later reuse
        u = mda.Universe(PSF, PDB_small, fake_kwarg=True)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_(u.kwargs['fake_kwarg'] is True)
        # initialize new universe from pieces of existing one
        u2 = mda.Universe(u.filename, u.trajectory.filename,
                          **u.kwargs)
        assert_(u2.kwargs['fake_kwarg'] is True)
        assert_equal(u.kwargs, u2.kwargs)

    @staticmethod
    def test_universe_topology_class_with_coords():
        # a Topology object can be reused as the topology source directly
        u = mda.Universe(PSF, PDB_small)
        u2 = mda.Universe(u._topology, PDB_small)
        assert_(isinstance(u2.trajectory, type(u.trajectory)))
        assert_equal(u.trajectory.n_frames, u2.trajectory.n_frames)
        assert_(u2._topology is u._topology)
class TestUniverse(object):
    """Assorted Universe loading/behavior tests."""
    # older tests, still useful
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_bad_topology(self):
        # tests that Universe builds produce the right error message
        def bad_load():
            return mda.Universe(PSF_BAD, DCD)
        assert_raises(ValueError, bad_load)

    @attr('issue')
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_new(self):
        # load_new replaces the trajectory on an existing Universe
        u = mda.Universe(PSF, DCD)
        u.load_new(PDB_small)
        assert_equal(len(u.trajectory), 1, "Failed to load_new(PDB)")

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_new_TypeError(self):
        u = mda.Universe(PSF, DCD)
        def bad_load(uni):
            return uni.load_new('filename.notarealextension')
        assert_raises(TypeError, bad_load, u)

    def test_load_structure(self):
        # Universe(struct)
        ref = mda.Universe(PSF, PDB_small)
        u = mda.Universe(PDB_small)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_almost_equal(u.atoms.positions, ref.atoms.positions)

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_multiple_list(self):
        # Universe(top, [trj, trj, ...])
        ref = mda.Universe(PSF, DCD)
        u = mda.Universe(PSF, [DCD, DCD])
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        # chained trajectory has twice the frames
        assert_equal(u.trajectory.n_frames, 2 * ref.trajectory.n_frames)

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_multiple_args(self):
        # Universe(top, trj, trj, ...)
        ref = mda.Universe(PSF, DCD)
        u = mda.Universe(PSF, DCD, DCD)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_equal(u.trajectory.n_frames, 2 * ref.trajectory.n_frames)

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_pickle_raises_NotImplementedError(self):
        u = mda.Universe(PSF, DCD)
        assert_raises(NotImplementedError, cPickle.dumps, u, protocol=cPickle.HIGHEST_PROTOCOL)

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_set_dimensions(self):
        u = mda.Universe(PSF, DCD)
        box = np.array([10, 11, 12, 90, 90, 90])
        u.dimensions = np.array([10, 11, 12, 90, 90, 90])
        assert_allclose(u.dimensions, box)
def test_chainid_quick_select():
    # check that chainIDs get grouped together when making the quick selectors
    # this pdb file has 2 segments with chainID A
    u = mda.Universe(PDB_chainidrepeat)
    for group in (u.A, u.B):
        assert_(isinstance(group, mda.core.groups.SegmentGroup))
    for segment in (u.C, u.D):
        assert_(isinstance(segment, mda.core.groups.Segment))
    expected_sizes = {'A': 10, 'B': 10, 'C': 5, 'D': 7}
    for name, size in expected_sizes.items():
        assert_(len(getattr(u, name).atoms) == size)
class TestGuessBonds(object):
    """Test the AtomGroup method guess_bonds
    This needs to be done both from Universe creation (via kwarg) and AtomGroup
    It needs to:
    - work if all atoms are in vdwradii table
    - fail properly if not
    - work again if vdwradii are passed.
    """
    def setUp(self):
        # custom van der Waals radii for the otherwise-unknown atom types
        self.vdw = {'A':1.05, 'B':0.4}

    def tearDown(self):
        del self.vdw

    def _check_universe(self, u):
        """Verify that the Universe is created correctly"""
        assert_equal(len(u.bonds), 4)
        assert_equal(len(u.angles), 2)
        assert_equal(len(u.dihedrals), 0)
        assert_equal(len(u.atoms[0].bonds), 2)
        assert_equal(len(u.atoms[1].bonds), 1)
        assert_equal(len(u.atoms[2].bonds), 1)
        assert_equal(len(u.atoms[3].bonds), 2)
        assert_equal(len(u.atoms[4].bonds), 1)
        assert_equal(len(u.atoms[5].bonds), 1)
        assert_('guess_bonds' in u.kwargs)

    def test_universe_guess_bonds(self):
        """Test that making a Universe with guess_bonds works"""
        u = mda.Universe(two_water_gro, guess_bonds=True)
        self._check_universe(u)
        assert_(u.kwargs['guess_bonds'] is True)

    def test_universe_guess_bonds_no_vdwradii(self):
        """Make a Universe that has atoms with unknown vdwradii."""
        assert_raises(ValueError, mda.Universe, two_water_gro_nonames, guess_bonds=True)

    def test_universe_guess_bonds_with_vdwradii(self):
        """Unknown atom types, but with vdw radii here to save the day"""
        u = mda.Universe(two_water_gro_nonames, guess_bonds=True,
                         vdwradii=self.vdw)
        self._check_universe(u)
        assert_(u.kwargs['guess_bonds'] is True)
        assert_equal(self.vdw, u.kwargs['vdwradii'])

    def test_universe_guess_bonds_off(self):
        u = mda.Universe(two_water_gro_nonames, guess_bonds=False)
        # no bond information at all should have been generated
        for attr in ('bonds', 'angles', 'dihedrals'):
            assert_(not hasattr(u, attr))
        assert_(u.kwargs['guess_bonds'] is False)

    def _check_atomgroup(self, ag, u):
        """Verify that the AtomGroup made bonds correctly,
        and that the Universe got all this info
        """
        assert_equal(len(ag.bonds), 2)
        assert_equal(len(ag.angles), 1)
        assert_equal(len(ag.dihedrals), 0)
        assert_equal(len(u.bonds), 2)
        assert_equal(len(u.angles), 1)
        assert_equal(len(u.dihedrals), 0)
        assert_equal(len(u.atoms[0].bonds), 2)
        assert_equal(len(u.atoms[1].bonds), 1)
        assert_equal(len(u.atoms[2].bonds), 1)
        # atoms outside the AtomGroup must remain bond-less
        assert_equal(len(u.atoms[3].bonds), 0)
        assert_equal(len(u.atoms[4].bonds), 0)
        assert_equal(len(u.atoms[5].bonds), 0)

    def test_atomgroup_guess_bonds(self):
        """Test an atomgroup doing guess bonds"""
        u = mda.Universe(two_water_gro)
        ag = u.atoms[:3]
        ag.guess_bonds()
        self._check_atomgroup(ag, u)

    def test_atomgroup_guess_bonds_no_vdwradii(self):
        u = mda.Universe(two_water_gro_nonames)
        ag = u.atoms[:3]
        assert_raises(ValueError, ag.guess_bonds)

    def test_atomgroup_guess_bonds_with_vdwradii(self):
        u = mda.Universe(two_water_gro_nonames)
        ag = u.atoms[:3]
        ag.guess_bonds(vdwradii=self.vdw)
        self._check_atomgroup(ag, u)
class TestInMemoryUniverse(object):
    """Tests for in-memory trajectories (``in_memory=True`` at creation time
    and ``Universe.transfer_to_memory`` with start/stop/step slicing).

    Note: all Universe constructions use the module-wide ``mda`` alias; the
    previous mix of ``mda.Universe`` and bare ``MDAnalysis.Universe`` only
    worked because ``import MDAnalysis.coordinates`` binds the bare name as
    a side effect.
    """

    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_reader_w_timeseries():
        universe = mda.Universe(PSF, DCD, in_memory=True)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 98, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_reader_wo_timeseries():
        universe = mda.Universe(GRO, TRR, in_memory=True)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (47681, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_reader_w_timeseries_frame_interval():
        # keep every 10th frame: 98 -> 10 frames
        universe = mda.Universe(PSF, DCD, in_memory=True,
                                in_memory_step=10)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_reader_wo_timeseries_frame_interval():
        # keep every 3rd frame: 10 -> 4 frames
        universe = mda.Universe(GRO, TRR, in_memory=True,
                                in_memory_step=3)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (47681, 4, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_existing_universe():
        universe = mda.Universe(PDB_small, DCD)
        universe.transfer_to_memory()
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 98, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_frame_interval_convention():
        # timeseries(skip=10) and in_memory_step=10 must agree
        universe1 = mda.Universe(PSF, DCD)
        array1 = universe1.trajectory.timeseries(skip=10)
        universe2 = mda.Universe(PSF, DCD, in_memory=True,
                                 in_memory_step=10)
        array2 = universe2.trajectory.timeseries()
        assert_equal(array1, array2,
                     err_msg="Unexpected differences between arrays.")

    @staticmethod
    def test_slicing_with_start_stop():
        universe = mda.Universe(PDB_small, DCD)
        # keep frames 10..19 -> 10 frames
        universe.transfer_to_memory(start=10, stop=20)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_slicing_without_start():
        universe = mda.Universe(PDB_small, DCD)
        # keep only the first 10 frames
        universe.transfer_to_memory(stop=10)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_slicing_without_stop():
        universe = mda.Universe(PDB_small, DCD)
        # drop the first 10 frames: 98 -> 88 frames
        universe.transfer_to_memory(start=10)
        print(universe.trajectory.timeseries(universe.atoms).shape)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 88, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_slicing_step_without_start_stop():
        universe = mda.Universe(PDB_small, DCD)
        # every other frame: 98 -> 49 frames
        universe.transfer_to_memory(step=2)
        print(universe.trajectory.timeseries(universe.atoms).shape)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 49, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_slicing_step_with_start_stop():
        universe = mda.Universe(PDB_small, DCD)
        # frames 10, 12, ..., 28 -> 10 frames
        universe.transfer_to_memory(start=10, stop=30, step=2)
        print(universe.trajectory.timeseries(universe.atoms).shape)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_slicing_negative_start():
        universe = mda.Universe(PDB_small, DCD)
        # keep the last 10 frames
        universe.transfer_to_memory(start=-10)
        print(universe.trajectory.timeseries(universe.atoms).shape)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")

    @staticmethod
    def test_slicing_negative_stop():
        universe = mda.Universe(PDB_small, DCD)
        # drop the last 20 frames: 98 -> 78 frames
        universe.transfer_to_memory(stop=-20)
        print(universe.trajectory.timeseries(universe.atoms).shape)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 78, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
class TestCustomReaders(object):
    """
    Can pass a reader as kwarg on Universe creation
    """
    # NOTE(review): ``MDAnalysis.topology`` is not imported at the top of this
    # module (only ``MDAnalysis.coordinates`` is); the attribute accesses below
    # rely on the package importing its topology subpackage as a side effect --
    # confirm, or add an explicit import.  All accesses use the module-wide
    # ``mda`` alias for consistency (``mda`` is the same module object as
    # ``MDAnalysis``, so behavior is unchanged).

    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def test_custom_reader(self):
        # check that reader passing works
        u = mda.Universe(TRZ_psf, TRZ, format=mda.coordinates.TRZ.TRZReader)
        assert_equal(len(u.atoms), 8184)

    def test_custom_reader_singleframe(self):
        T = mda.topology.GROParser.GROParser
        R = mda.coordinates.GRO.GROReader
        u = mda.Universe(two_water_gro, two_water_gro,
                         topology_format=T, format=R)
        assert_equal(len(u.atoms), 6)

    def test_custom_reader_singleframe_2(self):
        # Same as before, but only one argument to Universe
        T = mda.topology.GROParser.GROParser
        R = mda.coordinates.GRO.GROReader
        u = mda.Universe(two_water_gro,
                         topology_format=T, format=R)
        assert_equal(len(u.atoms), 6)

    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def test_custom_parser(self):
        # topology reader passing works
        u = mda.Universe(TRZ_psf, TRZ, topology_format=mda.topology.PSFParser.PSFParser)
        assert_equal(len(u.atoms), 8184)

    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def test_custom_both(self):
        # use custom for both
        u = mda.Universe(TRZ_psf, TRZ, format=mda.coordinates.TRZ.TRZReader,
                         topology_format=mda.topology.PSFParser.PSFParser)
        assert_equal(len(u.atoms), 8184)
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/core/test_universe.py
|
Python
|
gpl-2.0
| 20,011
|
[
"MDAnalysis"
] |
37c6ab474e501312250cb0e330fed2cf5e04b49b03e53655e46499493bf728cd
|
#!/usr/bin/env python
import sys
import os
# Keyword arguments that only apply when setuptools is available.
extras = {}
try:
    # Prefer setuptools: it supports zip_safe and install_requires.
    from setuptools import setup
    extras['zip_safe'] = False
    if sys.version_info < (2, 6) and os.name != 'java':
        # multiprocessing entered the stdlib in Python 2.6; older CPython
        # interpreters need the PyPI backport (Jython is excluded).
        extras['install_requires'] = ['multiprocessing']
except ImportError:
    # Fall back to plain distutils; none of the extras apply there.
    from distutils.core import setup

setup(name='futures',
      version='2.1.6',
      description='Backport of the concurrent.futures package from Python 3.2',
      author='Brian Quinlan',
      author_email='brian@sweetapp.com',
      maintainer='Alex Gronholm',
      maintainer_email='alex.gronholm+pypi@nextday.fi',
      url='http://code.google.com/p/pythonfutures',
      download_url='http://pypi.python.org/pypi/futures/',
      packages=['futures', 'concurrent', 'concurrent.futures'],
      license='BSD',
      classifiers=['License :: OSI Approved :: BSD License',
                   'Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Developers',
                   'Programming Language :: Python :: 2.5',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.1'],
      **extras
      )
|
jboning/pythonfutures
|
setup.py
|
Python
|
bsd-2-clause
| 1,281
|
[
"Brian"
] |
ff4e92e34ccc6b2d93d1ced050ab2e0f5ba9eb59d5f9c77fae807003d088a472
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Static list of platform names used to tag productions (pouet.net-style
# demoscene platform tags, judging by the repo -- includes the 'Wild' entry).
platforms = [
    'Acorn',
    'Alambik',
    'Amiga AGA',
    'Amiga OCS/ECS',
    'Amiga PPC/RTG',
    'Amstrad CPC',
    'Amstrad Plus',
    'Android',
    'Apple II',
    'Apple II GS',
    'Atari Falcon 030',
    'Atari Jaguar',
    'Atari Lynx',
    'Atari ST',
    'Atari STe',
    'Atari TT 030',
    'Atari VCS',
    'Atari XL/XE',
    'BBC Micro',
    'BeOS',
    'BK-0010/11M',
    'C16/116/plus4',
    'C64 DTV',
    'Commodore 128',
    'Commodore 64',
    'Dreamcast',
    'Enterprise',
    'Flash',
    'FreeBSD',
    'Gameboy',
    'Gameboy Advance',
    'Gameboy Color',
    'Gamecube',
    'GamePark GP2X',
    'GamePark GP32',
    'Intellivision',
    'iOS',
    'Java',
    'JavaScript',
    'Linux',
    'MacOS',
    'MacOSX',
    'MacOSX Intel',
    'mIRC',
    'Mobile Phone',
    'MS-Dos',
    'MS-Dos/gus',
    'MSX',
    'MSX 2',
    'MSX 2 plus',
    'MSX Turbo-R',
    'NEC TurboGrafx/PC Engine',
    'NeoGeo Pocket',
    'NES/Famicom',
    'Nintendo 64',
    'Nintendo DS',
    'Nintendo Wii',
    'Oric',
    'PalmOS',
    'PHP',
    'Playstation',
    'Playstation 2',
    'Playstation 3',
    'Playstation Portable',
    'PocketPC',
    'Pokemon Mini',
    'Raspberry Pi',
    'SAM Coupé',
    'SEGA Game Gear',
    'SEGA Genesis/Mega Drive',
    'SEGA Master System',
    'SGI/IRIX',
    'Sharp MZ',
    'SNES/Super Famicom',
    'Solaris',
    'Spectravideo 3x8',
    'Thomson',
    'TI-8x',
    'TRS-80/CoCo',
    'Vectrex',
    'VIC 20',
    'Virtual Boy',
    'Wild',
    'Windows',
    'Wonderswan',
    'XBOX',
    'XBOX 360',
    'ZX Enhanced',
    'ZX Spectrum',
    'ZX-81']
|
marcgd/PouetDL
|
platforms.py
|
Python
|
mit
| 1,666
|
[
"Jaguar"
] |
7831eaec947a53697cd343625d9b3849ebad42c9524cdadf8261f718e029e505
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to
# submit large numbers of jobs on supercomputers. It provides a python interface to physical input,
# such as crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential
# programs. It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
# -*- coding: utf-8 -*-
from sys import stdin
from tempfile import NamedTemporaryFile
from py.path import local
from pylada.espresso import Pwscf, read_structure
def check_aluminum_functional(tmpdir, espresso):
    """Assert that *espresso* (a Pwscf functional read back from input)
    matches the reference aluminum input."""
    from quantities import atomic_mass_unit
    assert espresso.control.prefix == "al"
    assert espresso.control.outdir == str(tmpdir)
    assert espresso.control.pseudo_dir == str(tmpdir.join("pseudos"))
    # atomic_species is a private card, handled entirely by the functional
    assert not hasattr(espresso, "atomic_species")
    assert len(espresso.species) == 1
    assert "Al" in espresso.species
    assert espresso.species["Al"].pseudo == "Al.vbc.UPF"
    assert abs(espresso.species["Al"].mass - 26.98 * atomic_mass_unit) < 1e-8
    # NOTE(review): checks attribute "k_points" but then reads "kpoints";
    # presumably both names resolve to the same card on Pwscf -- confirm.
    assert hasattr(espresso, "k_points")
    assert espresso.kpoints.subtitle == "automatic"
    assert espresso.kpoints.value == "6 6 6 1 1 1"
def check_aluminum_structure(structure):
    """Verify the crystal structure read from the aluminum fixture."""
    from quantities import bohr_radius
    from numpy import allclose, array

    # A single aluminum atom at the origin.
    assert len(structure) == 1
    atom = structure[0]
    assert atom.type == "Al"
    assert allclose(atom.pos, [0e0, 0, 0])

    # FCC primitive cell, scaled to 7.5 bohr.
    expected_cell = 0.5 * array([[-1, 0, 1], [0, 1, 1], [-1, 1, 0]], dtype="float64").transpose()
    assert allclose(structure.cell, expected_cell)
    assert abs(structure.scale - 7.5 * bohr_radius) < 1e-8
# Script entry point: read a pwscf input deck from stdin, parse it, and check
# it against the known aluminum fixture values verified above.
pwscf = Pwscf()
with NamedTemporaryFile(mode="w") as file:
    # Pwscf.read/read_structure need a real path, so stdin is copied to disk.
    file.write(stdin.read())
    file.flush()
    pwscf.read(file.name)
    structure = read_structure(file.name)
    check_aluminum_functional(local(), pwscf)
    check_aluminum_structure(structure)
    # presumably raises/asserts when pseudo files are missing -- TODO confirm
    pwscf.pseudos_do_exist(structure)
    print("JOB IS DONE!")
|
pylada/pylada-light
|
tests/espresso/dummy_pwscf.py
|
Python
|
gpl-3.0
| 2,799
|
[
"CRYSTAL",
"ESPResSo",
"VASP"
] |
194c6af8f9ac494c1d8424aafb4b77e02e6e480a18dffe0b54e866dd60692c32
|
import gzip
import numpy
from orbkit import grid
def cube_creator(data,filename,geo_info,geo_spec,comments='',labels=None,**kwargs):
  '''Creates a plain text Gaussian cube file.

  **Parameters:**

  data : numpy.ndarray, shape=N
    Contains the output data.
  filename : str
    Contains the base name of the output file.
  geo_info, geo_spec :
    See :ref:`Central Variables` for details.
  comments : str, optional
    Specifies the second (comment) line of the cube file.
  labels : bool, str, or list of int, optional
    If ``True`` or ``'auto'``, label each data set by its index; otherwise a
    list of integer labels, one per data set.
  '''
  # Shape shall be (Ndrv,Ndata,Nx,Ny,Nz) or (Ndrv,Ndata,Nxyz)
  data = numpy.array(data)
  dims = 3
  if data.ndim < dims:
    raise AssertionError('data.ndim < ndim of grid')
  elif data.ndim == dims: # 3d data set -> promote to a one-element batch
    data = data[numpy.newaxis]
  elif data.ndim > dims + 1:
    raise AssertionError('data.ndim > (ndim of grid) +2')
  if labels is not None:
    if labels in (True, 'auto'):
      labels = list(range(len(data)))
    assert len(labels) == len(data)
    # BUG FIX: map() is lazy in Python 3, so the former ``labels = map(int,
    # labels)`` never raised ValueError here and the check was skipped.
    # Materialize the conversion eagerly instead.
    try:
      labels = [int(i) for i in labels]
    except (ValueError, TypeError):
      raise AssertionError('labels has to be list of integers.')
  assert data.shape[1:] == tuple(grid.N_), 'The grid does not fit the data.'
  if not any(filename.endswith(ext)
             for ext in ['cube', 'cb', 'cube.gz', 'cb.gz']):
    filename += '.cube'
  # Collect output pieces in a list and join once; repeated ``str +=`` is
  # quadratic for large grids.
  out = ['orbkit calculation\n']
  out.append(' %(f)s\n' % {'f': comments})
  # How many atoms; a negative count signals the optional label record below.
  out.append(('%(at)d' % {'at': (-1)**(labels is not None)*len(geo_info)}).rjust(5))
  # Minima of the grid
  for ii in range(3):
    out.append(('%(min)0.6f' % {'min': grid.min_[ii]}).rjust(12))
  # Number of data points per grid point
  if len(data) > 1:
    out.append(('%d' % len(data)).rjust(12))
  # Per-axis point counts and (diagonal) step vectors.
  for ii in range(3):
    out.append('\n')
    out.append(('%(N)d' % {'N': grid.N_[ii]}).rjust(5))
    for jj in range(3):
      step = grid.delta_[ii] if jj == ii else 0
      out.append(('%(dr)0.6f' % {'dr': step}).rjust(12))
  out.append('\n')
  # Atomic number, charge, and position of each atom.
  for ii in range(len(geo_info)):
    out.append(('%(N)d' % {'N': round(float(geo_info[ii][2]))}).rjust(5))
    out.append(('%(ch)0.6f' % {'ch': float(geo_info[ii][1])}).rjust(12))
    for jj in range(3):
      out.append(('%(r)0.6f' % {'r': geo_spec[ii][jj]}).rjust(12))
    out.append('\n')
  # If exist, write labels
  if labels is not None:
    out.append(('%(N)d' % {'N': len(data)}).rjust(5))
    c = 0
    for i,j in enumerate(labels):
      c += 1
      out.append(str(j).rjust(5))
      if (c % 9 == 8):
        out.append('\n')
    out.append('\n')
  # Write data, six values per line.
  for rr in range(len(grid.x)):
    for ss in range(len(grid.y)):
      c = 0
      for tt in range(len(grid.z)):
        for dd in data[:,rr,ss,tt]:
          out.append(('%(data).5E' % {'data': dd}).rjust(13))
          if (c % 6 == 5):
            out.append('\n')
          c += 1
      out.append('\n')
  string = ''.join(out)
  if filename.endswith('gz'):
    with gzip.open(filename, 'wb') as f:
      f.write(string.encode('utf-8'))
  else:
    with open(filename, 'w') as f:
      f.write(string)
|
orbkit/orbkit
|
orbkit/output/cube.py
|
Python
|
lgpl-3.0
| 3,130
|
[
"Gaussian"
] |
ab6a8390109081566e5df10cbeae0267f3dfe964f446ae190dd03452a060f197
|
"Demonstrates molecular dynamics with constant energy."
from ase.calculators.emt import EMT
from ase.lattice.cubic import FaceCenteredCubic
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.md.verlet import VelocityVerlet
from ase import units
# Use Asap for a huge performance increase if it is installed
useAsap = True
if useAsap:
from asap3 import EMT
size = 10
else:
size = 3
# Set up a crystal
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], symbol="Cu",
size=(size,size,size), pbc=True)
# Describe the interatomic interactions with the Effective Medium Theory
atoms.set_calculator(EMT())
# Set the momenta corresponding to T=300K
MaxwellBoltzmannDistribution(atoms, 300*units.kB)
# We want to run MD with constant energy using the VelocityVerlet algorithm.
dyn = VelocityVerlet(atoms, 5*units.fs) # 5 fs time step.
#Function to print the potential, kinetic and total energy.
def printenergy(a=atoms): #store a reference to atoms in the definition.
epot = a.get_potential_energy() / len(a)
ekin = a.get_kinetic_energy() / len(a)
print ("Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) Etot = %.3feV" %
(epot, ekin, ekin/(1.5*units.kB), epot+ekin))
# Now run the dynamics
dyn.attach(printenergy, interval=10)
printenergy()
dyn.run(200)
|
grhawk/ASE
|
tools/doc/tutorials/md/moldyn2.py
|
Python
|
gpl-2.0
| 1,373
|
[
"ASE",
"CRYSTAL"
] |
2facb2ffa5a3ac692ebc8fb32d6add6f40d82ed1507e14b62e65c748cabe6ecd
|
import numpy as np
from numpy.random import seed
class AdalineGradientDescent(object):
    """
    ADAptive LInear NEuron classifier trained with batch gradient descent.

    Parameters
    ------------
    _learning_rate : float
        Learning rate (between 0.0 and 1.0)
    _max_iterations : int
        Maximum number of iterations over the training dataset.

    Attributes
    -----------
    __weights__ : 1d-array
        Weights after fitting; index 0 holds the bias term.
    costs_ : list
        Sum-of-squared-errors cost in each epoch.
    """

    def __init__(self, _learning_rate=0.01, _max_iterations=100):
        """Store the hyper-parameters; weights are set when fit() is called."""
        self._learning_rate = _learning_rate
        self._max_iterations = _max_iterations
        self.__weights__ = np.zeros(0)
        self.costs_ = []

    def fit(self, training_data, training_label):
        """
        Fit the model to training_data using training_label.

        Parameters
        ----------
        training_data : {array-like}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        training_label : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self, with __weights__ fitted and costs_ recording one entry per epoch.
        """
        # Uniform random init in [-1, 1); slot 0 is the bias.
        self.__weights__ = np.random.rand(1 + training_data.shape[1]) * 2 - 1
        for _ in range(self._max_iterations):
            # FIX: the original computed __net_input__ and discarded it, then
            # recomputed it via __activation__; compute the output once.
            # (The identity activation makes output == net input.)
            output = self.__activation__(training_data)
            error = training_label - output
            # Batch update: whole-dataset gradient of the SSE cost.
            self.__weights__[1:] += self._learning_rate * training_data.T.dot(error)
            self.__weights__[0] += self._learning_rate * error.sum()
            cost = (error**2).sum() / 2.0
            self.costs_.append(cost)
        return self

    def __net_input__(self, _x_):
        """Weighted sum of inputs plus bias (the raw net input)."""
        return np.dot(_x_, self.__weights__[1:]) + self.__weights__[0]

    def __activation__(self, _x_):
        """Identity activation: Adaline learns on the raw net input."""
        return self.__net_input__(_x_)

    def predict(self, test_data):
        """Return the class label: 1 if net input >= 0, else -1."""
        return np.where(self.__net_input__(test_data) >= 0, 1, -1)
class AdalineStochasticGradientDescent(object):
    """
    ADAptive LInear NEuron classifier trained with stochastic gradient descent.

    Parameters
    ------------
    _learning_rate : float
        Learning rate (between 0.0 and 1.0)
    _max_iterations : int
        Maximum number of iterations over the training dataset.
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent cycles.
    random_state : int (default: None)
        Set random state for shuffling and initializing the weights.

    Attributes
    -----------
    __weights__ : 1d-array
        Weights after fitting; index 0 holds the bias term.
    costs_ : list
        Average per-sample cost in each epoch.
    """

    def __init__(self, _learning_rate=0.01, _max_iterations=100, shuffle=True, random_state=None):
        """Store hyper-parameters; weights are created lazily on first fit."""
        self._learning_rate = _learning_rate
        self._max_iterations = _max_iterations
        self.__weights__ = np.zeros(0)
        self.costs_ = []
        self.shuffle_ = shuffle
        self.w_initialized = False
        if random_state:
            seed(random_state)

    def fit(self, training_data, training_label):
        """
        Fit the model to training_data using training_label.

        Parameters
        ----------
        training_data : {array-like}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        training_label : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self, with __weights__ fitted and costs_ recording one entry per epoch.
        """
        self._initialize_weights(training_data.shape[1])
        for _ in range(self._max_iterations):
            # BUG FIX: with shuffle=False the original left X and y unbound
            # and fit() crashed with a NameError.
            if self.shuffle_:
                X, y = self._shuffle(training_data, training_label)
            else:
                X, y = training_data, training_label
            cost = []
            for x_i, target in zip(X, y):
                cost.append(self._update_weights(x_i, target))
            avg_cost = sum(cost) / len(y)
            self.costs_.append(avg_cost)
        return self

    def partial_fit(self, training_data, training_label):
        """Fit training data without reinitializing the weights."""
        if not self.w_initialized:
            self._initialize_weights(training_data.shape[1])
        # If there is more than one sample, update the weights incrementally
        # for each of them; otherwise apply a single update.
        if training_label.ravel().shape[0] > 1:
            for x_i, target_i in zip(training_data, training_label):
                self._update_weights(x_i, target_i)
        else:
            self._update_weights(training_data, training_label)
        return self

    def _initialize_weights(self, shape_):
        """Initialize 1 + shape_ weights uniformly at random in [-1, 1)."""
        self.__weights__ = np.random.rand(1 + shape_) * 2 - 1
        self.w_initialized = True

    def _shuffle(self, _x_, _y_):
        """Return training data and labels in a common random permutation."""
        _r_ = np.random.permutation(len(_y_))
        return _x_[_r_], _y_[_r_]

    def _update_weights(self, _x_i_, _target_i_):
        """Apply the Adaline learning rule for one sample; return its cost."""
        output = self.__net_input__(_x_i_)
        error = (_target_i_ - output)
        self.__weights__[1:] += self._learning_rate * _x_i_.dot(error)
        self.__weights__[0] += self._learning_rate * error
        cost = 0.5 * error**2
        return cost

    def __net_input__(self, _x_):
        """Weighted sum of inputs plus bias (the raw net input)."""
        return np.dot(_x_, self.__weights__[1:]) + self.__weights__[0]

    def __activation__(self, _x_):
        """Identity activation: Adaline learns on the raw net input."""
        return self.__net_input__(_x_)

    def predict(self, test_data):
        """Return the class label: 1 if net input >= 0, else -1."""
        return np.where(self.__net_input__(test_data) >= 0, 1, -1)
|
nitish-tripathi/Simplery
|
ANN/Adaline.py
|
Python
|
mit
| 6,072
|
[
"NEURON"
] |
3d966a908014181c12e5a43ccff8604426fb7774145e01d524ef816ca80aa9ed
|
"""
##############################################################################################
Copyright(C) 2017 Shane Macaulay smacaulay@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program.If not, see<https://www.gnu.org/licenses/>.
##############################################################################################
V.3 CHANGELOG
* Finally Fast Forensics edition(tm)!!
* Improved client side SSL cert checking of server (secure++)
* Faster than doing a dlldump to you're local disk
* Better than doing a dlldump since you get something usefull
* Smarter than doign so since you should have 99/100 of the junk you don't care about out of you're way
* Added heaps of colors!
* Kernel drivers
* Removed some of the slower servers in the back-end to allow for high speed validations (no slow!)
+ Tuned retries and async socket I/O with gevents
* Improved error responses and overall feedback to user
* Metrics and information regarding verification and a few new command line parameters for extra details
+ Block offset that has the modified code
+ Special dump mode for just modified code
* Needful dependencies in the txt file
To use this with volatility place this .py anywhere, ensure you have volatility working.
For example the command line below will simply run the invterojithash against the input memory image
*********************************************************************************************************
python vol.py --plugins=[path-to-folder-where-this-code-is] -f "/path/to/temp/10 ENT 1607-Snapshot1.vmem"
--profile=Win10x64_14393 invterojithash -x
*********************************************************************************************************
I'll be looking to make updates feel free to give me some issues through "github.com/K2"
OPERATIONS: The client script you run perform's a basic sha256 of whatever is in memory with no regard
for relocations or anything. Very simple. All of the heavy lifting magic is done on the server time
on demand integrity hashes are computed based on you're client's described virtual address.
i.e. You say kernel32 is loaded at address X. The server responds and adjusts it's hash database in real time
so there is very little work on the client side.
I haven't written the PE header fixes yet for this code as it's currently done for the PowerShell, in effect
there are so many changes for the PE header, it's like a shotgun blast of bits that need adjusting.
You can setup you're own JIT hash server and host local to perform integrity checks.
// TODO: Add kernel modules/space
// TODO: What about if you submit the modified pages to the server I'll report back a diff view?
Enjoy!
################################################################################################
"""
from gevent import monkey
monkey.patch_all()
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
import volatility.addrspace
import volatility.commands as commands
import volatility.utils as utils
import volatility.win32.tasks as tasks
import volatility.win32.modules as modules
import os, time, base64, sys, threading
import json, urllib, urllib2, urllib3
import gevent, struct, retry, traceback, colorama
import ntpath, certifi
from os import environ
from retry import retry
from gevent import monkey
from struct import unpack
from gevent.pool import Pool
from gevent.queue import Queue
from Crypto.Hash import SHA256
from traceback import print_tb, print_exc
from urllib2 import HTTPError, URLError
from gevent.event import AsyncResult
from colorama import init, AnsiToWin32
from colored import fg, bg, attr
from tqdm import tqdm
from multiprocessing import freeze_support, RLock
class inVteroJitHash(commands.Command):
'''
Use the public free inVtero JIT Page hash server to respond with integrity information.
The JitPageHash service endpoint is running with the json2pdb job.]
"https://pdb2json.azurewebsites.net/api/PageHash/x"
Below is a sample "python.requests" request/response that demonstrates the expected functionality.
The response information is very terse so it's a good idea to maintain some meta-information
across the request since it's pumped into the data render_text method.
---- snip -- snip ---- ( below is copy/pasteable into a python shell to test ) ---- snip -- snip ----
import requests
req_json = {
"HdrHash": "QUTB1TPisyVGMq0do/CGeQb5EKwYHt/vvrMHcKNIUR8=",
"TimeDateStamp": 3474455660,
"AllocationBase": 140731484733440,
"BaseAddress": 140731484737536,
"ImageSize": 1331200,
"ModuleName": "ole32.dll",
"HashSet":[
{
"Address": 140731484798976,
"Hash": "+REyeLCxvwPgNJphE6ubeQVhdg4REDAkebQccTRLYL8="
},
{
"Address": 140731484803072,
"Hash": "xQJiKrNHRW739lDgjA+/1VN1P3VSRM5Ag6OHPFG6594="
},
{
"Address": 140731484807168,
"Hash": "ry9yVHhDQohYTfte0A4iTmNY8gDDfKUmFpxsWF67rtA="
},
{
"Address": 140731484811264,
"Hash": "bk31Su+2qFGhZ8PLN+fMLDy2SqPDMElmj0EZA62LX1c="
},
{
"Address": 140731484815360,
"Hash": "0RyIKfVFnxkhDSpxgzPYx2azGg59ht4TbVr66IXhVp4="
}
]
}
requests.post("https://pdb2json.azurewebsites.net/api/PageHash/x", json=req_json).json()
---- snip -- snip ---- the lines below are the output of the above service call ---- snip -- snip ----
[{u'Address': 140731484733440L, u'HashCheckEquivalant': True},
{u'Address': 140731484798976L, u'HashCheckEquivalant': True},
{u'Address': 140731484803072L, u'HashCheckEquivalant': True},
{u'Address': 140731484807168L, u'HashCheckEquivalant': True},
{u'Address': 140731484811264L, u'HashCheckEquivalant': True},
{u'Address': 140731484815360L, u'HashCheckEquivalant': True}]
'''
# Class-level state shared by calculate()/pozt()/output()/render_text().
#JITHashServer = "http://localhost:7071/api/PageHash/x"
JITHashServer = "https://pdb2json.azurewebsites.net/api/PageHash/x"
# Tune this if you want to hit the server harder
pool = Pool()                 # gevent pool for the async POSTs
greenlits = []                # NOTE(review): appears unused in this file
total_miss = {}               # moduleName -> (pages checked, pages validated)
null_hash = None              # base64 SHA-256 of an all-zero page, set in calculate()
stream = None                 # AnsiToWin32 stream on Windows, see __init__
VBValidated = 0               # running count of pages that matched the server hash
ScannedMods = 0               # running count of modules processed by output()
VirtualBlocksChecked = 0      # running count of pages sent for verification
StartTime = time.time()       # reset in calculate() once warm-up is done
MissList = []                 # NOTE(review): appears unused in this file
TotalProgress = []            # NOTE(review): appears unused in this file
TotBytesValidated = 0
TotalLastN = 0
TotPercent = 0.0
TotalBar = None               # tqdm overall-progress bar, created in calculate()
logg = None                   # failure-detail log file, opened in calculate()
DumpFolder = None             # optional folder for dumping unverifiable blocks
headers = {'Content-Type':'application/json', 'Accept':'text/plain'}
# Pooled HTTPS connections with certificate verification against certifi's CA bundle.
http = urllib3.PoolManager(maxsize=512, block=True, headers = headers, cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
def __init__(self, config, *args):
    """Register the plugin's command-line options and set up console colors.

    config : volatility configuration object; gains four options:
    SuperVerbose, ExtraTotals, DumpFolder and FailFile.
    """
    # no color on Windows yet, this keeps the output from looking insane with all the ANSI
    if os.name == 'nt':
        self.stream = AnsiToWin32(sys.stdout, convert=True).stream
        init()
        #init(convert=True)
    commands.Command.__init__(self, config, *args)
    config.add_option('SuperVerbose', short_option='s', help='Display per page validation results.', action='store_true', default=False)
    config.add_option('ExtraTotals', short_option='x', help='List of all misses per-module.', action='store_true', default=False)
    config.add_option('DumpFolder', short_option='D', help='Dump the failed blocks to a specified folder', default=None)
    config.add_option('FailFile', short_option='F', help='Output file containing detailed information about unverifiable memory', default='FailedValidation.txt')
    # BUG FIX: the original used "os.name is not 'nt'" -- identity comparison
    # against a string literal only works through CPython interning. Use !=.
    if os.name != 'nt':
        os.system('setterm -cursor off')   # hide the cursor for the tqdm bars
    else:
        os.system('color 0f')
# This method is a huge bit of code that should of been in volatility
# Anyhow, NX bit's need to be checked at every layer of the page table.
# NX also _IS_ supported on IA32 PAE here... it's a real thing.
@classmethod
def is_nxd(cls, vaddr, addr_space):
    """
    Is the page for a given virtualaddress to be restricted from execution or not present?
    The return value True is something we are ignoring.  False means it's present and unrestricted.

    Parameters
    ----------
    vaddr : long
        A virtual address from IA32PAE or AMD64 compatible address spaces
    addr_space : Addrspace
        An instance of the address space that contains our page table

    Returns
    -------
    Boolean
        True means that the page at address vaddr is ignored based on NX or missing by means of not having the "valid" bit set in the page table
    """
    vaddr = long(vaddr)
    # Walk the paging hierarchy by hand, checking present/NX at every level.
    if isinstance(addr_space, volatility.plugins.addrspaces.amd64.AMD64PagedMemory) is False:
        # IA32 PAE path: 3-level walk (PDPE -> PDE -> PTE).
        pdpe = addr_space.get_pdpi(vaddr)
        if not addr_space.entry_present(pdpe):
            return True
        pgd = addr_space.get_pgd(vaddr, pdpe)
        if not addr_space.entry_present(pgd):
            return True
        if addr_space.page_size_flag(pgd):
            # Large page: the PDE itself carries the NX bit.
            return cls.is_nx(pgd)
        else:
            pte = addr_space.get_pte(vaddr, pgd)
            if not addr_space.entry_present(pte):
                return True
            return cls.is_nx(pte)
    else:
        # AMD64 path: 4-level walk (PML4E -> PDPE -> PDE -> PTE).
        pml4e = addr_space.get_pml4e(vaddr)
        if not addr_space.entry_present(pml4e):
            return True
        pdpe = addr_space.get_pdpi(vaddr, pml4e)
        if not addr_space.entry_present(pdpe):
            return True
        if addr_space.page_size_flag(pdpe):
            # 1 GiB page mapped directly at the PDPE level.
            return cls.is_nx(pdpe)
        pgd = addr_space.get_pgd(vaddr, pdpe)
        if addr_space.entry_present(pgd):
            if addr_space.page_size_flag(pgd):
                return cls.is_nx(pgd)
            else:
                pte = addr_space.get_pte(vaddr, pgd)
                if not addr_space.entry_present(pte):
                    return True
                return cls.is_nx(pte)
        return True
    # NOTE(review): unreachable -- both branches above always return before
    # this point, so unsupported address spaces are never actually reported.
    raise ValueError('The underlying address space does not appear to be supported', type(addr_space), addr_space)
@staticmethod
def is_nx(entry):
    """
    Return if the most significant bit is set.
    The most significant bit represents the "NO EXECUTE" or "EXECUTION DISABLED" flag for IA32PAE and AMD64 ABI's

    Parameters
    ----------
    entry : long
        An entry from the page table.

    Returns
    -------
    The status of the NX/XD bit.
    """
    # Bit 63 of a page-table entry is the execute-disable flag.
    nx_flag = 1 << 63
    return (entry & nx_flag) != 0
def mod_get_ptes(self, mod, addr_space):
    """Yield (page_va, skip) for every 4 KiB page spanned by the module,
    where skip is True when the page is not present or marked no-execute."""
    page_va = mod.DllBase
    end_va = mod.DllBase + mod.SizeOfImage
    while page_va < end_va:
        yield page_va, self.is_nxd(page_va, addr_space)
        page_va += 4096
# return a sha256 from the input bytes, the server is only configured with SHA256
# since it's JIT hash in it's core, we can upgrade this at any time... Future Proof !
def HashPage(self, data):
    """Return the base64 SHA-256 digest of one page, or "NULL INPUT" for
    unusable data (None, non-buffer types, or an all-zero page)."""
    if data is None:
        return "NULL INPUT"
    try:
        # Cheap probe that data supports the buffer protocol before hashing.
        memoryview(data)
    except TypeError:
        return "NULL INPUT"
    sha = SHA256.new()
    sha.update(data)
    hashB64 = base64.b64encode(sha.digest())
    # BUG FIX: the original compared with "is", i.e. object identity, so the
    # all-zero-page filter (self.null_hash, computed in calculate()) never
    # matched a freshly built string. Compare by value instead.
    if hashB64 == self.null_hash:
        return "NULL INPUT"
    return hashB64
# if this is lagging you out dial back the tries/delay... i'm pretty aggressive here
# if this is lagging you out dial back the tries/delay... i'm pretty aggressive here
@retry(HTTPError, tries=16, delay=3, backoff=2)
def pozt(self, LocalMod):
    """POST one module's page-hash request (LocalMod["json"]) to the JIT hash
    server, store the raw response in LocalMod["resp"], and hand the result to
    self.output() for reporting. Retried on HTTPError by the decorator."""
    rvData = ""
    try:
        data = LocalMod["json"]
        dataEncoded = json.dumps(data)
        #req = self.http.request('POST', self.JITHashServer, body=dataEncoded)
        req = self.http.urlopen('POST', self.JITHashServer, headers=self.headers, body=dataEncoded)
        #response = self.http.urlopen(req)
        #rvData = req.data
    except HTTPError as inst:
        # 204 No Content: nothing to report for this module.
        # NOTE(review): the finally block still runs after this return and
        # dereferences req, which is unbound if urlopen() itself raised.
        if inst.code == 204:
            return rvData
    except:
        print("{}{}".format(fg("red"), "SERVER FAILED DESPITE MULTIPLE ATTEMPTS"))
        print("{}{}{}[{}]".format(fg("navajo_white_1"), "Exception ", fg("light_magenta"), str(sys.exc_info()[0])))
        for x in sys.exc_info():
            print("{}{}".format(fg("hot_pink_1b"), x))
    finally:
        # Funnel the response through an AsyncResult and report synchronously.
        a = AsyncResult()
        a.set(req.data)
        LocalMod["resp"] = a.get(block=True)
        req.release_conn()
        self.output(LocalMod)
    return LocalMod
# Volatility's contract defines this as the entry point for modules. Here we do all of our work and orchastrate our internal async/coroutines through
# the entire execution. The completion routine render_text is for a minimal amount of reporting.
def calculate(self):
    """Volatility entry point: walk every process (and the kernel, PID 4),
    hash each module's executable pages, and fan the requests out to the JIT
    hash server through the gevent pool. Reporting happens in output()."""
    self.DumpFolder = (self._config.DumpFolder or None)
    self.logg = open(self._config.FailFile, mode="w+", buffering=8192)
    self.logg.write("On Windows, use \"type [Filename]\" for best results (Win10) {} JIT hash log file\n".format(fg("cornflower_blue")))
    # get the null hash (at runtime in case a different hash is used etc..)
    null_page = bytearray(4096)
    self.null_hash = self.HashPage(null_page)
    addr_space = utils.load_as(self._config)
    # Non-PAE IA32 has no NX bit, so nothing can be meaningfully verified.
    # NOTE(review): raising a string is invalid in Python 3 and a TypeError
    # at runtime in Python 2 -- should raise an exception instance.
    if isinstance(addr_space, volatility.plugins.addrspaces.intel.IA32PagedMemory) and not isinstance(addr_space, volatility.plugins.addrspaces.intel.IA32PagedMemoryPae):
        raise "The memory model of this memory dump dates from the 1990's and does not support execute protection."
    outputJobs = None
    taski = 0
    taskCnt = 0
    tasklist = tasks.pslist(addr_space)
    # Count processes up front so the overall progress bar has a total.
    for _ in tasks.pslist(addr_space):
        taskCnt += 1
    # NOTE(review): attrs=["bold"] is an unused kwarg to str.format (ignored).
    print("{}{}{} [{}]{}".format(fg("chartreuse_1"), "pdb2json JIT PageHash calls under way... endpoint ", fg("hot_pink_1b"), self.JITHashServer, fg("sky_blue_1"), attrs=["bold"]))
    bformat = "{elapsed}{l_bar}{postfix}{bar}"
    # NOTE(review): total/position/mininterval/bar_format are passed to
    # str.format here instead of tqdm() -- probably a misplaced parenthesis.
    self.TotalBar = tqdm(desc="{}TotalProgress".format(fg("cornflower_blue"), total=taskCnt, position=0, mininterval=0.5, bar_format=bformat))
    # The timer is reset here since were not counting the coldstartup time
    self.StartTime = time.time()
    for task in tasklist:
        taski += 1
        proc_as = task.get_process_address_space()
        mods = []
        # Volatility workaround as there is not a consistant interface I know of
        # to handle AS the same way for kernel & user
        if task.UniqueProcessId == 4:
            mods = list(modules.lsmod(addr_space))
            proc_as = addr_space
        else:
            mods = list(task.get_load_modules())
        TaskName = "[" + task.ImageFileName + "-" + str(task.UniqueProcessId) + "]"
        taskBar = tqdm(desc=TaskName, total=len(mods), position=1, leave=False, mininterval=0.5, bar_format=bformat)
        p = dict({"Name":TaskName, "Task":task, "TaskBlockCount":0, "ModContext":[], "bar":taskBar})
        for mod in mods:
            #@ taskBar.set_postfix_str('{} modules'.format(len(mods), refresh=True)
            hashAddr = []
            hashVal = []
            # Hash every executable, present page of the module.
            for vpage, nx in self.mod_get_ptes(mod, proc_as):
                if(nx):
                    continue
                data = proc_as.read(vpage, 4096)
                # NOTE(review): "data is self.null_hash" compares a page buffer
                # to a hash string by identity -- always False; zero pages are
                # not actually skipped here.
                if data is None or data is self.null_hash:
                    continue
                hashAddr.append(str(vpage))
                hashVal.append(self.HashPage(data))
            # these statements are yet another workaround for volatility
            # for some unknown reason these data structures have never been written into Volatility...
            # of course you can acquire the timestamp by reading the nt_header/fileheader/etc but that data is
            # significantly lower quality given that it can be modified at any time.  The kernel data structure
            # remains valid unless the attacker kills the process etc... In any event (hah) since this value has never changed
            # I hard coded it here for simplicity.  Perhaps I should enforce always using it, will circle back 360 on that.. :O
            timevalue = mod.TimeDateStamp
            #this should only work for kernel space modules
            if timevalue == 0 and task.UniqueProcessId == 4:
                # Offset 0x9c into the kernel module entry -- hard-coded, see note above.
                timeLoc = self.to_int64(mod.v() + 0x9c)
                redInBytes = addr_space.read(timeLoc, 4)
                if redInBytes is not None and len(redInBytes) == 4:
                    timevalue = unpack("<L", redInBytes)[0]
            # Request payload matching the PageHash service schema (see class docstring).
            req_hdr = {
                "ModuleName": str(mod.FullDllName or ''),
                "ImageSize": str(mod.SizeOfImage),
                "BaseAddress": str(mod.DllBase),
                "AllocationBase": str(mod.DllBase),
                "TimeDateStamp": str(int(timevalue)),
                "HdrHash": self.HashPage(proc_as.read(mod.DllBase, 4096)),
                "HashSet": [{"Address": a, "Hash": h} for a, h in zip(hashAddr, hashVal)]
            }
            # NOTE(review): "is ''" identity comparison; and the dump file is
            # created even when DumpFolder is None (filename "None/...").
            if req_hdr["ModuleName"] is '':
                # Anonymous executable memory cannot be verified server-side; log and optionally dump it.
                self.logg.write("{}{}{}: Unable to scan anonymous executable memory. {:#x} length: {:#x}{}.\n".format(bg("black"), fg("yellow_2"), TaskName, mod.DllBase, mod.SizeOfImage, fg("cornflower_blue")))
                filename = "{}/{}-{:#x}".format(self.DumpFolder, TaskName, mod.DllBase)
                open(filename, 'w').close()
                for vpage in range(mod.DllBase, mod.DllBase + mod.SizeOfImage, 4096):
                    data = proc_as.read(vpage, 4096)
                    if self.DumpFolder is not None and data is not None:
                        with open(filename, 'ab') as block:
                            block.write(bytearray(data))
            else:
                # Hand the request to a pooled greenlet; pozt() posts it and reports.
                LocalMod = dict({"Module":mod, "Ctx":p, "ModBlockCount":hashAddr.count, "json":req_hdr, "AS":addr_space})
                p["TaskBlockCount"] = p["TaskBlockCount"] + len(hashAddr)
                taskBar.update(1)
                self.pool.spawn(self.pozt, LocalMod)
                #= [gevent.spawn(self.pozt, cx) for cx in p["ModContext"]]
                #gevent.wait(outputJobs)
        self.TotalBar.update(1)
# Ulong64 would be nice, this is a needed workaround
@staticmethod
def to_int64(n):
    """Kludge for 64bit unsigned type: reinterpret the low 64 bits of n as a
    signed two's-complement value."""
    masked = n & ((1 << 64) - 1)
    if masked >= (1 << 63):
        masked -= 1 << 64
    return masked
@staticmethod
def PercentToColor(Validated):
    """Map a validation percentage to an ANSI color: cooler colors for high
    assurance, hotter colors as the validated fraction drops."""
    # Each threshold that Validated falls below overrides the color, so the
    # final color corresponds to the lowest matching band.
    bands = (
        (100.0, "cornflower_blue"),
        (80.0, "light_sky_blue_3a"),
        (60.0, "yellow_2"),
        (40.0, "purple_1a"),
        (20.0, "deep_pink_4c"),
        (5.0, "red_1"),
    )
    level = fg("sky_blue_1")
    for bound, color_name in bands:
        if Validated < bound:
            level = fg(color_name)
    return level
# this method is really just a bunch of console I/O reporting on the service calls
def output(self, Local):
    """Output data in a nonstandard but fun and more appealing way.

    Called from pozt() with Local carrying the request ("json"), the raw
    server response ("resp"), the owning task context ("Ctx") and the kernel
    address space ("AS"). Updates the per-task and overall progress bars and
    logs/dumps any pages the server could not validate."""
    bar = Local["Ctx"]["bar"]
    try:
        addr_space = Local["AS"]
        task = Local["Ctx"]["Task"]
        req_hdr = Local["json"]
        r = Local["resp"]
        rj = None
        moduleName = ""
        if req_hdr.has_key("ModuleName"):
            moduleName = req_hdr["ModuleName"]
        info = "{}[{:<}]".format(fg("spring_green_2b"), ntpath.basename(moduleName))
        self.ScannedMods += 1
        ModBlksValidated = 0
        modMissedBlocks = []
        if r is not None:
            if len(r) < 1:
                return
            rj = json.loads(r)
            # Crude page count: one "{" per result entry, minus the wrapper.
            modPageCount = r.count("{") - 1
            if modPageCount == 0:
                modPageCount = 1
            self.VirtualBlocksChecked += modPageCount
        # parse the response in a structured way
        if rj is not None:
            for rarr in rj:
                if rarr["HashCheckEquivalant"] is True:
                    ModBlksValidated += 1
                    self.VBValidated += 1
                else:
                    modMissedBlocks.append(long(rarr["Address"]))
        # Accumulate per-module totals when -x was requested.
        if self._config.ExtraTotals is True:
            if not self.total_miss.has_key(moduleName):
                self.total_miss[moduleName] = (modPageCount, ModBlksValidated)
            else:
                currCnt = self.total_miss[moduleName]
                self.total_miss[moduleName] = (currCnt[0] + modPageCount, currCnt[1] + ModBlksValidated)
        validPct = float((ModBlksValidated * 100.0) / modPageCount)
        level = self.PercentToColor(validPct)
        # Single-page modules get muted greys instead of the percent palette.
        if modPageCount == 1:
            if ModBlksValidated == 1:
                level = fg("grey_19")
            if ModBlksValidated == 0:
                level = fg("grey_35")
        infoLine="{:<}{:>6x}/{}{:<6x}{:<}[{:<2.2f}%]{}{}".format(fg("light_steel_blue_1"), ModBlksValidated<<12, fg("white"), modPageCount<<12, level, validPct, fg("light_green"), info)
        if validPct < 100.0:
            TaskName = Local["Ctx"]["Name"]
            self.logg.writelines(("Failures detected: ", infoLine,"\t: ", TaskName, "\r\n", "BlockAddrs: "))
            #if self._config.SuperVerbose is True:
            for mb in modMissedBlocks:
                # by default skip headers
                if mb != req_hdr["BaseAddress"]:
                    self.logg.write("{:#14x} ".format(mb))
                    if self.DumpFolder is not None:
                        proc_as = task.get_process_address_space()
                        if task.UniqueProcessId == 4:
                            proc_as = addr_space
                        data = proc_as.read(mb, 4096)
                        if data is not None:
                            with open("{}/{}-{:#x}".format(self.DumpFolder, TaskName, mb), 'wb') as block:
                                block.write(bytearray(data))
            self.logg.write('\n')
        bar.set_postfix_str('{:<}'.format(infoLine))
        bar.update(1)
    except:
        # NOTE(review): bare except also swallows the NameError path when the
        # server response was empty -- failures only surface via the traceback.
        print_exc()
    #update less frequently put this back in
    #if self.TotalBar.n > self.TotalLastN:
    self.TotBytesValidated = self.VBValidated << 12
    self.TotalBytesChecked = self.VirtualBlocksChecked << 12
    self.TotPercent = (self.VBValidated * 100.0 / self.VirtualBlocksChecked)
    self.TotalLastN = self.TotalBar.n
    self.TotalBar.set_postfix_str("{:<}[{:<2.3f}%]{:}[{:,}]{}{}[{:,}]{}".format(self.PercentToColor(self.TotPercent), self.TotPercent, fg("white"), self.TotBytesValidated, fg("sky_blue_1"), "/", self.TotalBytesChecked, fg("light_green")))
def render_text(self, outfd, data):
    """Volatility completion hook: wait for outstanding greenlets, then print
    the run summary (totals, assurance percentage, throughput).

    Python 2 only -- uses print statements; note the "is"/"is not" string
    comparisons below rely on CPython literal interning."""
    if os.name is not 'nt':
        os.system('setterm -cursor on')
    print "{}{}".format(fg("hot_pink_1b"), "Join in progress of any outstanding async operations.")
    # NOTE(review): joinall() over a Pool -- presumably self.pool.join() was
    # intended; verify against the gevent API.
    gevent.joinall(self.pool)
    if self.VirtualBlocksChecked == 0:
        print ("{}{}".format(fg("yellow_2"), "error, nothing was processed"))
    else:
        RuntimeSeconds = int(time.time() - self.StartTime)
        print ("\r\n\r\n{}{}{}[{}]{}{}".format(fg("sky_blue_1"), "Run Time ", fg("light_green"), str(RuntimeSeconds), fg("sky_blue_1"), " seconds."))
        self.TotBytesValidated = self.VBValidated << 12
        self.TotalBytesChecked = self.VirtualBlocksChecked << 12
        self.TotPercent = (self.VBValidated * 100.0 / self.VirtualBlocksChecked)
        print ("{}{}{}[{:,}]{}{}".format(fg("sky_blue_1"), "A total of ", fg("light_green"), self.ScannedMods, fg("sky_blue_1"), " modules scanned."))
        print ("{}{}[{:,}]{}{}{}[{:,}]".format("Scanned Pages: ", fg("light_green"), self.VirtualBlocksChecked, fg("sky_blue_1"), ". Pages valid: ", fg("light_green"), self.VBValidated)),
        print (" {}[{:2.3f}%]{}{}{}[{:,}]{}{}{}[{:,}]".format(self.PercentToColor(self.TotPercent), self.TotPercent, fg("sky_blue_1"), " assurance.  Validated bytes: ", fg("light_green"),self.TotBytesValidated, fg("sky_blue_1"), "/", fg("light_green"), self.TotalBytesChecked))
        print ("{}{} {:,} {}".format(fg("white"), "Total I/O throughput:", self.TotalBytesChecked / RuntimeSeconds, "bytes per second."))
        # Per-module miss detail always goes to the log; echoed on -x.
        for key in self.total_miss:
            miss_info = "{}{} - {}".format(fg("hot_pink_1b"), key, self.total_miss[key])
            self.logg.writelines((miss_info, "\n"))
            if self._config.ExtraTotals is True:
                print (miss_info)
    if os.name is 'nt':
        os.system('color')
|
K2/Scripting
|
inVteroJitHash.py
|
Python
|
agpl-3.0
| 26,762
|
[
"BLAST"
] |
789a20dde08bb3b651f5632bdf2614f0f7c48ab05400279c837b17b831bacacf
|
from __future__ import print_function, division
import os
import numpy as np
from . import get_data_home
from .tools import sql_query
# SDSS spectral classes: index i corresponds to specClass code i in the CAS.
SPECCLASS = ['UNKNOWN', 'STAR', 'GALAXY', 'QSO',
             'HIZ_QSO', 'SKY', 'STAR_LATE', 'GAL_EM']
# Number of rows requested from the SDSS SQL server (SELECT TOP NOBJECTS).
NOBJECTS = 50000
# Record layout of the downloaded catalog: five magnitudes, the spectral
# class code, and the redshift with its error.
GAL_COLORS_DTYPE = [('u', float),
                    ('g', float),
                    ('r', float),
                    ('i', float),
                    ('z', float),
                    ('specClass', int),
                    ('redshift', float),
                    ('redshift_err', float)]
# Local cache file written into the data_home directory.
ARCHIVE_FILE = 'sdss_galaxy_colors.npy'
def fetch_sdss_galaxy_colors(data_home=None, download_if_missing=True):
    """Loader for SDSS galaxy colors.

    This function directly queries the sdss SQL database at
    http://cas.sdss.org/

    Parameters
    ----------
    data_home : optional, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/astroML_data' subfolders.
    download_if_missing : optional, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : recarray, shape = (NOBJECTS,)
        record array containing magnitudes and redshift for each galaxy
    """
    data_home = get_data_home(data_home)
    if not os.path.exists(data_home):
        os.makedirs(data_home)
    archive_file = os.path.join(data_home, ARCHIVE_FILE)

    # Serve from the local cache when available.
    if os.path.exists(archive_file):
        return np.load(archive_file)

    if not download_if_missing:
        raise IOError('data not present on disk. '
                      'set download_if_missing=True to download')

    # Build the CasJobs query: photometry joined to spectroscopy, with
    # quality cuts and spectral-class filtering.
    query_lines = [
        "SELECT TOP %i" % NOBJECTS,
        "  p.u, p.g, p.r, p.i, p.z, s.specClass, s.z, s.zerr",
        "FROM PhotoObj AS p",
        "  JOIN SpecObj AS s ON s.bestobjid = p.objid",
        "WHERE ",
        "  p.u BETWEEN 0 AND 19.6",
        "  AND p.g BETWEEN 0 AND 20",
        "  AND s.specClass > 1 -- not UNKNOWN or STAR",
        "  AND s.specClass <> 5 -- not SKY",
        "  AND s.specClass <> 6 -- not STAR_LATE",
    ]
    query_text = '\n'.join(query_lines)

    print("querying for %i objects" % NOBJECTS)
    print(query_text)
    output = sql_query(query_text)
    print("finished.")

    # Parse the CSV response into the typed record array and cache it.
    data = np.loadtxt(output, delimiter=',',
                      skiprows=1, dtype=GAL_COLORS_DTYPE)
    np.save(archive_file, data)
    return data
|
eramirem/astroML
|
astroML/datasets/sdss_galaxy_colors.py
|
Python
|
bsd-2-clause
| 2,595
|
[
"Galaxy"
] |
083d36dc6196342c279d8ea4f2a439f656e2ac5557c7fe661ab0a389961b9e7b
|
# --------------
# USER INSTRUCTIONS
#
# Now you will put everything together.
#
# First make sure that your sense and move functions
# work as expected for the test cases provided at the
# bottom of the previous two programming assignments.
# Once you are satisfied, copy your sense and move
# definitions into the robot class on this page, BUT
# now include noise.
#
# A good way to include noise in the sense step is to
# add Gaussian noise, centered at zero with variance
# of self.bearing_noise to each bearing. You can do this
# with the command random.gauss(0, self.bearing_noise)
#
# In the move step, you should make sure that your
# actual steering angle is chosen from a Gaussian
# distribution of steering angles. This distribution
# should be centered at the intended steering angle
# with variance of self.steering_noise.
#
# Feel free to use the included set_noise function.
#
# Please do not modify anything except where indicated
# below.
from math import *
import random
# --------
#
# some top level parameters
#
max_steering_angle = pi / 4.0 # You do not need to use this value, but keep in mind the limitations of a real car.
bearing_noise = 0.1 # Noise parameter: should be included in sense function.
steering_noise = 0.1 # Noise parameter: should be included in move function.
distance_noise = 5.0 # Noise parameter: should be included in move function.
tolerance_xy = 15.0 # Tolerance for localization in the x and y directions.
tolerance_orientation = 0.25 # Tolerance for orientation.
# --------
#
# the "world" has 4 landmarks.
# the robot's initial coordinates are somewhere in the square
# represented by the landmarks.
#
# NOTE: Landmark coordinates are given in (y, x) form and NOT
# in the traditional (x, y) format!
landmarks = [[0.0, 100.0], [0.0, 0.0], [100.0, 0.0], [100.0, 100.0]] # position of 4 landmarks in (y, x) format.
world_size = 100.0 # world is NOT cyclic. Robot is allowed to travel "out of bounds"
# ------------------------------------------------
#
# this is the robot class
#
class robot:
    """Bicycle-model robot used as the particle type for the particle filter.

    The robot lives in a `world_size` x `world_size` arena with four
    `landmarks` (module-level globals).  Motion is perturbed by Gaussian
    steering/distance noise; sensing returns noisy bearings to the landmarks.
    """

    def __init__(self, length=20.0):
        """Create a robot at a uniformly random pose with zero noise."""
        self.x = random.random() * world_size           # initial x position
        self.y = random.random() * world_size           # initial y position
        self.orientation = random.random() * 2.0 * pi   # initial orientation
        self.length = length                            # axle-to-axle length
        self.bearing_noise = 0.0    # initialize bearing noise to zero
        self.steering_noise = 0.0   # initialize steering noise to zero
        self.distance_noise = 0.0   # initialize distance noise to zero

    def set(self, new_x, new_y, new_orientation):
        """Place the robot at an exact pose; orientation must be in [0, 2*pi)."""
        if new_orientation < 0 or new_orientation >= 2 * pi:
            # Fixed: was the Python-2-only `raise ValueError, '...'` form;
            # the call form below is valid in both Python 2 and 3.
            raise ValueError('Orientation must be in [0..2pi]')
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)

    def set_noise(self, new_b_noise, new_s_noise, new_d_noise):
        """Set bearing/steering/distance noise (useful in particle filters)."""
        self.bearing_noise = float(new_b_noise)
        self.steering_noise = float(new_s_noise)
        self.distance_noise = float(new_d_noise)

    def measurement_prob(self, measurements):
        """Likelihood of the bearing `measurements` given this robot's pose."""
        # noise-free predicted bearings (sense(0) switches off sensing noise)
        predicted_measurements = self.sense(0)
        error = 1.0
        for i in range(len(measurements)):
            error_bearing = abs(measurements[i] - predicted_measurements[i])
            # wrap the angular error into [-pi, pi)
            error_bearing = (error_bearing + pi) % (2.0 * pi) - pi
            # accumulate the Gaussian likelihood of each bearing error
            error *= (exp(- (error_bearing ** 2) / (self.bearing_noise ** 2) / 2.0) /
                      sqrt(2.0 * pi * (self.bearing_noise ** 2)))
        return error

    def __repr__(self):
        """Readable pose, e.g. [x=93.476 y=75.186 orient=5.2664]."""
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y),
                                                str(self.orientation))

    def move(self, motion):
        """Advance one step of the bicycle model; motion = [steering, distance].

        Steering and distance are perturbed by Gaussian noise
        (self.steering_noise / self.distance_noise).  Returns a NEW robot;
        self is not modified.
        """
        d = motion[1]           # commanded distance
        a = motion[0]           # commanded front-wheel steering angle
        O = self.orientation    # current heading
        L = self.length         # axle-to-axle length
        res = robot()
        res.length = self.length
        res.bearing_noise = self.bearing_noise
        res.steering_noise = self.steering_noise
        res.distance_noise = self.distance_noise
        an = random.gauss(a, self.steering_noise)   # noisy steering
        dn = random.gauss(d, self.distance_noise)   # noisy distance
        B = (dn / L) * tan(an)  # turning angle of the car body
        if abs(B) < 0.001:      # nearly straight: avoid division by ~0 radius
            res.x = self.x + dn * cos(O)
            res.y = self.y + dn * sin(O)
            res.orientation = (O + B) % (2 * pi)
        else:
            R = dn / B                    # turning radius
            cx = self.x - R * sin(O)      # center of the turning circle
            cy = self.y + R * cos(O)
            res.orientation = (O + B) % (2 * pi)
            # NOTE(review): res.orientation is already O + B, so the argument
            # below is O + 2B; the textbook formula uses O + B once.  Kept
            # as-is because the published expected filter output ([x=93.476
            # y=75.186 orient=5.2664]) was produced with this code — confirm
            # before changing.
            res.x = cx + R * sin(res.orientation + B)
            res.y = cy - R * cos(res.orientation + B)
        return res

    def sense(self, add_noise=1):
        """Bearings to the four landmarks relative to heading, each in [0, 2*pi).

        Pass add_noise=0 (as measurement_prob does) to disable sensing noise.
        """
        Z = []
        for l in landmarks:
            x = l[1]    # landmarks are stored (y, x), NOT (x, y)
            y = l[0]
            dx = x - self.x
            dy = y - self.y
            b = atan2(dy, dx)
            rb = b - self.orientation
            if add_noise == 1:
                rb += random.gauss(0.0, self.bearing_noise)
            rb %= 2 * pi
            Z.append(rb)
        return Z
#[x=93.476 y=75.186 orient=5.2664]
############## ONLY ADD/MODIFY CODE ABOVE HERE ####################
# --------
#
# extract position from a particle set
#
def get_position(p):
    """Average the particles in *p* into a single [x, y, orientation] estimate.

    Orientation is cyclic, so each angle is first wrapped to within pi of the
    first particle's orientation before averaging (robust to the 0 == 2*pi
    wrap-around).
    """
    n = len(p)
    x_sum = sum(particle.x for particle in p)
    y_sum = sum(particle.y for particle in p)
    base = p[0].orientation
    orient_sum = 0.0
    for particle in p:
        # normalize this orientation into (base - pi, base + pi]
        orient_sum += ((particle.orientation - base + pi) % (2.0 * pi)) + base - pi
    return [x_sum / n, y_sum / n, orient_sum / n]
# --------
#
# The following code generates the measurements vector
# You can use it to develop your solution.
#
def generate_ground_truth(motions):
    """Drive a noisy robot through *motions*, collecting one sense() per step.

    Returns [final_robot, Z] where Z is the list of bearing measurements.
    """
    bot = robot()
    bot.set_noise(bearing_noise, steering_noise, distance_noise)
    measurements = []
    for motion in motions:
        bot = bot.move(motion)
        measurements.append(bot.sense())
    return [bot, measurements]
# --------
#
# The following code prints the measurements associated
# with generate_ground_truth
#
def print_measurements(Z):
    # Pretty-print a measurement list Z (one 4-bearing row per time step) as a
    # Python literal that can be pasted back into a test case.
    # NOTE(review): Python-2 print-statement syntax; the dump this was
    # recovered from collapsed whitespace, so continuation-line alignment is
    # best-effort.
    T = len(Z)
    print 'measurements = [[%.8s, %.8s, %.8s, %.8s],' % \
        (str(Z[0][0]), str(Z[0][1]), str(Z[0][2]), str(Z[0][3]))
    for t in range(1,T-1):
        print '                [%.8s, %.8s, %.8s, %.8s],' % \
            (str(Z[t][0]), str(Z[t][1]), str(Z[t][2]), str(Z[t][3]))
    print '                [%.8s, %.8s, %.8s, %.8s]]' % \
        (str(Z[T-1][0]), str(Z[T-1][1]), str(Z[T-1][2]), str(Z[T-1][3]))
# --------
#
# The following code checks to see if your particle filter
# localizes the robot to within the desired tolerances
# of the true position. The tolerances are defined at the top.
#
def check_output(final_robot, estimated_position):
    """Return True when the estimate is within the global tolerances of truth.

    Position error is checked against tolerance_xy on each axis and the
    orientation error (wrapped into [-pi, pi)) against tolerance_orientation.
    """
    dx = abs(final_robot.x - estimated_position[0])
    dy = abs(final_robot.y - estimated_position[1])
    dtheta = abs(final_robot.orientation - estimated_position[2])
    dtheta = (dtheta + pi) % (2.0 * pi) - pi  # wrap angular error
    if dx >= tolerance_xy or dy >= tolerance_xy:
        return False
    return dtheta < tolerance_orientation
def particle_filter(motions, measurements, N=500):
    """Localize the robot from motions + bearing measurements.

    Runs a standard predict / weight / resample loop over N particles and
    returns the averaged [x, y, orientation] estimate.  (I know it's
    tempting, but don't change N!)
    """
    # ---- create the initial particle cloud ----
    particles = []
    for _ in range(N):
        part = robot()
        part.set_noise(bearing_noise, steering_noise, distance_noise)
        particles.append(part)

    for t in range(len(motions)):
        # motion update (prediction)
        particles = [part.move(motions[t]) for part in particles]

        # measurement update: importance weight per particle
        weights = [part.measurement_prob(measurements[t]) for part in particles]

        # resampling wheel
        resampled = []
        index = int(random.random() * N)
        beta = 0.0
        w_max = max(weights)
        for _ in range(N):
            beta += random.random() * w_max  # * 2
            while beta > weights[index]:
                beta -= weights[index]
                index = (index + 1) % N
            resampled.append(particles[index])
        particles = resampled

    return get_position(particles)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out.
##
## You can test whether your particle filter works using the
## function check_output (see test case 2). We will be using a similar
## function. Note: Even for a well-implemented particle filter this
## function occasionally returns False. This is because a particle
## filter is a randomized algorithm. We will be testing your code
## multiple times. Make sure check_output returns True at least 80%
## of the time.
## --------
## TEST CASES:
##
##1) Calling the particle_filter function with the following
## motions and measurements should return a [x,y,orientation]
## vector near [x=93.476 y=75.186 orient=5.2664], that is, the
## robot's true location.
##
##motions = [[2. * pi / 10, 20.] for row in range(8)]
##measurements = [[4.746936, 3.859782, 3.045217, 2.045506],
## [3.510067, 2.916300, 2.146394, 1.598332],
## [2.972469, 2.407489, 1.588474, 1.611094],
## [1.906178, 1.193329, 0.619356, 0.807930],
## [1.352825, 0.662233, 0.144927, 0.799090],
## [0.856150, 0.214590, 5.651497, 1.062401],
## [0.194460, 5.660382, 4.761072, 2.471682],
## [5.717342, 4.736780, 3.909599, 2.342536]]
##print particle_filter(motions, measurements)
## 2) You can generate your own test cases by generating
## measurements using the generate_ground_truth function.
## It will print the robot's last location when calling it.
##
##
##number_of_iterations = 6
##motions = [[2. * pi / 20, 12.] for row in range(number_of_iterations)]
##
##x = generate_ground_truth(motions)
##final_robot = x[0]
##measurements = x[1]
##estimated_position = particle_filter(motions, measurements)
##print_measurements(measurements)
##print 'Ground truth: ', final_robot
##print 'Particle filter: ', estimated_position
##print 'Code check: ', check_output(final_robot, estimated_position)
|
JamesNewton/cs373ParticleFilter
|
hw3_6_ParticleFilter.py
|
Python
|
gpl-3.0
| 11,789
|
[
"Gaussian"
] |
a2ff7943ddc9821a414f7a2aabe6da9e4c39f99f64f707a6070a5db230609c81
|
#!/usr/bin/env python
# VTK regression test: render a 2x4 grid of volumes of the same SLC sphere,
# each with a different cropping-region mode on vtkVolumeTextureMapper3D
# (compressed textures enabled).  Python 2 syntax (print statement below).
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Simple volume rendering example.
reader = vtk.vtkSLCReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/sphere.slc")
# Create transfer functions for opacity and color
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(0, 0.0)
opacityTransferFunction.AddPoint(30, 0.0)
opacityTransferFunction.AddPoint(80, 0.5)
opacityTransferFunction.AddPoint(255, 0.5)
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(64.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(128.0, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(192.0, 0.0, 1.0, 0.0)
colorTransferFunction.AddRGBPoint(255.0, 0.0, 0.2, 0.0)
# Create properties, mappers, volume actors, and ray cast function
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(600, 300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(0.1, 0.2, 0.4)
renWin.Render()
# Build the 2x4 grid.  exec/eval synthesize numbered variables
# (volumeMapper_i_j, volume_i_j, userMatrix_i_j) to mimic the Tcl original.
i = 0
while i < 2:
    j = 0
    while j < 4:
        idx = str(i) + "_" + str(j)
        exec("volumeMapper_" + idx + " = vtk.vtkVolumeTextureMapper3D()")
        eval("volumeMapper_" + idx).SetInputConnection(reader.GetOutputPort())
        eval("volumeMapper_" + idx).SetSampleDistance(0.25)
        eval("volumeMapper_" + idx).CroppingOn()
        eval("volumeMapper_" + idx).SetUseCompressedTexture(1)
        eval("volumeMapper_" + idx).SetCroppingRegionPlanes(
            17, 33, 17, 33, 17, 33)
        exec("volume_" + idx + " = vtk.vtkVolume()")
        eval("volume_" + idx).SetMapper(eval("volumeMapper_" + idx))
        eval("volume_" + idx).SetProperty(volumeProperty)
        # Per-cell transform: rotate for variety, then translate into a grid.
        exec("userMatrix_" + idx + " = vtk.vtkTransform()")
        eval("userMatrix_" + idx).PostMultiply()
        eval("userMatrix_" + idx).Identity()
        eval("userMatrix_" + idx).Translate(-25, -25, -25)
        if (i == 0):
            eval("userMatrix_" + idx).RotateX(j * 90 + 20)
            eval("userMatrix_" + idx).RotateY(20)
        else:
            eval("userMatrix_" + idx).RotateX(20)
            eval("userMatrix_" + idx).RotateY(j * 90 + 20)
        eval("userMatrix_" + idx).Translate(j * 55 + 25, i * 55 + 25, 0)
        eval("volume_" + idx).SetUserTransform(eval("userMatrix_" + idx))
        ren1.AddViewProp(eval("volume_" + idx))
        j += 1
    i += 1
# One cropping mode per grid cell.
volumeMapper_0_0.SetCroppingRegionFlagsToSubVolume()
volumeMapper_0_1.SetCroppingRegionFlagsToCross()
volumeMapper_0_2.SetCroppingRegionFlagsToInvertedCross()
volumeMapper_0_3.SetCroppingRegionFlags(24600)
volumeMapper_1_0.SetCroppingRegionFlagsToFence()
volumeMapper_1_1.SetCroppingRegionFlagsToInvertedFence()
volumeMapper_1_2.SetCroppingRegionFlags(1)
volumeMapper_1_3.SetCroppingRegionFlags(67117057)
# Back-to-front sorting is required for correct translucent compositing.
ren1.GetCullers().InitTraversal()
culler = ren1.GetCullers().GetNextItem()
culler.SetSortingStyleToBackToFront()
valid = volumeMapper_0_0.IsRenderSupported(volumeProperty, ren1)
if (valid == 0):
    # NOTE(review): `sys` is never imported in this file — this exit path
    # would raise NameError if taken.
    print "Required Extensions Not Supported"
    sys.exit(0)
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(3.0)
renWin.Render()
def TkCheckAbort(object_binding, event_name):
    """Abort the in-progress render when window events are pending."""
    if renWin.GetEventPending() != 0:
        renWin.SetAbortRender(1)
iren.Initialize()  # prepare the interactor; under the test harness no event loop runs
#iren.Start()
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Rendering/Volume/Testing/Python/volTM3DCompressedCropRegions.py
|
Python
|
mit
| 3,759
|
[
"VTK"
] |
9feec99f8c2ae07154de92b5b0406025d16b051c1cdcc99e7a14adfcf28cd580
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@file make_cosmos_cfimage.py Makes a high SNR estimate of noise correlation function in F814W
COSMOS science stacks.
Uses a pickled list of cutout images from empty regions of noise from unrotated coadds in the COSMOS
F814W data, stored in the file NOISEIMFILE (see below). The noise images were cut from the images:
acs_I_095921+0228_unrot_sci_20.fits
acs_I_100210+0246_unrot_sci_20.fits
acs_I_100236+0209_unrot_sci_20.fits
acs_I_100249+0209_unrot_sci_20.fits
acs_I_100303+0152_unrot_sci_20.fits
provided by Alexie Leauthaud.
This list consistes of 92 NumPy arrays of various sizes and shapes, all containing no discernable
objects, visually selected from the science data.
The pickled list of objects is stored at the GREAT3 Dropbox account as it is ~90MB in size, in a
non-public folder. This folder will be happily shared with anyone who is interested, however, so
please just email barnaby.t.p.rowe@gmail.com to request sharing.
The output of stacking and averaging the correlation function from the 92 individual noise fields is
saved to CFIMFILE (see below), and some illustrative plots in linear and log space are generated in
PNG format.
"""
import os
import cPickle
import numpy as np
import galsim
# Subtract off the mean for each field explicitly (bg subtraction never perfect)?
# There does seem to be a consistent positive bg around giving a constant CF of 2.4e-7 (~2% of peak)
# that we might want to remove. See the Pull Request #366 on GalSim's Github site.
SUBTRACT_MEAN=True
NOISEIMFILE = "acs_I_unrot_sci_20_noisearrays.pkl" # Input pickled list filename
if SUBTRACT_MEAN:
    CFIMFILE = "acs_I_unrot_sci_20_cf_subtracted.fits" # Output image of the correlation function
else:
    CFIMFILE = "acs_I_unrot_sci_20_cf_unsubtracted.fits" # Output image of the correlation function
CFPLOTFILE = "acs_I_unrot_sci_20_cf.png" # Plot (linear) of the output CF
CFLOGPLOTFILE = "acs_I_unrot_sci_20_log10cf.png" # Plot (log) of the output CF
NPIX = 81 # Make an image of the final correlation
# function that is NPIX by NPIX
if not os.path.isfile(CFIMFILE): # If the CFIMFILE already exists skip straight through to the plots
    # Read in the pickled images (NOTE: cPickle is Python-2-only)
    noiseims = cPickle.load(open(NOISEIMFILE, 'rb'))
    # Loop through the images and sum the correlation functions
    hst_ncf = None
    bd = galsim.BaseDeviate(12345) # Seed is basically unimportant here
    for noiseim in noiseims:
        noiseim = noiseim.astype(np.float64)
        if hst_ncf is None:
            # Initialize the HST noise correlation function using the first image
            hst_ncf = galsim.CorrelatedNoise(
                bd, galsim.ImageViewD(noiseim), correct_periodicity=True,
                subtract_mean=SUBTRACT_MEAN)
        else:
            hst_ncf += galsim.CorrelatedNoise(
                bd, galsim.ImageViewD(noiseim), correct_periodicity=True,
                subtract_mean=SUBTRACT_MEAN)
    # Average the summed correlation functions over the 92 noise fields
    hst_ncf /= float(len(noiseims))
    # Draw and plot an output image of the resulting correlation function
    cfimage = galsim.ImageD(NPIX, NPIX)
    hst_ncf.draw(cfimage, dx=1.)
    # Save this to the output filename specified in the script header
    cfimage.write(CFIMFILE)
else:
    cfimage = galsim.fits.read(CFIMFILE)
# Then make nice plots: linear scale first, then log10 (with a small floor
# added so zero/negative pixels don't blow up the log).
import matplotlib.pyplot as plt
plt.clf()
plt.pcolor(cfimage.array, vmin=0.)
plt.axis((0, NPIX, 0, NPIX))
plt.colorbar()
plt.set_cmap('hot')
plt.title(r'COSMOS F814W-unrotated-sci noise correlation function')
plt.savefig(CFPLOTFILE)
plt.show()
plt.clf()
plt.pcolor(np.log10(cfimage.array + 1.e-7))
plt.axis((0, NPIX, 0, NPIX))
plt.colorbar()
plt.set_cmap('hot')
plt.title('log10 COSMOS F814W-unrotated-sci noise correlation function')
plt.savefig(CFLOGPLOTFILE)
plt.show()
|
mardom/GalSim
|
devel/external/hst/make_cosmos_cfimage.py
|
Python
|
gpl-3.0
| 4,645
|
[
"Galaxy"
] |
091444b041d039483c5b0fc2b98737791ae7de3a16783759f8975957d9599185
|
import scipy as sc
import matplotlib.pyplot as plt
import schrpy as sch
# Project a Gaussian wave packet onto precomputed Hamiltonian eigenvectors
# and plot both the expansion coefficients and the reconstruction.
# NOTE(review): `sc.load`/`sc.matmul` rely on scipy re-exporting NumPy
# functions — removed in modern SciPy; confirm the pinned version.
x = sc.arange(-60, 61, 0.01)
z = sc.load('eigvec.npy')           # eigenvectors, one per column (presumably) — TODO confirm
xo = sch.gaussian(3, 1, 2).func(x)  # initial Gaussian packet sampled on x
coeff = sc.matmul(xo.conj(), z)     # expansion coefficients <xo|eigvec>
print(coeff.shape)
print(x.shape)
fig = plt.figure()
# top panel: magnitude / real / imaginary parts of the coefficients
fig.add_subplot(211)
plt.plot(sc.absolute(coeff))
plt.plot(sc.real(coeff))
plt.plot(sc.imag(coeff))
# bottom panel: packet reconstructed from the coefficients
fig.add_subplot(212)
x1 = sc.matmul(z.conj(), coeff)
plt.plot(x, sc.absolute(x1))
plt.plot(x, sc.real(x1))
plt.plot(x, sc.imag(x1))
plt.show(fig)
|
tmaeda11235/schroedinger_solver
|
tests/packet.py
|
Python
|
mit
| 494
|
[
"Gaussian"
] |
e770c3dd4f43ff48714e9ffa44a7cb4951d315a9485c979e4cb02933a8ac9e2a
|
from rdkit import six
#-------------------------------------------------------------------------
# Color
#-------------------------------------------------------------------------
class Color:
    """This class is used to represent color. Components red, green, blue
    are in the range 0 (dark) to 1 (full intensity).  Instances are
    immutable: attribute assignment raises TypeError, and all arithmetic
    returns new Color objects."""

    def __init__(self, red=0, green=0, blue=0):
        "Initialize with red, green, blue in range [0-1]."
        # Write through __dict__ directly because __setattr__ is blocked.
        _float = float
        d = self.__dict__
        d["red"] = _float(red)
        d["green"] = _float(green)
        d["blue"] = _float(blue)

    def __setattr__(self, name, value):
        # Enforce immutability so colors can safely be shared and hashed.
        raise TypeError("piddle.Color has read-only attributes")

    def __mul__(self, x):
        """Scale all components by the scalar x."""
        return Color(self.red * x, self.green * x, self.blue * x)

    def __rmul__(self, x):
        return Color(self.red * x, self.green * x, self.blue * x)

    def __div__(self, x):
        """Divide all components by the scalar x (Python 2 name)."""
        return Color(self.red / x, self.green / x, self.blue / x)

    def __rdiv__(self, x):
        # NOTE(review): computes self/x rather than x/self; kept unchanged
        # for backward compatibility with existing callers — confirm intent.
        return Color(self.red / x, self.green / x, self.blue / x)

    # Python 3 dispatches `/` to __truediv__; alias the Python 2 methods so
    # division works under both interpreters (file already targets both via
    # rdkit.six).
    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def __add__(self, x):
        """Component-wise sum with another Color."""
        return Color(self.red + x.red, self.green + x.green, self.blue + x.blue)

    def __sub__(self, x):
        """Component-wise difference with another Color."""
        return Color(self.red - x.red, self.green - x.green, self.blue - x.blue)

    def __repr__(self):
        return "Color(%1.2f,%1.2f,%1.2f)" % (self.red, self.green, self.blue)

    def __hash__(self):
        return hash((self.red, self.green, self.blue))

    def __cmp__(self, other):
        # Python-2-only ordering by the weighted sum 4*red + 2*green + blue;
        # any comparison failure sorts the other operand first.
        try:
            dsum = 4 * self.red - 4 * other.red + 2 * self.green - 2 * other.green + self.blue - other.blue
        except:
            return -1
        if dsum > 0: return 1
        if dsum < 0: return -1
        return 0

    def toHexRGB(self):
        # Fixed: the second line of this docstring was a separate (dead)
        # string statement in the original; merged into one docstring.
        """Convert the color back to an integer suitable for the
        0xRRGGBB hex representation"""
        r = int(0xFF * self.red)
        g = int(0xFF * self.green)
        b = int(0xFF * self.blue)
        return (r << 16) + (g << 8) + b

    def toHexStr(self):
        """Return the color as a '0xrrggbb' hex string."""
        return "0x%.6x" % self.toHexRGB()
def HexColor(val):
    """Convert a hex string (e.g. "AABBCC") or integer (e.g. 0xAABBCC) into
    the corresponding Color: AA is red, BB is green, CC is blue (00-FF)."""
    if isinstance(val, six.string_types):
        val = int(val, 16)
    scale = 1.0 / 255
    red = (val >> 16) & 0xFF
    green = (val >> 8) & 0xFF
    blue = val & 0xFF
    return Color(scale * red, scale * green, scale * blue)
# color constants -- mostly from HTML standard
aliceblue = HexColor(0xF0F8FF)
antiquewhite = HexColor(0xFAEBD7)
aqua = HexColor(0x00FFFF)
aquamarine = HexColor(0x7FFFD4)
azure = HexColor(0xF0FFFF)
beige = HexColor(0xF5F5DC)
bisque = HexColor(0xFFE4C4)
black = HexColor(0x000000)
blanchedalmond = HexColor(0xFFEBCD)
blue = HexColor(0x0000FF)
blueviolet = HexColor(0x8A2BE2)
brown = HexColor(0xA52A2A)
burlywood = HexColor(0xDEB887)
cadetblue = HexColor(0x5F9EA0)
chartreuse = HexColor(0x7FFF00)
chocolate = HexColor(0xD2691E)
coral = HexColor(0xFF7F50)
cornflower = HexColor(0x6495ED)
cornsilk = HexColor(0xFFF8DC)
crimson = HexColor(0xDC143C)
cyan = HexColor(0x00FFFF)
darkblue = HexColor(0x00008B)
darkcyan = HexColor(0x008B8B)
darkgoldenrod = HexColor(0xB8860B)
darkgray = HexColor(0xA9A9A9)
darkgreen = HexColor(0x006400)
darkkhaki = HexColor(0xBDB76B)
darkmagenta = HexColor(0x8B008B)
darkolivegreen = HexColor(0x556B2F)
darkorange = HexColor(0xFF8C00)
darkorchid = HexColor(0x9932CC)
darkred = HexColor(0x8B0000)
darksalmon = HexColor(0xE9967A)
darkseagreen = HexColor(0x8FBC8B)
darkslateblue = HexColor(0x483D8B)
darkslategray = HexColor(0x2F4F4F)
darkturquoise = HexColor(0x00CED1)
darkviolet = HexColor(0x9400D3)
deeppink = HexColor(0xFF1493)
deepskyblue = HexColor(0x00BFFF)
dimgray = HexColor(0x696969)
dodgerblue = HexColor(0x1E90FF)
firebrick = HexColor(0xB22222)
floralwhite = HexColor(0xFFFAF0)
forestgreen = HexColor(0x228B22)
fuchsia = HexColor(0xFF00FF)
gainsboro = HexColor(0xDCDCDC)
ghostwhite = HexColor(0xF8F8FF)
gold = HexColor(0xFFD700)
goldenrod = HexColor(0xDAA520)
gray = HexColor(0x808080)
grey = gray  # British-spelling alias for the same Color object
green = HexColor(0x008000)
greenyellow = HexColor(0xADFF2F)
honeydew = HexColor(0xF0FFF0)
hotpink = HexColor(0xFF69B4)
indianred = HexColor(0xCD5C5C)
indigo = HexColor(0x4B0082)
ivory = HexColor(0xFFFFF0)
khaki = HexColor(0xF0E68C)
lavender = HexColor(0xE6E6FA)
lavenderblush = HexColor(0xFFF0F5)
lawngreen = HexColor(0x7CFC00)
lemonchiffon = HexColor(0xFFFACD)
lightblue = HexColor(0xADD8E6)
lightcoral = HexColor(0xF08080)
lightcyan = HexColor(0xE0FFFF)
lightgoldenrodyellow = HexColor(0xFAFAD2)
lightgreen = HexColor(0x90EE90)
lightgrey = HexColor(0xD3D3D3)
lightpink = HexColor(0xFFB6C1)
lightsalmon = HexColor(0xFFA07A)
lightseagreen = HexColor(0x20B2AA)
lightskyblue = HexColor(0x87CEFA)
lightslategray = HexColor(0x778899)
lightsteelblue = HexColor(0xB0C4DE)
lightyellow = HexColor(0xFFFFE0)
lime = HexColor(0x00FF00)
limegreen = HexColor(0x32CD32)
linen = HexColor(0xFAF0E6)
magenta = HexColor(0xFF00FF)
maroon = HexColor(0x800000)
mediumaquamarine = HexColor(0x66CDAA)
mediumblue = HexColor(0x0000CD)
mediumorchid = HexColor(0xBA55D3)
mediumpurple = HexColor(0x9370DB)
mediumseagreen = HexColor(0x3CB371)
mediumslateblue = HexColor(0x7B68EE)
mediumspringgreen = HexColor(0x00FA9A)
mediumturquoise = HexColor(0x48D1CC)
mediumvioletred = HexColor(0xC71585)
midnightblue = HexColor(0x191970)
mintcream = HexColor(0xF5FFFA)
mistyrose = HexColor(0xFFE4E1)
moccasin = HexColor(0xFFE4B5)
navajowhite = HexColor(0xFFDEAD)
navy = HexColor(0x000080)
oldlace = HexColor(0xFDF5E6)
olive = HexColor(0x808000)
olivedrab = HexColor(0x6B8E23)
orange = HexColor(0xFFA500)
orangered = HexColor(0xFF4500)
orchid = HexColor(0xDA70D6)
palegoldenrod = HexColor(0xEEE8AA)
palegreen = HexColor(0x98FB98)
paleturquoise = HexColor(0xAFEEEE)
palevioletred = HexColor(0xDB7093)
papayawhip = HexColor(0xFFEFD5)
peachpuff = HexColor(0xFFDAB9)
peru = HexColor(0xCD853F)
pink = HexColor(0xFFC0CB)
plum = HexColor(0xDDA0DD)
powderblue = HexColor(0xB0E0E6)
purple = HexColor(0x800080)
red = HexColor(0xFF0000)
rosybrown = HexColor(0xBC8F8F)
royalblue = HexColor(0x4169E1)
saddlebrown = HexColor(0x8B4513)
salmon = HexColor(0xFA8072)
sandybrown = HexColor(0xF4A460)
seagreen = HexColor(0x2E8B57)
seashell = HexColor(0xFFF5EE)
sienna = HexColor(0xA0522D)
silver = HexColor(0xC0C0C0)
skyblue = HexColor(0x87CEEB)
slateblue = HexColor(0x6A5ACD)
slategray = HexColor(0x708090)
snow = HexColor(0xFFFAFA)
springgreen = HexColor(0x00FF7F)
steelblue = HexColor(0x4682B4)
tan = HexColor(0xD2B48C)
teal = HexColor(0x008080)
thistle = HexColor(0xD8BFD8)
tomato = HexColor(0xFF6347)
turquoise = HexColor(0x40E0D0)
violet = HexColor(0xEE82EE)
wheat = HexColor(0xF5DEB3)
white = HexColor(0xFFFFFF)
whitesmoke = HexColor(0xF5F5F5)
yellow = HexColor(0xFFFF00)
yellowgreen = HexColor(0x9ACD32)
# special case -- indicates no drawing should be done
transparent = Color(-1, -1, -1)
|
soerendip42/rdkit
|
rdkit/sping/colors.py
|
Python
|
bsd-3-clause
| 8,009
|
[
"RDKit"
] |
d0ee7c0054c80fe5f88816021b824a5874e080354a5d43745421e3936a8da3b9
|
import numpy as np
class AdaptiveFixed(object):
def __init__(self, n_inputs, n_outputs, n_neurons,
input_bits=16, state_bits=16, extra_bits=16,
decoder_offset=0, decoder_bits=16,
seed=None, learning_rate=1e-3,
has_neuron_state=True, smoothing=0):
self.input_bits = input_bits
self.state_bits = state_bits
self.extra_bits = extra_bits
self.decoder_offset = decoder_offset
self.decoder_bits = decoder_bits
assert input_bits + state_bits + extra_bits < 64
self.rng = np.random.RandomState(seed=seed)
self.compute_encoders(n_inputs, n_neurons)
self.initialize_decoders(n_neurons, n_outputs)
self.learning_rate_shift=int(round(np.log2(1.0/learning_rate))) - self.decoder_offset
self.has_neuron_state=has_neuron_state
self.input_max = (1<<self.input_bits) - 1
self.is_spiking = True
if has_neuron_state:
self.state = np.zeros(n_neurons, dtype='int64')
self.smoothing = smoothing
if smoothing > 0:
smoothing_decay = np.exp(-1.0/smoothing)
self.smoothing_shift = -int(np.round(np.log2(1-smoothing_decay)))
self.smoothing_state = np.zeros(n_outputs, dtype='int64')
def step(self, state, error):
state = np.array(state.clip(-self.input_max, self.input_max), dtype='int64')
error = np.array(error.clip(-self.input_max, self.input_max), dtype='int64')
# feed input over the static synapses
current = self.compute_neuron_input(state)
# do the neural nonlinearity
activity = self.neuron(current)
# apply the learned synapses
value = self.compute_output(activity)
# update the synapses with the learning rule
index = np.where(activity>0)
self.decoder[:,index] -= error >> self.learning_rate_shift
dec_max = 1<<(self.decoder_bits-1)-1
dec_min = -(1<<(self.decoder_bits-1))
self.decoder = np.clip(self.decoder, dec_min, dec_max)
return value
def compute_encoders(self, n_inputs, n_neurons):
# generate the static synapses
# NOTE: this algorithm could be changed, and just needs to produce a
# similar distribution of connection weights. Changing this
# distribution slightly changes the class of functions the neural
# network will be good at learning
max_rates = self.rng.uniform(0.5, 1, n_neurons)
intercepts = self.rng.uniform(-1, 1, n_neurons)
gain = max_rates / (1 - intercepts)
bias = -intercepts * gain
enc = self.rng.randn(n_neurons, n_inputs)
enc /= np.linalg.norm(enc, axis=1)[:,None]
encoder = enc * gain[:, None]
self.bias = (bias*(1<<self.state_bits)).astype('int64')
# store sign and shift rather than the encoder
self.sign = np.where(encoder>0, 1, -1)
self.shift1 = np.log2(encoder*(1<<self.extra_bits)*self.sign).astype(int)
self.shift1 += self.state_bits - self.input_bits
def initialize_decoders(self, n_neurons, n_outputs):
self.decoder = np.zeros((n_outputs, n_neurons), dtype='int64')
def compute_neuron_input(self, state):
# this should be able to be reduced to 32 bits (or even 16)
result = self.bias.astype('int64')<<self.extra_bits
for i, s in enumerate(state):
result += (self.sign[:,i]*(s.astype('int64')<<(self.shift1[:,i])))
return result>>self.extra_bits
# the above code approximates the following multiply using shifts
#return np.dot(self.encoder, state) + self.bias
def neuron(self, current):
if self.has_neuron_state:
# this is the accumulator implementation for a spike
self.state = self.state + current
self.state = np.where(self.state<0, 0, self.state)
spikes = np.where(self.state>=(1<<self.state_bits), 1, 0)
self.state[spikes>0] -= 1<<self.state_bits
else:
# this is the rng implementation for a spike
spikes = np.where(self.rng.randint(0,1<<self.state_bits,len(current))<current,
1, 0)
return spikes
def compute_output(self, activity):
decoder_access = self.decoder[:,np.where(activity>0)[0]]
if decoder_access.shape[1]>0:
value = np.sum(decoder_access, axis=1)
else:
value = np.zeros(decoder_access.shape[0], dtype=int)
if self.smoothing:
dv = (value - self.smoothing_state) >> self.smoothing_shift
self.smoothing_state += dv
value = self.smoothing_state
return value.copy() >> self.decoder_offset
|
tcstewar/minimal_adaptive_controller
|
adapt_fixed_small.py
|
Python
|
gpl-3.0
| 4,775
|
[
"NEURON"
] |
4942933640009df28d69e6b0a86ac33abbf22adbe0ef30bf5ddcf07c2efbbd42
|
__author__ = 'jeff'
import numpy as np
class EnergeticNetwork:
    """
    The energetic network forms the base class for Hopfield and Boltzmann machines.

    Weights are stored in a flat, row-major array of length
    ``neuron_count ** 2`` and indexed as
    ``(to_neuron * neuron_count) + from_neuron``.
    """

    def __init__(self, neuron_count):
        """
        Construct the network with the specified neuron count.
        :param neuron_count: The number of neurons.
        """
        # The current state of the thermal network.
        self.current_state = [0.0] * neuron_count
        # The weights, flattened to neuron_count**2 entries.
        self.weights = np.zeros([neuron_count * neuron_count])
        # The neuron count.
        self.neuron_count = neuron_count

    def add_weight(self, from_neuron, to_neuron, value):
        """
        Add to the specified weight.
        :param from_neuron: The from neuron.
        :param to_neuron: The to neuron.
        :param value: The value to add.
        :raises IndexError: If the computed index is out of range.
        """
        index = (to_neuron * self.neuron_count) + from_neuron
        if index >= len(self.weights):
            raise IndexError("Out of range: from_neuron: {}, to_neuron: {}".format(from_neuron, to_neuron))
        self.weights[index] += value

    def calculate_energy(self):
        """
        Calculate the energy for the network.
        :return: The current energy for the network. The network will
        seek to lower this value.
        """
        temp_e = 0
        for i in range(0, self.neuron_count):
            for j in range(0, self.neuron_count):
                if i != j:
                    temp_e += self.get_weight(i, j) * self.current_state[i] * \
                        self.current_state[j]
        # Each (i, j) pair is visited in both orders, hence the division by 2.
        return -1 * temp_e / 2

    def clear(self):
        """
        Clear any connection weights.
        """
        for i in range(0, len(self.weights)):
            self.weights[i] = 0

    def get_weight(self, from_neuron, to_neuron):
        """
        Get a weight.
        :param from_neuron: The from neuron.
        :param to_neuron: The to neuron.
        :return: The weight.
        """
        index = (to_neuron * self.neuron_count) + from_neuron
        return self.weights[index]

    def init(self, neuron_count, weights, output):
        """
        Init the network.
        :param neuron_count: The neuron count.
        :param weights: The weights; length must equal neuron_count**2.
        :param output: The output; length must equal neuron_count.
        :raises IndexError: If the output length does not match neuron_count.
        :raises ValueError: If the weight count is not neuron_count**2.
        """
        if neuron_count != len(output):
            raise IndexError("Neuron count({}) must match output count({}).".format(neuron_count, len(output)))
        if (neuron_count * neuron_count) != len(weights):
            # BUG FIX: the original referenced the non-existent attribute
            # ``weights.length`` and supplied one argument to a
            # two-placeholder format string, so the intended error message
            # itself crashed before it could be raised.
            raise ValueError(
                "Weight count({}) must be the square of the neuron count({}).".format(
                    len(weights), neuron_count))
        self.neuron_count = neuron_count
        self.weights = weights
        self.current_state = [0] * neuron_count
        for i in range(0, len(self.current_state)):
            self.current_state[i] = output[i]

    def reset(self, rnd):
        """
        Reset the neural network to zero state and weights; randomization
        (via ``rnd``) is left to subclasses and is not used for Hopfield.
        :param rnd: Random number generator
        """
        for i in range(0, len(self.current_state)):
            self.current_state[i] = 0
        for i in range(0, len(self.weights)):
            self.weights[i] = 0

    def set_current_state(self, s):
        """
        Set the current state.
        :param s: The current state array (copied element-wise).
        """
        self.current_state[:] = s[:]

    def set_weight(self, from_neuron, to_neuron, value):
        """
        Set a weight.
        :param from_neuron: The from neuron.
        :param to_neuron: The to neuron.
        :param value: The value to set.
        """
        index = (to_neuron * self.neuron_count) + from_neuron
        self.weights[index] = value
|
JPMoresmau/aifh
|
vol3/vol3-python-examples/lib/aifh/energetic.py
|
Python
|
apache-2.0
| 3,697
|
[
"NEURON"
] |
7806355474a01641e74e195b80667c29d564de75e63de27ded4962cdfdba5433
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
"""
from __future__ import division, print_function, absolute_import
# TODO:
# * properly implement ``_FillValue``.
# * implement Jeff Whitaker's patch for masked variables.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file']
import warnings
import weakref
from operator import mul
import mmap as mm
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import fromstring, ndarray, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
from scipy._lib.six import integer_types, text_type, binary_type
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
class netcdf_file(object):
    """
    A file object for NetCDF data.

    A `netcdf_file` object has two standard attributes: `dimensions` and
    `variables`. The values of both are dictionaries, mapping dimension
    names to their associated lengths and variable names to variables,
    respectively. Application programs should never modify these
    dictionaries.

    All other attributes correspond to global attributes defined in the
    NetCDF file. Global file attributes are created by assigning to an
    attribute of the `netcdf_file` object.

    Parameters
    ----------
    filename : string or file-like
        string -> filename
    mode : {'r', 'w', 'a'}, optional
        read-write-append mode, default is 'r'
    mmap : None or bool, optional
        Whether to mmap `filename` when reading. Default is True
        when `filename` is a file name, False when `filename` is a
        file-like object. Note that when mmap is in use, data arrays
        returned refer directly to the mmapped data on disk, and the
        file cannot be closed as long as references to it exist.
    version : {1, 2}, optional
        version of netcdf to read / write, where 1 means *Classic
        format* and 2 means *64-bit offset format*. Default is 1. See
        `here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`__
        for more info.

    Notes
    -----
    The major advantage of this module over other modules is that it doesn't
    require the code to be linked to the NetCDF libraries. This module is
    derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.

    NetCDF files are a self-describing binary data format. The file contains
    metadata that describes the dimensions and variables in the file. More
    details about NetCDF files can be found `here
    <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`__. There
    are three main sections to a NetCDF data structure:

    1. Dimensions
    2. Variables
    3. Attributes

    The dimensions section records the name and length of each dimension used
    by the variables. The variables would then indicate which dimensions it
    uses and any attributes such as data units, along with containing the data
    values for the variable. It is good practice to include a
    variable that is the same name as a dimension to provide the values for
    that axes. Lastly, the attributes section would contain additional
    information such as the name of the file creator or the instrument used to
    collect the data.

    When writing data to a NetCDF file, there is often the need to indicate the
    'record dimension'. A record dimension is the unbounded dimension for a
    variable. For example, a temperature variable may have dimensions of
    latitude, longitude and time. If one wants to add more temperature data to
    the NetCDF file as time progresses, then the temperature variable should
    have the time dimension flagged as the record dimension.

    In addition, the NetCDF file header contains the position of the data in
    the file, so access can be done in an efficient manner without loading
    unnecessary data into memory. It uses the ``mmap`` module to create
    Numpy arrays mapped to the data on disk, for the same purpose.

    Note that when `netcdf_file` is used to open a file with mmap=True
    (default for read-only), arrays returned by it refer to data
    directly on the disk. The file should not be closed, and cannot be cleanly
    closed when asked, if such arrays are alive. You may want to copy data arrays
    obtained from mmapped Netcdf file if they are to be processed after the file
    is closed, see the example below.

    Examples
    --------
    To create a NetCDF file:

    >>> from scipy.io import netcdf
    >>> f = netcdf.netcdf_file('simple.nc', 'w')
    >>> f.history = 'Created for a test'
    >>> f.createDimension('time', 10)
    >>> time = f.createVariable('time', 'i', ('time',))
    >>> time[:] = np.arange(10)
    >>> time.units = 'days since 2008-01-01'
    >>> f.close()

    Note the assignment of ``range(10)`` to ``time[:]``. Exposing the slice
    of the time variable allows for the data to be set in the object, rather
    than letting ``range(10)`` overwrite the ``time`` variable.

    To read the NetCDF file we just created:

    >>> from scipy.io import netcdf
    >>> f = netcdf.netcdf_file('simple.nc', 'r')
    >>> print(f.history)
    Created for a test
    >>> time = f.variables['time']
    >>> print(time.units)
    days since 2008-01-01
    >>> print(time.shape)
    (10,)
    >>> print(time[-1])
    9

    NetCDF files, when opened read-only, return arrays that refer
    directly to memory-mapped data on disk:

    >>> data = time[:]
    >>> data.base.base
    <mmap.mmap object at 0x7fe753763180>

    If the data is to be processed after the file is closed, it needs
    to be copied to main memory:

    >>> data = time[:].copy()
    >>> f.close()
    >>> data.mean()

    A NetCDF file can also be used as context manager:

    >>> from scipy.io import netcdf
    >>> with netcdf.netcdf_file('simple.nc', 'r') as f:
    >>>     print(f.history)
    Created for a test
    """
    def __init__(self, filename, mode='r', mmap=None, version=1):
        """Initialize netcdf_file from fileobj (str or file-like)."""
        # BUG FIX: the original used ``mode not in 'rwa'``, which silently
        # accepted '' and multi-character substrings such as 'rw'.
        if mode not in ('r', 'w', 'a'):
            raise ValueError("Mode must be either 'r', 'w' or 'a'.")
        if hasattr(filename, 'seek'):  # file-like
            self.fp = filename
            self.filename = 'None'
            if mmap is None:
                mmap = False
            elif mmap and not hasattr(filename, 'fileno'):
                raise ValueError('Cannot use file object for mmap')
        else:  # maybe it's a string
            self.filename = filename
            omode = 'r+' if mode == 'a' else mode
            self.fp = open(self.filename, '%sb' % omode)
            if mmap is None:
                mmap = True
        if mode != 'r':
            # Cannot read write-only files
            mmap = False
        self.use_mmap = mmap
        self.mode = mode
        self.version_byte = version
        self.dimensions = {}
        self.variables = {}
        self._dims = []
        self._recs = 0
        self._recsize = 0
        self._mm = None
        self._mm_buf = None
        if self.use_mmap:
            self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
            self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
        self._attributes = {}
        if mode in 'ra':
            self._read()

    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            pass
        self.__dict__[attr] = value

    def close(self):
        """Closes the NetCDF file."""
        if not self.fp.closed:
            try:
                self.flush()
            finally:
                self.variables = {}
                if self._mm_buf is not None:
                    ref = weakref.ref(self._mm_buf)
                    self._mm_buf = None
                    if ref() is None:
                        # self._mm_buf is gc'd, and we can close the mmap
                        self._mm.close()
                    else:
                        # we cannot close self._mm, since self._mm_buf is
                        # alive and there may still be arrays referring to it
                        warnings.warn((
                            "Cannot close a netcdf_file opened with mmap=True, when "
                            "netcdf_variables or arrays referring to its data still exist. "
                            "All data arrays obtained from such files refer directly to "
                            "data on disk, and must be copied before the file can be cleanly "
                            "closed. (See netcdf_file docstring for more information on mmap.)"
                        ), category=RuntimeWarning)
                self._mm = None
                self.fp.close()
    __del__ = close

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def createDimension(self, name, length):
        """
        Adds a dimension to the Dimension section of the NetCDF data structure.

        Note that this function merely adds a new dimension that the variables can
        reference. The values for the dimension, if desired, should be added as
        a variable using `createVariable`, referring to this dimension.

        Parameters
        ----------
        name : str
            Name of the dimension (Eg, 'lat' or 'time').
        length : int
            Length of the dimension.

        See Also
        --------
        createVariable
        """
        self.dimensions[name] = length
        self._dims.append(name)

    def createVariable(self, name, type, dimensions):
        """
        Create an empty variable for the `netcdf_file` object, specifying its data
        type and the dimensions it uses.

        Parameters
        ----------
        name : str
            Name of the new variable.
        type : dtype or str
            Data type of the variable.
        dimensions : sequence of str
            List of the dimension names used by the variable, in the desired order.

        Returns
        -------
        variable : netcdf_variable
            The newly created ``netcdf_variable`` object.
            This object has also been added to the `netcdf_file` object as well.

        See Also
        --------
        createDimension

        Notes
        -----
        Any dimensions to be used by the variable should already exist in the
        NetCDF data structure or should be created by `createDimension` prior to
        creating the NetCDF variable.
        """
        shape = tuple([self.dimensions[dim] for dim in dimensions])
        shape_ = tuple([dim or 0 for dim in shape])  # replace None with 0 for numpy
        type = dtype(type)
        typecode, size = type.char, type.itemsize
        if (typecode, size) not in REVERSE:
            raise ValueError("NetCDF 3 does not support type %s" % type)
        data = empty(shape_, dtype=type.newbyteorder("B"))  # convert to big endian always for NetCDF 3
        self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions)
        return self.variables[name]

    def flush(self):
        """
        Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.

        See Also
        --------
        sync : Identical function
        """
        if hasattr(self, 'mode') and self.mode in 'wa':
            self._write()
    sync = flush

    def _write(self):
        # Serialize the whole in-memory structure to the file from scratch.
        self.fp.seek(0)
        self.fp.write(b'CDF')
        self.fp.write(array(self.version_byte, '>b').tostring())
        # Write headers and data.
        self._write_numrecs()
        self._write_dim_array()
        self._write_gatt_array()
        self._write_var_array()

    def _write_numrecs(self):
        # Get highest record count from all record variables.
        for var in self.variables.values():
            if var.isrec and len(var.data) > self._recs:
                self.__dict__['_recs'] = len(var.data)
        self._pack_int(self._recs)

    def _write_dim_array(self):
        if self.dimensions:
            self.fp.write(NC_DIMENSION)
            self._pack_int(len(self.dimensions))
            for name in self._dims:
                self._pack_string(name)
                length = self.dimensions[name]
                self._pack_int(length or 0)  # replace None with 0 for record dimension
        else:
            self.fp.write(ABSENT)

    def _write_gatt_array(self):
        self._write_att_array(self._attributes)

    def _write_att_array(self, attributes):
        if attributes:
            self.fp.write(NC_ATTRIBUTE)
            self._pack_int(len(attributes))
            for name, values in attributes.items():
                self._pack_string(name)
                self._write_values(values)
        else:
            self.fp.write(ABSENT)

    def _write_var_array(self):
        if self.variables:
            self.fp.write(NC_VARIABLE)
            self._pack_int(len(self.variables))

            # Sort variable names non-recs first, then recs.
            def sortkey(n):
                v = self.variables[n]
                if v.isrec:
                    return (-1,)
                return v._shape
            variables = sorted(self.variables, key=sortkey, reverse=True)

            # Set the metadata for all variables.
            for name in variables:
                self._write_var_metadata(name)
            # Now that we have the metadata, we know the vsize of
            # each record variable, so we can calculate recsize.
            self.__dict__['_recsize'] = sum([
                var._vsize for var in self.variables.values()
                if var.isrec])
            # Set the data for all variables.
            for name in variables:
                self._write_var_data(name)
        else:
            self.fp.write(ABSENT)

    def _write_var_metadata(self, name):
        var = self.variables[name]

        self._pack_string(name)
        self._pack_int(len(var.dimensions))
        for dimname in var.dimensions:
            dimid = self._dims.index(dimname)
            self._pack_int(dimid)

        self._write_att_array(var._attributes)

        nc_type = REVERSE[var.typecode(), var.itemsize()]
        self.fp.write(asbytes(nc_type))

        if not var.isrec:
            vsize = var.data.size * var.data.itemsize
            vsize += -vsize % 4
        else:  # record variable
            try:
                vsize = var.data[0].size * var.data.itemsize
            except IndexError:
                vsize = 0
            rec_vars = len([v for v in self.variables.values()
                            if v.isrec])
            if rec_vars > 1:
                vsize += -vsize % 4
        self.variables[name].__dict__['_vsize'] = vsize
        self._pack_int(vsize)

        # Pack a bogus begin, and set the real value later.
        self.variables[name].__dict__['_begin'] = self.fp.tell()
        self._pack_begin(0)

    def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tostring())
            count = var.data.size * var.data.itemsize
            self.fp.write(b'0' * (var._vsize - count))
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                var.data.resize(shape)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                                      (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tostring())
                # Padding
                count = rec.size * rec.itemsize
                self.fp.write(b'0' * (var._vsize - count))
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize)

    def _write_values(self, values):
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(t, NC_INT) for t in integer_types]
            types += [
                (float, NC_FLOAT),
                (str, NC_CHAR)
            ]
            # bytes index into scalars in py3k. Check for "string" types
            if isinstance(values, text_type) or isinstance(values, binary_type):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values  # scalar
            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k. Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                                 (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tostring())
        count = values.size * values.itemsize
        self.fp.write(b'0' * (-count % 4))  # pad

    def _read(self):
        # Check magic bytes and version
        magic = self.fp.read(3)
        if not magic == b'CDF':
            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                            self.filename)
        self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]

        # Read file headers and set data.
        self._read_numrecs()
        self._read_dim_array()
        self._read_gatt_array()
        self._read_var_array()

    def _read_numrecs(self):
        self.__dict__['_recs'] = self._unpack_int()

    def _read_dim_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_DIMENSION]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        for dim in range(count):
            name = asstr(self._unpack_string())
            length = self._unpack_int() or None  # None for record dimension
            self.dimensions[name] = length
            self._dims.append(name)  # preserve order

    def _read_gatt_array(self):
        for k, v in self._read_att_array().items():
            self.__setattr__(k, v)

    def _read_att_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_ATTRIBUTE]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        attributes = {}
        for attr in range(count):
            name = asstr(self._unpack_string())
            attributes[name] = self._read_values()
        return attributes

    def _read_var_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            (name, dimensions, shape, attributes,
             typecode, size, dtype_, begin_, vsize) = self._read_var()
            # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                if begin == 0:
                    begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * size
                if self.use_mmap:
                    data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
                    data.shape = shape
                else:
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    data = fromstring(self.fp.read(a_size), dtype=dtype_)
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(
                data, typecode, size, shape, dimensions, attributes)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            if self.use_mmap:
                rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
                rec_array.shape = (self._recs,)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
                rec_array.shape = (self._recs,)
                self.fp.seek(pos)

            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]

    def _read_var(self):
        name = asstr(self._unpack_string())
        dimensions = []
        shape = []
        dims = self._unpack_int()

        for i in range(dims):
            dimid = self._unpack_int()
            dimname = self._dims[dimid]
            dimensions.append(dimname)
            dim = self.dimensions[dimname]
            shape.append(dim)
        dimensions = tuple(dimensions)
        shape = tuple(shape)

        attributes = self._read_att_array()
        nc_type = self.fp.read(4)
        vsize = self._unpack_int()
        # 'begin' is a 32-bit offset in classic format, 64-bit in v2.
        begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode

        return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize

    def _read_values(self):
        nc_type = self.fp.read(4)
        n = self._unpack_int()

        typecode, size = TYPEMAP[nc_type]

        count = n*size
        values = self.fp.read(int(count))
        self.fp.read(-count % 4)  # read padding

        # BUG FIX: the original used ``typecode is not 'c'`` — identity
        # comparison with a string literal, which is implementation-dependent
        # and a SyntaxWarning on modern CPython.  Use equality instead.
        if typecode != 'c':
            values = fromstring(values, dtype='>%s' % typecode)
            if values.shape == (1,):
                values = values[0]
        else:
            values = values.rstrip(b'\x00')
        return values

    def _pack_begin(self, begin):
        # The begin offset is 32 bits in classic format, 64 bits in v2.
        if self.version_byte == 1:
            self._pack_int(begin)
        elif self.version_byte == 2:
            self._pack_int64(begin)

    def _pack_int(self, value):
        self.fp.write(array(value, '>i').tostring())
    _pack_int32 = _pack_int

    def _unpack_int(self):
        return int(fromstring(self.fp.read(4), '>i')[0])
    _unpack_int32 = _unpack_int

    def _pack_int64(self, value):
        self.fp.write(array(value, '>q').tostring())

    def _unpack_int64(self):
        return fromstring(self.fp.read(8), '>q')[0]

    def _pack_string(self, s):
        count = len(s)
        self._pack_int(count)
        self.fp.write(asbytes(s))
        self.fp.write(b'0' * (-count % 4))  # pad

    def _unpack_string(self):
        count = self._unpack_int()
        s = self.fp.read(count).rstrip(b'\x00')
        self.fp.read(-count % 4)  # read padding
        return s
class netcdf_variable(object):
    """
    A data object for the `netcdf` module.

    `netcdf_variable` objects are constructed by calling the method
    `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
    objects behave much like array objects defined in numpy, except that their
    data resides in a file. Data is read by indexing and written by assigning
    to an indexed subset; the entire array can be accessed by the index ``[:]``
    or (for scalars) by using the methods `getValue` and `assignValue`.
    `netcdf_variable` objects also have attribute `shape` with the same meaning
    as for arrays, but the shape cannot be modified. There is another read-only
    attribute `dimensions`, whose value is the tuple of dimension names.

    All other attributes correspond to variable attributes defined in
    the NetCDF file. Variable attributes are created by assigning to an
    attribute of the `netcdf_variable` object.

    Parameters
    ----------
    data : array_like
        The data array that holds the values for the variable.
        Typically, this is initialized as empty, but with the proper shape.
    typecode : dtype character code
        Desired data-type for the data array.
    size : int
        Desired element size for the data array.
    shape : sequence of ints
        The shape of the array. This should match the lengths of the
        variable's dimensions.
    dimensions : sequence of strings
        The names of the dimensions used by the variable. Must be in the
        same order of the dimension lengths given by `shape`.
    attributes : dict, optional
        Attribute values (any type) keyed by string names. These attributes
        become attributes for the netcdf_variable object.

    Attributes
    ----------
    dimensions : list of str
        List of names of dimensions used by the variable object.
    isrec, shape
        Properties

    See also
    --------
    isrec, shape
    """
    def __init__(self, data, typecode, size, shape, dimensions, attributes=None):
        self.data = data
        self._typecode = typecode
        self._size = size
        self._shape = shape
        self.dimensions = dimensions
        # Mirror NetCDF attributes as instance attributes for convenient
        # dotted access; _attributes remains the authoritative store.
        self._attributes = attributes or {}
        for k, v in self._attributes.items():
            self.__dict__[k] = v

    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # _attributes itself is not set yet (during __init__).
            pass
        self.__dict__[attr] = value

    def isrec(self):
        """Returns whether the variable has a record dimension or not.

        A record dimension is a dimension along which additional data could be
        easily appended in the netcdf data structure without much rewriting of
        the data file. This attribute is a read-only property of the
        `netcdf_variable`.
        """
        # Record variables store None (falsy) as the first entry of _shape.
        return bool(self.data.shape) and not self._shape[0]
    isrec = property(isrec)

    def shape(self):
        """Returns the shape tuple of the data variable.

        This is a read-only attribute and can not be modified in the
        same manner of other numpy arrays.
        """
        return self.data.shape
    shape = property(shape)

    def getValue(self):
        """
        Retrieve a scalar value from a `netcdf_variable` of length one.

        Raises
        ------
        ValueError
            If the netcdf variable is an array of length greater than one,
            this exception will be raised.
        """
        return self.data.item()

    def assignValue(self, value):
        """
        Assign a scalar value to a `netcdf_variable` of length one.

        Parameters
        ----------
        value : scalar
            Scalar value (of compatible type) to assign to a length-one netcdf
            variable. This value will be written to file.

        Raises
        ------
        ValueError
            If the input is not a scalar, or if the destination is not a length-one
            netcdf variable.
        """
        if not self.data.flags.writeable:
            # Work-around for a bug in NumPy.  Calling itemset() on a read-only
            # memory-mapped array causes a seg. fault.
            # See NumPy ticket #1622, and SciPy ticket #1202.
            # This check for `writeable` can be removed when the oldest version
            # of numpy still supported by scipy contains the fix for #1622.
            raise RuntimeError("variable is not writeable")

        self.data.itemset(value)

    def typecode(self):
        """
        Return the typecode of the variable.

        Returns
        -------
        typecode : char
            The character typecode of the variable (eg, 'i' for int).
        """
        return self._typecode

    def itemsize(self):
        """
        Return the itemsize of the variable.

        Returns
        -------
        itemsize : int
            The element size of the variable (eg, 8 for float64).
        """
        return self._size

    def __getitem__(self, index):
        return self.data[index]

    def __setitem__(self, index, data):
        # Expand data for record vars?
        if self.isrec:
            # Determine how many records the assignment requires so the
            # backing array can be grown along the record dimension first.
            if isinstance(index, tuple):
                rec_index = index[0]
            else:
                rec_index = index
            if isinstance(rec_index, slice):
                recs = (rec_index.start or 0) + len(data)
            else:
                recs = rec_index + 1
            if recs > len(self.data):
                shape = (recs,) + self._shape[1:]
                self.data.resize(shape)
        self.data[index] = data
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
|
nvoron23/scipy
|
scipy/io/netcdf.py
|
Python
|
bsd-3-clause
| 33,547
|
[
"NetCDF"
] |
697ad77b05e01975e795e2bad05fb26f71100e59464012eec0fe1488eb5c1083
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Timothy Berkelbach <tim.berkelbach@gmail.com>
#
import numpy
import pyscf.pbc.ao2mo
import pyscf.lib
from pyscf.pbc.lib import kpts_helper
DEBUG = 0
class unique_pqr_list:
#####################################################################################
# The following only computes the integrals not related by permutational symmetries.
# Wasn't sure how to do this 'cleanly', but it's fairly straightforward
#####################################################################################
def __init__(self,cell,kpts):
    """Enumerate symmetry-unique (kp, kq, kr) k-point triplets.

    Walks every (kp, kq, kr) combination and, for each one not yet
    covered, records it as unique and marks the triplets related to it
    by the four permutational symmetries of the two-electron integrals,
    storing which transposition/conjugation operation (0-3) recovers
    each related triplet from the unique one.

    :param cell: cell object passed to ``kpts_helper.get_kconserv``.
    :param kpts: array of k-points; ks is fixed by momentum conservation.
    """
    kconserv = kpts_helper.get_kconserv(cell,kpts)
    nkpts = len(kpts)
    temp = range(0,nkpts)
    # all (kp, kq, kr) index triplets, shape (nkpts**3, 3)
    klist = pyscf.lib.cartesian_prod((temp,temp,temp))
    # 1 where a triplet has already been assigned to a unique representative
    completed = numpy.zeros((nkpts,nkpts,nkpts),dtype=int)
    # operation code (0-3) mapping the representative's integral to this triplet
    self.operations = numpy.zeros((nkpts,nkpts,nkpts),dtype=int)
    # the representative (kp, kq, kr) for every triplet
    self.equivalentList = numpy.zeros((nkpts,nkpts,nkpts,3),dtype=int)
    self.nUnique = 0
    self.uniqueList = numpy.array([],dtype=int)
    ivec = 0
    not_done = True
    while( not_done ):
        current_kvec = klist[ivec]
        # check to see if it's been done...
        kp = current_kvec[0]
        kq = current_kvec[1]
        kr = current_kvec[2]
        #print "computing ",kp,kq,kr
        if completed[kp,kq,kr] == 0:
            self.nUnique += 1
            self.uniqueList = numpy.append(self.uniqueList,current_kvec)
            # ks is determined by momentum conservation
            ks = kconserv[kp,kq,kr]
            # Now find all equivalent kvectors by permuting it all possible ways...
            # and then storing how its related by symmetry
            completed[kp,kq,kr] = 1
            self.operations[kp,kq,kr] = 0
            self.equivalentList[kp,kq,kr] = current_kvec.copy()
            completed[kr,ks,kp] = 1
            self.operations[kr,ks,kp] = 1 #.transpose(2,3,0,1)
            self.equivalentList[kr,ks,kp] = current_kvec.copy()
            completed[kq,kp,ks] = 1
            self.operations[kq,kp,ks] = 2 #numpy.conj(.transpose(1,0,3,2))
            self.equivalentList[kq,kp,ks] = current_kvec.copy()
            completed[ks,kr,kq] = 1
            self.operations[ks,kr,kq] = 3 #numpy.conj(.transpose(3,2,1,0))
            self.equivalentList[ks,kr,kq] = current_kvec.copy()
        ivec += 1
        if ivec == len(klist):
            not_done = False
    # reshape the flat append buffer into (nUnique, 3)
    self.uniqueList = self.uniqueList.reshape(self.nUnique,-1)
    if DEBUG == 1:
        print("::: kpoint helper :::")
        print("kvector list (in)")
        print("   shape = ", klist.shape)
        print("kvector list (out)")
        print("   shape = ", self.uniqueList.shape)
        print("   unique list =")
        print(self.uniqueList)
        print("transformation =")
        for i in range(klist.shape[0]):
            pqr = klist[i]
            irr_pqr = self.equivalentList[pqr[0],pqr[1],pqr[2]]
            print("%3d %3d %3d   ->   %3d %3d %3d" % (pqr[0],pqr[1],pqr[2],
                                                      irr_pqr[0],irr_pqr[1],irr_pqr[2]))
def get_uniqueList(self):
return self.uniqueList
def get_irrVec(self,kp,kq,kr):
return self.equivalentList[kp,kq,kr]
def get_transformation(self,kp,kq,kr):
return self.operations[kp,kq,kr]
######################################################
# for the invec created out of our unique list from
# the irreducible brillouin zone, we transform it to
# arbitrary kp,kq,kr
######################################################
def transform_irr2full(self,invec,kp,kq,kr):
operation = self.get_transformation(kp,kq,kr)
if operation == 0:
return invec
if operation == 1:
return invec.transpose(2,3,0,1)
if operation == 2:
return numpy.conj(invec.transpose(1,0,3,2))
if operation == 3:
return numpy.conj(invec.transpose(3,2,1,0))
|
gkc1000/pyscf
|
pyscf/pbc/mpicc/mpi_kpoint_helper.py
|
Python
|
apache-2.0
| 4,738
|
[
"PySCF"
] |
ff2e0bd11bfe911f743caba37325ebb324de162e6493406a24de136b9fa04f08
|
#!/usr/bin/env python
# Original provided by Mark Olesen
import os,sys
from platform import uname
# from __future__ import print_function
# Use the six-provided print_ when PyFoam is importable; otherwise install a
# local fallback so the rest of this diagnostic script can still run.
try:
    from PyFoam.ThirdParty.six import print_
except ImportError:
    def print_(*args, **kwargs):
        """Minimal replacement for six.print_.

        The call sites below use ``print_(..., end=' ')``, so the fallback
        must accept the same ``sep``/``end``/``file`` keywords as the real
        six.print_ (the old fallback only took positional args and crashed
        with TypeError on the first keyword call).
        """
        sep = kwargs.get("sep", " ")
        end = kwargs.get("end", "\n")
        out = kwargs.get("file", sys.stdout)
        out.write(sep.join(str(a) for a in args) + end)

    print_("PROBLEM:")
    print_("'from PyFoam.ThirdParty.six import print_' did not work. Seems that this is not the correct PyFoam-library\n")
# Module description string (kept for consistency with other PyFoam utilities).
description="""
print_ setup
"""
# Report the machine, interpreter and environment this utility runs under.
print_("Machine info:"," | ".join(uname()))
print_()
print_("Python version:",sys.version)
print_()
print_("Python executable:",sys.executable)
print_()
# Advise on the interpreter version (PyFoam targets 2.7 and 3.x).
if sys.version_info<(2,3):
    print_("\nUnsupported Python-version (at least 2.3). Recommended is 2.6 or 2.7")
elif sys.version_info<(2,4):
    print_("\nThis Python version does not support all features needed by PyFoam (get at least 2.4. Recommended is 2.6 or 2.7")
elif sys.version_info<(2,6):
    print_("This version may not work anymore due to the port of PyFoam to Python 3")
elif sys.version_info<(3,):
    print_("Python 2.7 is one development platform for PyFoam (along with Python 3)")
elif sys.version_info>=(3,):
    print_("Python 3 is supported with PyFoam")
try:
    print_("PYTHONPATH:", os.environ["PYTHONPATH"])
except KeyError:
    print_("PYTHONPATH is not set")
print_()
print_("Location of this utility:",sys.argv[0])
print_()
# Without an importable PyFoam nothing below can work -- bail out early.
try:
    import PyFoam
    import PyFoam.FoamInformation
except ImportError:
    print_("PyFoam not in PYTHONPATH or regular search path. Don't see no sense in continuing")
    print_("Regular Python search-path:",sys.path)
    print_()
    sys.exit(-1)
# Report the active OpenFOAM installation and all detected ones.
installed=PyFoam.FoamInformation.foamInstalledVersions()
print_("Version", PyFoam.FoamInformation.foamVersion(),
       "Fork",PyFoam.FoamInformation.foamFork(),
       "of the installed",len(installed),"versions:")
installedKeys=list(installed.keys())
installedKeys.sort()
formatString="%%%ds : %%s" % max([1+len(a[0])+len(a[1]) for a in installedKeys])
for k in installedKeys:
    print_(formatString % (k[0]+"-"+k[1],installed[k]))
if PyFoam.FoamInformation.oldAppConvention():
    print_("   This version of OpenFOAM uses the old calling convention")
print_()
print_("pyFoam-Version:",PyFoam.versionString())
# hardcodedVersion=(0,6,4,"development")
hardcodedVersion=(0,6,4)
# A mismatch between library version and this hard-coded value points at a
# stale or duplicate installation on the search path.
if PyFoam.version()!=hardcodedVersion:
    print_("ALERT: Reported version",PyFoam.version(),
           "is different from hardcoded version",
           hardcodedVersion,"-> probably inconsistent library installation")
print_()
print_("Path where PyFoam was found (PyFoam.__path__) is",PyFoam.__path__)
print_()
print_("Configuration search path:",PyFoam.configuration().configSearchPath())
print_("Configuration files (used):",PyFoam.configuration().configFiles())
libLoc={}
def testLibrary(name,
textMissing=None,
subModule=None,
textThere=None,
minVersion=None,
versionAttribute="__version__"):
global libLoc
print_("%-20s : " % name, end=' ')
try:
module=name
exec("import "+name)
if subModule:
exec("from "+name+" import "+subModule)
module=subModule
print_("Yes", end=' ')
version=None
try:
version=eval(module+"."+versionAttribute)
except AttributeError:
pass
if version:
print_("\t version:",version, end=' ')
if minVersion:
if version<minVersion:
print_("Insufficient version. At least",minVersion,
"recommended for all features",end=' ')
else:
print_("Matches required version",minVersion,end=' ')
if textThere:
print_("\t",textThere, end=' ')
print_()
libLoc[name]=eval(name+'.__file__')
return True
except ImportError:
print_("No", end=' ')
if textMissing:
print_("\t",textMissing, end=' ')
print_()
return False
except RuntimeError:
print_("Problem", end=' ')
if textMissing:
print_("\t",textMissing, end=' ')
print_()
return False
except SyntaxError:
print_("Syntax Error", end=' ')
if textMissing:
print_("\t",textMissing, end=' ')
print_()
return False
except ValueError:
print_("Value Error", end=' ')
if textMissing:
print_("\t",textMissing, end=' ')
print_()
return False
print_("\nInstalled libraries:")
# Probe every optional/required third-party library and report availability.
# NOTE(review): "by used"/"spped" are typos inside emitted messages; left
# untouched here since they are runtime strings.
testLibrary("cython","Not used. Maybe will by used later to spped up parts of PyFoam")
testLibrary("cProfile","Not a problem. Can't profile using this library")
testLibrary("docutils","Not necessary. Needed for RestructuredText to HTML conversion")
testLibrary("Gnuplot","Not a problem. Version from ThirdParty is used")
testLibrary("hotshot","Not a problem. Can't profile using this library")
testLibrary("ipdb","Not necessary. Only makes debugging more comfortable")
testLibrary("IPython",
            "Not necessary. But the interactive shell may be more comfortable",
            minVersion="2.0.0")
testLibrary("matplotlib","Only Gnuplot-plotting possible")
# testLibrary("matplotlib.pyplot","Only Gnuplot-plotting possible")
testLibrary("mercurial","Not a problem. Used for experimental case handling",
            subModule="config",versionAttribute="util.version()")
testLibrary("nose","Only needed for running the unit-tests (developers only)")
numpyPresent=testLibrary("numpy","Plotting and data comparison won't work")
# PyPy ships numpy functionality as 'numpypy'; retry through that shim.
if not numpyPresent:
    numpypyPresent=testLibrary("numpypy","This workaround for PyPy does not work","This seems to by PyPy")
    if numpypyPresent:
        numpyPresent=testLibrary("numpy","Does not work in pypy","Numpy works with workaround")
testLibrary("openpyxl","Not a problem. Only used for exporting pandas-data to Excel-files (advanced)")
testLibrary("pandas","Not a problem. Only used for handling of advanced data-handling")
testLibrary("ply","Not a problem. Version from ThirdParty is used")
testLibrary("profile","Not a problem. Can't profile using this library")
testLibrary("psyco","Not a problem. Acceleration not possible")
testLibrary("PyQt4","Only some experimental GUI-stuff relies on this",
            subModule="Qt",versionAttribute="QT_VERSION_STR")
testLibrary("PyQt4.Qwt5","Only an alternate plotting back-end")
testLibrary("scipy","Not yet used. Possibly use signal-fitting etc")
testLibrary("Tkinter","Not a problem. Used for the old version of DisplayBlockmesh and some matplotlib-implementations")
testLibrary("twisted","Not yet used. Possibly reimplement MetaServer with it")
testLibrary("vtk","Not a problem. Only used for some utilities",
            versionAttribute="VTK_VERSION")
testLibrary("xlwt","Not a problem. Only used for exporting pandas-data to Excel-files",
            versionAttribute="__VERSION__")
testLibrary("xlrd","Not a problem. Only used for importing Excel-files to pandas-data",
            versionAttribute="__VERSION__")
print_()
# Summarise where each importable library came from.
print_("Library locations")
for l in sorted(libLoc.keys(),key=lambda a:a.lower()):
    print_("%-20s : %s" % (l,libLoc[l]))
from os import path
print_()
# NOTE(review): "envirnoment" is a typo in this runtime message (kept as-is).
print_("Checking additional envirnoment variables")
def checkVar(name,description,additionalCheck):
    """Validate one environment variable.

    Reports whether *name* is set and points to an existing directory; on
    success the supplied additionalCheck callback is invoked with *name*.
    """
    print_("\nChecking for",name,":",description)
    if name not in os.environ:
        print_(name,"missing from environment")
        return
    value = os.environ[name]
    print_(name,"set to",value)
    if path.isdir(value):
        additionalCheck(name)
    else:
        print_("MISCONFIGURATION:",value,"is no directory")
def checkPyFoamLocation(name):
    """Extra check for PYFOAM_DIR: it should point at this installation."""
    # Two directory levels up from this script (…/<install>/bin/<script>).
    expectedPath=path.split(path.split(path.abspath(sys.argv[0]))[0])[0]
    if not path.samefile(expectedPath,os.environ[name]):
        print_("MISCONFIGURATION: PYFOAM_DIR expected to be",expectedPath)
checkVar("PYFOAM_DIR",
         "Location of the PyFoam-installation. Not strictly necessary",
         checkPyFoamLocation)
def checkPyFoamSiteLocation(name):
    """Extra check for PYFOAM_SITE_DIR: verify its bin/etc/lib sub-directories."""
    binDir=path.join(os.environ[name],"bin")
    etcDir=path.join(os.environ[name],"etc")
    libDir=path.join(os.environ[name],"lib")
    if not path.isdir(binDir):
        print_("MISCONFIGURATION: no directory",binDir,"for site-specific scripts")
    else:
        # bin must also be reachable through PATH to be useful.
        found=False
        for p in os.environ["PATH"].split(":"):
            if path.isdir(p):
                if path.samefile(p,binDir):
                    found=True
                    break
        if not found:
            print_("MISCONFIGURATION:",binDir,"is not in the PATH",os.environ["PATH"])
        else:
            print_("Site-specific scripts should be added to",binDir)
    if not path.isdir(etcDir):
        print_("MISCONFIGURATION: no directory",etcDir,"for site-specific configurations")
    else:
        print_("Site-specific configurations can be added to",etcDir)
    if not path.isdir(libDir):
        print_("MISCONFIGURATION: no directory",libDir,"for site-specific library files")
    else:
        print_("Site-specific library files can be added to",libDir,
               "Do NOT add to PYTHONPATH but import as PyFoam.Site")
checkVar("PYFOAM_SITE_DIR",
         "Location of non-PyFoam-disctributions script. Set and used by some Foam-distributions",
         checkPyFoamSiteLocation)
# Should work with Python3 and Python2
|
takaakiaoki/PyFoam
|
bin/pyFoamVersion.py
|
Python
|
gpl-2.0
| 9,582
|
[
"VTK"
] |
b1f71465434029bd9fe941633a46694b488337349fd2672aaf640e24e315c553
|
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# NOTE(review): this is Python 2 code (bare print statement below); it will
# not run unmodified under Python 3.
# USAGE:
# PREAMBLE:
import numpy as np
import sys
import MDAnalysis
# Command line: <psf topology file> <trajectory file>
psf = sys.argv[1]
traj = sys.argv[2]
# SUBROUTINES:
# MAIN PROGRAM:
buf = 100
deltaWrite = 10.0
delta_t = 0.002
box_dim = 28.40 # xyz file does not hold this info (even though it does); for NPT, the dcd file will contain this info; must comment out this line
nf = open('einstein.dat', 'w')
# set the universe object
u = MDAnalysis.Universe('%s' %(psf), '%s' %(traj))
nAtoms = len(u.atoms)
nSteps = len(u.trajectory) # this line selects all steps to analyze (WAAAAY TOO MANY)...
# Largest frame separation analysed; 'buf' frames are held back so every
# separation still has a reasonable number of time origins.
maxDeltaStep = nSteps - 1 - buf
print nAtoms, nSteps
# coor[t][atom]: cached position of 'atom' at frame index t.
coor = np.zeros((nSteps,nAtoms, 3))
dist2AutoCorr = np.zeros((maxDeltaStep))
# Cache all coordinates in memory first.
for ts in u.trajectory[1:]:
    # NOTE(review): 'frame - 2' maps the second trajectory frame to index 0;
    # this assumes the 1-based frame numbering of old MDAnalysis releases --
    # confirm against the MDAnalysis version in use.
    i = u.trajectory.frame -2
    if i%100==0:
        print('Reading step %d from trajectory file \n' %(i))
    for atom in range(0,nAtoms):
        coor[i][atom] = u.atoms[atom].position
# Mean-squared displacement vs. frame separation, averaged over all time
# origins and atoms (Einstein relation).
for deltaStep in range(1,maxDeltaStep):
    count = 0
    sumdist2 = 0.0
    if deltaStep%10==0:
        print('Working on correlation for deltaStep = %10d\n' %(deltaStep))
    for step1 in range(0,nSteps-deltaStep):
        step2 = step1+deltaStep
        for atom in range(0,nAtoms):
            dist2 = 0.0
            for i in range(0,3):
                # Minimum-image convention for a cubic box of edge box_dim.
                dist = coor[step1][atom][i] - coor[step2][atom][i]
                if dist <-box_dim/2.0:
                    dist += box_dim
                if dist >box_dim/2.0:
                    dist -= box_dim
                dist2 += dist**2
            sumdist2 += dist2
            count += 1.0
    dist2AutoCorr[deltaStep] = sumdist2/float(count)
    # NOTE(review): the literal 0.002 duplicates delta_t defined above --
    # keep the two in sync (or use delta_t here).
    nf.write('%10.6f %30.15f \n' %(deltaStep*deltaWrite*0.002, dist2AutoCorr[deltaStep]))
nf.close()
|
rbdavid/MolecDynamics
|
Analysis/MSD/Einstein.py
|
Python
|
mit
| 1,633
|
[
"MDAnalysis"
] |
4abe03b2f25978d3a6655140c958466e4e17f5bb51315275e3d59717a5a64a75
|
import matplotlib.pyplot as plt
from sunpy import map
from sunpy.map import Map
import numpy as np
import os
import copy
from skimage.feature import canny
from skimage.filters import roberts, sobel, scharr, prewitt
from scipy import ndimage
def mask_sun(array,pxval):
    """Return a copy of *array* with a filled circular sun mask applied.

    Every pixel inside a circle of radius ``r`` -- centred on the image
    centre, shifted by the empirical (xc, yc) correction -- is set to
    *pxval*.  The input array itself is left untouched.

    Parameters
    ----------
    array : 2-D ndarray
        The image; assumed square (the grid length is taken from
        ``array.shape[0]``, so non-square input would mis-size the columns).
    pxval : scalar
        Value written into the masked (solar-disk) pixels.

    Returns
    -------
    ndarray
        Masked copy of *array*.
    """
    r = 172        # Empirically the optimal radius of the sun-masking circle
    xc, yc = 0, 3  # Pixel correction: sun not perfectly centered in LASCO image
    n = array.shape[0]            # number of pixels along one side
    # Centre of the circle (division kept as-is to preserve the historic
    # sub-pixel centring behaviour).
    a = array.shape[0] / 2 - yc
    b = array.shape[1] / 2 - xc
    y, x = np.ogrid[-a:n - a, -b:n - b]
    mask = x * x + y * y <= r * r    # boolean disk
    masked = np.copy(array)
    masked[mask] = pxval             # fill the circle with the new value
    return masked
# A test of loading and playing with LASCO 0.5 level data for the CME on 2014/09/08-2014/09/09
#Data starts at
basepath='/Users/kkozarev/LASCO/LASCO_L05_L1/'
basepath='/Users/kkozarev/LASCO/test/'
# NOTE(review): files.pop(0) presumably drops a hidden entry (e.g. .DS_Store);
# os.listdir() order is not guaranteed, so a sorted()+filter would be safer.
files=os.listdir(basepath)
files.pop(0)
# Load the maps for making the background
bmap1=map.Map(basepath+files[0])
bmap2=map.Map(basepath+files[1])
bmap3=map.Map(basepath+files[2])
bmap4=map.Map(basepath+files[3])
# Load the imaging maps
map1=map.Map(basepath+files[4])
map2=map.Map(basepath+files[5])
map3=map.Map(basepath+files[6])
map4=map.Map(basepath+files[7])
map5=map.Map(basepath+files[8])
map6=map.Map(basepath+files[9])
# NOTE(review): map7 re-loads files[9] (identical to map6) -- probably meant
# files[10].
map7=map.Map(basepath+files[9])
#Prepare the background averaged map
bmap=copy.deepcopy(bmap1)
bmap.data.__iadd__(bmap2.data)
bmap.data.__iadd__(bmap3.data)
bmap.data.__iadd__(bmap4.data)
bmap.data = bmap.data / 4
#Divide each map by the background map
bckdiv=0
if bckdiv > 0:
    wt = 1.0 #A scaling factor for the background map
    # NOTE(review): ndarray.__idiv__ exists only on Python 2; under Python 3
    # these calls would raise AttributeError -- confirm target interpreter.
    map1.data.__idiv__(bmap.data*wt)
    map2.data.__idiv__(bmap.data*wt)
    map3.data.__idiv__(bmap.data*wt)
    map4.data.__idiv__(bmap.data*wt)
    map5.data.__idiv__(bmap.data*wt)
    map6.data.__idiv__(bmap.data*wt)
#From these tests, it seems the best image to start identifying sharp edges is a base difference one.
# Three candidate pre-processings: base difference, ratio, relative difference.
pmap1=copy.deepcopy(map1)
pmap1.data-=bmap.data
pmap2=copy.deepcopy(map1)
pmap2.data/=bmap.data
pmap3=copy.deepcopy(map1)
pmap3.data=(pmap3.data-bmap.data)/bmap.data
#TEST1
im=map1.data
im2=map1.data-bmap.data
im2map=copy.deepcopy(map1)
im2map.data=im2
# Mask the solar disk with the median value before edge detection.
im3=mask_sun(im2,np.median(im2))
im3map=copy.deepcopy(map1)
im3map.data=im3
imr = roberts(im3)
ims = sobel(im3)
#nhim=ndimage.binary_fill_holes(im3)
n = 10
l=128
#Gaussian smooth the data
im4map=copy.deepcopy(map1)
im5map=copy.deepcopy(map1)
gaussim=ndimage.gaussian_filter(im3, sigma=l/(4.*n))
gmp=copy.deepcopy(map1)
gmp.data=gaussim
im4map.data=np.square(gaussim.astype('float'))
imc = canny(im4map.data, sigma=3, low_threshold=10000, high_threshold=20000)
im5map.data=imc
#Try Median-filtering instead
mp=copy.deepcopy(map1)
im6map=copy.deepcopy(map1)
im7map=copy.deepcopy(map1)
im4=ndimage.median_filter(im3,10)
im6map.data=im4
mp.data=np.square(im4.astype('float'))
imc = canny(mp.data, sigma=3, low_threshold=8000, high_threshold=30000)
im7map.data=imc
rbmap=copy.deepcopy(map1)
rbmap.data=roberts(mp.data)
sbmap=copy.deepcopy(map1)
sbmap.data=sobel(mp.data)
#TEST2
#im2=map1.data-bmap.data
l=1024
test=ndimage.gaussian_filter(im, sigma=l/(4.*n))
# NOTE(review): median_filter appears to be missing its size/footprint
# argument and would raise at runtime -- confirm intended filter size.
x=ndimage.median_filter(gaussim)
#TEST3
#plt.imshow(pmap1.data,vmin=-150,vmax=50, cmap=plt.get_cmap('Blues'))
#imc = canny(pmap1.data, sigma=3, low_threshold=5, high_threshold=10)
#imr = roberts(pmap1.data)
#ims = sobel(pmap1.data)
#plt.imshow(abs(pmap1.data),vmin=0,vmax=100, cmap=plt.get_cmap('Blues'))
#plt.show()
#plt.imshow((bmap4.data-bmap.data)/bmap.data,vmin=-150,vmax=50, cmap=plt.get_cmap('Blues'))
kkozarev/mwacme
|
src/lasco/lasco_test_lv05_1_data.py
|
Python
|
gpl-2.0
| 3,784
|
[
"Gaussian"
] |
3e9e51980f880654aa1c053783f3781df144c8b1eae63a7566b996472daefdef
|
import os
import pysam
import unittest
import collections
import copy
import array
from TestUtils import checkFieldEqual
# External tool name and test-data directories used by the test cases below.
SAMTOOLS = "samtools"
WORKDIR = "pysam_test_work"
DATADIR = "pysam_data"
class ReadTest(unittest.TestCase):

    def buildRead(self):
        '''build an example read.'''
        # Attributes are assigned in a fixed order: per testUpdate2 below,
        # (re)setting query_sequence erases query_qualities, so qualities
        # must be written last.
        read = pysam.AlignedSegment()
        for attribute, value in (
                ("query_name", "read_12345"),
                ("query_sequence", "ACGT" * 10),
                ("flag", 0),
                ("reference_id", 0),
                ("reference_start", 20),
                ("mapping_quality", 20),
                ("cigartuples", ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))),
                ("next_reference_id", 0),
                ("next_reference_start", 200),
                ("template_length", 167),
                ("query_qualities", pysam.qualitystring_to_array("1234") * 10)):
            setattr(read, attribute, value)
        # todo: create tags
        return read
class TestAlignedSegment(ReadTest):
    '''tests to check if aligned read can be constructed
    and manipulated.
    '''
    def testEmpty(self):
        # A freshly constructed segment reports None/0/[] defaults everywhere.
        a = pysam.AlignedSegment()
        self.assertEqual(a.query_name, None)
        self.assertEqual(a.query_sequence, None)
        self.assertEqual(pysam.qualities_to_qualitystring(a.query_qualities), None)
        self.assertEqual(a.flag, 0)
        self.assertEqual(a.reference_id, 0)
        self.assertEqual(a.mapping_quality, 0)
        self.assertEqual(a.cigartuples, None)
        self.assertEqual(a.tags, [])
        self.assertEqual(a.next_reference_id, 0)
        self.assertEqual(a.next_reference_start, 0)
        self.assertEqual(a.template_length, 0)
    def testStrOfEmptyRead(self):
        a = pysam.AlignedSegment()
        s = str(a)
        self.assertEqual(
            "None\t0\t0\t0\t0\tNone\t0\t0\t0\tNone\tNone\t[]",
            s)
    def testSettingTagInEmptyRead(self):
        '''see issue 62'''
        a = pysam.AlignedSegment()
        a.tags = (("NM", 1),)
        a.query_qualities = None
        self.assertEqual(a.tags, [("NM", 1), ])
    def testCompare(self):
        '''check comparison functions.'''
        a = self.buildRead()
        b = self.buildRead()
        self.assertEqual(0, a.compare(b))
        self.assertEqual(0, b.compare(a))
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        b.tid = 2
        self.assertFalse(a == b)
        self.assertFalse(b == a)
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def testHashing(self):
        # Equal reads must hash equally; changing a field changes the hash.
        a = self.buildRead()
        b = self.buildRead()
        self.assertEqual(hash(a), hash(b))
        b.tid = 2
        self.assertNotEqual(hash(a), hash(b))
    def testUpdate(self):
        '''check if updating fields affects other variable length data
        '''
        a = self.buildRead()
        b = self.buildRead()
        # check qname
        b.query_name = "read_123"
        checkFieldEqual(self, a, b, "query_name")
        b.query_name = "read_12345678"
        checkFieldEqual(self, a, b, "query_name")
        b.query_name = "read_12345"
        checkFieldEqual(self, a, b)
        # check cigar
        b.cigartuples = ((0, 10), )
        checkFieldEqual(self, a, b, "cigartuples")
        b.cigartuples = ((0, 10), (2, 1), (0, 10))
        checkFieldEqual(self, a, b, "cigartuples")
        b.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
        checkFieldEqual(self, a, b)
        # check seq
        b.query_sequence = "ACGT"
        checkFieldEqual(self,
                        a, b,
                        ("query_sequence", "query_qualities", "query_length"))
        b.query_sequence = "ACGT" * 3
        checkFieldEqual(self,
                        a, b,
                        ("query_sequence", "query_qualities", "query_length"))
        b.query_sequence = "ACGT" * 10
        checkFieldEqual(self, a, b, ("query_qualities",))
        # reset qual
        b = self.buildRead()
        # check flags:
        for x in (
                "is_paired", "is_proper_pair",
                "is_unmapped", "mate_is_unmapped",
                "is_reverse", "mate_is_reverse",
                "is_read1", "is_read2",
                "is_secondary", "is_qcfail",
                "is_duplicate", "is_supplementary"):
            setattr(b, x, True)
            self.assertEqual(getattr(b, x), True)
            checkFieldEqual(self, a, b, ("flag", x,))
            setattr(b, x, False)
            self.assertEqual(getattr(b, x), False)
            checkFieldEqual(self, a, b)
    def testUpdate2(self):
        '''issue 135: inplace update of sequence and quality score.
        This does not work as setting the sequence will erase
        the quality scores.
        '''
        a = self.buildRead()
        a.query_sequence = a.query_sequence[5:10]
        self.assertEqual(pysam.qualities_to_qualitystring(a.query_qualities), None)
        a = self.buildRead()
        s = pysam.qualities_to_qualitystring(a.query_qualities)
        a.query_sequence = a.query_sequence[5:10]
        a.query_qualities = pysam.qualitystring_to_array(s[5:10])
        self.assertEqual(pysam.qualities_to_qualitystring(a.query_qualities), s[5:10])
    def testLargeRead(self):
        '''build an example read.'''
        a = pysam.AlignedSegment()
        a.query_name = "read_12345"
        a.query_sequence = "ACGT" * 200
        a.flag = 0
        a.reference_id = 0
        a.reference_start = 20
        a.mapping_quality = 20
        a.cigartuples = ((0, 4 * 200), )
        a.next_reference_id = 0
        a.next_reference_start = 200
        a.template_length = 167
        a.query_qualities = pysam.qualitystring_to_array("1234") * 200
        return a
    def testUpdateTlen(self):
        '''check if updating tlen works'''
        a = self.buildRead()
        oldlen = a.template_length
        oldlen *= 2
        a.template_length = oldlen
        self.assertEqual(a.template_length, oldlen)
    def testPositions(self):
        a = self.buildRead()
        # For cigar 10M 1D 9M 1I 20M starting at ref pos 20: reference
        # position 30 is deleted and query position 19 is the insertion.
        self.assertEqual(a.get_reference_positions(),
                         [20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
                          31, 32, 33, 34, 35, 36, 37, 38, 39,
                          40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                          50, 51, 52, 53, 54, 55, 56, 57, 58, 59])
        self.assertEqual(a.get_aligned_pairs(),
                         [(0, 20), (1, 21), (2, 22), (3, 23), (4, 24),
                          (5, 25), (6, 26), (7, 27), (8, 28), (9, 29),
                          (None, 30),
                          (10, 31), (11, 32), (12, 33), (13, 34), (14, 35),
                          (15, 36), (16, 37), (17, 38), (18, 39), (19, None),
                          (20, 40), (21, 41), (22, 42), (23, 43), (24, 44),
                          (25, 45), (26, 46), (27, 47), (28, 48), (29, 49),
                          (30, 50), (31, 51), (32, 52), (33, 53), (34, 54),
                          (35, 55), (36, 56), (37, 57), (38, 58), (39, 59)])
        self.assertEqual(
            a.get_reference_positions(),
            [x[1] for x in a.get_aligned_pairs()
             if x[0] is not None and x[1] is not None])
        # alen is the length of the aligned read in genome
        self.assertEqual(a.reference_length,
                         a.get_aligned_pairs()[-1][0] + 1)
        # aend points to one beyond last aligned base in ref
        self.assertEqual(a.get_reference_positions()[-1],
                         a.reference_end - 1)
    def testFullReferencePositions(self):
        '''see issue 26'''
        a = self.buildRead()
        a.cigar = [(4, 30), (0, 20), (1, 3), (0, 47)]
        self.assertEqual(100,
                         len(a.get_reference_positions(full_length=True)))
    def testBlocks(self):
        a = self.buildRead()
        self.assertEqual(a.get_blocks(),
                         [(20, 30), (31, 40), (40, 60)])
    def test_get_aligned_pairs_soft_clipping(self):
        # Soft-clipped query positions pair with reference None; they are
        # omitted when matches_only (positional True) is requested.
        a = self.buildRead()
        a.cigartuples = ((4, 2), (0, 35), (4, 3))
        self.assertEqual(a.get_aligned_pairs(),
                         [(0, None), (1, None)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 35), range(20, 20 + 35))] +
                         [(37, None), (38, None), (39, None)]
                         )
        self.assertEqual(a.get_aligned_pairs(True),
                         # [(0, None), (1, None)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 35), range(20, 20 + 35))]
                         # [(37, None), (38, None), (39, None)]
                         )
    def test_get_aligned_pairs_hard_clipping(self):
        a = self.buildRead()
        a.cigartuples = ((5, 2), (0, 35), (5, 3))
        self.assertEqual(a.get_aligned_pairs(),
                         # No seq, no seq pos
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(0, 0 + 35), range(20, 20 + 35))])
        self.assertEqual(a.get_aligned_pairs(True),
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(0, 0 + 35), range(20, 20 + 35))])
    def test_get_aligned_pairs_skip(self):
        a = self.buildRead()
        a.cigarstring = "2M100D38M"
        self.assertEqual(a.get_aligned_pairs(),
                         [(0, 20), (1, 21)] +
                         [(None, refpos) for refpos in range(22, 22 + 100)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 38),
                             range(20 + 2 + 100, 20 + 2 + 100 + 38))])
        self.assertEqual(a.get_aligned_pairs(True),
                         [(0, 20), (1, 21)] +
                         # [(None, refpos) for refpos in range(21, 21+100)] +
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(2, 2 + 38),
                             range(20 + 2 + 100, 20 + 2 + 100 + 38))])
    def test_get_aligned_pairs_match_mismatch(self):
        a = self.buildRead()
        a.cigartuples = ((7, 20), (8, 20))
        self.assertEqual(a.get_aligned_pairs(),
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(0, 0 + 40), range(20, 20 + 40))])
        self.assertEqual(a.get_aligned_pairs(True),
                         [(qpos, refpos) for (qpos, refpos) in zip(
                             range(0, 0 + 40), range(20, 20 + 40))])
    def test_get_aligned_pairs_padding(self):
        a = self.buildRead()
        a.cigartuples = ((7, 20), (6, 1), (8, 19))
        def inner():
            a.get_aligned_pairs()
        # padding is not bein handled right now
        self.assertRaises(NotImplementedError, inner)
    def test_get_aligned_pairs(self):
        # with_seq adds the reference base; mismatches (from the MD tag)
        # are reported in lower case.
        a = self.buildRead()
        a.query_sequence = "A" * 9
        a.cigarstring = "9M"
        a.set_tag("MD", "9")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'A'), (5, 25, 'A'),
             (6, 26, 'A'), (7, 27, 'A'), (8, 28, 'A')])
        a.set_tag("MD", "4C4")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'c'), (5, 25, 'A'),
             (6, 26, 'A'), (7, 27, 'A'), (8, 28, 'A')])
        a.cigarstring = "5M2D4M"
        a.set_tag("MD", "4C^TT4")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'c'),
             (None, 25, 'T'), (None, 26, 'T'),
             (5, 27, 'A'), (6, 28, 'A'), (7, 29, 'A'), (8, 30, 'A')]
        )
        a.cigarstring = "5M2D2I2M"
        a.set_tag("MD", "4C^TT2")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'c'),
             (None, 25, 'T'), (None, 26, 'T'),
             (5, None, None), (6, None, None),
             (7, 27, 'A'), (8, 28, 'A')]
        )
    def test_get_aligned_pairs_skip_reference(self):
        a = self.buildRead()
        a.query_sequence = "A" * 10
        a.cigarstring = "5M1N5M"
        a.set_tag("MD", "10")
        self.assertEqual(
            a.get_aligned_pairs(with_seq=True),
            [(0, 20, 'A'), (1, 21, 'A'), (2, 22, 'A'),
             (3, 23, 'A'), (4, 24, 'A'), (None, 25, None),
             (5, 26, 'A'), (6, 27, 'A'), (7, 28, 'A'),
             (8, 29, 'A'), (9, 30, 'A')])
        self.assertEqual(
            a.get_aligned_pairs(with_seq=False),
            [(0, 20), (1, 21), (2, 22),
             (3, 23), (4, 24), (None, 25),
             (5, 26), (6, 27), (7, 28),
             (8, 29), (9, 30)])
        self.assertEqual(
            a.get_aligned_pairs(matches_only=True, with_seq=False),
            [(0, 20), (1, 21),
             (2, 22), (3, 23),
             (4, 24), (5, 26),
             (6, 27), (7, 28),
             (8, 29), (9, 30)])
    def testNoSequence(self):
        '''issue 176: retrieving length without query sequence
        with soft-clipping.
        '''
        a = self.buildRead()
        a.query_sequence = None
        a.cigarstring = "20M"
        self.assertEqual(a.query_alignment_length, 20)
        a.cigarstring = "20M1S"
        self.assertEqual(a.query_alignment_length, 20)
        a.cigarstring = "1S20M"
        self.assertEqual(a.query_alignment_length, 20)
        a.cigarstring = "1S20M1S"
        self.assertEqual(a.query_alignment_length, 20)
class TestCigarStats(ReadTest):
    # get_cigar_stats() returns two 11-element vectors: per-operation base
    # counts and per-operation block counts. Indexes 0-8 follow the MIDNSHP=X
    # order (see the enumerate loop below); the last slot mirrors the NM tag.
    def testStats(self):
        a = self.buildRead()
        a.cigarstring = None
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        a.cigarstring = "10M"
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        a.cigarstring = "10M2I2M"
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        for i, x in enumerate("MIDNSHP=X"):
            a.cigarstring = "2{}".format(x)
            expected = [[0] * 11, [0] * 11]
            expected[0][i] = 2
            expected[1][i] = 1
            self.assertEqual(
                [list(x) for x in a.get_cigar_stats()],
                expected)
        # The NM tag is reported in the last slot of the base-count vector
        # and survives clearing the cigar string.
        a.cigarstring = "10M"
        a.set_tag("NM", 5)
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        a.cigarstring = None
        self.assertEqual(
            [list(x) for x in a.get_cigar_stats()],
            [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
class TestAlignedPairs(unittest.TestCase):
    # BAM fixture used to cross-check get_aligned_pairs() reference bases.
    filename = os.path.join(DATADIR, "example_aligned_pairs.bam")
    def testReferenceBases(self):
        """reference bases should always be the same nucleotide
        """
        reference_bases = collections.defaultdict(list)
        with pysam.AlignmentFile(self.filename) as inf:
            for c in inf.pileup():
                for r in c.pileups:
                    for read, ref, base in r.alignment.get_aligned_pairs(
                            with_seq=True):
                        if ref is None:
                            continue
                        reference_bases[ref].append(base.upper())
        # Each reference position must be reported with a single base,
        # independent of which read covered it.
        for x, y in reference_bases.items():
            self.assertEqual(len(set(y)), 1)
class TestTags(ReadTest):
    """Tests for optional-field (tag) handling on AlignedSegment, including
    reconstruction of the reference sequence from the MD tag (the tests
    below show mismatching bases reported in lower case)."""
    def testMissingTag(self):
        a = self.buildRead()
        self.assertRaises(KeyError, a.get_tag, "XP")
    def testEmptyTag(self):
        a = self.buildRead()
        self.assertRaises(KeyError, a.get_tag, "XT")
    def testSetTag(self):
        a = self.buildRead()
        self.assertEqual(False, a.has_tag("NM"))
        a.set_tag("NM", 2)
        self.assertEqual(True, a.has_tag("NM"))
        self.assertEqual(a.get_tag("NM"), 2)
        a.set_tag("NM", 3)
        self.assertEqual(a.get_tag("NM"), 3)
        # Setting a tag to None deletes it.
        a.set_tag("NM", None)
        self.assertEqual(False, a.has_tag("NM"))
        # check if deleting a non-existing tag is fine
        a.set_tag("NM", None)
        a.set_tag("NM", None)
    def testArrayTags(self):
        read = self.buildRead()
        supported_dtypes = "bhBHf"
        unsupported_dtypes = "lLd"
        for dtype in supported_dtypes:
            key = "F" + dtype
            read.set_tag(key, array.array(dtype, range(10)))
            ary = read.get_tag(key)
        for dtype in unsupported_dtypes:
            key = "F" + dtype
            self.assertRaises(ValueError,
                              read.set_tag,
                              key,
                              array.array(dtype, range(10)))
    def testAddTagsType(self):
        a = self.buildRead()
        a.tags = None
        self.assertEqual(a.tags, [])
        a.setTag('X1', 5.0)
        a.setTag('X2', "5.0")
        a.setTag('X3', 5)
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 5.0),
                                 ('X2', "5.0"),
                                 ('X3', 5)]))
        # test setting float for int value
        a.setTag('X4', 5, value_type='d')
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 5.0),
                                 ('X2', "5.0"),
                                 ('X3', 5),
                                 ('X4', 5.0)]))
        # test setting int for float value - the
        # value will be rounded.
        a.setTag('X5', 5.2, value_type='i')
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 5.0),
                                 ('X2', "5.0"),
                                 ('X3', 5),
                                 ('X4', 5.0),
                                 ('X5', 5)]))
        # test setting invalid type code
        self.assertRaises(ValueError, a.setTag, 'X6', 5.2, 'g')
    def testTagsUpdatingFloat(self):
        a = self.buildRead()
        a.tags = [('NM', 1), ('RG', 'L1'),
                  ('PG', 'P1'), ('XT', 'U')]
        self.assertEqual(a.tags,
                         [('NM', 1), ('RG', 'L1'),
                          ('PG', 'P1'), ('XT', 'U')])
        a.tags += [('XC', 5.0)]
        self.assertEqual(a.tags,
                         [('NM', 1), ('RG', 'L1'),
                          ('PG', 'P1'), ('XT', 'U'), ('XC', 5.0)])
    def testAddTags(self):
        a = self.buildRead()
        a.tags = [('NM', 1), ('RG', 'L1'),
                  ('PG', 'P1'), ('XT', 'U')]
        self.assertEqual(sorted(a.tags),
                         sorted([('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U')]))
        a.setTag('X1', 'C')
        self.assertEqual(sorted(a.tags),
                         sorted([('X1', 'C'), ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))
        a.setTag('X2', 5)
        self.assertEqual(sorted(a.tags),
                         sorted([('X2', 5), ('X1', 'C'),
                                 ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))
        # add with replacement
        a.setTag('X2', 10)
        self.assertEqual(sorted(a.tags),
                         sorted([('X2', 10), ('X1', 'C'),
                                 ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))
        # add without replacement
        a.setTag('X2', 5, replace=False)
        self.assertEqual(sorted(a.tags),
                         sorted([('X2', 10), ('X1', 'C'),
                                 ('X2', 5),
                                 ('NM', 1), ('RG', 'L1'),
                                 ('PG', 'P1'), ('XT', 'U'), ]))
    def testTagParsing(self):
        '''test for tag parsing
        see http://groups.google.com/group/pysam-user-group/browse_thread/thread/67ca204059ea465a
        '''
        samfile = pysam.AlignmentFile(
            os.path.join(DATADIR, "ex8.bam"),
            "rb")
        # Round-tripping the tags through set_tags must be lossless.
        for entry in samfile:
            before = entry.get_tags()
            entry.set_tags(before)
            after = entry.get_tags()
            self.assertEqual(after, before)
    def testMDTagMatchOnly(self):
        a = self.buildRead()
        # Substitutions only
        a.cigarstring = "21M"
        a.query_sequence = "A" * 21
        a.set_tag('MD', "5C0T0G05C0G0T5")
        self.assertEqual(
            "AAAAActgAAAAAcgtAAAAA",
            a.get_reference_sequence())
        a.cigarstring = "21M"
        a.query_sequence = "A" * 21
        a.set_tag('MD', "5CTG5CGT5")
        self.assertEqual(
            "AAAAActgAAAAAcgtAAAAA",
            a.get_reference_sequence())
        a.cigarstring = "11M"
        a.query_sequence = "A" * 11
        a.set_tag('MD', "CTG5CGT")
        self.assertEqual(
            "ctgAAAAAcgt",
            a.get_reference_sequence())
    def testMDTagInsertions(self):
        a = self.buildRead()
        # insertions are silent in the reference sequence
        a.cigarstring = "5M1I5M"
        a.query_sequence = "A" * 5 + "C" + "A" * 5
        a.set_tag('MD', "10")
        self.assertEqual(
            a.get_reference_sequence(),
            "A" * 10)
        a.cigarstring = "1I10M"
        a.query_sequence = "C" * 1 + "A" * 10
        self.assertEqual(
            a.get_reference_sequence(),
            "A" * 10)
        a.cigarstring = "10M1I"
        a.query_sequence = "A" * 10 + "C" * 1
        self.assertEqual(
            a.get_reference_sequence(),
            "A" * 10)
    def testMDTagDeletions(self):
        # Bases deleted from the query (after '^' in the MD tag) re-appear
        # in the reconstructed reference.
        a = self.buildRead()
        a.cigarstring = "5M1D5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "5^C5")
        self.assertEqual(
            "A" * 5 + "C" + "A" * 5,
            a.get_reference_sequence())
        a.cigarstring = "5M3D5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "5^CCC5")
        self.assertEqual(
            "A" * 5 + "C" * 3 + "A" * 5,
            a.get_reference_sequence())
    def testMDTagRefSkipping(self):
        # N (reference skip) operations contribute nothing to the sequence.
        a = self.buildRead()
        a.cigarstring = "5M1N5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "10")
        self.assertEqual(
            "A" * 10,
            a.get_reference_sequence())
        a.cigarstring = "5M3N5M"
        a.query_sequence = "A" * 10
        a.set_tag('MD', "10")
        self.assertEqual(
            "A" * 10,
            a.get_reference_sequence())
    def testMDTagSoftClipping(self):
        # Soft-clipped query bases are excluded from the reference sequence.
        a = self.buildRead()
        # softclipping
        a.cigarstring = "5S5M1D5M5S"
        a.query_sequence = "G" * 5 + "A" * 10 + "G" * 5
        a.set_tag('MD', "5^C5")
        self.assertEqual(
            "A" * 5 + "C" + "A" * 5,
            a.get_reference_sequence())
        # all together
        a.cigarstring = "5S5M1D5M1I5M5S"
        a.query_sequence = "G" * 5 + "A" * 16 + "G" * 5
        a.set_tag('MD', "2C2^T10")
        self.assertEqual(
            "AAcAATAAAAAAAAAA",
            a.get_reference_sequence())
    def testMDTagComplex(self):
        a = self.buildRead()
        a.cigarstring = "5S5M1I2D5M5S"
        a.query_sequence = "G" * 5 + "A" * 11 + "G" * 5
        a.set_tag('MD', "2C2^TC5")
        self.assertEqual(
            "AAcAATCAAAAA",
            a.get_reference_sequence())
        a.cigarstring = "5S5M2D1I5M5S"
        a.query_sequence = "G" * 5 + "A" * 11 + "G" * 5
        a.set_tag('MD', "2C2^TC5")
        self.assertEqual(
            "AAcAATCAAAAA",
            a.get_reference_sequence())
        # insertion in reference overlapping deletion in reference
        # read:  AACCCCA---AAA
        # ref:   AA----AGGGAAA
        a.cigarstring = "2M4I1M3D3M"
        a.set_tag("MD", "3^GGG3")
        a.query_sequence = "AACCCCAAAA"
        self.assertEqual(
            "AAAGGGAAA",
            a.get_reference_sequence())
        a.cigarstring = "5M2D2I2M"
        a.set_tag("MD", "4C^TT2")
        a.query_sequence = "A" * 9
        self.assertEqual(
            "AAAAcTTAA",
            a.get_reference_sequence())
class TestCopy(ReadTest):

    def _check_independent_names(self, a, b):
        """Mutating one copy's name must not affect the other."""
        a.query_name = 'ReadA'
        b.query_name = 'ReadB'
        self.assertEqual(a.query_name, 'ReadA')
        self.assertEqual(b.query_name, 'ReadB')

    def testCopy(self):
        original = self.buildRead()
        duplicate = copy.copy(original)
        # The shallow copy compares equal but must be a distinct object.
        self.assertEqual(original, duplicate)
        self._check_independent_names(original, duplicate)

    def testDeepCopy(self):
        original = self.buildRead()
        duplicate = copy.deepcopy(original)
        # The deep copy compares equal but must be a distinct object.
        self.assertEqual(original, duplicate)
        self._check_independent_names(original, duplicate)
class TestAsString(unittest.TestCase):

    def testAsString(self):
        """tostring() must reproduce the original SAM text of each record."""
        sam_path = os.path.join(DATADIR, "ex2.sam")
        bam_path = os.path.join(DATADIR, "ex2.bam")
        with open(sam_path) as samf:
            reference = [line[:-1] for line in samf
                         if not line.startswith("@")]
        pysamf = pysam.AlignmentFile(bam_path, "r")
        for expected, read in zip(reference, pysamf):
            self.assertEqual(expected, read.tostring(pysamf))
if __name__ == "__main__":
    # Run the full test suite when this file is executed directly.
    unittest.main()
|
TyberiusPrime/pysam
|
tests/AlignedSegment_test.py
|
Python
|
mit
| 25,907
|
[
"pysam"
] |
b2691163db02576632ef480c14ecc594494f7b072db6cd35eb91c95d9bb6d19a
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PTransform and descendants.
A PTransform is an object describing (not executing) a computation. The actual
execution semantics for a transform is captured by a runner object. A transform
object always belongs to a pipeline object.
A PTransform derived class needs to define the expand() method that describes
how one or more PValues are created by the transform.
The module defines a few standard transforms: FlatMap (parallel do),
GroupByKey (group by key), etc. Note that the expand() methods for these
classes contain code that will add nodes to the processing graph associated
with a pipeline.
As support for the FlatMap transform, the module also defines a DoFn
class and wrapper class that allows lambda functions to be used as
FlatMap processing functions.
"""
from __future__ import absolute_import
import copy
import inspect
import operator
import os
import sys
from google.protobuf import wrappers_pb2
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal import util
from apache_beam.transforms.display import HasDisplayData
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import typehints
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import WithTypeHints
from apache_beam.typehints.trivial_inference import instance_to_type
from apache_beam.typehints.typehints import validate_composite_type_param
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
# Explicit public API of this module; all other names are Beam-internal.
__all__ = [
    'PTransform',
    'ptransform_fn',
    'label_from_callable',
    ]
class _PValueishTransform(object):
  """Base visitor over PValueish structures.

  A PValueish is a PValue, or a list, tuple, or dict of (nested) PValueish
  objects.  visit() dispatches on the node's class name to a visit_<class>
  method; node types without a handler are returned unchanged, so a visit
  produces a (possibly mutated) copy of the structure.
  """
  def visit(self, node, *args):
    handler = getattr(self, 'visit_' + node.__class__.__name__, None)
    if handler is None:
      # No handler registered for this type: treat the node as a leaf.
      return node
    return handler(node, *args)
  def visit_list(self, node, *args):
    return [self.visit(item, *args) for item in node]
  def visit_tuple(self, node, *args):
    return tuple([self.visit(item, *args) for item in node])
  def visit_dict(self, node, *args):
    return dict((key, self.visit(value, *args))
                for key, value in node.items())
class _SetInputPValues(_PValueishTransform):
  """Substitutes selected nodes of a PValueish, matching them by identity."""
  def visit(self, node, replacements):
    # Replacement keys are id(node) so that unhashable nodes work too.
    try:
      return replacements[id(node)]
    except KeyError:
      return super(_SetInputPValues, self).visit(node, replacements)
class _MaterializedDoOutputsTuple(pvalue.DoOutputsTuple):
  # Wraps a deferred DoOutputsTuple so that indexing by tag yields the
  # materialized (unwindowed) values from the runner's pvalue cache rather
  # than the deferred PCollections.
  def __init__(self, deferred, pvalue_cache):
    super(_MaterializedDoOutputsTuple, self).__init__(
        None, None, deferred._tags, deferred._main_tag)
    self._deferred = deferred
    self._pvalue_cache = pvalue_cache
  def __getitem__(self, tag):
    # Resolve the tagged deferred output through the cache on each access.
    return self._pvalue_cache.get_unwindowed_pvalue(self._deferred[tag])
class _MaterializePValues(_PValueishTransform):
  """Replaces each deferred PValue in a PValueish with its computed value."""
  def __init__(self, pvalue_cache):
    self._pvalue_cache = pvalue_cache
  def visit(self, node):
    # Plain PValues are looked up in the cache directly; DoOutputsTuples are
    # wrapped so per-tag lookup happens lazily; everything else recurses.
    if isinstance(node, pvalue.PValue):
      return self._pvalue_cache.get_unwindowed_pvalue(node)
    if isinstance(node, pvalue.DoOutputsTuple):
      return _MaterializedDoOutputsTuple(node, self._pvalue_cache)
    return super(_MaterializePValues, self).visit(node)
class GetPValues(_PValueishTransform):
  """Flattens every PValue contained in a PValueish into a list."""
  def visit(self, node, pvalues=None):
    # Two-phase visitor: the external call (pvalues is None) allocates the
    # accumulator, recurses, and returns it; recursive calls pass the
    # accumulator explicitly and return None.
    if pvalues is None:
      pvalues = []
      self.visit(node, pvalues)
      return pvalues
    elif isinstance(node, (pvalue.PValue, pvalue.DoOutputsTuple)):
      pvalues.append(node)
    else:
      super(GetPValues, self).visit(node, pvalues)
class _ZipPValues(_PValueishTransform):
  """Pairs each PValue in a pvalueish with a value in a parallel out sibling.
  Sibling should have the same nested structure as pvalueish.  Leaves in
  sibling are expanded across nested pvalueish lists, tuples, and dicts.
  For example
    ZipPValues().visit({'a': pc1, 'b': (pc2, pc3)},
                       {'a': 'A', 'b': 'B'})
  will return
    [('a', pc1, 'A'), ('b', pc2, 'B'), ('b', pc3, 'B')]
  """
  def visit(self, pvalueish, sibling, pairs=None, context=None):
    # External call (pairs is None): allocate the result, recurse, return it.
    # Recursive calls append (context, pvalue, sibling_leaf) triples.
    if pairs is None:
      pairs = []
      self.visit(pvalueish, sibling, pairs, context)
      return pairs
    elif isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
      pairs.append((context, pvalueish, sibling))
    else:
      super(_ZipPValues, self).visit(pvalueish, sibling, pairs, context)
  def visit_list(self, pvalueish, sibling, pairs, context):
    if isinstance(sibling, (list, tuple)):
      # Zip positionally; pad sibling with None so every pvalueish entry
      # is visited even when sibling is shorter.
      for ix, (p, s) in enumerate(zip(
          pvalueish, list(sibling) + [None] * len(pvalueish))):
        self.visit(p, s, pairs, 'position %s' % ix)
    else:
      # A leaf sibling is broadcast across all entries.
      for p in pvalueish:
        self.visit(p, sibling, pairs, context)
  def visit_tuple(self, pvalueish, sibling, pairs, context):
    self.visit_list(pvalueish, sibling, pairs, context)
  def visit_dict(self, pvalueish, sibling, pairs, context):
    if isinstance(sibling, dict):
      for key, p in pvalueish.items():
        self.visit(p, sibling.get(key), pairs, key)
    else:
      # A non-dict sibling is broadcast across all dict values.
      for p in pvalueish.values():
        self.visit(p, sibling, pairs, context)
class PTransform(WithTypeHints, HasDisplayData):
  """A transform object used to modify one or more PCollections.
  Subclasses must define an expand() method that will be used when the transform
  is applied to some arguments. Typical usage pattern will be:
    input | CustomTransform(...)
  The expand() method of the CustomTransform object passed in will be called
  with input as an argument.
  """
  # By default, transforms don't have any side inputs.
  side_inputs = ()
  # Used for nullary transforms.
  pipeline = None
  # Default is unset.
  _user_label = None
  def __init__(self, label=None):
    super(PTransform, self).__init__()
    self.label = label
  @property
  def label(self):
    # Falls back to a class-derived default when the user supplied no label.
    return self._user_label or self.default_label()
  @label.setter
  def label(self, value):
    self._user_label = value
  def default_label(self):
    # Subclasses may override to produce a more descriptive default label.
    return self.__class__.__name__
  def with_input_types(self, input_type_hint):
    """Annotates the input type of a PTransform with a type-hint.
    Args:
      input_type_hint: An instance of an allowed built-in type, a custom class,
        or an instance of a typehints.TypeConstraint.
    Raises:
      TypeError: If 'type_hint' is not a valid type-hint. See
        typehints.validate_composite_type_param for further details.
    Returns:
      A reference to the instance of this particular PTransform object. This
      allows chaining type-hinting related methods.
    """
    validate_composite_type_param(input_type_hint,
                                  'Type hints for a PTransform')
    return super(PTransform, self).with_input_types(input_type_hint)
  def with_output_types(self, type_hint):
    """Annotates the output type of a PTransform with a type-hint.
    Args:
      type_hint: An instance of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.
    Raises:
      TypeError: If 'type_hint' is not a valid type-hint. See
        typehints.validate_composite_type_param for further details.
    Returns:
      A reference to the instance of this particular PTransform object. This
      allows chaining type-hinting related methods.
    """
    validate_composite_type_param(type_hint, 'Type hints for a PTransform')
    return super(PTransform, self).with_output_types(type_hint)
  def type_check_inputs(self, pvalueish):
    # Validates declared input type hints against the actual inputs.
    self.type_check_inputs_or_outputs(pvalueish, 'input')
  def infer_output_type(self, unused_input_type):
    # Default inference: trust an explicit simple output hint, else Any.
    return self.get_type_hints().simple_output_type(self.label) or typehints.Any
  def type_check_outputs(self, pvalueish):
    # Validates declared output type hints against the actual outputs.
    self.type_check_inputs_or_outputs(pvalueish, 'output')
  def type_check_inputs_or_outputs(self, pvalueish, input_or_output):
    # Shared implementation for input/output hint checking.  Raises
    # TypeCheckError if any PValue's element_type is inconsistent with the
    # corresponding declared hint.
    hints = getattr(self.get_type_hints(), input_or_output + '_types')
    if not hints:
      return
    arg_hints, kwarg_hints = hints
    if arg_hints and kwarg_hints:
      raise TypeCheckError(
          'PTransform cannot have both positional and keyword type hints '
          'without overriding %s._type_check_%s()' % (
              self.__class__, input_or_output))
    root_hint = (
        arg_hints[0] if len(arg_hints) == 1 else arg_hints or kwarg_hints)
    for context, pvalue_, hint in _ZipPValues().visit(pvalueish, root_hint):
      if pvalue_.element_type is None:
        # TODO(robertwb): It's a bug that we ever get here. (typecheck)
        continue
      if hint and not typehints.is_consistent_with(pvalue_.element_type, hint):
        at_context = ' %s %s' % (input_or_output, context) if context else ''
        raise TypeCheckError(
            '%s type hint violation at %s%s: expected %s, got %s' % (
                input_or_output.title(), self.label, at_context, hint,
                pvalue_.element_type))
  def _infer_output_coder(self, input_type=None, input_coder=None):
    """Returns the output coder to use for output of this transform.
    Note: this API is experimental and is subject to change; please do not rely
    on behavior induced by this method.
    The Coder returned here should not be wrapped in a WindowedValueCoder
    wrapper.
    Args:
      input_type: An instance of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint for the input type, or None if not available.
      input_coder: Coder object for encoding input to this PTransform, or None
        if not available.
    Returns:
      Coder object for encoding output of this PTransform or None if unknown.
    """
    # TODO(ccy): further refine this API.
    return None
  def _clone(self, new_label):
    """Clones the current transform instance under a new label."""
    transform = copy.copy(self)
    transform.label = new_label
    return transform
  def expand(self, input_or_inputs):
    # Subclasses must override; describes how outputs are built from inputs.
    raise NotImplementedError
  def __str__(self):
    return '<%s>' % self._str_internal()
  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))
  def _str_internal(self):
    # Compact description: class name plus label/inputs/side_inputs if set.
    return '%s(PTransform)%s%s%s' % (
        self.__class__.__name__,
        ' label=[%s]' % self.label if (hasattr(self, 'label') and
                                       self.label) else '',
        ' inputs=%s' % str(self.inputs) if (hasattr(self, 'inputs') and
                                            self.inputs) else '',
        ' side_inputs=%s' % str(self.side_inputs) if self.side_inputs else '')
  def _check_pcollection(self, pcoll):
    # Guard used by subclasses: the argument must be a pipeline-attached
    # PCollection.
    if not isinstance(pcoll, pvalue.PCollection):
      raise error.TransformError('Expecting a PCollection argument.')
    if not pcoll.pipeline:
      raise error.TransformError('PCollection not part of a pipeline.')
  def get_windowing(self, inputs):
    """Returns the window function to be associated with transform's output.
    By default most transforms just return the windowing function associated
    with the input PCollection (or the first input if several).
    """
    # TODO(robertwb): Assert all input WindowFns compatible.
    return inputs[0].windowing
  def __rrshift__(self, label):
    # Supports the 'label' >> transform syntax.
    return _NamedPTransform(self, label)
  def __or__(self, right):
    """Used to compose PTransforms, e.g., ptransform1 | ptransform2."""
    if isinstance(right, PTransform):
      return _ChainedPTransform(self, right)
    return NotImplemented
  def __ror__(self, left, label=None):
    """Used to apply this PTransform to non-PValues, e.g., a tuple."""
    # NOTE: the extra 'label' parameter is used by _NamedPTransform, which
    # forwards the user-supplied label when delegating here.
    pvalueish, pvalues = self._extract_input_pvalues(left)
    pipelines = [v.pipeline for v in pvalues if isinstance(v, pvalue.PValue)]
    if pvalues and not pipelines:
      # Raw (non-PValue) inputs with no pipeline: build an eager
      # DirectRunner pipeline to evaluate the transform immediately.
      deferred = False
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam import pipeline
      from apache_beam.options.pipeline_options import PipelineOptions
      # pylint: enable=wrong-import-order, wrong-import-position
      p = pipeline.Pipeline(
          'DirectRunner', PipelineOptions(sys.argv))
    else:
      if not pipelines:
        if self.pipeline is not None:
          p = self.pipeline
        else:
          raise ValueError('"%s" requires a pipeline to be specified '
                           'as there are no deferred inputs.'% self.label)
      else:
        p = self.pipeline or pipelines[0]
        for pp in pipelines:
          if p != pp:
            raise ValueError(
                'Mixing value from different pipelines not allowed.')
      deferred = not getattr(p.runner, 'is_eager', False)
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.transforms.core import Create
    # pylint: enable=wrong-import-order, wrong-import-position
    # Wrap each raw value in a Create transform so it becomes a PCollection.
    replacements = {id(v): p | 'CreatePInput%s' % ix >> Create(v)
                    for ix, v in enumerate(pvalues)
                    if not isinstance(v, pvalue.PValue) and v is not None}
    pvalueish = _SetInputPValues().visit(pvalueish, replacements)
    self.pipeline = p
    result = p.apply(self, pvalueish, label)
    if deferred:
      return result
    # Get a reference to the runners internal cache, otherwise runner may
    # clean it after run.
    cache = p.runner.cache
    p.run().wait_until_finish()
    return _MaterializePValues(cache).visit(result)
  def _extract_input_pvalues(self, pvalueish):
    """Extract all the pvalues contained in the input pvalueish.
    Returns pvalueish as well as the flat inputs list as the input may have to
    be copied as inspection may be destructive.
    By default, recursively extracts tuple components and dict values.
    Generally only needs to be overriden for multi-input PTransforms.
    """
    # pylint: disable=wrong-import-order
    from apache_beam import pipeline
    # pylint: enable=wrong-import-order
    if isinstance(pvalueish, pipeline.Pipeline):
      pvalueish = pvalue.PBegin(pvalueish)
    def _dict_tuple_leaves(pvalueish):
      if isinstance(pvalueish, tuple):
        for a in pvalueish:
          for p in _dict_tuple_leaves(a):
            yield p
      elif isinstance(pvalueish, dict):
        for a in pvalueish.values():
          for p in _dict_tuple_leaves(a):
            yield p
      else:
        yield pvalueish
    return pvalueish, tuple(_dict_tuple_leaves(pvalueish))
  # Registry mapping a URN to (parameter proto type, constructor); shared by
  # all PTransform subclasses for runner-API (de)serialization.
  _known_urns = {}
  @classmethod
  def register_urn(cls, urn, parameter_type, constructor):
    cls._known_urns[urn] = parameter_type, constructor
  def to_runner_api(self, context):
    # Serializes this transform to its runner-API FunctionSpec proto.
    from apache_beam.runners.api import beam_runner_api_pb2
    urn, typed_param = self.to_runner_api_parameter(context)
    return beam_runner_api_pb2.FunctionSpec(
        urn=urn,
        parameter=proto_utils.pack_Any(typed_param))
  @classmethod
  def from_runner_api(cls, proto, context):
    # Reconstructs a transform from its runner-API proto via the registered
    # constructor for the proto's URN.
    if proto is None or not proto.urn:
      return None
    parameter_type, constructor = cls._known_urns[proto.urn]
    return constructor(
        proto_utils.unpack_Any(proto.parameter, parameter_type),
        context)
  def to_runner_api_parameter(self, context):
    # Default serialization: pickle the whole transform object.
    return (urns.PICKLED_TRANSFORM,
            wrappers_pb2.BytesValue(value=pickler.dumps(self)))
  @staticmethod
  def from_runner_api_parameter(spec_parameter, unused_context):
    return pickler.loads(spec_parameter.value)
# Register the default pickled serialization so from_runner_api can
# reconstruct arbitrary PTransforms.
PTransform.register_urn(
    urns.PICKLED_TRANSFORM,
    wrappers_pb2.BytesValue,
    PTransform.from_runner_api_parameter)
class _ChainedPTransform(PTransform):
  """A PTransform formed by composing several transforms with `|`.

  The parts are kept in a flat tuple (rather than a nested tree of
  composites) so labels and monitoring stay readable.
  """
  def __init__(self, *parts):
    super(_ChainedPTransform, self).__init__(label=self._chain_label(parts))
    self._parts = parts
  def _chain_label(self, parts):
    return '|'.join([part.label for part in parts])
  def __or__(self, right):
    if not isinstance(right, PTransform):
      return NotImplemented
    # Extend the flat chain instead of nesting composites.
    return _ChainedPTransform(*(self._parts + (right,)))
  def expand(self, pval):
    # Apply each part in order, feeding the output of one into the next.
    result = pval
    for part in self._parts:
      result = result | part
    return result
class PTransformWithSideInputs(PTransform):
  """A superclass for any PTransform (e.g. FlatMap or Combine)
  invoking user code.
  PTransforms like FlatMap invoke user-supplied code in some kind of
  package (e.g. a DoFn) and optionally provide arguments and side inputs
  to that code. This internal-use-only class contains common functionality
  for PTransforms that fit this model.
  """
  def __init__(self, fn, *args, **kwargs):
    if isinstance(fn, type) and issubclass(fn, WithTypeHints):
      # Don't treat Fn class objects as callables.
      raise ValueError('Use %s() not %s.' % (fn.__name__, fn.__name__))
    self.fn = self.make_fn(fn)
    # Now that we figure out the label, initialize the super-class.
    super(PTransformWithSideInputs, self).__init__()
    if (any([isinstance(v, pvalue.PCollection) for v in args]) or
        any([isinstance(v, pvalue.PCollection) for v in kwargs.itervalues()])):
      raise error.SideInputError(
          'PCollection used directly as side input argument. Specify '
          'AsIter(pcollection) or AsSingleton(pcollection) to indicate how the '
          'PCollection is to be used.')
    # Split AsSideInput wrappers out of args/kwargs; side_inputs carries the
    # extracted wrappers, args/kwargs keep placeholders for them.
    self.args, self.kwargs, self.side_inputs = util.remove_objects_from_args(
        args, kwargs, pvalue.AsSideInput)
    self.raw_side_inputs = args, kwargs
    # Prevent name collisions with fns of the form '<function <lambda> at ...>'
    self._cached_fn = self.fn
    # Ensure fn and side inputs are picklable for remote execution.
    self.fn = pickler.loads(pickler.dumps(self.fn))
    self.args = pickler.loads(pickler.dumps(self.args))
    self.kwargs = pickler.loads(pickler.dumps(self.kwargs))
    # For type hints, because loads(dumps(class)) != class.
    self.fn = self._cached_fn
  def with_input_types(
      self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):
    """Annotates the types of main inputs and side inputs for the PTransform.
    Args:
      input_type_hint: An instance of an allowed built-in type, a custom class,
        or an instance of a typehints.TypeConstraint.
      *side_inputs_arg_hints: A variable length argument composed of
        of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.
      **side_input_kwarg_hints: A dictionary argument composed of
        of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.
    Example of annotating the types of side-inputs:
      FlatMap().with_input_types(int, int, bool)
    Raises:
      TypeError: If 'type_hint' is not a valid type-hint. See
        typehints.validate_composite_type_param for further details.
    Returns:
      A reference to the instance of this particular PTransform object. This
      allows chaining type-hinting related methods.
    """
    super(PTransformWithSideInputs, self).with_input_types(input_type_hint)
    for si in side_inputs_arg_hints:
      validate_composite_type_param(si, 'Type hints for a PTransform')
    for si in side_input_kwarg_hints.values():
      validate_composite_type_param(si, 'Type hints for a PTransform')
    # NOTE(review): only the positional side-input hints are stored here;
    # keyword hints are passed through to WithTypeHints below.
    self.side_inputs_types = side_inputs_arg_hints
    return WithTypeHints.with_input_types(
        self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)
  def type_check_inputs(self, pvalueish):
    # Binds declared hints and actual argument types against the consuming
    # function's signature and checks consistency pairwise.
    type_hints = self.get_type_hints().input_types
    if type_hints:
      args, kwargs = self.raw_side_inputs
      def element_type(side_input):
        if isinstance(side_input, pvalue.AsSideInput):
          return side_input.element_type
        return instance_to_type(side_input)
      arg_types = [pvalueish.element_type] + [element_type(v) for v in args]
      kwargs_types = {k: element_type(v) for (k, v) in kwargs.items()}
      argspec_fn = self._process_argspec_fn()
      bindings = getcallargs_forhints(argspec_fn, *arg_types, **kwargs_types)
      hints = getcallargs_forhints(argspec_fn, *type_hints[0], **type_hints[1])
      for arg, hint in hints.items():
        if arg.startswith('%unknown%'):
          continue
        if hint is None:
          continue
        if not typehints.is_consistent_with(
            bindings.get(arg, typehints.Any), hint):
          raise TypeCheckError(
              'Type hint violation for \'%s\': requires %s but got %s for %s'
              % (self.label, hint, bindings[arg], arg))
  def _process_argspec_fn(self):
    """Returns an argspec of the function actually consuming the data.
    """
    raise NotImplementedError
  def make_fn(self, fn):
    # TODO(silviuc): Add comment describing that this is meant to be overriden
    # by methods detecting callables and wrapping them in DoFns.
    return fn
  def default_label(self):
    return '%s(%s)' % (self.__class__.__name__, self.fn.default_label())
class CallablePTransform(PTransform):
  """A class wrapper for a function-based transform."""
  def __init__(self, fn):
    # pylint: disable=super-init-not-called
    # This helper backs the @ptransform_fn decorator.  The superclass is
    # initialized only once the wrapper is invoked with its arguments
    # (in __call__), at which point all construction info is available.
    self.fn = fn
    self._args = ()
    self._kwargs = {}
  def display_data(self):
    fn_display = (self.fn.__name__
                  if hasattr(self.fn, '__name__')
                  else self.fn.__class__)
    return {
        'fn': fn_display,
        'args': DisplayDataItem(str(self._args)).drop_if_default('()'),
        'kwargs': DisplayDataItem(str(self._kwargs)).drop_if_default('{}'),
    }
  def __call__(self, *args, **kwargs):
    super(CallablePTransform, self).__init__()
    self._args = args
    self._kwargs = kwargs
    return self
  def expand(self, pcoll):
    # The wrapped function implements the whole transform, so any type-hint
    # information attached via .with_input_types()/.with_output_types() must
    # be forwarded; it is passed through an optional 'type_hints' parameter.
    call_kwargs = dict(self._kwargs)
    call_args = tuple(self._args)
    try:
      if 'type_hints' in inspect.getargspec(self.fn).args:
        call_args = (self.get_type_hints(),) + call_args
    except TypeError:
      # self.fn might not be an inspectable function.
      pass
    return self.fn(pcoll, *call_args, **call_kwargs)
  def default_label(self):
    if self._args:
      return '%s(%s)' % (
          label_from_callable(self.fn), label_from_callable(self._args[0]))
    return label_from_callable(self.fn)
def ptransform_fn(fn):
  """A decorator for a function-based PTransform.
  Experimental; no backwards-compatibility guarantees.
  Args:
    fn: A function implementing a custom PTransform.
  Returns:
    A CallablePTransform instance wrapping the function-based PTransform.
  This wrapper provides an alternative, simpler way to define a PTransform.
  The standard method is to subclass from PTransform and override the expand()
  method. An equivalent effect can be obtained by defining a function that
  takes an input PCollection and additional optional arguments and returns a
  resulting PCollection. For example::
    @ptransform_fn
    def CustomMapper(pcoll, mapfn):
      return pcoll | ParDo(mapfn)
  The equivalent approach using PTransform subclassing::
    class CustomMapper(PTransform):
      def __init__(self, mapfn):
        super(CustomMapper, self).__init__()
        self.mapfn = mapfn
      def expand(self, pcoll):
        return pcoll | ParDo(self.mapfn)
  With either method the custom PTransform can be used in pipelines as if
  it were one of the "native" PTransforms::
    result_pcoll = input_pcoll | 'Label' >> CustomMapper(somefn)
  Note that for both solutions the underlying implementation of the pipe
  operator (i.e., `|`) will inject the pcoll argument in its proper place
  (first argument if no label was specified and second argument otherwise).
  """
  return CallablePTransform(fn)
def label_from_callable(fn):
  """Returns a human-readable label for a callable.

  Preference order: the callable's own default_label() method, then its
  __name__ (with file and line detail for anonymous lambdas), and finally
  its str() representation.
  """
  if hasattr(fn, 'default_label'):
    return fn.default_label()
  if hasattr(fn, '__name__'):
    if fn.__name__ != '<lambda>':
      return fn.__name__
    # All lambdas are named '<lambda>'; disambiguate by definition site.
    return '<lambda at %s:%s>' % (
        os.path.basename(fn.func_code.co_filename),
        fn.func_code.co_firstlineno)
  return str(fn)
class _NamedPTransform(PTransform):
  # Wraps a transform together with an explicit user-supplied label.
  # Instances are created by the `'Label' >> transform` syntax
  # (see PTransform.__rrshift__).
  def __init__(self, transform, label):
    super(_NamedPTransform, self).__init__(label)
    self.transform = transform
  def __ror__(self, pvalueish, _unused=None):
    # Delegate application to the wrapped transform, forwarding the label.
    return self.transform.__ror__(pvalueish, self.label)
  def expand(self, pvalue):
    # Application always goes through the wrapped transform's __ror__.
    raise RuntimeError("Should never be expanded directly.")
|
dhalperi/beam
|
sdks/python/apache_beam/transforms/ptransform.py
|
Python
|
apache-2.0
| 25,541
|
[
"VisIt"
] |
d7448b41058cfc6d22dbe80e471f27f486e59089da85e970a34f00db79df5a1f
|
"""
Functions to help identify, extract, align and construct a phylogeny for a target gene.
The primary purpose of this module is for a user to confirm orthology of their gene, not to understand in detail the phylogenetic relationships between all closely related proteins.
"""
from math import floor
import os
import re
from Bio import AlignIO
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import numpy as np
####### Classes ################################
class Consensus():
    """Per-column consensus statistics for an aligned fasta file.

    Reads every sequence from *fastafile* (via get_gene_fastas) and computes,
    for each alignment column, the most common residue(s) and the fraction of
    sequences carrying them.  Sliding-window helpers smooth those values.
    NOTE(review): written for Python 2 -- several methods slice dict.values(),
    which is not valid on Python 3 dict views.
    """
    def __init__(self, fastafile):
        self.fastafile = fastafile
        # collect all sequences and find longest:
        self.all_seqs = {}
        maxlen = 0
        for defline, seq, species in get_gene_fastas(fastafile=self.fastafile):
            self.all_seqs[defline] = seq
            if len(seq) > maxlen:
                maxlen = len(seq)
        self.maxlen = maxlen
        # initiate creation of consensus sequences
        self.consensus_pc()
    def __rep__(self):
        # NOTE(review): presumably intended to be __repr__; as named, this
        # method is never invoked by repr().
        return '%r' % self.fastafile
    def __str__(self):
        return "Number of sequences: %d\nMax alignment sequence length = %d" % (len(self.all_seqs), self.maxlen)
    def consensus_pc(self, keep_gaps=False):
        """Computes self.consensus (best residues per column) and the matching
        per-column frequency dict.

        NOTE(review): the assignment below rebinds self.consensus_pc to a
        dict, shadowing this method on the instance -- it can only be called
        once per object (which __init__ does).
        """
        # iterate through each position and get percentage of highest represented marker
        self.consensus = {}
        self.consensus_pc = {}
        for i in range(self.maxlen):
            counts = {'A':0, 'B':0, 'C':0, 'D':0, 'E':0, 'F':0, 'G':0,
                    'H':0, 'I':0, 'J':0, 'K':0, 'L':0, 'M':0, 'N':0,
                    'O':0, 'P':0, 'Q':0, 'R':0, 'S':0, 'T':0, 'U':0,
                    'V':0, 'W':0, 'X':0, 'Y':0, 'Z':0, '-':0, 'null':0,
                    }
            for d in self.all_seqs:
                try:
                    counts[self.all_seqs[d][i].upper()] += 1
                except KeyError:
                    # character not in the count table
                    counts['null'] += 1
                except IndexError:
                    # sequence shorter than the longest alignment row
                    counts['null'] += 1
            if not keep_gaps:
                # NOTE(review): gaps/nulls tallies are saved but currently
                # unused; when keep_gaps is True, '-' and 'null' stay in
                # counts and can win the max below.
                gaps = counts['-']
                del counts['-']
                nulls = counts['null']
                del counts['null']
            max_aa_count = max(counts.values())
            self.consensus[i] = [ aa for aa in counts if counts[aa] == max_aa_count ]
            self.consensus_pc[i] = (1.0 * max_aa_count / (sum(counts.values())))
    def make_sliding_consensus(self, window=20, window_pc=False):
        """
        Creates the global average consensus across a specified sliding window distance
        """
        # window_pc=True interprets *window* as a percentage of the alignment
        if window_pc:
            window = len(self.consensus_pc.values()) * window / 100.0
        self.sliding_cons = {}
        # NOTE(review): slicing .values() is Python-2 only and relies on the
        # incidental ordering of the int-keyed dict.
        for i in range(len(self.consensus_pc.values())):
            start = int(i - window / 2.0)
            if start < 0 :
                start = 0 # negative values will mess up the slicing
            end = int(i + window / 2.0)
            self.sliding_cons[i] = np.mean( self.consensus_pc.values()[start:end])
    def make_local_sliders(self, window=20, window_pc=False):
        """
        Creates the average level of consensus for each sequence across the specified
        window size.
        NB: if keep_gaps is false for the consensus, then a sequence with a perfect
        consensus match, but a few gaps, will have lower scores in those positions
        flanking the gaps, as a gap will not be considered a consensus sequence, and
        therefore score 0 when calculating the percentage.
        """
        if window_pc:
            window = len(self.consensus_pc.values()) * window / 100.0
        self.sliding_local = { seq:[] for seq in self.all_seqs }
        for seq in self.all_seqs:
            for i,bp in enumerate(self.all_seqs[seq]):
                start = int(i - window / 2.0)
                if start < 0 :
                    start = 0 # negative values will mess up the slicing
                end = int(i + window / 2.0)
                # calculate what percentage of local sites match the consensus
                # (note the consideration for multiple sequences --> "l in c"
                idx_pc = sum( 1 for l,c in zip(
                                self.all_seqs[seq][start:end],
                                self.consensus.values()[start:end],
                                ) if l in c
                            ) / float(end - start)
                self.sliding_local[seq].append(idx_pc)
class HMMer():
"""
A class for parsing HMMer results files.
this class efficiently parses the hmmer result file, making all
the different elements available for use.
INPUT:
handle to hmmer result file.
ATTRIBUTES:
query: query name
target: list of significant target matches
domain_seq: dictionary of domain sequences for each domain for each target
domain_prb: domain probability scores for each alignment
stats: indexed by target -->
{'eval' : Evalue for target
'score' : HMM score for target
'bias' : target bias
'dom_e' : e-value for best domain
'dom_s' : score for best domain
'dom_b' : bias for best domain
'dom_exp': number of expected domains
'dom_no' : actual number of domains found
'desc' : description of target (from original defline)
}
domain_stats: indexed by target then by domain (starting at 1) -->
{'score'
'bias'
'c-eval'
'i-eval'
'hmmfrom'
'hmmto'
'alifrom'
'alito'
'envfrom'
'envto'
'acc'
}
"""
def __init__(self, hmmer_handle):
self.query = None
self.stats = {}
self.targets = []
self.domain_seq = {} # the sequence of each domain
self.domain_aln = {} # the alignment summary for each domain
self.domain_prb = {} # the probability score for each domain
self.domain_stats = {} # the result scores for each domain (indexed by target then dom)
# initialise variables:
current_target = None
for line in hmmer_handle:
if len(line) == 0 or line[0] == '#':
continue
else:
cols = line.split()
if len(cols) == 0:
continue
if cols[0:5] == ['Domain', 'annotation', 'for', 'each', 'sequence']:
in_complete = False
in_annotation = True
elif cols[1:3] == ['inclusion', 'threshold']:
in_complete = False
in_annotation = False
elif not self.query and cols[0] == 'Query:':
self.query = cols[1]
in_complete = True
in_annotation = False
if in_complete:
if is_number(cols[0]):
# load the search scores into the stats dic
self.stats[cols[8]] = { 'eval' :float(cols[0]),
'score' :float(cols[1]),
'bias' :float(cols[2]),
'dom_e' :float(cols[3]),
'dom_s' :float(cols[4]),
'dom_b' :float(cols[5]),
'dom_exp':float(cols[6]),
'dom_no' :int(cols[7]),
'desc' :" ".join(cols[9:])}
self.targets.append(cols[8])
else:
continue
elif in_annotation:
if cols[0] == '>>': # ie, new target reached
# update stats for last domain
if current_target and domain_counter > 0:
self.domain_seq[current_target][domain_counter] = tseq
self.domain_prb[current_target][domain_counter] = prob
# initialise variables and libraries
current_target = cols[1]
domain_counter = 0
self.domain_stats[current_target] = {}
self.domain_seq[current_target] = {}
self.domain_prb[current_target] = {}
elif cols[0] == '==':
# update stats for last domain
if current_target and domain_counter > 0:
self.domain_seq[current_target][domain_counter] = tseq
self.domain_prb[current_target][domain_counter] = prob
# update counter
domain_counter += 1
# reset variables
tseq = ""
algn = ""
prob = ""
elif cols[:4] == ['Internal', 'pipeline', 'statistics', 'summary:']:
# end of file wrap up #
# update stats for last domain
if current_target and domain_counter > 0:
self.domain_seq[current_target][domain_counter] = tseq
self.domain_prb[current_target][domain_counter] = prob
break
elif is_number(cols[0]) and domain_counter == 0: # parse domain result table
self.domain_stats[current_target][int(cols[0])] = {'score':float(cols[2]),
'bias' :float(cols[3]),
'c-eval' :float(cols[4]),
'i-eval' :float(cols[5]),
'hmmfrom' :int(cols[6]),
'hmmto' :int(cols[7]),
'alifrom' :int(cols[9]),
'alito' :int(cols[10]),
'envfrom' :int(cols[12]),
'envto' :int(cols[13]),
'acc' :float(cols[15])}
else:
if cols[0] == self.query: # query sequence
continue
elif cols[0] == current_target:
tseq += cols[2]
elif cols[-1] == 'PP': # probability scores
prob += cols[0]
## TODO: Figure out how to efficiently parse the alignment string
else:
continue
hmmer_handle.close()
    def __rep__(self):
        # NOTE(review): probably intended to be __repr__ — Python never calls
        # __rep__ implicitly, so this method is effectively dead unless invoked
        # by name. Confirm no caller uses __rep__ before renaming.
        return "%r" % self.query
def __str__(self):
return "%s:\n%s targets" % (self.query, len(self.targets))
####### File conversion ########################
def phylipise(species, number, size=8):
    """Build a phylip-style short taxon name from a species tag and a number.

    Zero-pads between the species tag and the number so the result is exactly
    *size* characters; if the combined name is already too long to pad, the
    unpadded concatenation is truncated to 10 characters instead.
    """
    zeros = size - len(species) - len(str(number))
    if zeros >= 1:
        return "%s%s%s" % (species, "0" * zeros, number)
    return ("%s%s" % (species, number))[:10]
def make_phylip(fastaalignment, logfile):
    "Convert a fasta file alignment to phylip format"
    # output path: replace the last 3 characters of logfile's extension
    phylip_alignment = logfile[:-3] + 'phylip'
    with open(fastaalignment, 'rb') as input_handle:
        with open(phylip_alignment, 'w') as output_handle:
            # read the fasta alignment and rewrite it in phylip format
            alignment = AlignIO.read(input_handle, "fasta")
            AlignIO.write(alignment, output_handle, "phylip")
    return phylip_alignment
def fix_leaky_pipes(genename):
    """Escape literal '|' characters so the gene name is safe inside a regex."""
    return r"\|".join(genename.split("|"))
def remove_illegal_characters(defline):
    """RaXML does not allow certain characters in the taxon name (as determined by the
    defline in the fasta file). This function takes the first whitespace separated
    'word', and converts all remaining illegal characters into underscores."""
    # drop a leading '>' if present, then keep only the first word
    name = defline[1:] if defline.startswith(">") else defline
    newname = name.split()[0]
    for char in (":", ",", ")", "(", ";", "]", "[", "'"):
        newname = newname.replace(char, "_")
    return newname
####### fasta file operations ##################
def count_genes(genes=None, fastafile=None):
    """Evaluate the number of genes provided between a gene list and a fasta file.

    Args:
        genes: a single gene name or a list of gene names (empty strings and
            None are ignored; default was a mutable ``[]``, now ``None``).
        fastafile: optional path to a fasta file whose '>' deflines are counted
            with grep.

    Returns:
        (number of genes in the list, number of deflines counted in fastafile)
    """
    genenum = 0
    if genes is None:
        genes = []
    elif not isinstance(genes, list):
        genes = [genes]
    genes = [g for g in genes if g != '']
    if fastafile:
        # count number of genes provided via grep on the deflines
        handle = os.popen("grep -c '^>' " + fastafile)
        result = re.search(r"(\d*)", handle.readline())
        handle.close()
        if result:
            try:
                genenum = int(result.group(1))
            except ValueError:
                # fall back to 2 so the hmmer model is still built downstream
                # if counting the genes in the fasta file fails
                genenum = 2
                print("ValueError calculating genenum")
        else:
            genenum = 2
            print("No result found for genenum")
    return len(genes), genenum
def parsefasta(fastafile, verbalise=lambda *a: None):
    """
    Generator yielding (defline, sequence) pairs from a fasta file.

    Internal whitespace is removed from sequences and each record is screened
    with is_validfasta() before being yielded; invalid records are skipped.
    Lines appearing before the first '>' defline are ignored (previously they
    caused an unbound 'defline' NameError). The file handle is now closed via
    a context manager, and the file is opened in text mode so that the
    'line[0] == ">"' comparison also works under Python 3.
    """
    defline = None
    seq = ""
    with open(fastafile, 'r') as handle:
        for line in handle:
            if line[0] == '>':
                # flush the previous record (if any) before starting a new one
                if defline is not None:
                    seq = seq.replace(" ", "")
                    if is_validfasta(seq, verbalise=verbalise):
                        yield defline, seq
                defline = line.strip()
                seq = ""
            elif defline is not None:
                seq += line.strip()
        # flush the final record
        if defline is not None:
            seq = seq.replace(" ", "")
            if is_validfasta(seq, verbalise=verbalise):
                yield defline, seq
def is_validfasta(seq, verbalise=lambda *a: None):
    """Return True if seq is non-empty and free of forbidden characters."""
    if not seq:
        return False
    badcharacters = re.findall('[\(\)\!\@\#\$\%\^\&\>\<\\\|\/\:\;]', seq)
    if not badcharacters:
        return True
    # report the offending characters through the supplied logger callable
    verbalise("R", "Invalid characters found in fastafile: %s" % " ".join(set(badcharacters)))
    return False
def find_gene(fastafile, gene, verbalise=lambda *a: None):
    """Return (defline, seq) of the first record matching gene, else (None, None)."""
    genename = fix_leaky_pipes(gene)
    pattern = '(\W)?' + genename + '([\W\s].*)?$'  # '[\s\|\$]'
    for defline, seq in parsefasta(fastafile):
        if re.search(pattern, defline):
            return defline, seq
    return None, None
def find_genes(fastafiles, genes, verbalise=lambda *a: None):
    """
    This function allows extraction of multiple genes, hopefully to speed up large
    iterative searches that relied on the older find_gene function.

    NB: this function must return a different format than the old one, as it is dealing
    with multiple genes. It therefore returns a dictionary keyed by defline.

    The iterations will stop as soon as the number of entries saved in the dictionary
    is the same as the number of entries provided. If it reaches the end of the file, then
    it will return the dictionary as it stands (which may be empty, if no matches were
    found).

    the duplicates feature has only limited insurance - it will only flag duplicates found
    while still searching through the fasta files, but because the search ends once the
    size of the dictionary matches the number of genes requested, any duplicates that
    exist AFTER the last sequence parsed will not be identified as duplicates.
    """
    # allow a single gene name / single fasta file to be passed as bare strings
    if isinstance(genes, str):
        genes = [genes]
    genenames = [ fix_leaky_pipes(gene) for gene in genes ]  # escape '|' for regex use
    if isinstance(fastafiles, str):
        fastafiles = [fastafiles]
    dup_idx = {}        # gene name -> list of deflines it matched
    duplicates = []     # to store genenames that are found more than once
    genedic = {}        # defline -> sequence (the returned mapping)
    for fastafile in fastafiles:
        for defline, seq in parsefasta(fastafile):
            for g in genenames:
                if re.search( '(\W)?' + g + '([\W\s].*)?$', defline): # '[\s\|\$]'
                    # duplicate insurance check:
                    if g in dup_idx:
                        # NOTE(review): a gene found 3+ times appends to
                        # 'duplicates' repeatedly, so the cleanup loop below
                        # can attempt a double 'del' (KeyError) — confirm.
                        duplicates.append(g)
                        dup_idx[g].append(defline)
                    else:
                        dup_idx[g] = [defline]
                    genedic[defline] = seq
                    # early exit once every requested gene has one match
                    if len(genedic) == len(genenames):
                        return genedic
    else:
        # loop exhausted without finding everything:
        # remove duplicates that were found:
        for g in duplicates:
            for defline in dup_idx[g]:
                del genedic[defline]
    return genedic
def get_gene_fastas(genes=None, fastafile=None,
                    startpos=0, endpos=None,
                    specieslist = [], species=None,
                    comment=None, short=False,
                    dbpaths={}, verbalise=(lambda *a: None)):
    """
    Can either be given as a transcript name to be searched within the peptide databases,
    or can be a fasta file.

    Yields (defline, sequence[startpos:endpos], None) triples. The trailing
    None used to be the species; it is kept so dependent functions don't break
    and will hopefully be removed in a future upgrade.
    """
    if genes:
        if dbpaths == {} or specieslist == []:
            verbalise("R", "No database or specieslist provided :(")
            yield None, None, None
            # plain 'return' ends a generator; the previous explicit
            # 'raise StopIteration' becomes a RuntimeError under PEP 479
            # (Python 3.7+).
            return
        # extract all sequences:
        if species in specieslist:
            try:
                seqdic = find_genes(dbpaths[species + '_lpep'], genes, verbalise=verbalise)
            except KeyError:
                # no registered peptide db for this species: treat the species
                # string itself as a fasta path
                seqdic = find_genes(species, genes, verbalise=verbalise)
        else:
            seqdic = find_genes([dbpaths[sp + '_lpep'] for sp in specieslist],
                                genes,
                                verbalise=verbalise)
        for defline, seq in seqdic.items():
            # create fasta record from extracted sequence:
            if seq:
                if short:
                    name = phylipise(defline, short)
                    defline = ">%s %s" % (name, comment)
                elif comment:
                    defline = "%s %s" % (defline, comment)
                else:
                    defline = "%s" % (defline)
                yield defline, seq[startpos:endpos], None
    if fastafile:
        for defline, seq in parsefasta(fastafile, verbalise=verbalise):
            yield defline, seq[startpos:endpos], None
    if not genes and not fastafile:
        yield None, None, None
def rank_scores(homologlist, thresh1=0, thresh2=None, genename=None, outfile=None, showplot=False):
    """Plot ranked phmmer scores with cutoff guide-lines.

    homologlist: dict whose values carry the phmmer score at index 1.
    thresh1/thresh2: fractions of the maximum score at which to draw cutoffs.
    Saves a png to outfile if given; shows interactively if showplot is True.
    NOTE(review): relies on a module-level matplotlib 'plt' import — confirm.
    """
    # ranked scores, highest first
    yvalues = sorted([val[1] for val in homologlist.values()], reverse=True)
    plt.plot(yvalues)
    # primary cutoff: thresh1 fraction of the best score, and the number of
    # candidates whose score reaches it
    score_cutoff = thresh1 * max(yvalues)
    sample_cutoff = sum(1 for s in yvalues if s >= thresh1 * max(yvalues))
    plt.axhline( score_cutoff , color='r' )
    if thresh2:
        # optional secondary cutoff line
        plt.axhline( thresh2 * max(yvalues) , color='r' )
    plt.axvline( sample_cutoff -1 , color='g' )
    # annotate the (rank, score) cutoff point
    plt.text(sample_cutoff + 1,score_cutoff + 10 , "(%d,%d)" % (sample_cutoff,score_cutoff) )
    plt.xlabel("Gene rank")
    plt.ylabel("Phmmer score")
    plt.title("Ranking of phmmer scores for alignment with %s" % genename)
    if outfile:
        plt.savefig(outfile, format='png')
    if showplot:
        plt.show()
    else:
        plt.close()
def find_holes(seq):
    """For each gap of >=50 '-' in seq, collect the length of the preceding
    sequence fragment and the length of the gap (plus its terminating letter).
    """
    matches = re.findall('([A-Za-z]+)(-{50,}[A-Za-z])', seq)
    dists = []
    for before, gap in matches:
        dists.extend((len(before), len(gap)))
    return dists
def find_biggest_hole(seq):
    """Locate the longest run of '-' characters in seq.

    Returns (length before the gap, gap length, length after the gap), or
    (0, 0, len(seq)) if there are no gaps at all. Previously the search
    pattern used '(.+)' on both sides of the gap, which raised an
    AttributeError whenever the biggest gap touched the start or end of seq;
    '(.*)' accepts an empty flank while keeping the same (greedy, last
    occurrence) match for interior gaps.
    """
    allholes = re.findall('-+', seq)
    if not allholes:
        return 0, 0, len(seq)
    biggesthole = max(len(hole) for hole in allholes)
    pattern = '(.*)-{' + str(biggesthole) + '}(.*)'
    bigsearch = re.search(pattern, seq)
    return len(bigsearch.group(1)), biggesthole, len(bigsearch.group(2))
def get_pcmatch(seq):
    """Return (width, fraction of non-gap positions) for an alignment fragment."""
    width = len(seq)
    if width == 0:
        return 0, 0
    matches = width - seq.count('-')
    assert matches >= 0
    pcmatch = 1.0 * matches / width
    return width, pcmatch
def display_alignment(fastafile, conversiondic={}, outfile=None, showplot=True,
                      gapthresh=0.05, domain_prb=None, domain_stats=None):
    """Render the alignment figure produced by build_alignment().

    Optionally saves it as a png (outfile) and/or displays it (showplot);
    when not displayed, the figure is closed to release matplotlib resources.
    """
    fig = build_alignment(fastafile, conversiondic, gapthresh=gapthresh,
                          domain_prb=domain_prb, domain_stats=domain_stats)
    if outfile:
        fig.savefig(outfile, format='png')
    if showplot:
        fig.show()
    else:
        plt.close()
def get_graphing_name(defline, conversiondic={}, truncate_name=False):
    """Derive a display name for a sequence from its defline.

    Strips any leading '>' characters, looks the id up in conversiondic for a
    full name, and optionally shortens long names to 'xxxxxx...xxxxx' form.
    """
    namesearch = re.search('^>*(\S+)', defline)
    genename = namesearch.group(1) if namesearch else defline.strip()
    fullname = conversiondic[genename][0] if genename in conversiondic else genename
    if truncate_name and len(fullname) > 11:
        # NOTE: the truncated label is built from the raw id, not the
        # converted full name
        return "...".join([genename[:6], genename[-5:]])
    return fullname
def build_alignment(fastafile, conversiondic={}, img_width=10, gapthresh=0.05,
                    truncate_name=False, graph_style='consensus',
                    domain_prb=None, domain_stats=None, window=20):
    """
    Draws an alignment graph with coloring providing one of several different schemes.

    graph_style can be 'consensus', 'amino', 'domains', or 'block' ('pfam' coming soon!)

    'block':
    Draw an alignment graph in the vein of BLAST alignment results on NCBI.
    colour scale represents the percentage of alignment positions filled with actual
    sequence, but does not represent the fit of that alignment. This is indicated by
    adding a consensus bar at the bottom - high consensus meaning most amino acids/base
    pairs are identical in a given sliding window.

    'domains':
    Colored regions indicate the HMM alignment domains, with the color related to the
    probability score at that position.

    NOTE(review): the docstring says 'domains' but the code tests for
    graph_style == 'domain' — confirm which spelling callers use.
    """
    # set similarity-based color scheme (0..1 mapped through jet):
    nrml = mpl.colors.Normalize(vmin=0, vmax=1)
    sm = plt.cm.ScalarMappable(cmap=cm.jet, norm=nrml)
    sm._A = []
    #get consensus percentages for each position:
    consensus = Consensus(fastafile)
    # calculate sliding average:
    consensus.make_sliding_consensus(window)
    sliding_colors = sm.to_rgba(consensus.sliding_cons.values())
    consensus.make_local_sliders(window)
    # setting color maps:
    if graph_style == 'amino':
        graph_title = "Peptide alignment showing amino acid sequence"
        # color based on the peptide sequence (RGBA values on a 0-256 scale)
        acma = {'A':[200,200,200,256], 'B':[0,0,0,256], 'C':[230,230,0,256],
                'D':[230,10,10,256],
                'E':[230,10,10,256], 'F':[50,50,170,256], 'G':[235,235,235,256],
                'H':[130,130,210,256],'I':[15,130,15,256], 'J':[0,0,0,256],
                'K':[20,90,255,256], 'L':[15,130,15,256], 'M':[230,230,0,256],
                'N':[0,220,220,256], 'O':[0,0,0,256], 'P':[220,150,130,256],
                'Q':[0,220,220,256], 'R':[20,90,255,256], 'S':[250,150,0,256],
                'T':[250,150,0,256], 'U':[0,0,0,256], 'V':[15,130,15,256],
                'W':[180,90,180,256], 'X':[0,0,0,256], 'Y':[50,50,170,256],
                'Z':[0,0,0,256], '-':[256,256,256,0], 'null':[256,256,256,0],
                }
        # normalise to matplotlib's 0.0-1.0 RGBA scale
        acm = {}
        for aa in acma:
            acm[aa] = [ n/256.0 for n in acma[aa] ]
        # assign colors to each sequence based on amino acid sequence:
        colorme = { k:[] for k in consensus.all_seqs }
        for defline, seq in consensus.all_seqs.items():
            for aa in seq:
                colorme[defline].append(acm[aa])
    elif graph_style == 'consensus':
        graph_title = "Peptide alignment with similarity to consensus"
        # assign colors to each sequence based on percentage consensus:
        colorme = { k:[] for k in consensus.all_seqs }
        for defline, seq in consensus.all_seqs.items():
            for i,pc in enumerate(consensus.sliding_local[defline]):
                if consensus.all_seqs[defline][i] == '-':
                    # gaps are rendered fully transparent
                    colorme[defline].append((1.0,1.0,1.0,0.0))
                else:
                    colorme[defline].append(sm.to_rgba(pc))
    elif graph_style == 'domain':
        graph_title = "Peptide alignment with HMMer domain probabilities"
        # assign colors to each sequence based on domain match probability:
        colorme = { k:[] for k in consensus.all_seqs }
        # more error catching to do:
        #if colorme.keys()[0] not in domain_prb:
        #    colorme = { k[1:]:[] for k in consensus.all_seqs }
        for defline, seq in consensus.all_seqs.items():
            # create consensus probabilities:
            probstring = {}
            # catch error from the unknown passing of different gene names (to be tracked)
            if defline not in domain_stats:
                # re-key the domain dicts with a leading '>' to match deflines
                domain_stats = { '>'+k:v for k,v in domain_stats.items() }
                domain_prb = { '>'+k:v for k,v in domain_prb.items() }
            # create lists of probabilites that span the whole sequence:
            for dom,dom_stats in domain_stats[defline].items():
                domstart = dom_stats['alifrom']
                dom_end = dom_stats['alito']
                probs = probstr_to_floats(domain_prb[defline][dom])
                # pad with zero probability outside the aligned domain region
                probstring[dom] = [0] * (domstart - 1) + probs + \
                    [0] * (len(seq.replace("-","")) - dom_end)
            # merge the probabilities (by keeping the best match in all positions)
            maxprobs = [ max(t) for t in zip(*probstring.values()) ]
            # create a sliding window average for smoother viewing:
            slidingprobs = list(np.convolve(maxprobs, np.ones((window,))/window)[(window-1):])
            # assign the color for each position in the aligned sequence:
            for i, aa in enumerate(seq):
                if aa == '-':
                    # gaps are rendered fully transparent
                    colorme[defline].append((1.0,1.0,1.0,0.0))
                else:
                    colorme[defline].append(sm.to_rgba(slidingprobs.pop(0)))
    if graph_style in ['consensus', 'amino', 'domain']:
        # get coords for alignment (also sort sequences alphabetically):
        graphingnames = {
            defline:get_graphing_name(
                defline,
                conversiondic,
                True
                ) for defline in consensus.all_seqs }
        keynames = sorted([ (graphingnames[d],d) for d in colorme ],
                          reverse=True,
                          key=lambda x: x[0])
        name_pos = np.arange(len(colorme)) + 0.5
        y_frame = { k:y for k,y in zip(keynames, name_pos)}
        # set all plotting values into lists for loading into plt.barh:
        y_pos, lefts, widths, colors, bh_lefts, bh_widths = [], [], [], [], [], []
        for ((gname, defline), namepos) in y_frame.items():
            # one unit-width bar per alignment position, colored per scheme
            y_pos += [namepos] * len(consensus.all_seqs[defline])
            lefts += range(len(consensus.all_seqs[defline]))
            widths += [1] * len(consensus.all_seqs[defline])
            colors += colorme[defline]
    elif graph_style == 'block':
        graph_title = "Peptide alignment with gap densities"
        # the original gap-based color scheme:
        # find gaps:
        graph_points = {}
        for defline, seq in consensus.all_seqs.items():
            # determine the smallest reportable gap size is:
            repgap = int(gapthresh * len(seq))
            # get distances and coverage percentages
            points = re.search('^(-*)(\S+[A-Za-z])(-*)$', seq)
            if points:
                pattern = '-{' + str(repgap) + ',}'
                fragments = re.findall(pattern, points.group(2))
                # set starting pos to beginning of matching sequence:
                spos = len(points.group(1))
                if len(fragments) > 0:
                    """
                    dists is a list of tuples, each tuple containing the start position of
                    a large gap, the length of the gap, the start of the preceding non-gap
                    fragment, its width and the % match.
                    """
                    dists = []
                    for frag in fragments:
                        nextgap = seq.find(frag, spos)
                        width, pcmatch = get_pcmatch(seq[spos:nextgap])
                        dists.append((nextgap,len(frag), spos, width, pcmatch))
                        spos = nextgap + len(frag)
                    else:
                        # for-else: after the last gap, record the trailing
                        # sequence fragment
                        lastfrag = points.group(3)
                        nextgap = len(seq) - len(lastfrag)
                        width, pcmatch = get_pcmatch(seq[spos:nextgap])
                        dists.append((0,0, spos, width, pcmatch))
                else:
                    # no reportable gaps: one fragment covering the sequence
                    width, pcmatch = get_pcmatch(points.group(2))
                    dists = [(0,0,spos, width, pcmatch)]
            else:
                # sequence is all gaps (or empty): placeholder fragment
                dists = [(0,0,0,1,0)]
            # get name (convert if possible):
            graphingname = get_graphing_name(defline, conversiondic, True)
            graph_points[graphingname] = dists
        # get coords for alignment:
        keynames = sorted(graph_points.keys(), reverse=True)
        name_pos = np.arange(len(graph_points)) + 0.5
        y_frame = { k:y for k,y in zip(keynames, name_pos)}
        y_pos, lefts, widths, colors, bh_lefts, bh_widths = [], [], [], [], [], []
        for k in keynames:
            for dists in graph_points[k]:
                y_pos.append(y_frame[k])
                lefts.append(dists[2])
                widths.append(dists[3])
                colors.append(sm.to_rgba(dists[4]))
                #bh_lefts.append(dists[0])
                #bh_widths.append(dists[1])
        keynames = [ (n,n) for n in keynames ] # to make compatible with amino and consensus
    # plot graph:
    """
    if 30 > len(keynames) :
        fig = plt.figure(figsize=(img_width,img_width*1))
    elif 60 > len(keynames) >= 30:
        fig = plt.figure(figsize=(img_width,img_width*2))
    elif 90 > len(keynames) >= 60:
        fig = plt.figure(figsize=(img_width,img_width*3))
    else:
    """
    # figure height scales with the number of sequences
    fig = plt.figure(figsize=(img_width,int(len(keynames)/3) + 3))
    # plot alignments:
    ax1 = plt.subplot2grid((12,10),(0,0), colspan=9, rowspan=9)
    plt.barh(left=lefts,
             width=widths,
             bottom=y_pos,
             height=0.8,
             color=colors,
             edgecolor=colors)
    #if graph_style == 'block':
    #    plt.barh(left=bh_lefts, width=bh_widths, bottom=y_pos, height=0.8, color='white',
    #             alpha=0.5)
    plt.yticks(name_pos + 0.4, [k[0] for k in keynames])
    plt.xlabel("position (aa)")
    plt.title(graph_title)
    plt.tight_layout()
    # plot legend:
    ax2 = plt.subplot2grid((12,10),(0,9), colspan=1,rowspan=5)
    cb1 = mpl.colorbar.ColorbarBase(ax2, cmap=cm.jet, norm=nrml, orientation='vertical')
    plt.tight_layout()
    # plot consensus colors:
    ax3 = plt.subplot2grid((12,10),(9,0), colspan=9,rowspan=2)
    size = consensus.maxlen
    plt.barh(left=range(size), bottom=[1]*size,
             height=[0.8]*size, width=[1]*size,
             color=sliding_colors,
             edgecolor=sliding_colors,)
    plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
    plt.xlabel("20 aa sliding global average consensus (%)")
    plt.tight_layout()
    return fig
####### hmmer functions ########################
def parse_the_hmmer(handle):
    """
    ####### DEPRECATED ######
    parses the protein matches from a hmmer search and returns a dictionary of peptides
    and their associated score and p-value.

    handle: an open iterable file-like object; it is closed before returning.
    Returns {target_name: (pvalue, score)}.
    """
    parse_dic = {}
    for line in handle:
        # skip headers / captions and blank-ish lines
        if line[0] in ['#', 'Q', 'D', 'S']:
            continue
        elif len(line) < 2:
            continue
        elif line[0] == '>':
            # start of the alignment section: result table is finished
            break
        elif line.split()[1] in ['hits', 'inclusion', 'annotation']:
            break
        else:
            try:
                score = float(line.split()[1])
                # float() instead of eval(): eval on file content is unsafe
                # and raises uncaught NameError/SyntaxError on malformed
                # fields, whereas float() fails with the handled ValueError.
                pvalue = float(line.split()[0])
            except ValueError:
                continue
            else:
                parse_dic[line.split()[8]] = (pvalue, score)
    handle.close()
    return parse_dic
####### Phylogeny creation/manipulation ########
def rename_newick(raxml_final, conversiondic={}):
    """Replace short names in a newick file with 'fullname_score' long names.

    conversiondic maps shortname -> (fullname, score). Returns the path of the
    new '<base>.scored.nwk' file, or None if raxml_final does not exist.
    """
    if not os.path.exists(raxml_final):
        return None
    newfile = raxml_final[:-3] + "scored.nwk"
    # text mode ('r'), not binary: re.sub with str patterns raises TypeError
    # on bytes under Python 3; handles are closed via the context manager.
    with open(raxml_final, 'r') as handle, open(newfile, 'w') as newhandle:
        for line in handle:  # should only be one line in file
            for shortname in conversiondic:
                line = re.sub(shortname,
                              conversiondic[shortname][0] + "_" + str(conversiondic[shortname][1]),
                              line )
            newhandle.write(line)
    return newfile
################################################
def is_number(s):
    """Return True if s can be interpreted as a float, False otherwise."""
    try:
        float(s)
        return True
    except ValueError:
        return False
def probstr_to_floats(s):
    "converts a HMMer result file probability string to a list of probabilites between 0 and 1"
    numlist = []
    for ch in s:
        if ch == '*':
            # '*' marks a full-probability position
            numlist.append(1)
        elif ch == '.':
            # '.' positions carry no probability and are skipped
            continue
        elif is_number(ch):
            # digits 0-9 map to 0.0-0.9
            numlist.append(float(ch) / 10)
        else:
            raise TypeError('unsupported character: %r' % ch)
    return numlist
def main():
    # placeholder entry point: this module is currently used as a library only
    pass
if __name__ == '__main__':
main()
|
oxpeter/ortholotree
|
orthomods/internal.py
|
Python
|
gpl-2.0
| 34,324
|
[
"BLAST"
] |
d696879c60e45bc967a681e512af54d905575d8a8ae5457857d31c533468941f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Hybrid LFP scheme example script, applying the methodology with the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
Synopsis of the main simulation procedure:
1. Loading of parameterset
a. network parameters
b. parameters for hybrid scheme
2. Set up file destinations for different simulation output
3. network simulation
a. execute network simulation using NEST (www.nest-initiative.org)
b. merge network output (spikes, currents, voltages)
4. Create a object-representation that uses sqlite3 of all the spiking output
5. Iterate over post-synaptic populations:
a. Create Population object with appropriate parameters for
each specific population
b. Run all computations for populations
c. Postprocess simulation output of all cells in population
6. Postprocess all cell- and population-specific output data
7. Create a tarball for all non-redundant simulation output
The full simulation can be evoked by issuing a mpirun call, such as
mpirun -np 64 python cellsim16pops.py
Given the size of the network and demands for the multi-compartment LFP-
predictions using the present scheme, running the model on nothing but a large-
scale compute facility is strongly discouraged.
'''
from cellsim16popsParams_modified_spontan_inh import multicompartment_params, \
point_neuron_network_params
import os
import numpy as np
from time import time
import neuron # NEURON compiled with MPI must be imported before NEST and mpi4py
# to avoid NEURON being aware of MPI.
import nest # Import not used, but done in order to ensure correct execution
import nest_simulation
from hybridLFPy import PostProcess, Population, CachedNetwork, setup_file_dest
import nest_output_processing
import lfpykit
# set some seed values
# SEED drives parameter/population randomness; SIMULATIONSEED is reused below
# to make the postprocessing phase deterministic.
SEED = 12345678
SIMULATIONSEED = 12345678
np.random.seed(SEED)

##########################################################################
# PARAMETERS
##########################################################################

# Full set of parameters including network parameters
params = multicompartment_params()

# set up the file destination
setup_file_dest(params, clearDestination=True)

###############################################################################
# MAIN simulation procedure
###############################################################################

# tic toc
tic = time()

######## Perform network simulation ######################################

# initiate nest simulation with only the point neuron network parameter class
networkParams = point_neuron_network_params()
nest_simulation.sli_run(parameters=networkParams,
                        fname='microcircuit.sli',
                        verbosity='M_INFO')

# preprocess the gdf files containing spiking output, voltages, weighted and
# spatial input spikes and currents:
nest_output_processing.merge_gdf(networkParams,
                                 raw_label=networkParams.spike_recorder_label,
                                 file_type='dat',
                                 fileprefix=params.networkSimParams['label'],
                                 skiprows=3)
nest_output_processing.merge_gdf(networkParams,
                                 raw_label=networkParams.voltmeter_label,
                                 file_type='dat',
                                 fileprefix='voltages',
                                 skiprows=3)
nest_output_processing.merge_gdf(
    networkParams,
    raw_label=networkParams.weighted_input_spikes_label,
    file_type='dat',
    fileprefix='population_input_spikes',
    skiprows=3)

# spatial input currents
# nest_output_processing.create_spatial_input_spikes_hdf5(networkParams,
#    fileprefix='depth_res_input_spikes-')

# create tar file archive of <raw_nest_output_path> folder as .dat files are
# no longer needed. Also removes .dat files
nest_output_processing.tar_raw_nest_output(params.raw_nest_output_path,
                                           delete_files=True)

# Create an object representation of the simulation output that uses sqlite3
networkSim = CachedNetwork(**params.networkSimParams)

toc = time() - tic
print('NEST simulation and gdf file processing done in %.3f seconds' % toc)

# Set up LFPykit measurement probes for LFPs and CSDs
# (cell=None: the cell is attached per-population by hybridLFPy)
probes = []
probes.append(lfpykit.RecExtElectrode(cell=None, **params.electrodeParams))
probes.append(
    lfpykit.LaminarCurrentSourceDensity(
        cell=None,
        **params.CSDParams))

####### Set up populations ###############################################

# iterate over each cell type, and create populationulation object
for i, y in enumerate(params.y):
    # create population:
    pop = Population(
        # parent class
        cellParams=params.yCellParams[y],
        rand_rot_axis=params.rand_rot_axis[y],
        simulationParams=params.simulationParams,
        populationParams=params.populationParams[y],
        y=y,
        layerBoundaries=params.layerBoundaries,
        probes=probes,
        savelist=params.savelist,
        savefolder=params.savefolder,
        # each population gets its own seed offset for reproducibility
        dt_output=params.dt_output,
        POPULATIONSEED=SIMULATIONSEED + i,
        # daughter class kwargs
        X=params.X,
        networkSim=networkSim,
        k_yXL=params.k_yXL[y],
        synParams=params.synParams[y],
        synDelayLoc=params.synDelayLoc[y],
        synDelayScale=params.synDelayScale[y],
        J_yX=params.J_yX[y],
        tau_yX=params.tau_yX[y],
        recordSingleContribFrac=params.recordSingleContribFrac,
    )
    # run population simulation and collect the data
    pop.run()
    pop.collect_data()

    # object no longer needed
    del pop

####### Postprocess the simulation output ################################

# reset seed, but output should be deterministic from now on
np.random.seed(SIMULATIONSEED)

# do some postprocessing on the collected data, i.e., superposition
# of population LFPs, CSDs etc
postproc = PostProcess(y=params.y,
                       dt_output=params.dt_output,
                       probes=probes,
                       savefolder=params.savefolder,
                       mapping_Yy=params.mapping_Yy,
                       savelist=params.savelist
                       )

# run through the procedure
postproc.run()

# create tar-archive with output for plotting
postproc.create_tar_archive()

# tic toc
print('Execution time: %.3f seconds' % (time() - tic))
|
espenhgn/hybridLFPy
|
examples/Hagen_et_al_2016_cercor/cellsim16pops_modified_spontan_inh.py
|
Python
|
gpl-3.0
| 6,665
|
[
"NEURON"
] |
a5d6ec822d089c37b76e88b7c241f352edddeb67775442866973d6ecba1f65c0
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing CHARMM, implemented as an easyblock
@author: Ward Poelmans (Ghent University)
"""
# TODO: add support for more QC software (q-chem, gamess, ...)
import shutil
from easybuild.framework.easyconfig import CUSTOM
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
import easybuild.tools.toolchain as toolchain
# Possible systemsizes for CHARMM
KNOWN_SYSTEM_SIZES = ['huge', 'xxlarge', 'xlarge', 'large', 'medium', 'small', 'xsmall', 'reduce']
class EB_CHARMM(EasyBlock):
    """
    Support for building/installing CHARMM
    """

    @staticmethod
    def extra_options():
        """Add extra easyconfig parameters custom to CHARMM."""
        extra_vars = {
            'build_options': ["FULL", "Specify the options to the build script", CUSTOM],
            'system_size': ["medium", "Specify the supported systemsize: %s" % ', '.join(KNOWN_SYSTEM_SIZES), CUSTOM],
        }
        return EasyBlock.extra_options(extra_vars)

    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for CHARMM."""
        super(EB_CHARMM, self).__init__(*args, **kwargs)
        self.arch = 'UNKNOWN'

    def configure_step(self):
        """Determine the build architecture from the compiler toolchain."""
        # Clean out old dir but don't create new one
        self.cfg['dontcreateinstalldir'] = True
        if self.toolchain.comp_family() == toolchain.INTELCOMP:
            self.arch = "em64t"
        else:
            self.arch = "gnu"
        super(EB_CHARMM, self).make_dir(self.installdir, True, dontcreateinstalldir=True)

    def build_step(self, verbose=False):
        """Start the actual build"""
        if self.cfg['system_size'] not in KNOWN_SYSTEM_SIZES:
            raise EasyBuildError("Unknown system size '%s' specified, known: %s",
                                 self.cfg['system_size'], KNOWN_SYSTEM_SIZES)
        self.log.info("Building for size: %s" % self.cfg['system_size'])
        self.log.info("Build options from the easyconfig: %s" % self.cfg['build_options'])
        build_options = self.cfg['build_options']

        # FFTW and MKL are mutally exclusive
        if get_software_root("FFTW"):
            self.log.info("Using FFTW")
            build_options += " FFTW"
        else:
            self.log.info("Not using FFTW")
            if get_software_root("imkl"):
                self.log.info("Using the MKL")
                build_options += " MKL"
            else:
                self.log.info("Not using MKL")

        # Currently, only support for g09 added
        if get_software_root("Gaussian") and 'g09' in get_software_version('Gaussian'):
            self.log.info("Using g09")
            build_options += " G09"
        else:
            self.log.info("Not using g09")

        if self.toolchain.options.get('usempi', None):
            self.log.info("Using MPI")
            # M means use MPI and MPIF90 means let mpif90 handle all MPI stuff
            build_options += " M MPIF90"

        # By default, CHARMM uses gfortran. We need to specify if we want ifort
        if self.toolchain.comp_family() == toolchain.INTELCOMP:
            build_options += " IFORT"

        cmd = "./install.com %s %s %s" % (self.arch, self.cfg['system_size'], build_options)
        (out, _) = run_cmd(cmd, log_all=True, simple=False, log_output=verbose)
        return out

    def test_step(self):
        """Run the testsuite"""
        if self.toolchain.options.get('usempi', None):
            cmd = "cd test && ./test.com M %s %s" % (self.cfg['parallel'], self.arch)
        else:
            cmd = "cd test && ./test.com %s" % self.arch
        (out, _) = run_cmd(cmd, log_all=True, simple=False)
        return out

    def sanity_check_step(self):
        """Custom sanity check for CHARMM."""
        custom_paths = {
            'files': [],
            'dirs': [],
        }
        # the charmm binary lands in an arch-specific (and MPI-suffixed) dir
        if self.toolchain.options.get('usempi', None):
            custom_paths['files'].append('exec/%s_M/charmm' % self.arch)
        else:
            custom_paths['files'].append('exec/%s/charmm' % self.arch)
        super(EB_CHARMM, self).sanity_check_step(custom_paths=custom_paths)

    def install_step(self):
        """Copy the build directory to the install path"""
        self.log.info("Copying CHARMM dir %s to %s" % (self.cfg['start_dir'], self.installdir))
        try:
            shutil.copytree(self.cfg['start_dir'], self.installdir)
        # 'except X as err' syntax: valid on Python 2.6+ and required on
        # Python 3; the previous 'except OSError, err' is Python-2-only.
        except OSError as err:
            raise EasyBuildError("Failed to copy CHARMM dir to install dir: %s", err)

    def make_module_req_guess(self):
        """Custom guesses for environment variable PATH for CHARMM."""
        guesses = super(EB_CHARMM, self).make_module_req_guess()
        if self.toolchain.options.get('usempi', None):
            suffix = "_M"
        else:
            suffix = ""
        guesses.update({
            'PATH': ['exec/%s%s' % (self.arch, suffix)],
        })
        return guesses
|
bartoldeman/easybuild-easyblocks
|
easybuild/easyblocks/c/charmm.py
|
Python
|
gpl-2.0
| 6,104
|
[
"CHARMM",
"GAMESS",
"Gaussian",
"Q-Chem"
] |
28eb8d3f454d38c97eaa8dc6de58283da95c08e995408c9d83d7de5ce62338ba
|
r"""``sphobjinv`` *version definition module*.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
**Author**
Brian Skinn (bskinn@alum.mit.edu)
**File Created**
18 Mar 2019
**Copyright**
\(c) Brian Skinn 2016-2022
**Source Repository**
https://github.com/bskinn/sphobjinv
**Documentation**
https://sphobjinv.readthedocs.io/en/latest
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
__version__ = "2.2.2b1.dev0"
|
bskinn/sphobjinv
|
src/sphobjinv/version.py
|
Python
|
mit
| 521
|
[
"Brian"
] |
d1d3522d2aa01e2a09ac1318f80eb283a30e60127d71dc1839829605d7fa322f
|
#!/usr/bin/env python
# This example shows how to extract portions of an unstructured grid
# using vtkExtractUnstructuredGrid. vtkConnectivityFilter is also used
# to extract connected components.
#
# The data found here represents a blow molding process. Blow molding
# requires a mold and parison (hot, viscous plastic) which is shaped
# by the mold into the final form. The data file contains several steps
# in time for the analysis.

import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Create a reader to read the unstructured grid data. We use a
# vtkDataSetReader which means the type of the output is unknown until
# the data file is read. So we follow the reader with a
# vtkCastToConcrete and cast the output to vtkUnstructuredGrid.
reader = vtk.vtkDataSetReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/blow.vtk")
reader.SetScalarsName("thickness9")
reader.SetVectorsName("displacement9")
castToUnstructuredGrid = vtk.vtkCastToConcrete()
castToUnstructuredGrid.SetInputConnection(reader.GetOutputPort())
warp = vtk.vtkWarpVector()
warp.SetInputConnection(castToUnstructuredGrid.GetOutputPort())

# The connectivity filter extracts the first two regions. These are
# known to represent the mold.
connect = vtk.vtkConnectivityFilter()
connect.SetInputConnection(warp.GetOutputPort())
connect.SetExtractionModeToSpecifiedRegions()
connect.AddSpecifiedRegion(0)
connect.AddSpecifiedRegion(1)
moldMapper = vtk.vtkDataSetMapper()
# BUG FIX: the mold mapper must consume the connectivity filter's output
# (the two mold regions of the warped data). It previously re-used the raw
# reader output, which left the connectivity filter above entirely unused
# and rendered the whole unwarped dataset as the "mold".
moldMapper.SetInputConnection(connect.GetOutputPort())
moldMapper.ScalarVisibilityOff()
moldActor = vtk.vtkActor()
moldActor.SetMapper(moldMapper)
moldActor.GetProperty().SetColor(.2, .2, .2)
moldActor.GetProperty().SetRepresentationToWireframe()

# Another connectivity filter is used to extract the parison.
connect2 = vtk.vtkConnectivityFilter()
connect2.SetInputConnection(warp.GetOutputPort())
connect2.SetExtractionModeToSpecifiedRegions()
connect2.AddSpecifiedRegion(2)

# We use vtkExtractUnstructuredGrid because we are interested in
# looking at just a few cells. We use cell clipping via cell id to
# extract the portion of the grid we are interested in.
extractGrid = vtk.vtkExtractUnstructuredGrid()
extractGrid.SetInputConnection(connect2.GetOutputPort())
extractGrid.CellClippingOn()
extractGrid.SetCellMinimum(0)
extractGrid.SetCellMaximum(23)
parison = vtk.vtkGeometryFilter()
parison.SetInputConnection(extractGrid.GetOutputPort())
normals2 = vtk.vtkPolyDataNormals()
normals2.SetInputConnection(parison.GetOutputPort())
normals2.SetFeatureAngle(60)
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.0, 0.66667)
parisonMapper = vtk.vtkPolyDataMapper()
parisonMapper.SetInputConnection(normals2.GetOutputPort())
parisonMapper.SetLookupTable(lut)
parisonMapper.SetScalarRange(0.12, 1.0)
parisonActor = vtk.vtkActor()
parisonActor.SetMapper(parisonMapper)

# graphics stuff
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
ren.AddActor(parisonActor)
ren.AddActor(moldActor)
ren.SetBackground(1, 1, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(60)
ren.GetActiveCamera().Roll(-90)
ren.GetActiveCamera().Dolly(2)
ren.ResetCameraClippingRange()
renWin.SetSize(500, 375)

iren.Initialize()
renWin.Render()
iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/VisualizationAlgorithms/Python/ExtractUGrid.py
|
Python
|
gpl-3.0
| 3,365
|
[
"VTK"
] |
bff09c6a7c58b225711cceef669b31e36c6f951b237dfccb0784d2e81fbae995
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.collection Utilities to load and use data from EAGLE SKIRT-run result collections.
#
# The facilities in this module allow loading one or more EAGLE SKIRT-run result collections,
# extracting the desired galaxy properties, and calculating observables from these properties.
# ----------------------------------------------------------------------
import pickle
import types
import os.path
import numpy as np
from . import config
from ..core.filter.broad import BroadBandFilter
from ..core.basics.greybody import Bnu, GreyBody, kappa350_Cortese, kappa350_Zubko
# ----------------------------------------------------------------------
## An instance of the Collection class represents the contents of a particular collection of EAGLE SKIRT-run results.
# It offers various facilities for querying galaxy properties.
class Collection:

    ## The constructor loads the contents of the specified collection and optionally reads extra data fields
    # from column text files.
    # The collection name should \em not include the directory (which is taken from eagle.config) nor the
    # postfix "_info_collection.dat". The optional collection label can be provided as a short identifier,
    # for example for use in the legend of a plot. If a list of file names is provided, the constructor adds
    # information from these text data files to the collection. The first column in each file contains a GalaxyID,
    # and subsequent columns contain the extra fields for that galaxy in the order specified by the field names.
    # The specified files are simply concatenated.
    def __init__(self, collection_name, collection_label=None, file_names=None, field_names=None):
        self._name = collection_name
        self._label = collection_label if collection_label is not None else collection_name
        # load the collection: a pickled dict mapping property name -> value array
        infilepath = os.path.join(config.collections_path, collection_name+"_info_collection.dat")
        # NOTE(review): text-mode open for pickle.load is Python-2 style; confirm
        # the collections were pickled with protocol 0 before porting to Python 3.
        infile = open(infilepath, "r")
        self._info = pickle.load(infile)
        infile.close()
        # construct an index on galaxy id
        self._ids = { }  # key = galaxy id, value = index in collection
        index = 0
        for galaxyid in self._info["galaxy_id"]:
            self._ids[galaxyid] = index
            index+=1
        if (file_names is not None) and (field_names is not None):
            # read the extra fields from the input files, and store them keyed on galaxy id
            galaxies = {}
            for filename in file_names:
                for row in np.loadtxt(filename):
                    galaxies[ int(row[0]) ] = row[1:]
            # create appropriate collection entries for the new fields
            # (zeros, in the same shape/dtype as the existing run-id column)
            for field in field_names:
                self._info[field] = np.zeros_like(self._info["skirt_run_id"])
            # copy the field values for each galaxy in the collection
            index = 0
            for galaxyid in self._info["galaxy_id"]:
                for field, value in zip(field_names, galaxies[galaxyid]):
                    self._info[field][index] = value
                index+=1
        # replace infinities (signifying a non-detection) by NaNs
        for value in self._info.values():
            value[np.isinf(value)] = np.nan

    ## This function returns the collection name.
    def name(self):
        return self._name

    ## This function returns the collection label.
    def label(self):
        return self._label

    ## This function returns a set containing all galaxy ids in the collection.
    def galaxy_ids(self):
        return set(self._ids.keys())

    ## This function returns a set containing the names of the properties provided in the collection.
    def property_names(self):
        return set(self._info.keys())

    ## This function returns an array with the values of the specified property for each galaxy in the specified
    # list of galaxy ids, in the same order. An error is raised if the galaxy id and/or the property are not available.
    def property_values(self, property_name, galaxy_ids):
        result = np.zeros(len(galaxy_ids))
        index = 0
        for galaxy_id in galaxy_ids:
            result[index] = self._info[property_name][self._ids[galaxy_id]]
            index+=1
        return result

    ## This function returns a dictionary with key-value pairs for all properties in the collection.
    # The key is the property name, and the value is a single-dimensional array with
    # the values of that property for all galaxies in the collection, in arbitrary order.
    def all_property_values(self):
        return self._info
# ----------------------------------------------------------------------
## An instance of the CollectionSet class manages the contents of one or more EAGLE SKIRT-run result collections.
# It allows querying the properties in all collections for the set of common galaxies.
class CollectionSet:

    ## The constructor loads the specified collections, optionally reading extra data fields from text files,
    # and prints some statistics. The arguments are similar to those for the Collection constructor, except
    # that the first two should be lists of equal length (if provided).
    # NOTE(review): this class uses Python-2-only features (types.StringTypes, the
    # builtin reduce, and the print statement); it needs updating for Python 3.
    def __init__(self, collection_names, collection_labels=None, file_names=None, field_names=None):
        # load the collections; a single name (not in a list) is accepted as a convenience
        if isinstance(collection_names,types.StringTypes):
            collection_names = [ collection_names ]
        if collection_labels is None:
            collection_labels = [ None ] * len(collection_names)
        self._collections = [ Collection(name,label,file_names,field_names) \
                              for name,label in zip(collection_names,collection_labels) ]
        # find the set of common galaxies and the set of common properties
        # (set intersection folded across all loaded collections)
        if len(self._collections) > 1:
            self._ids = sorted(reduce(lambda x,y: x&y, [ c.galaxy_ids() for c in self._collections ]))
            self._props = reduce(lambda x,y: x&y, [ c.property_names() for c in self._collections ])
        else:
            self._ids = sorted(self._collections[0].galaxy_ids())
            self._props = self._collections[0].property_names()
        # print the number of common galaxies
        print "Loaded a set of {} collections with {} common galaxies and {} common properties" \
            .format(len(self._collections), len(self._ids), len(self._props))

    ## This function returns a two-dimensional array with the values of the specified property for all common galaxies
    # in all collections of the set. The index on the first axis iterates over the collections, the index on the last
    # axis iterates over the galaxies, in order of increasing galaxy id.
    def property_values(self, property_name):
        return np.vstack([ c.property_values(property_name,self._ids) for c in self._collections ])

    ## This function returns a dictionary with key-value pairs for all properties that are common to the
    # collections in the set. The key is the property name, and the value is a two-dimensional array with
    # the values of that property for all common galaxies in all collections of the set. The index on the
    # first axis iterates over the collections, the index on the last axis iterates over the galaxies,
    # in order of increasing galaxy id.
    def all_property_values(self):
        return { p:self.property_values(p) for p in self._props }
# ----------------------------------------------------------------------
## An instance of the CollectionData class encapsulates the information contained in a Collection or in a
# CollectionSet instance, offering the following benefits:
# - optionally include 'stripped' property names for a specific instrument (i.e. omitting the instrument name);
# - access (common) properties for all (common) galaxies using Python property syntax;
# - calculate a number of extra observable galaxy properties from the basic properties.
class CollectionData:

    ## The constructor retrieves the relevant information from the specified Collection or CollectionSet instance,
    # and adds stripped property names for the specified instrument, if any.
    def __init__(self, collection, instrument=None):
        # get the info dictionary
        self._info = collection.all_property_values()
        # add stripped instrument keys if requested: "instr_<instrument>_x_y"
        # becomes additionally available as "instr_x_y"
        if instrument is not None:
            for key in self._info.keys():
                if key.startswith("instr_"):
                    segments = key.split("_")
                    if segments[1]==instrument:
                        segments.pop(1)
                        cleankey = "_".join(segments)
                        self._info[cleankey] = self._info[key]

    ## This function ensures that the data array for property "some_property" in CollectionData instance cd
    # can be accessed through the regular Python cd.some_property syntax. For a single collection, the function
    # returns a single-dimensional array with the values of that property for all galaxies in the collection.
    # For multiple collections, the function returns a two-dimensional array with the values of that property
    # for all common galaxies in all collections of the set. The index on the first axis iterates over the
    # collections, the index on the last axis iterates over the galaxies.
    def __getattr__(self, name):
        return self._info[name]

    ## This function returns log10 of stellar mass (in solar units) according to Zibetti et al 2009, table B1,
    # using color g-i and i-band luminosity
    def log_stellar_mass_as_zibetti(self):
        color = self.instr_magnitude_sdss_g - self.instr_magnitude_sdss_i # AB color g - i
        logUpsi = -0.963 + 1.032*color # Upsilon in solar units (coefficients a_i and b_i for color g-i in table B1)
        logLi = (4.54 - self.instr_magnitude_sdss_i) / 2.5 # solar AB magnitude in i band is 4.54
        return logUpsi + logLi

    ## This function returns dust temperature (in K) and mass (in Msun) for best fit with Herschel 160, 250, 350, 500 data points
    # of the specified flux type (default is 'limited'), using beta=2 and kappa=kappa350_Cortese
    def dust_temperature_and_mass_from_grey_body_fit(self, fluxtype='limited'):
        # get the Herschel 160, 250, 350, 500 wavelengths
        waves = np.array( [ BroadBandFilter(fs).pivotwavelength() for fs in ("Pacs.red","SPIRE.PSW","SPIRE.PMW","SPIRE.PLW")] )
        sigmas = np.array(( 3,1,1,3 )) # pacs is less sensitive; longer wavelength fluxes are harder to measure
        # get the Herschel 160, 250, 350, 500 datapoints
        # NOTE(review): eval on an internally-constructed format string -- safe
        # only while fluxtype comes from trusted callers; consider getattr instead.
        fluxstring = '''[ self.instr_fluxdensity_pacs_red_{0}, self.instr_fluxdensity_spire_psw_{0},
self.instr_fluxdensity_spire_pmw_{0}, self.instr_fluxdensity_spire_plw_{0} ]'''.format(fluxtype)
        fluxes = eval(fluxstring)
        # setup an iterator over the galaxies, specifying two to-be-allocated output arrays for T and M
        it = np.nditer([None, None, self.setup_distance_instrument] + fluxes,
                       op_flags = [['writeonly','allocate'],['writeonly','allocate'],['readonly'],
                                   ['readonly'], ['readonly'], ['readonly'], ['readonly']])
        # do the fit, iterating over the galaxies
        for Ti,Mi,di,f160i,f250i,f350i,f500i in it:
            greybody = GreyBody(di, 2, kappa350_Cortese)
            #greybody = GreyBody(di, 2, kappa350_Zubko)
            it[0],it[1] = greybody.fit(waves, (f160i,f250i,f350i,f500i), sigmas)
        # return the two result arrays T and M allocated by the iterator
        return it.operands[0:2]

    ## This function returns dust temperature (in K) for best fit with Herschel 160, 250, 350, 500 data points
    def dust_temperature_from_grey_body_fit(self, fluxtype='limited'):
        return self.dust_temperature_and_mass_from_grey_body_fit(fluxtype)[0]

    ## This function returns log10 of dust mass (in Msun) for best fit with Herschel 160, 250, 350, 500 data points
    def log_dust_mass_from_grey_body_fit(self, fluxtype='limited'):
        return log_if_positive(self.dust_temperature_and_mass_from_grey_body_fit(fluxtype)[1])

    ## This function returns fraction of total observed dust mass contributed by HII regions (in range 0..1),
    # calculated from the continuum fluxes through a best fit with Herschel 160, 250, 350, 500 data points
    def dust_fraction_in_hii_regions(self):
        T,Mhii = self.dust_temperature_and_mass_from_grey_body_fit("hii_continuum")
        T,Mother = self.dust_temperature_and_mass_from_grey_body_fit("other_continuum")
        return divide_if_positive(Mhii, Mhii+Mother)

    ## This function returns log10 of dust mass (in Msun) according to Cortese et al 2012, appendix B, using beta=2 for extended sources
    def log_dust_mass_as_cortese(self):
        x = log_divide_if_positive(self.instr_fluxdensity_spire_psw_limited,self.instr_fluxdensity_spire_plw_limited)
        logMFD = 16.880 - 1.559*x + 0.160*x**2 - 0.079*x**3 - 0.363*x**4
        logD = np.log10(self.setup_distance_instrument/1e6)
        logF = log_if_positive(self.instr_fluxdensity_spire_pmw_limited)
        logDust = logMFD + 2*logD + logF - 11.32
        #logDust += np.log10( kappa350_Cortese / kappa350_Zubko ) # compensate for kappa assumed in Cortese vs Zubko
        return logDust

    ## This function returns log10 of dust mass (in Msun) based on the dust temperature probed in the dust grid and the 350 micron flux
    def log_dust_mass_from_grid_temperature(self):
        Msun = 1.9891e30 # solar mass in kg
        pc = 3.08567758e16 # parsec in m
        f350 = self.instr_fluxdensity_spire_pmw_limited * 1e-26 # W/m2
        D = self.setup_distance_instrument * pc # m
        T = self.probe_average_temperature_dust # K
        # floor the temperature at 1 K to avoid divergence in the Planck function
        T[T<1] = 1
        return log_divide_if_positive(f350*D*D, kappa350_Cortese * Bnu(350,T) * Msun)
# ----------------------------------------------------------------------
## Convenience function combining the CollectionSet and CollectionData constructors:
# load the collections with the specified names (optionally including extra data
# fields from column text files) and wrap the result in a CollectionData instance,
# providing stripped property names for the given instrument.
def load_collections(collection_names, file_names=None, field_names=None, instrument=None):
    collections = CollectionSet(collection_names, file_names=file_names, field_names=field_names)
    return CollectionData(collections, instrument)
# ----------------------------------------------------------------------
# Some generic functions used for calculating observables
## This function returns log10(x) where x is strictly positive, and NaN elsewhere.
def log_if_positive(x):
    ok = x > 0
    out = np.empty_like(x)
    out[ok] = np.log10(x[ok])
    out[~ok] = np.nan
    return out
## This function returns x/y where y is strictly positive, and NaN elsewhere.
def divide_if_positive(x,y):
    denom_ok = y > 0
    out = np.empty_like(x)
    out[denom_ok] = x[denom_ok] / y[denom_ok]
    out[~denom_ok] = np.nan
    return out
## This function returns log10(x/y) where both x and y are strictly positive,
# and NaN elsewhere.
def log_divide_if_positive(x,y):
    ratio = np.zeros_like(x)
    denom_ok = y > 0
    ratio[denom_ok] = x[denom_ok] / y[denom_ok]
    # where y<=0 the ratio stays 0, so it is caught by the positivity test below
    pos = ratio > 0
    ratio[pos] = np.log10(ratio[pos])
    ratio[~pos] = np.nan
    return ratio
# ----------------------------------------------------------------------
|
SKIRT/PTS
|
eagle/collection.py
|
Python
|
agpl-3.0
| 15,980
|
[
"Galaxy"
] |
d71c4796ecaf5e7adf31138547ba47d3215697a92c3607550504d9885b239e0c
|
""" High-level classes for reading HDF5 files. """
from collections import deque
from collections.abc import Mapping, Sequence
import os
import posixpath
import numpy as np
from .core import Reference
from .dataobjects import DataObjects
from .misc_low_level import SuperBlock
class Group(Mapping):
    """
    An HDF5 Group which may hold attributes, datasets, or other groups.

    Attributes
    ----------
    attrs : dict
        Attributes for this group.
    name : str
        Full path to this group.
    file : File
        File instance where this group resides.
    parent : Group
        Group instance containing this group.
    """
    def __init__(self, name, dataobjects, parent):
        """ Initialize a Group proxy backed by its low-level data objects. """
        self.parent = parent
        self.file = parent.file
        self.name = name
        self._links = dataobjects.get_links()
        self._dataobjects = dataobjects
        self._attrs = None  # cached property
    def __repr__(self):
        return '<HDF5 group "%s" (%d members)>' % (self.name, len(self))
    def __len__(self):
        """ Number of links in the group. """
        return len(self._links)
    def _dereference(self, ref):
        """ Deference a Reference object. """
        if not ref:
            raise ValueError('cannot deference null reference')
        obj = self.file._get_object_by_address(ref.address_of_reference)
        if obj is None:
            raise ValueError('reference not found in file')
        return obj
    def __getitem__(self, y):
        """ x.__getitem__(y) <==> x[y] """
        if isinstance(y, Reference):
            return self._dereference(y)
        path = posixpath.normpath(y)
        if path == '.':
            return self
        # absolute paths are resolved starting from the file's root group
        if path.startswith('/'):
            return self.file[path[1:]]
        # split off the first path component; the remainder (if any) is
        # resolved recursively on the child group
        if posixpath.dirname(path) != '':
            next_obj, additional_obj = path.split('/', 1)
        else:
            next_obj = path
            additional_obj = '.'
        if next_obj not in self._links:
            raise KeyError('%s not found in group' % (next_obj))
        obj_name = posixpath.join(self.name, next_obj)
        link_target = self._links[next_obj]
        # a string link target is a soft link: resolve it by name
        if isinstance(link_target, str):
            try:
                return self.__getitem__(link_target)
            except KeyError:
                # NOTE(review): a dangling soft link yields None rather than
                # raising KeyError -- confirm callers expect this behavior.
                return None
        dataobjs = DataObjects(self.file._fh, link_target)
        if dataobjs.is_dataset:
            if additional_obj != '.':
                raise KeyError('%s is a dataset, not a group' % (obj_name))
            return Dataset(obj_name, dataobjs, self)
        return Group(obj_name, dataobjs, self)[additional_obj]
    def __iter__(self):
        for k in self._links.keys():
            yield k
    def visit(self, func):
        """
        Recursively visit all names in the group and subgroups.

        func should be a callable with the signature:

            func(name) -> None or return value

        Returning None continues iteration, return anything else stops and
        return that value from the visit method.
        """
        return self.visititems(lambda name, obj: func(name))
    def visititems(self, func):
        """
        Recursively visit all objects in this group and subgroups.

        func should be a callable with the signature:

            func(name, object) -> None or return value

        Returning None continues iteration, return anything else stops and
        return that value from the visit method.
        """
        # names passed to func are relative to this group
        root_name_length = len(self.name)
        if not self.name.endswith('/'):
            root_name_length += 1
        # breadth-first traversal: a queue of objects not yet visited
        queue = deque(self.values())
        while queue:
            obj = queue.popleft()
            name = obj.name[root_name_length:]
            ret = func(name, obj)
            if ret is not None:
                return ret
            if isinstance(obj, Group):
                queue.extend(obj.values())
        return None
    @property
    def attrs(self):
        """ attrs attribute (lazily read from the file, then cached). """
        if self._attrs is None:
            self._attrs = self._dataobjects.get_attributes()
        return self._attrs
class File(Group):
    """
    Open a HDF5 file.

    Note in addition to having file specific methods the File object also
    inherit the full interface of **Group**.

    File is also a context manager and therefore supports the with statement.
    Files opened by the class will be closed after the with block, file-like
    object are not closed.

    Parameters
    ----------
    filename : str or file-like
        Name of file (string or unicode) or file like object which has read
        and seek methods which behaved like a Python file object.

    Attributes
    ----------
    filename : str
        Name of the file on disk, None if not available.
    mode : str
        String indicating that the file is open readonly ("r").
    userblock_size : int
        Size of the user block in bytes (currently always 0).
    """
    def __init__(self, filename):
        """ Open the file (or adopt an already-open file-like object). """
        self._close = False
        if hasattr(filename, 'read'):
            if not hasattr(filename, 'seek'):
                raise ValueError(
                    'File like object must have a seek method')
            # caller-owned file-like object: never closed by this class
            self._fh = filename
            self.filename = getattr(filename, 'name', None)
        else:
            self._fh = open(filename, 'rb')
            self._close = True
            self.filename = filename
        # the superblock locates the root group's data objects
        self._superblock = SuperBlock(self._fh, 0)
        offset = self._superblock.offset_to_dataobjects
        dataobjects = DataObjects(self._fh, offset)
        self.file = self
        self.mode = 'r'
        self.userblock_size = 0
        super(File, self).__init__('/', dataobjects, self)
    def __repr__(self):
        # BUG FIX: self.filename is None for file-like objects without a .name
        # attribute; os.path.basename(None) raised TypeError in that case.
        if self.filename is None:
            return '<HDF5 file (mode r)>'
        return '<HDF5 file "%s" (mode r)>' % (os.path.basename(self.filename))
    def _get_object_by_address(self, obj_addr):
        """ Return the object pointed to by a given address. """
        if self._dataobjects.offset == obj_addr:
            return self
        return self.visititems(
            lambda x, y: y if y._dataobjects.offset == obj_addr else None)
    def close(self):
        """ Close the file (no-op for caller-owned file-like objects). """
        # getattr guards against __del__ running on a partially-constructed
        # instance when __init__ raised before these attributes were assigned.
        if getattr(self, '_close', False) and getattr(self, '_fh', None) is not None:
            self._fh.close()
    __del__ = close
    def __enter__(self):
        return self
    def __exit__(self, exc_type, value, traceback):
        self.close()
class Dataset(object):
    """
    A HDF5 Dataset containing an n-dimensional array and meta-data attributes.

    Attributes
    ----------
    shape : tuple
        Dataset dimensions.
    dtype : dtype
        Dataset's type.
    size : int
        Total number of elements in the dataset.
    chunks : tuple or None
        Chunk shape, or None if chunked storage is not used.
    compression : str or None
        Compression filter used on dataset. None if compression is not enabled
        for this dataset.
    compression_opts : dict or None
        Options for the compression filter.
    scaleoffset : dict or None
        Setting for the HDF5 scale-offset filter, or None if scale-offset
        compression is not used for this dataset.
    shuffle : bool
        Whether the shuffle filter is applied for this dataset.
    fletcher32 : bool
        Whether the Fletcher32 checksumming is enabled for this dataset.
    fillvalue : float or None
        Value indicating uninitialized portions of the dataset. None if no
        fill value has been defined.
    ndim : int
        Number of dimensions.
    dims : DimensionManager
        Dimension scales.
    attrs : dict
        Attributes for this dataset.
    name : str
        Full path to this dataset.
    file : File
        File instance where this dataset resides.
    parent : Group
        Group instance containing this dataset.
    """
    def __init__(self, name, dataobjects, parent):
        """ Initialize a Dataset proxy; the data itself is read lazily. """
        self.parent = parent
        self.file = parent.file
        self.name = name
        self._dataobjects = dataobjects
        self._attrs = None   # cached attribute dict
        self._astype = None  # dtype override installed by the astype() context
    def __repr__(self):
        info = (os.path.basename(self.name), self.shape, self.dtype)
        return '<HDF5 dataset "%s": shape %s, type "%s">' % info
    def __getitem__(self, args):
        data = self._dataobjects.get_data()[args]
        if self._astype is None:
            return data
        return data.astype(self._astype)
    def read_direct(self, array, source_sel=None, dest_sel=None):
        """
        Read from a HDF5 dataset directly into a NumPy array.

        This is equivalent to array[dest_sel] = dset[source_sel].
        Creation of intermediates is not avoided. This method is provided for
        compatibility with h5py; it is not efficient.
        """
        array[dest_sel] = self[source_sel]
    def astype(self, dtype):
        """
        Return a context manager which returns data as a particular type.

        Conversion is handled by NumPy after reading extracting the data.
        """
        return AstypeContext(self, dtype)
    def len(self):
        """ Return the size of the first axis. """
        return self.shape[0]
    @property
    def shape(self):
        """ shape attribute. """
        return self._dataobjects.shape
    @property
    def ndim(self):
        """ number of dimensions. """
        return len(self.shape)
    @property
    def dtype(self):
        """ dtype attribute. """
        return self._dataobjects.dtype
    @property
    def value(self):
        """ alias for dataset[()] (deprecated). """
        # BUG FIX: the original merely constructed a DeprecationWarning object
        # and discarded it; warnings.warn() actually notifies the caller.
        import warnings
        warnings.warn(
            "dataset.value has been deprecated. Use dataset[()] instead.",
            DeprecationWarning, stacklevel=2)
        return self[()]
    @property
    def size(self):
        """ size attribute. """
        return np.prod(self.shape)
    @property
    def chunks(self):
        """ chunks attribute. """
        return self._dataobjects.chunks
    @property
    def compression(self):
        """ compression attribute. """
        return self._dataobjects.compression
    @property
    def compression_opts(self):
        """ compression_opts attribute. """
        return self._dataobjects.compression_opts
    @property
    def scaleoffset(self):
        """ scaleoffset attribute. """
        return None  # TODO support scale-offset filter
    @property
    def shuffle(self):
        """ shuffle attribute. """
        return self._dataobjects.shuffle
    @property
    def fletcher32(self):
        """ fletcher32 attribute. """
        return self._dataobjects.fletcher32
    @property
    def fillvalue(self):
        """ fillvalue attribute. """
        return self._dataobjects.fillvalue
    @property
    def dims(self):
        """ dims attribute. """
        return DimensionManager(self)
    @property
    def attrs(self):
        """ attrs attribute (lazily read from the file, then cached). """
        if self._attrs is None:
            self._attrs = self._dataobjects.get_attributes()
        return self._attrs
class DimensionManager(Sequence):
    """ Sequence of DimensionProxy objects attached to a dataset. """
    def __init__(self, dset):
        rank = len(dset.shape)
        # fall back to empty reference lists / empty byte labels when the
        # dataset carries no dimension-scale attributes
        if 'DIMENSION_LIST' in dset.attrs:
            dim_refs = dset.attrs['DIMENSION_LIST']
        else:
            dim_refs = [[]] * rank
        if 'DIMENSION_LABELS' in dset.attrs:
            labels = dset.attrs['DIMENSION_LABELS']
        else:
            labels = [b''] * rank
        self._dims = [DimensionProxy(dset.file, lab, refs)
                      for lab, refs in zip(labels, dim_refs)]
    def __len__(self):
        return len(self._dims)
    def __getitem__(self, index):
        return self._dims[index]
class DimensionProxy(Sequence):
    """ A single HDF5 dimension: a label plus references to scale datasets. """
    def __init__(self, dset_file, label, refs):
        # the label is stored in the file as bytes; expose it as str
        self.label = label.decode('utf-8')
        self._refs = refs
        self._file = dset_file
    def __len__(self):
        return len(self._refs)
    def __getitem__(self, index):
        ref = self._refs[index]
        return self._file[ref]
class AstypeContext(object):
    """
    Context manager which temporarily changes the type read from a dataset.
    """
    def __init__(self, dset, dtype):
        self._dset = dset
        self._dtype = np.dtype(dtype)
    def __enter__(self):
        # install the dtype override consulted by Dataset.__getitem__
        self._dset._astype = self._dtype
    def __exit__(self, *exc_info):
        self._dset._astype = None
|
jjhelmus/pyfive
|
pyfive/high_level.py
|
Python
|
bsd-3-clause
| 12,361
|
[
"VisIt"
] |
bed358089aece6bbb04ad3cc305e6fb645402cda1e94e1941c76bf18548b0996
|
import numpy as np
import scipy.linalg
import scipy.stats
import random
from math import log, pi, e
def log_sum(log_summands):
    """Stably compute log(sum(exp(log_summands))).

    The sum is anchored on the first element; if the result overflows to
    +/-inf or NaN, the array is shuffled and the computation retried with a
    different anchor until a finite value is obtained.

    :param array log_summands: 1-D array of log-values.
    :return float: log of the sum of the exponentials.
    """
    a = np.inf
    x = log_summands.copy()
    while a == np.inf or a == -np.inf or np.isnan(a):
        a = x[0] + np.log(1 + np.sum(np.exp(x[1:] - x[0])))
        random.shuffle(x)
    return a


def numerical_error(loglike, logweight, logevidence):
    """
    Compute the standard numerical error as defined by Geweke (Econometria 57,
    N. 6, pp. 1317-1339).

    :param array loglike:
        1-D array with log(likelihood) values evaluated in the sample drawn
        from the importance sampling density.
    :param array logweight:
        1-D array with log(weight) values evaluated in the sample drawn from
        the importance sampling density. The weight function is the prior
        density divided the importance sampling density (w = pi/I)
    :param float logevidence:
        log of the marginal likelihood estimation obtained.
    :return: the numerical error (a float, in log space).
    """
    # BUG FIX: the original overwrote the whole array instead of assigning
    # element i, attempted log(-1) (NaN), and the return line referenced an
    # undefined name `like`, raising NameError.
    log_likeminusevidence = np.zeros_like(loglike)
    for i in range(len(loglike)):
        # log(like_i - evidence), computed entirely in log space:
        # log(exp(l_i) - exp(L)) = L + log(expm1(l_i - L))
        log_likeminusevidence[i] = logevidence + np.log(np.expm1(loglike[i] - logevidence))
    return log_sum(log_likeminusevidence + 2*logweight) - 2 * log_sum(logweight)
def multivariate_normal(r, c, method='cholesky'):
    """
    Computes multivariate normal density for "residuals" vector r and
    covariance c.

    :param array r:
        1-D array of k dimensions.

    :param array c:
        2-D array or matrix of (k x k).

    :param string method:
        Method used to compute multivariate density.
        Possible values are:
            * "cholesky": uses the Cholesky decomposition of the covariance c,
              implemented in scipy.linalg.cho_factor and
              scipy.linalg.cho_solve.
            * "solve": uses the numpy.linalg functions solve() and slogdet().

    :return array: multivariate density at vector position r.
    """
    # Normalization term k*log(2*pi), shared by both methods.
    norm_term = len(r) * log(2*pi)
    if method == 'cholesky':
        # Cholesky factorization: log|c| = 2 * sum(log(diag(factor)))
        factor = scipy.linalg.cho_factor(c)
        alpha = scipy.linalg.cho_solve(factor, r)
        logdet = 2 * np.sum(np.log(np.diag(factor[0])))
        return -0.5 * (norm_term + np.dot(r, alpha) + logdet)
    elif method == 'solve':
        # Use slogdet and solve
        sign, logdet = np.linalg.slogdet(c)
        alpha = np.linalg.solve(c, r)
        return -0.5 * (norm_term + np.dot(r, alpha) + logdet)
class MultivariateGaussian(scipy.stats.rv_continuous):
    """Multivariate normal distribution with fixed mean and covariance."""
    def __init__(self, mu, cov):
        self.mu = mu
        # Small additive jitter keeps the covariance numerically well-behaved.
        self.covariance = cov + 1e-10
        self.dimensions = len(cov)

    # CHANGE THIS TO COMPUTE ON MULTI DIMENSIONAL x....
    def pdf(self, x, method='cholesky'):
        """Density at x: a 1-D sample vector or a 2-D (n x k) array of samples."""
        rank = len(x.shape)
        if rank == 1:
            return multivariate_normal(x - self.mu, self.covariance, method)
        if rank == 2:
            # Check that input array is well aligned with covariance.
            if x.T.shape[0] != len(self.covariance):
                raise ValueError('Input array not aligned with covariance. '
                                 'It must have dimensions (n x k), where k is '
                                 'the dimension of the multivariate Gaussian.')
            # One density value per sample row.
            densities = np.zeros(len(x))
            for row_index, sample in enumerate(x):
                densities[row_index] = multivariate_normal(sample - self.mu,
                                                           self.covariance,
                                                           method)
            return densities
        raise ValueError('Input array must be 1- or 2-D.')

    def rvs(self, nsamples):
        """Draw nsamples random vectors from the distribution."""
        return np.random.multivariate_normal(self.mu, self.covariance,
                                             nsamples)
|
exord/bayev
|
lib.py
|
Python
|
mit
| 3,932
|
[
"Gaussian"
] |
9804ad6aa209f9f6f544739d93edf888ad29451ffe6dc39461223d3efabb228e
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
orthorhombic_fitting
--------------------
This script creates an AnisotropicMineral object corresponding to
San Carlos olivine (an orthorhombic mineral). If run_fitting is set to True,
the script uses experimental data to find the optimal anisotropic parameters.
If set to False, it uses pre-optimized parameters.
The data is used to optimize both the isotropic (volumetric) and
anisotropic parameters.
The script ends by making three plots; one with the linear and volumetric
thermal expansivities at 1 bar, one with components of the
isentropic elastic stiffness tensor at high pressure, and one with
selected seismic properties at a fixed pressure and temperature.
"""
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import burnman
from burnman import AnisotropicMineral
from tools import print_table_for_mineral_constants
from burnman.tools.plot import plot_projected_elastic_properties
# If True, re-run the (slow) COBYLA optimization; if False, use the
# stored pre-optimized parameters further down the script.
run_fitting = False
# Bulk composition of San Carlos olivine (Fo90).
formula = 'Mg1.8Fe0.2SiO4'
formula = burnman.tools.chemistry.dictionarize_formula(formula)
formula_mass = burnman.tools.chemistry.formula_mass(formula)
# Define the unit cell lengths and unit cell volume.
# These are taken from Abramson et al., 1997
Z = 4.
cell_lengths_angstrom = np.array([4.7646, 10.2296, 5.9942])
# Convert cell lengths (angstrom) to molar dimensions (per-formula-unit,
# SI units) via Avogadro's number and Z formula units per cell.
cell_lengths_0_guess = cell_lengths_angstrom*np.cbrt(burnman.constants.Avogadro/Z/1.e30)
V_0_guess = np.prod(cell_lengths_0_guess)
# Experimental data: high P-T elasticity (Mao et al., 2015) and
# 1-bar lattice thermal expansion (Suzuki, 1975).
ol_data = np.loadtxt('data/Mao_et_al_2015_ol.dat')
ol_1bar_lattice_data_Suzuki = np.loadtxt('data/Suzuki_1975_ol_Kenya_expansion.dat')
# SLB2011 endmembers, used to estimate the Debye temperature of Fo90.
fo = burnman.minerals.SLB_2011.forsterite()
fa = burnman.minerals.SLB_2011.fayalite()
def make_orthorhombic_mineral_from_parameters(x):
    """Build an AnisotropicMineral for San Carlos olivine from the
    69-element parameter vector ``x``.

    Layout of ``x``: elements 0-4 scale the isotropic (volumetric)
    parameters (V_0, K_0, K'_0, gamma_0, q_0); elements 5-68 are the
    anisotropic tensor constants, 8 per independent Voigt component.
    """
    # Expansion orders of the anisotropic constants in f (finite strain)
    # and Pth (thermal pressure).
    f_order = 3
    Pth_order = 2
    constants = np.zeros((6, 6, f_order+1, Pth_order+1))
    san_carlos_params = {'name': 'San Carlos olivine',
                         'formula': formula,
                         'equation_of_state': 'slb3',
                         'F_0': 0.0,
                         'V_0': V_0_guess, # we overwrite this in a second
                         'K_0': 1.263e+11, # Abramson et al. 1997
                         'Kprime_0': 4.28, # Abramson et al. 1997
                         'Debye_0': fo.params['Debye_0']*0.9 + fa.params['Debye_0']*0.1, # Fo90 mix of SLB2011 endmembers
                         'grueneisen_0': 0.99282, # Fo in SLB2011
                         'q_0': 2.10672, # Fo in SLB2011
                         'G_0': 81.6e9,
                         'Gprime_0': 1.46257,
                         'eta_s_0': 2.29972,
                         'n': 7.,
                         'molar_mass': formula_mass}
    # Configurational entropy of Mg/Fe mixing applied as a linear modifier.
    san_carlos_property_modifiers = [['linear', {'delta_E': 0.0,
                                                 'delta_S': 26.76*0.1 - 2.*burnman.constants.gas_constant*(0.1*np.log(0.1) + 0.9*np.log(0.9)),
                                                 'delta_V': 0.0}]]
    ol = burnman.Mineral(params=san_carlos_params,
                         property_modifiers=san_carlos_property_modifiers)
    # Overwrite some properties
    ol.params['V_0'] = x[0]*V_0_guess # Abramson et al. 1997
    ol.params['K_0'] = x[1]*1.263e+11 # Abramson et al. 1997
    ol.params['Kprime_0'] = x[2]*4.28 # Abramson et al. 1997
    #ol.params['Debye_0'] = x[3]*809.1703 # Fo in SLB2011 strong tendency to 0
    ol.params['grueneisen_0'] = x[3]*0.99282 # Fo in SLB2011
    ol.params['q_0'] = x[4]*2.10672 # Fo in SLB2011
    # Next, each of the eight independent elastic tensor component get their turn.
    # We arbitrarily choose S[2,3] as the ninth component, which is determined by the others.
    i = 5
    for (p, q) in ((1, 1),
                   (2, 2),
                   (3, 3),
                   (4, 4),
                   (5, 5),
                   (6, 6),
                   (1, 2),
                   (1, 3)):
        # Constants multiplying powers of f (no thermal-pressure term).
        for (m, n) in ((1, 0),
                       (2, 0),
                       (3, 0)):
            constants[p-1, q-1, m, n] = x[i]
            constants[q-1, p-1, m, n] = x[i]
            i += 1
        # Constants multiplying the first power of Pth (scaled 1e-11).
        for (m, n) in ((0, 1),
                       (1, 1),
                       (2, 1),
                       (3, 1)):
            constants[p-1, q-1, m, n] = x[i]*1.e-11
            constants[q-1, p-1, m, n] = x[i]*1.e-11
            i += 1
        # Constant multiplying Pth^2 (scaled 1e-22).
        for (m, n) in ((0, 2),):
            constants[p-1, q-1, m, n] = x[i]*1.e-22
            constants[q-1, p-1, m, n] = x[i]*1.e-22
            i += 1
    assert i == 69 # 69 parameters in total: 5 isotropic + 8x8 anisotropic
    # Fill the values for the dependent element c[2,3]
    constants[1,2,1,0] = (1. - np.sum(constants[:3,:3,1,0])) / 2.
    constants[1,2,2:,0] = - np.sum(constants[:3,:3,2:,0], axis=(0, 1)) / 2.
    constants[1,2,:,1:] = - np.sum(constants[:3,:3,:,1:], axis=(0, 1)) / 2.
    # And for c[3,2]
    constants[2,1,:,:] = constants[1,2,:,:]
    # Scale the guessed cell lengths to match the fitted V_0.
    cell_lengths = cell_lengths_0_guess*np.cbrt(ol.params['V_0']/V_0_guess)
    ol_cell_parameters = np.array([cell_lengths[0],
                                   cell_lengths[1],
                                   cell_lengths[2],
                                   90, 90, 90])
    m = AnisotropicMineral(ol, ol_cell_parameters, constants)
    return m
# Optimization result container; replaced by the scipy OptimizeResult
# object when run_fitting is True, otherwise left empty.
sol = []
if run_fitting:
    def orthorhombic_misfit(x, imin):
        """Return the chi-squared misfit of the model with parameters x
        against the Mao et al. (2015) elasticity data and the
        Suzuki (1975) 1-bar lattice-expansion data.

        imin arrives (via scipy's args wrapping) as
        [[n_evaluations, best_misfit_so_far]]; it is mutated in place so
        progress can be printed across COBYLA evaluations.
        """
        m = make_orthorhombic_mineral_from_parameters(x)
        chisqr = 0.
        try:
            # High P-T density and stiffness data (Mao et al., 2015).
            for d in ol_data:
                TK, PGPa, rho, rhoerr = d[:4]
                C11, C11err = d[4:6]
                C22, C22err = d[6:8]
                C33, C33err = d[8:10]
                C44, C44err = d[10:12]
                C55, C55err = d[12:14]
                C66, C66err = d[14:16]
                C12, C12err = d[16:18]
                C13, C13err = d[18:20]
                C23, C23err = d[20:22]
                PPa = PGPa * 1.e9
                m.set_state(PPa, TK)
                CN = m.isentropic_stiffness_tensor/1.e9
                chisqr += np.power((m.density/1000. - rho)/rhoerr, 2.)
                chisqr += np.power((CN[0,0] - C11)/C11err, 2.)
                chisqr += np.power((CN[1,1] - C22)/C22err, 2.)
                chisqr += np.power((CN[2,2] - C33)/C33err, 2.)
                chisqr += np.power((CN[3,3] - C44)/C44err, 2.)
                chisqr += np.power((CN[4,4] - C55)/C55err, 2.)
                chisqr += np.power((CN[5,5] - C66)/C66err, 2.)
                chisqr += np.power((CN[0,1] - C12)/C12err, 2.)
                chisqr += np.power((CN[0,2] - C13)/C13err, 2.)
                chisqr += np.power((CN[1,2] - C23)/C23err, 2.)
            # Not San Carlos, fo92.3, not fo90.4
            for d in ol_1bar_lattice_data_Suzuki:
                m.set_state(1.e5, d[0] + 273.15) # T in C
                Y = ((np.diag(m.cell_vectors) / np.diag(m.cell_vectors_0)) - 1.)*1.e4
                Y_expt = d[1:4]
                Y_err = 0.01*Y_expt + 1.
                for i in range(3):
                    chisqr += np.power((Y_expt[i] - Y[i])/Y_err[i], 2.)
            #if chisqr < 1500.:
            #    print(chisqr)
            #m.set_state(1.e5, 300)
            #print(np.diag(m.thermal_expansivity_tensor))
            if np.isnan(chisqr):
                print(d, "Noooo, there was a nan")
                chisqr = 1.e7
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed. Ordinary failures for a bad parameter
            # set are penalized rather than aborting the whole fit.
            print('There was an exception')
            chisqr = 1.e7
        imin[0][0] += 1
        if chisqr < imin[0][1]:
            imin[0][1] = chisqr
            print(imin[0])
            print(repr(x))
        return chisqr
    guesses = np.array([ 1.00261177e+00, 9.91759509e-01, 1.00180767e+00, 1.12629568e+00,
                         3.13913957e-01, 4.43835171e-01, -9.38192626e-01, 8.57450038e-01,
                         2.63521201e-01, 3.10992538e-01, -5.84207311e+00, 1.22205974e+01,
                         5.11362234e-01, 7.76039201e-01, -1.00640533e+00, 5.66780847e+00,
                         5.12401782e-01, 1.59529634e+00, 1.23345902e+01, -7.60264507e+00,
                         3.06123818e-01, 6.62862573e-01, -6.29539285e-01, 9.07101981e+00,
                         1.70501045e+00, 1.90725482e+00, 6.48576298e+00, 2.99733967e+00,
                         3.62644594e-01, 1.96838589e+00, -4.97224163e-01, 2.08768703e+01,
                         -2.66242709e+00, 2.32579910e+00, -6.26342959e+00, 1.10758805e+01,
                         -4.99496737e+00, 1.61144010e+00, -1.85034515e+00, 2.32110973e+01,
                         -3.15692901e+00, 2.65209318e+00, 4.39232410e-01, 4.71069329e+00,
                         -6.24379333e+00, 1.55360338e+00, -1.42688476e+00, 1.26449796e+01,
                         -3.69943280e-01, 5.71780041e+00, 6.49141249e+00, -3.81945412e+00,
                         -1.25012075e+00, -1.20402033e-01, 4.38934297e-01, -1.17987749e+00,
                         4.61289178e-01, -2.21403680e-01, 7.81563940e+00, 8.17777878e+00,
                         -1.34030384e-02, -1.01671929e-01, 2.70232982e-01, -2.68143106e+00,
                         -6.93075277e-01, -4.04634113e-01, -3.49178491e+00, 1.09213501e+01,
                         4.91098948e-02])
    i = 0
    # Renamed from `min`, which shadowed the builtin of the same name.
    min_misfit = 1.e10
    # scipy wraps a non-tuple `args` in a 1-tuple, so the misfit function
    # receives imin == [[i, min_misfit]].
    sol = minimize(orthorhombic_misfit, guesses, method='COBYLA',
                   args=[[i, min_misfit]],
                   options={'rhobeg': 0.2, 'maxiter': 10000})
    print(sol)
do_plotting = True
if do_plotting:
    # Use the fresh fit if one was just run; otherwise fall back to a
    # stored parameter vector from a previous (near-converged) fit.
    if run_fitting:
        m = make_orthorhombic_mineral_from_parameters(sol.x)
    else:
        # Not final solution, but taken while improvement was slowing down.
        m = make_orthorhombic_mineral_from_parameters([ 1.00261177e+00, 9.91759509e-01, 1.00180767e+00, 1.12629568e+00,
                                                        3.13913957e-01, 4.43835171e-01, -9.38192626e-01, 8.57450038e-01,
                                                        2.63521201e-01, 3.10992538e-01, -5.84207311e+00, 1.22205974e+01,
                                                        5.11362234e-01, 7.76039201e-01, -1.00640533e+00, 5.66780847e+00,
                                                        5.12401782e-01, 1.59529634e+00, 1.23345902e+01, -7.60264507e+00,
                                                        3.06123818e-01, 6.62862573e-01, -6.29539285e-01, 9.07101981e+00,
                                                        1.70501045e+00, 1.90725482e+00, 6.48576298e+00, 2.99733967e+00,
                                                        3.62644594e-01, 1.96838589e+00, -4.97224163e-01, 2.08768703e+01,
                                                        -2.66242709e+00, 2.32579910e+00, -6.26342959e+00, 1.10758805e+01,
                                                        -4.99496737e+00, 1.61144010e+00, -1.85034515e+00, 2.32110973e+01,
                                                        -3.15692901e+00, 2.65209318e+00, 4.39232410e-01, 4.71069329e+00,
                                                        -6.24379333e+00, 1.55360338e+00, -1.42688476e+00, 1.26449796e+01,
                                                        -3.69943280e-01, 5.71780041e+00, 6.49141249e+00, -3.81945412e+00,
                                                        -1.25012075e+00, -1.20402033e-01, 4.38934297e-01, -1.17987749e+00,
                                                        4.61289178e-01, -2.21403680e-01, 7.81563940e+00, 8.17777878e+00,
                                                        -1.34030384e-02, -1.01671929e-01, 2.70232982e-01, -2.68143106e+00,
                                                        -6.93075277e-01, -4.04634113e-01, -3.49178491e+00, 1.09213501e+01,
                                                        4.91098948e-02])
    # Report the isotropic (volumetric) part of the model.
    print('The following parameters were used for the volumetric part of '
          f'the isotropic model: $V_0$: {m.params["V_0"]*1.e6:.5f} cm$^3$/mol, '
          f'$K_0$: {m.params["K_0"]/1.e9:.5f} GPa, '
          f'$K\'_0$: {m.params["Kprime_0"]:.5f}, '
          f'$\Theta_0$: {m.params["Debye_0"]:.5f} K, '
          f'$\gamma_0$: {m.params["grueneisen_0"]:.5f}, '
          f'and $q_0$: {m.params["q_0"]:.5f}.')
    # Tabulate the fitted anisotropic constants for each Voigt component.
    print_table_for_mineral_constants(m, [(1, 1), (2, 2), (3, 3),
                                          (4, 4), (5, 5), (6, 6),
                                          (1, 2), (1, 3), (2, 3)])
    # Plot thermal expansion figure
    # Top panel: linear and (volumetric/3) expansivities at 1 bar.
    # Bottom panel: relative length changes vs. Suzuki (1975) data.
    fig = plt.figure(figsize=(4, 8))
    ax = [fig.add_subplot(2, 1, i) for i in range(1, 3)]
    temperatures = np.linspace(10., 1600., 101)
    alphas = np.empty((101,4))
    extensions = np.empty((101,3))
    vectors = np.empty((101,4))
    labels = ['a', 'b', 'c', 'V']
    for i, T in enumerate(temperatures):
        m.set_state(1.e5, T)
        # Diagonal of the expansivity tensor = linear expansivities a, b, c.
        alphas[i,:3] = np.diag(m.thermal_expansivity_tensor)*1.e5
        alphas[i,3] = m.alpha*1.e5 / 3.
        extensions[i] = ((np.diag(m.cell_vectors) / np.diag(m.cell_vectors_0)) - 1.)*1.e4
        vectors[i,:3] = np.diag(m.cell_vectors)
        vectors[i,3] = m.V
    for i in range(4):
        label = f'$\\alpha_{{{labels[i]}}}$'
        if i == 3:
            # Volumetric expansivity / 3, compared against SLB2011.
            ln =ax[0].plot(temperatures, alphas[:,i], label=label+'/3')
            ol_SLB = burnman.minerals.SLB_2011.mg_fe_olivine([0.903, 0.097])
            pressures = 1.e5 + 0.*temperatures
            ax[0].plot(temperatures,
                       ol_SLB.evaluate(['alpha'], pressures,
                                       temperatures)[0]*1.e5/3.,
                       label=label+'/3 (SLB2011)',
                       linestyle='--', color=ln[0].get_color())
        else:
            ax[0].plot(temperatures, alphas[:,i], label=label)
    for i in range(3):
        # Model curve plus experimental points for each lattice direction.
        l = ax[1].plot(temperatures, extensions[:,i], label=labels[i])
        ax[1].scatter(ol_1bar_lattice_data_Suzuki[:,0]+273.15,
                      ol_1bar_lattice_data_Suzuki[:,1+i],
                      color=l[0].get_color())
    # Cube-root volumetric expansion derived from the three linear ones.
    Vthird_expansion = 1.e4*(np.power(np.prod(extensions*1.e-4 + 1, axis=1), 1./3.) - 1.)
    ln =ax[1].plot(temperatures, Vthird_expansion, label='$V^{1/3}$')
    ol_SLB = burnman.minerals.SLB_2011.mg_fe_olivine([0.903, 0.097])
    ol_SLB.set_state(1.e5, 300)
    V_0 = ol_SLB.V
    pressures = 1.e5 + 0.*temperatures
    ax[1].plot(temperatures,
               1.e4*(np.power(ol_SLB.evaluate(['V'], pressures,
                                              temperatures)[0]/V_0, 1./3.) - 1.),
               label='$V^{1/3}$ (SLB2011)',
               linestyle='--', color=ln[0].get_color())
    # Equivalent cube-root expansion of the experimental lattice data.
    Vthird_expansion = 1.e4*(np.power(np.prod(ol_1bar_lattice_data_Suzuki[:,1:4]*1.e-4 + 1, axis=1), 1./3.) - 1.)
    ax[1].scatter(ol_1bar_lattice_data_Suzuki[:,0]+273.15,
                  Vthird_expansion,
                  color=ln[0].get_color())
    ax[0].set_ylim(0.,)
    for i in range(2):
        ax[i].set_xlim(0.,1600.)
        ax[i].set_xlabel('Temperature (K)')
        ax[i].legend()
    ax[0].set_ylabel('Thermal expansivity (10$^{-5}$/K)')
    ax[1].set_ylabel('Relative length change ($10^{4} (x/x_0 - 1)$)')
    fig.set_tight_layout(True)
    fig.savefig('olivine_expansivities.pdf')
    plt.show()
    # Start plotting Cij figure
    # 3x3 grid of panels: one per isentropic stiffness component,
    # model curves vs. Mao et al. (2015) data at several temperatures.
    fig = plt.figure(figsize=(12, 12))
    ax = [fig.add_subplot(3, 3, i) for i in range(1, 10)]
    pressures = np.linspace(1.e7, 30.e9, 101)
    G_iso = np.empty_like(pressures)
    G_aniso = np.empty_like(pressures)
    C = np.empty((len(pressures), 6, 6))
    f = np.empty_like(pressures)
    dXdf = np.empty_like(pressures)
    # Voigt indices (1-based) of the nine plotted components.
    i_pq = ((1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
            (6, 6),
            (1, 2),
            (1, 3),
            (2, 3))
    temperatures = [300., 500., 750., 900.]
    for T in temperatures:
        for i, P in enumerate(pressures):
            m.set_state(P, T)
            C[i] = m.isentropic_stiffness_tensor
        # Data-file column layout, for reference:
        # TK, PGPa, rho, rhoerr = d[:4]
        #C11, C11err = d[4:6]
        #C22, C22err = d[6:8]
        #C33, C33err = d[8:10]
        #C44, C44err = d[10:12]
        #C55, C55err = d[12:14]
        #C66, C66err = d[14:16]
        #C12, C12err = d[16:18]
        #C13, C13err = d[18:20]
        #C23, C23err = d[20:22]
        # Select the experimental points within 1 K of this isotherm.
        T_data = np.array([d for d in ol_data if np.abs(d[0] - T) < 1])
        for i, (p, q) in enumerate(i_pq):
            ln = ax[i].plot(pressures/1.e9, C[:, p-1, q-1]/1.e9, label=f'{T} K')
            # Column offset of component i in the data file (see layout above).
            j = 4 + 2*i
            ax[i].scatter(T_data[:,1], T_data[:,j], color=ln[0].get_color())
            ax[i].errorbar(T_data[:,1], T_data[:,j], yerr=T_data[:,j+1],
                           linestyle='None', color=ln[0].get_color())
    for i, (p, q) in enumerate(i_pq):
        ax[i].set_xlabel('Pressure (GPa)')
        ax[i].set_ylabel(f'$C_{{N {p}{q}}}$ (GPa)')
        ax[i].legend()
    fig.set_tight_layout(True)
    fig.savefig('olivine_CNijs.pdf')
    plt.show()
    # Polar (pole-figure) plots of selected seismic properties at a
    # single fixed pressure and temperature.
    fig = plt.figure(figsize=(12, 7))
    ax = [fig.add_subplot(2, 3, i, projection='polar') for i in range(1, 7)]
    P = 3.e9
    T = 1600.
    m.set_state(P, T)
    plot_types = ['vp', 'vs1', 'vp/vs1',
                  's anisotropy', 'linear compressibility', 'youngs modulus']
    contour_sets, ticks, lines = plot_projected_elastic_properties(m,
                                                                   plot_types,
                                                                   ax)
    # One colorbar per panel, with contour lines overlaid on the bar.
    for i in range(len(contour_sets)):
        cbar = fig.colorbar(contour_sets[i], ax=ax[i],
                            ticks=ticks[i], pad = 0.1)
        cbar.add_lines(lines[i])
    fig.set_tight_layout(True)
    fig.savefig(f'olivine_seismic_properties_{P/1.e9:.2f}_GPa_{int(T)}_K.pdf')
    plt.show()
|
bobmyhill/burnman
|
contrib/anisotropic_eos/orthorhombic_fitting.py
|
Python
|
gpl-2.0
| 16,690
|
[
"Avogadro"
] |
7ddb77d89bf4f6a951d8e8b6d8af9bd9046b97c08c62eefe1a06042d1b48b076
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
def join_path(prefix, *args):
    """Join *prefix* with any number of additional path components.

    Every argument is converted with ``str`` first, so non-string
    components (ints, version objects, etc.) are accepted.
    """
    # os.path.join folds multiple components left-to-right, which is
    # exactly what the previous manual loop did.
    return os.path.join(str(prefix), *(str(elt) for elt in args))
def ancestor(dir, n=1):
    """Return the absolute path of the nth ancestor of *dir*."""
    path = os.path.abspath(dir)
    steps = n
    while steps > 0:
        path = os.path.dirname(path)
        steps -= 1
    return path
|
psi4/psi4
|
psi4/driver/util/filesystem.py
|
Python
|
lgpl-3.0
| 1,249
|
[
"Psi4"
] |
2b65fb3938b4c8c5cd23960aefebff064a49b0314d2eb715e93f4b12f5f7542a
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.