| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import itertools
import discord
from discord.ext import commands
from discord.ext.commands import Context
import cogs
from cogs.utils.chat_formatting import pagify
from cogs.utils import checks
from random import choice
import aiohttp
from __main__ import send_cmd_help
from cogs.economy import SetParser
channel_name_to_id = {
'general' : '291126049268563968',
'botspam' : '340737442367799297',
'cr' : '291128641335853056',
'streamclips': '292351030971334658',
'memes' : '302615325390798849',
'gaschamber' : '340368355754115074',
'botdev' : '340372201578430467'
}
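# Discord channel IDs (snowflake strings) keyed by short channel name; used by the saychan command below.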
RULES_URL = "https://www.reddit.com/r/CRRedditAlpha/comments/584ba2/reddit_alpha_clan_family_rules/"
ROLES_URL = "https://www.reddit.com/r/CRRedditAlpha/wiki/roles"
DISCORD_URL = "http://discord.gg/racf"
welcome_msg = "Hi {}! Are you in the Reddit Alpha Clan Family (RACF) / " \
"interested in joining our clans / just visiting?"
CHANGECLAN_ROLES = ["Leader", "Co-Leader", "Elder", "High Elder", "Member"]
BS_CHANGECLAN_ROLES = ["Member", "Brawl-Stars"]
DISALLOWED_ROLES = ["SUPERMOD", "MOD", "AlphaBot"]
HEIST_ROLE = "Heist"
RECRUIT_ROLE = "Recruit"
TOGGLE_ROLES = ["Member", "Visitor"]
TOGGLEABLE_ROLES = [
"Heist", "Practice", "Tourney", "Recruit", "CoC",
"Battle-Bay", "RACF-Tourney", "Brawl-Stars", "vc-crew"]
TOGGLE_PERM = {
"Member": [
"Heist", "Practice", "Tourney", "Recruit", "CoC",
"Battle-Bay", "RACF-Tourney", "Brawl-Stars", "vc-crew",
"BSPlay"
],
"Visitor": [
"BSPlay", "Heist", "Recruit"
]
}
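# Roles each base role (Member / Visitor) may self-assign via the togglerole command (currently commented out below).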
MEMBER_DEFAULT_ROLES = ["Member", "Tourney", "Practice"]
CLANS = [
"Alpha", "Bravo", "Charlie", "Delta",
"Echo", "Foxtrot", "Golf", "Hotel"]
BS_CLANS = [
"BS-Alpha", "BS-Bravo", "BS-Charlie"]
BS_CLANS_PREFIX = 'BS-'
BOTCOMMANDER_ROLE = ["Bot Commander"]
HE_BOTCOMMANDER_ROLES = ["Bot Commander", "High-Elder"]
COMPETITIVE_CAPTAIN_ROLES = ["Competitive-Captain", "Bot Commander"]
COMPETITIVE_TEAM_ROLES = [
"CRL", "RPL-NA", "RPL-EU", "RPL-APAC", "MLG",
"ClashWars", "CRL-Elite", "CRL-Legends", "CRL-Rockets"]
KICK5050_MSG = (
"Sorry, but you were 50/50 and we have kicked you from the clan. "
"Please join one of our feeders for now. "
"Our clans are Alpha / Bravo / Charlie / Delta / "
"Echo / Foxtrot / Golf / Hotel with the red rocket emblem. "
"Good luck on the ladder!")
BS_KICK5050_MSG = (
"Sorry, but you were 50/50 and we have "
"kicked you from the Brawl Stars band. "
"Please join one of our feeders for now. "
"Our clans are Alpha / Bravo / Charlie "
"with the red skull emblem. "
"Good luck in your future games!")
VISITOR_RULES = (
"Welcome to the **Reddit Alpha Clan Family** (RACF) Discord server. "
"As a visitor, you agree to follow the following rules: \n"
"\n"
"+ No spamming.\n"
"+ No advertisement of any kind, "
"e.g. Facebook / Twitter / YouTube / Friend Invite Links\n"
"+ Use #bot-commands for bot features, e.g. `!deck` / `!crdata`\n"
"+ Use #casino for bot commands related to casino, "
"e.g. `!payday` / `!slot` / `!heist`\n"
"\n"
"Failure to follow these rules will get you kicked from the server. "
"Repeat offenders will be banned.\n"
"\n"
"If you would like to invite your friends to join this server, "
"you may use this Discord invite: <http://discord.gg/racf> \n"
"\n"
"Thanks + enjoy!")
ELDER_MSG = (
"Congratulations on your recent promotion to Elder! \n"
"\n"
"You have the following responsibilities as elder in the RACF:\n"
"+ Accept new members.\n"
"+ When accepting new members, you should:\n"
".. + Ask if the person is new to the RACF.\n"
".. + Ask that person to join our Discord server: http://discord.gg/racf\n"
".. + Let them know about the 50/50 kicking policy.\n"
"+ Not allowed to kick 50/50.\n"
"\n"
"Please consult !rules and !roles on the RACF server for more info."
)
def grouper(n, iterable, fillvalue=None):
"""Group lists into lists of items.
grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
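# Illustrative example (not part of the original code): grouper yields fixed-size
# tuples, padding the last one with the fill value:
#     list(grouper(3, 'ABCDEFG', 'x'))
#     # -> [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]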
class RACF:
"""Display RACF specifc info.
Note: RACF specific plugin for Red
"""
def __init__(self, bot):
"""Constructor."""
self.bot = bot
# @commands.command(pass_context=True, no_pm=True)
# async def racf(self, ctx: Context):
# """RACF Rules + Roles."""
# server = ctx.message.server
# color = ''.join([choice('0123456789ABCDEF') for x in range(6)])
# color = int(color, 16)
# data = discord.Embed(
# color=discord.Color(value=color),
# title="Rules + Roles",
# description="Important information for all members. Please read.")
# if server.icon_url:
# data.set_author(name=server.name, url=server.icon_url)
# data.set_thumbnail(url=server.icon_url)
# else:
# data.set_author(name=server.name)
# try:
# await self.bot.say(embed=data)
# except discord.HTTPException:
# await self.bot.say(
# "I need the `Embed links` permission to send this.")
# out = []
# out.append("**Rules**")
# out.append("<{}>".format(RULES_URL))
# out.append('')
# out.append("**Roles**")
# out.append("<{}>".format(ROLES_URL))
# out.append('')
# out.append("**Discord invite**")
# out.append("<{}>".format(DISCORD_URL))
# await self.bot.say('\n'.join(out))
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*CHANGECLAN_ROLES)
# async def changeclan(self, ctx, clan: str=None):
# """Update clan role when moved to a new clan.
# Example: !changeclan Delta
# """
# clans = [c.lower() for c in CLANS]
# await self.do_changeclan(ctx, clan, clans)
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*BS_CHANGECLAN_ROLES)
# async def bschangeclan(self, ctx, clan: str=None):
# """Update clan role when moved to a new clan.
# Example: !bschangeclan BS-Delta
# """
# if clan is None:
# await send_cmd_help(ctx)
# return
# if not clan.lower().startswith(BS_CLANS_PREFIX.lower()):
# clan = BS_CLANS_PREFIX + clan
# clans = [c.lower() for c in BS_CLANS]
# await self.do_changeclan(ctx, clan, clans)
# @commands.command(pass_context=True ,no_pm=True)
# @commands.has_any_role(*HE_BOTCOMMANDER_ROLES)
# async def bselder(self, ctx, member: discord.Member):
# """Add bs-elder role for member.
# TMP command for the BS leader who’s not a Bot Commander.
# """
# role = discord.utils.get(ctx.message.server.roles, name="BS-Elder")
# await self.bot.add_roles(member, role)
# await self.bot.say(
# "Added {} for {}".format(
# role.name, member.display_name))
# async def do_changeclan(self, ctx, clan: str=None, clans=[]):
# """Perform clan changes."""
# author = ctx.message.author
# server = ctx.message.server
# if clan is None:
# await send_cmd_help(ctx)
# return
# if clan.lower() not in clans:
# await self.bot.say(
# "{} is not a clan you can self-assign.".format(clan))
# return
# clan_roles = [r for r in server.roles if r.name.lower() in clans]
# to_remove_roles = set(author.roles) & set(clan_roles)
# to_add_roles = [
# r for r in server.roles if r.name.lower() == clan.lower()]
# await self.bot.remove_roles(author, *to_remove_roles)
# await self.bot.say("Removed {} for {}".format(
# ",".join([r.name for r in to_remove_roles]),
# author.display_name))
# await self.bot.add_roles(author, *to_add_roles)
# await self.bot.say("Added {} for {}".format(
# ",".join([r.name for r in to_add_roles]),
# author.display_name))
async def changerole(self, ctx, member: discord.Member=None, *roles: str):
"""Change roles of a user.
Uses the changerole command in the MM cog.
"""
mm = self.bot.get_cog("MemberManagement")
if mm is None:
await self.bot.say(
"You must load MemberManagement for this to run.")
return
await ctx.invoke(mm.changerole, member, *roles)
@commands.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLE)
async def addrole(
self, ctx, member: discord.Member=None, *, role_name: str=None):
"""Add role to a user.
Example: !addrole SML Delta
"""
await self.changerole(ctx, member, role_name)
@commands.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLE)
async def removerole(
self, ctx, member: discord.Member=None, *, role_name: str=None):
"""Remove role from a user.
Example: !removerole SML Delta
"""
role_name = '-{}'.format(role_name)
await self.changerole(ctx, member, role_name)
@commands.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLE)
async def multiaddrole(self, ctx, role, *members: discord.Member):
"""Add a role to multiple users.
!multiaddrole rolename User1 User2 User3
"""
for member in members:
await self.changerole(ctx, member, role)
@commands.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLE)
async def multiremoverole(self, ctx, role, *members: discord.Member):
"""Remove a role from multiple users.
!multiremoverole rolename User1 User2 User3
"""
role = '-{}'.format(role)
for member in members:
await self.changerole(ctx, member, role)
@commands.command(pass_context=True, no_pm=True)
@checks.mod_or_permissions(mention_everyone=True)
async def mentionusers(self, ctx, role: str, *msg):
"""Mention users by role.
Example:
!mentionusers Delta Anyone who is 4,300+ please move up to Charlie!
Note: only usable by people with the permission to mention @everyone
"""
server = ctx.message.server
server_roles_names = [r.name for r in server.roles]
if role not in server_roles_names:
await self.bot.say(
"{} is not a valid role on this server.".format(role))
elif not msg:
await self.bot.say("You have not entered any messages.")
else:
out_mentions = []
for m in server.members:
if role in [r.name for r in m.roles]:
out_mentions.append(m.mention)
await self.bot.say("{} {}".format(" ".join(out_mentions),
" ".join(msg)))
@commands.command(pass_context=True, no_pm=True)
@checks.mod_or_permissions(mention_everyone=True)
async def mentionrole(self, ctx, role_name: str, *, msg):
"""Mention a role with message.
Temporarily make a role mentionable and send a message.
Delete the invoking message so the mention isn’t duplicated.
"""
server = ctx.message.server
# role = discord.utils.get(server.roles, name=role_name)
# find role regardless of casing
role = None
for r in server.roles:
if r.name.lower() == role_name.lower():
role = r
break
if role is None:
await self.bot.say(
'{} is not a valid role on this server.'.format(
role_name))
return
orig_mentionable = role.mentionable
await self.bot.edit_role(server, role, mentionable=True)
await self.bot.say(
'**{author.mention}** ({author.id}): '
'{role.mention} {message}'.format(
author=ctx.message.author,
role=role,
message=msg))
await self.bot.edit_role(server, role, mentionable=orig_mentionable)
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True, no_pm=True)
async def avatar(self, ctx, member: discord.Member=None):
"""Display avatar of the user."""
author = ctx.message.author
if member is None:
member = author
if member.nick is None:
name = member.name
else:
name = member.nick
postembed = discord.Embed(title=name, url=member.avatar_url)
postembed.set_image(url=member.avatar_url)
await self.bot.say(embed=postembed)
@commands.command(pass_context=True, no_pm=True)
async def serverinfo2(self, ctx: Context):
"""Show server's informations specific to RACF."""
server = ctx.message.server
online = len([m.status for m in server.members
if m.status == discord.Status.online or
m.status == discord.Status.idle])
total_users = len(server.members)
text_channels = len([x for x in server.channels
if x.type == discord.ChannelType.text])
voice_channels = len(server.channels) - text_channels
passed = (ctx.message.timestamp - server.created_at).days
created_at = ("Since {}. That's over {} days ago!"
"".format(server.created_at.strftime("%d %b %Y %H:%M"),
passed))
role_names = [
"Leader", "Co-Leader", "High Elder", "Elder",
"Member", "Honorary Member", "Visitor"]
role_count = {}
for role_name in role_names:
role_count[role_name] = len(
[m for m in server.members
if role_name in [r.name for r in m.roles]])
colour = ''.join([choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
data = discord.Embed(
description=created_at,
colour=discord.Colour(value=colour))
data.add_field(name="Region", value=str(server.region))
data.add_field(name="Users", value="{}/{}".format(online, total_users))
data.add_field(name="Text Channels", value=text_channels)
data.add_field(name="Voice Channels", value=voice_channels)
data.add_field(name="Roles", value=len(server.roles))
data.add_field(name="Owner", value=str(server.owner))
data.add_field(name="\a", value="\a", inline=False)
for role_name in role_names:
data.add_field(name="{}s".format(role_name),
value=role_count[role_name])
data.set_footer(text="Server ID: " + server.id)
if server.icon_url:
data.set_author(name=server.name, url=server.icon_url)
data.set_thumbnail(url=server.icon_url)
else:
data.set_author(name=server.name)
try:
await self.bot.say(embed=data)
except discord.HTTPException:
await self.bot.say("I need the `Embed links` permission "
"to send this")
@commands.command(pass_context=True, no_pm=True)
@checks.mod_or_permissions(administrator=True)
async def member2roles(self, ctx: Context, with_role, new_role):
"""Add role to a list of users with specific roles."""
server = ctx.message.server
with_role = discord.utils.get(server.roles, name=with_role)
new_role = discord.utils.get(server.roles, name=new_role)
if with_role is None:
await self.bot.say('{} is not a valid role'.format(with_role))
return
if new_role is None:
await self.bot.say('{} is not a valid role.'.format(new_role))
return
members = [m for m in server.members if with_role in m.roles]
for member in members:
await self.bot.add_roles(member, new_role)
await self.bot.say("Added {} for {}".format(
new_role, member.display_name))
# @commands.command(pass_context=True, no_pm=True, aliases=["m2v"])
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# async def member2visitor(self, ctx: Context, *members: discord.Member):
# """Re-assign list of people from members to visitors."""
# server = ctx.message.server
# to_add_roles = [r for r in server.roles if r.name == 'Visitor']
# for member in members:
# to_remove_roles = [
# r for r in member.roles if r.name in MEMBER_DEFAULT_ROLES]
# to_remove_roles.extend([
# r for r in member.roles if r.name in CLANS])
# to_remove_roles.extend([
# r for r in member.roles if r.name in ['eSports']])
# await self.bot.add_roles(member, *to_add_roles)
# await self.bot.say("Added {} for {}".format(
# ", ".join([r.name for r in to_add_roles]), member.display_name))
# await self.bot.remove_roles(member, *to_remove_roles)
# await self.bot.say("Removed {} from {}".format(
# ", ".join([r.name for r in to_remove_roles]), member.display_name))
# @commands.command(pass_context=True, no_pm=True, aliases=["v2m"])
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# async def visitor2member(
# self, ctx: Context, member: discord.Member, *roles):
# """Assign visitor to member and add clan name."""
# server = ctx.message.server
# roles_param = MEMBER_DEFAULT_ROLES.copy()
# roles_param.extend(roles)
# roles_param.append("-Visitor")
# channel = discord.utils.get(
# ctx.message.server.channels, name="family-chat")
# # print(roles_param)
# await self.changerole(ctx, member, *roles_param)
# if channel is not None:
# await self.bot.say(
# "{} Welcome! Main family chat at {} — enjoy!".format(
# member.mention, channel.mention))
@commands.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLE)
async def dmusers(self, ctx: Context, msg: str=None,
*members: discord.Member):
"""Send a DM to a list of people.
Example
!dmusers "Have a nice day" @Dino @AbePlaysGame AwesomeAim
"""
if msg is None:
await self.bot.say("Please include a message.")
elif not len(members):
await self.bot.say("You must include at least one member.")
else:
data = discord.Embed(description=msg)
data.set_author(
name=ctx.message.author,
icon_url=ctx.message.author.avatar_url)
data.set_footer(text=ctx.message.server.name)
# data.add_field(
# name="How to reply",
# value="DM or tag {0.mention} if you want to reply.".format(
# ctx.message.author))
for m in members:
try:
await self.bot.send_message(m, embed=data)
await self.bot.say(
"Message sent to {}".format(m.display_name))
except discord.errors.Forbidden:
await self.bot.say(
"{} does not accept DMs from me.".format(
m.display_name))
raise
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions()
async def dmusersa(self, ctx: Context, msg: str=None,
*members: discord.Member):
"""Send a DM anonymously to a list of people.
Example
!dmusers "Have a nice day" @Dino @AbePlaysGame AwesomeAim
"""
if msg is None:
await self.bot.say("Please include a message.")
elif not len(members):
await self.bot.say("You must include at least one member.")
else:
data = discord.Embed(description=msg)
data.set_author(
name=ctx.message.author,
icon_url=ctx.message.author.avatar_url)
data.set_footer(text=ctx.message.server.name)
# data.add_field(
# name="How to reply",
# value="DM or tag {0.mention} if you want to reply.".format(
# ctx.message.author))
for m in members:
try:
await self.bot.send_message(m, embed=data)
await self.bot.say(
"Message sent to {}".format(m.display_name))
except discord.errors.Forbidden:
await self.bot.say(
"{} does not accept DMs from me.".format(
m.display_name))
raise
@commands.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLE)
async def changenick(
self, ctx: Context, member: discord.Member, *, nickname: str):
"""Change the nickname of a member.
Example
!changenick SML "New Nick"
!changenick @SML "New Nick"
"""
# await self.bot.change_nickname(member, nickname)
if member.nick is None:
prevnick = member.name
else:
prevnick = member.nick
try:
await self.bot.change_nickname(member, nickname)
except discord.HTTPException:
await self.bot.say(
"I don’t have permission to do this.")
else:
await self.bot.say("{member.mention} changed from {prevnick} to {nickname}.")
@commands.command(pass_context=True, no_pm=True)
async def emojis(self, ctx: Context, embed=False):
"""Show all emojis available on server."""
server = ctx.message.server
if embed:
emoji_list = [emoji for emoji in server.emojis if not emoji.managed]
emoji_lists = grouper(25, emoji_list)
for emoji_list in emoji_lists:
em = discord.Embed()
for emoji in emoji_list:
if emoji is not None:
em.add_field(
name=str(emoji), value="`:{}:`".format(emoji.name))
await self.bot.say(embed=em)
else:
out = []
for emoji in server.emojis:
# only include in list if not managed by Twitch
if not emoji.managed:
emoji_str = str(emoji)
out.append("{} `:{}:`".format(emoji_str, emoji.name))
for page in pagify("\n".join(out), shorten_by=12):
await self.bot.say(page)
@commands.command(pass_context=True, no_pm=True)
@checks.mod_or_permissions()
async def bankset(
self, ctx: Context, user: discord.Member, credits: SetParser):
"""Work around to allow MODs to set bank."""
econ = self.bot.get_cog("Economy")
await ctx.invoke(econ._set, user, credits)
@commands.group(pass_context=True, no_pm=True)
@checks.mod_or_permissions()
async def removereaction(self, ctx: Context):
"""Remove reactions from messages."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@removereaction.command(name="messages", pass_context=True, no_pm=True)
async def removereaction_messages(self, ctx: Context, number: int):
"""Remove reactions from last X messages."""
channel = ctx.message.channel
author = ctx.message.author
server = author.server
has_permissions = channel.permissions_for(server.me).manage_messages
to_manage = []
if not has_permissions:
await self.bot.say("I’m not allowed to remove reactions.")
return
async for message in self.bot.logs_from(channel, limit=number + 1):
to_manage.append(message)
await self.remove_reactions(to_manage)
async def remove_reactions(self, messages):
"""Remove reactions."""
for message in messages:
await self.bot.clear_reactions(message)
@commands.command(pass_context=True, no_pm=True)
@checks.mod_or_permissions()
async def addreaction(self, ctx, *args):
"""Add reactions to a message by message id.
Add reactions to a specific message id
[p]addreaction 123456 :white_check_mark: :x: :zzz:
Add reactions to the last message in channel
[p]addreaction :white_check_mark: :x: :zzz:
"""
channel = ctx.message.channel
if not len(args):
await send_cmd_help(ctx)
return
has_message_id = args[0].isdigit()
emojis = args[1:] if has_message_id else args
message_id = args[0] if has_message_id else None
if has_message_id:
try:
message = await self.bot.get_message(channel, message_id)
except discord.NotFound:
await self.bot.say("Cannot find message with that id.")
return
else:
# use the 2nd last message because the last message would be the command
messages = [m async for m in self.bot.logs_from(channel, limit=2)]
message = messages[1]
for emoji in emojis:
try:
await self.bot.add_reaction(message, emoji)
except discord.Forbidden:
# Forbidden is a subclass of HTTPException, so catch it first
await self.bot.say(
"I don’t have permission to react to that message.")
break
except discord.InvalidArgument:
await self.bot.say("Invalid arguments for emojis")
break
except discord.HTTPException:
# reaction add failed for another reason; skip this emoji
pass
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True, no_pm=True)
@checks.mod_or_permissions()
async def addreaction2(self, ctx, *args):
"""Add reactions to a message by message id.
Add reactions to a specific message id
[p]addreaction 123456 :white_check_mark: :x: :zzz:
Add reactions to the last message in channel
[p]addreaction :white_check_mark: :x: :zzz:
"""
channel = ctx.message.channel
if not len(args):
await send_cmd_help(ctx)
return
has_message_id = args[0].isdigit()
emojis = args[1:] if has_message_id else args
message_id = args[0] if has_message_id else None
if has_message_id:
try:
message = await self.bot.get_message(channel, message_id)
except discord.NotFound:
await self.bot.say("Cannot find message with that id.")
return
else:
# use the 2nd last message because the last message would be the command
messages = [m async for m in self.bot.logs_from(channel, limit=2)]
message = messages[1]
await self.bot.say(message.id)
await self.bot.say(emojis)
for emoji in emojis:
try:
await self.bot.add_reaction(message, emoji)
except discord.Forbidden:
# Forbidden is a subclass of HTTPException, so catch it first
await self.bot.say(
"I don’t have permission to react to that message.")
break
except discord.InvalidArgument:
await self.bot.say("Invalid arguments for emojis")
break
except discord.HTTPException:
# reaction add failed for another reason; skip this emoji
pass
await self.bot.delete_message(ctx.message)
# @commands.command(pass_context=True, no_pm=True)
# async def toggleheist(self, ctx: Context):
# """Self-toggle heist role."""
# author = ctx.message.author
# server = ctx.message.server
# heist_role = discord.utils.get(
# server.roles, name=HEIST_ROLE)
# if heist_role in author.roles:
# await self.bot.remove_roles(author, heist_role)
# await self.bot.say(
# "Removed {} role from {}.".format(
# HEIST_ROLE, author.display_name))
# else:
# await self.bot.add_roles(author, heist_role)
# await self.bot.say(
# "Added {} role for {}.".format(
# HEIST_ROLE, author.display_name))
# @commands.command(pass_context=True, no_pm=True)
# async def togglerecruit(self, ctx: Context):
# """Self-toggle heist role."""
# author = ctx.message.author
# server = ctx.message.server
# role = discord.utils.get(
# server.roles, name=RECRUIT_ROLE)
# if role in author.roles:
# await self.bot.remove_roles(author, role)
# await self.bot.say(
# "Removed {} role from {}.".format(
# RECRUIT_ROLE, author.display_name))
# else:
# await self.bot.add_roles(author, role)
# await self.bot.say(
# "Added {} role for {}.".format(
# RECRUIT_ROLE, author.display_name))
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*TOGGLE_ROLES)
# async def togglerole(self, ctx: Context, role_name):
# """Self-toggle role assignments."""
# author = ctx.message.author
# server = ctx.message.server
# # toggleable_roles = [r.lower() for r in TOGGLEABLE_ROLES]
# member_role = discord.utils.get(server.roles, name="Member")
# is_member = member_role in author.roles
# if is_member:
# toggleable_roles = TOGGLE_PERM["Member"]
# else:
# toggleable_roles = TOGGLE_PERM["Visitor"]
# toggleable_roles = sorted(toggleable_roles)
# toggleable_roles_lower = [r.lower() for r in toggleable_roles]
# if role_name.lower() in toggleable_roles_lower:
# role = [
# r for r in server.roles
# if r.name.lower() == role_name.lower()]
# if len(role):
# role = role[0]
# if role in author.roles:
# await self.bot.remove_roles(author, role)
# await self.bot.say(
# "Removed {} role from {}.".format(
# role.name, author.display_name))
# else:
# await self.bot.add_roles(author, role)
# await self.bot.say(
# "Added {} role for {}.".format(
# role_name, author.display_name))
# else:
# await self.bot.say(
# "{} is not a valid role on this server.".format(role_name))
# else:
# out = []
# out.append(
# "{} is not a toggleable role for you.".format(role_name))
# out.append(
# "Toggleable roles for you: {}.".format(
# ", ".join(toggleable_roles)))
# await self.bot.say("\n".join(out))
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*COMPETITIVE_CAPTAIN_ROLES)
# async def teamadd(self, ctx, member: discord.Member, role):
# """Add competitive team member roles."""
# server = ctx.message.server
# competitive_team_roles = [r.lower() for r in COMPETITIVE_TEAM_ROLES]
# if role.lower() not in competitive_team_roles:
# await self.bot.say(
# "{} is not a competitive team role.".format(role))
# return
# if role.lower() not in [r.name.lower() for r in server.roles]:
# await self.bot.say("{} is not a role on this server.".format(role))
# return
# roles = [r for r in server.roles if r.name.lower() == role.lower()]
# await self.bot.add_roles(member, *roles)
# await self.bot.say("Added {} for {}".format(role, member.display_name))
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*COMPETITIVE_CAPTAIN_ROLES)
# async def teamremove(self, ctx, member: discord.Member, role):
# """Remove competitive team member roles."""
# server = ctx.message.server
# competitive_team_roles = [r.lower() for r in COMPETITIVE_TEAM_ROLES]
# if role.lower() not in competitive_team_roles:
# await self.bot.say(
# "{} is not a competitive team role.".format(role))
# return
# if role.lower() not in [r.name.lower() for r in server.roles]:
# await self.bot.say("{} is not a role on this server.".format(role))
# return
# roles = [r for r in server.roles if r.name.lower() == role.lower()]
# await self.bot.remove_roles(member, *roles)
# await self.bot.say(
# "Removed {} from {}".format(role, member.display_name))
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*COMPETITIVE_CAPTAIN_ROLES)
# async def teamlist(self, ctx, role_name):
# """List team members with specific competitive roles.
# Default CSV output.
# """
# server = ctx.message.server
# competitive_team_roles = [r.lower() for r in COMPETITIVE_TEAM_ROLES]
# if role_name.lower() not in competitive_team_roles:
# await self.bot.say(
# "{} is not a competitive team role.".format(role_name))
# return
# role = discord.utils.get(server.roles, name=role_name)
# if role is None:
# await self.bot.say(
# '{} is not a valid role on this server.'.format(role_name))
# return
# members = [m for m in server.members if role in m.roles]
# members = sorted(members, key=lambda x: x.display_name)
# out = ', '.join([m.display_name for m in members])
# await self.bot.say(
# 'List of members with {}:\n'
# '{}'.format(role_name, out))
# @commands.command(pass_context=True, no_pm=True, aliases=["k5"])
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# async def kick5050(self, ctx, member: discord.Member):
# """Notify member that they were kicked for lower trophies.
# Remove clan tags in the process.
# """
# await ctx.invoke(self.dmusers, KICK5050_MSG, member)
# member_clan = [
# '-{}'.format(r.name) for r in member.roles if r.name in CLANS]
# if len(member_clan):
# await self.changerole(ctx, member, *member_clan)
# else:
# await self.bot.say("Member has no clan roles to remove.")
# @commands.command(pass_context=True, no_pm=True, aliases=["bsk5", "bk5"])
# @commands.has_any_role(*HE_BOTCOMMANDER_ROLES)
# async def bskick5050(self, ctx, member: discord.Member):
# """Notify member that they were kicked for lower trophies.
# Remove clan tags in the process.
# """
# await ctx.invoke(self.dmusers, BS_KICK5050_MSG, member)
# member_clan = [
# '-{}'.format(r.name) for r in member.roles if r.name in BS_CLANS]
# if len(member_clan):
# await self.changerole(ctx, member, *member_clan)
# else:
# await self.bot.say("Member has no clan roles to remove.")
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# async def recruit(self, ctx, member: discord.Member):
# """Assign member with recruit roles and give them info.
# Command detects origin:
# If command is invoked from default channel, add Visitor role.
# If command in invoked from other channels, only add Recruit role.
# """
# recruit_roles = ["Recruit"]
# add_visitor_role = False
# if ctx.message.channel.is_default:
# recruit_roles.append("Visitor")
# add_visitor_role = True
# await self.changerole(ctx, member, *recruit_roles)
# channel = discord.utils.get(
# ctx.message.server.channels, name="esports-recruiting")
# if channel is not None:
# await self.bot.say(
# "{} Please see pinned messages "
# "in {} for eSports information.".format(
# member.mention, channel.mention))
# if add_visitor_role:
# visitor_channel = discord.utils.get(
# ctx.message.server.channels, name="visitors")
# if visitor_channel is not None:
# await self.bot.say(
# "{} You can now chat in {} — enjoy!".format(
# member.mention, visitor_channel.mention))
# await ctx.invoke(self.visitorrules, member)
# @commands.command(pass_context=True, no_pm=True)
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# async def visitor(self, ctx, member: discord.Member):
# """Assign member with visitor roles and give them info."""
# visitor_roles = ["Visitor"]
# channel = discord.utils.get(
# ctx.message.server.channels, name="visitors")
# await self.changerole(ctx, member, *visitor_roles)
# if channel is not None:
# await self.bot.say(
# "{} You can now chat in {} — enjoy!".format(
# member.mention, channel.mention))
# await ctx.invoke(self.visitorrules, member)
# @commands.command(pass_context=True, no_pm=True, aliases=['bs'])
# @commands.has_any_role(*HE_BOTCOMMANDER_ROLES)
# async def brawlstars(self, ctx, member: discord.Member, *roles):
# """Assign member with visitor and brawl-stars roles."""
# bs_roles = ["Brawl-Stars"]
# if discord.utils.get(member.roles, name="Member") is None:
# if discord.utils.get(member.roles, name="Guest") is None:
# if discord.utils.get(member.roles, name="Visitor") is None:
# bs_roles.append("Visitor")
# channel = discord.utils.get(
# ctx.message.server.channels, name="brawl-stars")
# await self.changerole(ctx, member, *bs_roles)
# if channel is not None:
# await self.bot.say(
# "{} You can now chat in {} — enjoy!".format(
# member.mention, channel.mention))
# if "Visitor" in bs_roles:
# await ctx.invoke(self.visitorrules, member)
# # Add additional roles if present
# if len(roles):
# await self.changerole(ctx, member, *roles)
# @commands.command(pass_context=True, no_pm=True, aliases=['vrules', 'vr'])
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# async def visitorrules(self, ctx, *members: discord.Member):
# """DM server rules to user."""
# try:
# await ctx.invoke(self.dmusers, VISITOR_RULES, *members)
# await self.bot.say(
# "A list of rules has been sent via DM to {}.".format(
# ", ".join([m.display_name for m in members])))
# except discord.errors.Forbidden:
# await self.bot.say(
# '{} {}'.format(
# " ".join([m.mention for m in members]),
# VISITOR_RULES))
# @commands.command(pass_context=True, no_pm=True)
# async def pay(self, ctx, amt, *members: discord.Member):
# """Pay amount to member(s).
# If more than one person is specificed, equally divide the credits.
# """
# bank = self.bot.get_cog('Economy').bank
# amt = int(amt)
# split_amt = int(amt / (len(members)))
# for member in members:
# if member != ctx.message.author:
# try:
# bank.transfer_credits(
# ctx.message.author, member, split_amt)
# except cogs.economy.NoAccount:
# await self.bot.say(
# "{} has no account.".format(member.display_name))
# split_msg = ""
# if len(members) > 1:
# split_msg = ' ({} credits each)'.format(split_amt)
# await self.bot.say(
# "{} has transfered {} credits{} to {}.".format(
# ctx.message.author.display_name,
# amt,
# split_msg,
# ", ".join([m.display_name for m in members])))
# @commands.command(pass_context=True, no_pm=True)
# async def skill(self, ctx, pb, *cardlevels):
# """Calculate skill level based on card levels.
# !skills 5216 c12 c12 r10 r9 e5 e4 l2 l1
# c = commons
# r = rares
# e = epics
# l = legendaries
# """
# if not pb.isdigit():
# await self.bot.say("PB (Personal Best) must be a number.")
# await send_cmd_help(ctx)
# return
# if len(cardlevels) != 8:
# await self.bot.say("You must enter exactly 8 cards.")
# await send_cmd_help(ctx)
# return
# rarities = {
# 'c': 0,
# 'r': 2,
# 'e': 5,
# 'l': 8
# }
# rarity_names = {
# 'c': 'Common',
# 'r': 'Rare',
# 'e': 'Epic',
# 'l': 'Legendary'
# }
# cards = [{'r': cl[0], 'l': int(cl[1:])} for cl in cardlevels]
# common_levels = []
# for card in cards:
# rarity = card['r']
# level = int(card['l'])
# if rarity not in rarities:
# await self.bot.say('{} is not a valid rarity.'.format(rarity))
# return
# common_level = level + rarities[rarity]
# common_levels.append(common_level)
# pb = int(pb)
# skill = pb / sum(common_levels) * 8
# out = []
# out.append('You have entered:')
# out.append(
# ', '.join(
# ['{} ({})'.format(
# rarity_names[card['r']], card['l']) for card in cards]))
# out.append(
# 'With a PB of {}, your skill level is {}.'.format(pb, skill))
# await self.bot.say('\n'.join(out))
@commands.command(pass_context=True, no_pm=True)
async def test(self, ctx):
"""Test."""
await self.bot.say("test")
@commands.has_any_role(*BOTCOMMANDER_ROLE)
@commands.command(pass_context=True, no_pm=True)
async def iosfix(self, ctx: Context, *members: discord.Member):
"""Quick fix to iOS bug.
Remove all roles from members and then re-add them."""
await self.bot.say("iOS Fix")
await self.run_iosfix(ctx, *members)
@commands.command(pass_context=True, no_pm=True)
async def iosfixme(self, ctx: Context):
"""Self-Quick fix to iOS bug."""
await self.bot.say("iOS Fix me")
await self.run_iosfix(ctx, ctx.message.author)
@checks.admin_or_permissions()
@commands.command(pass_context=True, no_pm=True)
async def say(self, ctx, *, msg):
"""Have bot say stuff. Remove command after run."""
message = ctx.message
await self.bot.delete_message(message)
await self.bot.say(msg)
# @checks.admin_or_permissions()
# @commands.command(pass_context=True)
# async def saygeneral(self, ctx, *, msg):
# """Have bot say stuff. Remove command after run."""
# message = ctx.message
# server2 = ctx.message.server
# server = self.bot.get_server('264119826069454849') #dino's test server
# abeserver = self.bot.get_server('291126049268563968')
# general_channel = abeserver.get_channel('291126049268563968')
# #abe's server's general channel
# # await self.bot.say(general_channel)
# await self.bot.delete_message(message)
# await self.bot.send_message(general_channel, msg)
# @checks.admin_or_permissions()
# @commands.command(pass_context=True)
# async def saybotspam(self, ctx, *, msg):
# """Have bot say stuff. Remove command after run."""
# message = ctx.message
# server2 = ctx.message.server
# server = self.bot.get_server('264119826069454849') #dino's test server
# abeserver = self.bot.get_server('291126049268563968')
# general_channel = abeserver.get_channel('340737442367799297')
# #abe's server's botspam channel
# # await self.bot.say(general_channel)
# await self.bot.delete_message(message)
# await self.bot.send_message(general_channel, msg)
@checks.admin_or_permissions()
@commands.command(pass_context=True)
async def saychan(self, ctx, channel, *, msg):
"""Have bot say stuff. Remove command after run."""
try:
channel_id = channel_name_to_id[channel]
except KeyError:
await self.bot.say("Invalid channel. Valid channels are:")
await self.bot.say(', '.join(channel_name_to_id))
return
message = ctx.message
server2 = ctx.message.server
server = self.bot.get_server('264119826069454849') #dino's test server
abeserver = self.bot.get_server('291126049268563968')
general_channel = abeserver.get_channel(channel_id)
#abe's server's general channel
# await self.bot.say(general_channel)
await self.bot.delete_message(message)
await self.bot.send_message(general_channel, msg)
# @commands.command(pass_context=True, no_pm=False)
# async def crsettag(self, ctx, tag, member: discord.Member=None):
# """Set CR tags for members.
# This is the equivalent of running:
# !crclan settag [tag] [member]
# !crprofile settag [tag] [member]
# If those cogs are not loaded, it will just ignore it.
# """
# crclan = self.bot.get_cog("CRClan")
# crprofile = self.bot.get_cog("CRProfile")
# if crclan is not None:
# await ctx.invoke(crclan.crclan_settag, tag, member)
# if crprofile is not None:
# await ctx.invoke(crprofile.crprofile_settag, tag, member)
# @commands.has_any_role(*BOTCOMMANDER_ROLE)
# @commands.command(pass_context=True, no_pm=True)
# async def elder(self, ctx, member: discord.Member):
# """Elder promotion DM + role change."""
# elder_roles = ["Elder"]
# await self.changerole(ctx, member, *elder_roles)
# try:
# await ctx.invoke(self.dmusers, ELDER_MSG, member)
# except discord.errors.Forbidden:
# await self.bot.say(
# "Unable to send DM to {}. User might have a stricter DM setting.".format(member))
async def run_iosfix(self, ctx: Context, *members: discord.Member):
"""Actual fix to allow members without the bot commander to run on themselves."""
for member in members:
roles = member.roles.copy()
for role in roles:
if not role.is_everyone:
try:
await self.bot.remove_roles(member, role)
await self.bot.add_roles(member, role)
await self.bot.say(
"Removed and re-added {} to {}.".format(
role, member))
except discord.errors.Forbidden:
await self.bot.say(
"I am not allowed to remove {} from {}.".format(
role, member))
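# Standard Red / discord.py (async branch) extension entry point: the bot calls setup(bot) when this cog is loaded.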
def setup(bot):
r = RACF(bot)
bot.add_cog(r)
| Dino0631/RedRain-Bot | cogs/notinusecogs/misc2.py | Python | gpl-3.0 | 49,471 | ["CASINO"] | 958176997d96c61427a3d33d102c810b684a546af77cb56c8f2599a0d7206cdc |
## INFO ########################################################################
## ##
## plastey ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + C + Blender + Arch Linux ##
## Version: 0.2.3.137 (20150514) ##
## File: main.py ##
## ##
## For more information about the project, visit ##
## <http://plastey.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from itertools import repeat, chain
from math import sqrt, radians
# Import blender modules
from mathutils import Matrix, Euler, Quaternion
# Import linmath modules
from linmath import Vec3, Mat4x4
# Import user modules
from history import History
from utils import (name_of_vertex,
index_of_vertex)
from surface import (VertexLocked,
VertexAlreadySelected)
from app import (Application,
EscapeApplication,
RestartApplication,
MOUNTED_ON_DESK,
MOUNTED_ON_HEAD)
# Import global level constants
from const import (APP_ESCAPED,
COLOR_ROTATE_PINCH_BASE,
COLOR_ROTATE_PINCH_OKAY,
COLOR_GRAB_MOVE_OKAY,
COLOR_GRAB_PINCH_BASE,
COLOR_GRAB_PINCH_FAIL,
COLOR_GRAB_PINCH_OKAY,
COLOR_GEOMETRY_BASE,
COLOR_GEOMETRY_DARK,
COLOR_GEOMETRY_LITE,
COLOR_LOCKED,
COLOR_UNLOCKED,
COMM_IS_PAIRED,
COMM_IS_MASTER,
COMM_RESTART)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Module level constants
PICK_HOLD_DISTANCE = 3.5
PICK_RELEASE_DISTANCE = 2.5
#GRAB_HOLD_DISTANCE = 3.5
GRAB_RELEASE_DISTANCE = 3.5
SWIPE_DISTANCE = 135
SWIPE_DEVIANCE = 20
ZOOM_SCALE_FACTOR = 0.1
ROTATE_SCALE_FACTOR = 0.1
# Helper functions
#------------------------------------------------------------------------------#
def distance(position1, position2):
return sqrt(pow(position2[0] - position1[0], 2) +
pow(position2[1] - position1[1], 2) +
pow(position2[2] - position1[2], 2))
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def midpoint(position1, position2):
return ((position1[0] + position2[0])/2,
(position1[1] + position2[1])/2,
(position1[2] + position2[2])/2)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
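# Note (added for clarity): this is the Rodrigues-style construction. With
# v = target_direction x direction and c = direction . target_direction, the matrix
# R = I + [v]_x + [v]_x^2 * (1 - c) / |v|^2 rotates one unit vector onto the other;
# the ZeroDivisionError fallback below covers the parallel case where |v| = 0.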
def rotation_matrix_from_vectors(direction, target_direction):
v = target_direction.cross_product(direction)
skew = Mat4x4(( 0.0, -v.z, v.y, 0.0),
( v.z, 0.0, -v.x, 0.0),
(-v.y, v.x, 0.0, 0.0),
( 0.0, 0.0, 0.0, 0.0))
try:
return (Mat4x4.identity() + skew +
(skew*skew)*((1 - direction*target_direction)/v.length**2))
except ZeroDivisionError:
return Mat4x4.identity()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def rotation_quaternion_from_vectors(direction, target_direction):
return Quaternion(target_direction.cross_product(direction),
sqrt((direction.length**2)*(target_direction.length**2)) +
direction*target_direction)
#------------------------------------------------------------------------------#
class KibuVR(Application):
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, *args, **kwargs):
super().__init__(MOUNTED_ON_DESK, *args, **kwargs)
# Set escape handler
self.append_callback('exit', self.on_exit)
self.append_callback('reset', self.on_reset)
# Set communication
if COMM_IS_PAIRED:
self.append_callback('comm', self.on_communication)
if not COMM_IS_MASTER:
self.vertex_origo.applyRotation((0, 0, radians(180)))
self.surface.update()
# Create undo stack
# self._action = None
@History.event
def history_is_empty(direction, prefix):
self.text.write('{PREFIX}History is empty'.format(PREFIX=prefix))
self._history = History(history_is_empty)
# Set initial states
self._is_picked = False
self._is_grabbed = False
self._is_dual_grabbed = False
self._grab_position = None
self._grab_start = None
self._dual_grab_vector = None
self._dual_grab_length = None
self._zoomed_pick_distance = PICK_HOLD_DISTANCE
# Set callback-states which will be used
# during the execution of the callbacks
self.hands.left.set_states(grabbed=False)
self.hands.right.set_states(grabbed=False)
# Set actual callbacks
self.hands.append_callback('grab', self.on_grab)
self.hands.left.append_callback('pick', self.on_pick)
self.hands.right.append_callback('pick', self.on_pick)
self.hands.left.append_callback('swipe_left_right', self.on_swipe_left_right)
self.hands.right.append_callback('swipe_left_right', self.on_swipe_left_right)
self.hands.left.append_callback('swipe_up_down', self.on_swipe_up_down)
self.hands.right.append_callback('swipe_up_down', self.on_swipe_up_down)
self.hands.left.append_callback('swipe_front_back', self.on_swipe_front_back)
self.hands.right.append_callback('swipe_front_back', self.on_swipe_front_back)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_exit(self, states):
if states['escape'] == APP_ESCAPED:
raise EscapeApplication
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_reset(self, states):
if states['restart'] == COMM_RESTART:
raise RestartApplication
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_communication(self, states):
# Local reference
surface = self.surface
# Prepare and send data
data = []
for identifier, vertex in surface.selected():
vertex_position = vertex.localPosition
data.append((index_of_vertex(vertex.name),
vertex_position[0],
vertex_position[1],
vertex_position[2]))
# Receive data and act based on it
for vertex in surface.unlock_all():
vertex.color = COLOR_UNLOCKED
try:
received_data = self._connection.transfer(data)
for i, x, y, z in received_data:
vertex_name = name_of_vertex(i)
surface[vertex_name].localPosition = x, y, z
surface.lock(vertex_name).color = COLOR_LOCKED
surface.update()
# If 'NoneType|int' object is not iterable
except TypeError:
if received_data == COMM_RESTART:
raise RestartApplication
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_swipe_left_right(self, states):
if states['grabbed']:
return
# Get current position of hand
position = states['leap_hand'].palm_position
x1, y1, z1 = position[0], position[1], position[2]
# If this is not the first cycle of a swipe-measurement
try:
# Get stored values
x0, y0, z0 = states['swipe_left_right_start']
last_x = states['swipe_left_right_last_x']
# Get deltas between start and current
# and previous and current
dx0 = x0 - x1
dx1 = last_x - x1
# If the next move is "violating" the deviance or
# the hand is not moving in the same direction
if (abs(y0 - y1) > SWIPE_DEVIANCE or
abs(z0 - z1) > SWIPE_DEVIANCE or
((dx0 > 0 and dx1 <= 0) or
(dx0 <= 0 and dx1 > 0))):
raise KeyError
# If this is the end of a swipe
if (abs(dx0) >= SWIPE_DISTANCE):
# Moved left
if dx0 > 0:
self._history.undo()
# Moved right
else:
self._history.redo()
raise KeyError
# If this is the first cycle of a swipe-measurement
except KeyError:
# Start a new swipe-measuring cycle
states['swipe_left_right_start'] = x1, y1, z1
states['swipe_left_right_last_x'] = x1
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_swipe_front_back(self, states):
if states['grabbed']:
return
# Get current position of hand
position = states['leap_hand'].palm_position
x1, y1, z1 = position[0], position[1], position[2]
# If this is not the first cycle of a swipe-measurement
try:
# Get stored values
x0, y0, z0 = states['swipe_front_back_start']
last_z = states['swipe_front_back_last_z']
# Get deltas between start and current
# and previous and current
dz0 = z0 - z1
dz1 = last_z - z1
# If the next move is "violating" the deviance or
# the hand is not moving in the same direction
if (abs(x0 - x1) > SWIPE_DEVIANCE or
abs(y0 - y1) > SWIPE_DEVIANCE or
((dz0 > 0 and dz1 <= 0) or
(dz0 <= 0 and dz1 > 0))):
raise KeyError
# If this is the end of a swipe
if (abs(dz0) >= SWIPE_DISTANCE):
# Moved forward
if dz0 > 0:
print('[MOVE] forward')
# Moved backward
else:
#print('[MOVE] backward')
self.text.clear()
self.text.write('Cleared messages')
raise KeyError
# If this is the first cycle of a swipe-measurement
except KeyError:
# Start a new swipe-measuring cycle
states['swipe_front_back_start'] = x1, y1, z1
states['swipe_front_back_last_z'] = z1
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_swipe_up_down(self, states):
if states['grabbed']:
return
# Get current position of hand
position = states['leap_hand'].palm_position
x1, y1, z1 = position[0], position[1], position[2]
# If this is not the first cycle of a swipe-measurement
try:
# Get stored values
x0, y0, z0 = states['swipe_up_down_start']
last_y = states['swipe_up_down_last_y']
# Get deltas between start and current
# and previous and current
dy0 = y0 - y1
dy1 = last_y - y1
# If the next move is "violating" the deviance or
# the hand is not moving in the same direction
if (abs(x0 - x1) > SWIPE_DEVIANCE or
abs(z0 - z1) > SWIPE_DEVIANCE or
((dy0 > 0 and dy1 <= 0) or
(dy0 <= 0 and dy1 > 0))):
raise KeyError
# If this is the end of a swipe
if (abs(dy0) >= SWIPE_DISTANCE):
# Moved down
if dy0 > 0:
print('[MOVE] down')
# Moved up
else:
surface = self.surface
vertices = set()
for vertex in self.surface.deselect_all():
vertex.color = COLOR_GEOMETRY_DARK
vertices.add(vertex)
self.text.write('Deselect all vertices')
@History.event
def select_vertices(direction, prefix):
for vertex in vertices:
try:
surface.select(vertex.name)
vertex.color = COLOR_GEOMETRY_LITE
except (VertexLocked, VertexAlreadySelected):
pass
self.text.write('{PREFIX}Select vertices'.format(PREFIX=prefix))
@History.event
def deselect_vertices(direction, prefix):
for vertex in vertices:
try:
surface.deselect(vertex.name)
vertex.color = COLOR_GEOMETRY_DARK
except VertexLocked:
pass
self.text.write('{PREFIX}Deselect vertices'.format(PREFIX=prefix))
# Save events
self._history.push(undo=select_vertices,
redo=deselect_vertices)
#print('[MOVE] up')
raise KeyError
# If this is the first cycle of a swipe-measurement
except KeyError:
# Start a new swipe-measuring cycle
states['swipe_up_down_start'] = x1, y1, z1
states['swipe_up_down_last_y'] = y1
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_grab(self, states):
grabbing = []
# Check both hands
for hand in self.hands:
thumb_position = hand.thumb.position
# Check index and middle fingers as well
for finger in (hand.index,
hand.middle):
# If the distance between the thumb and this finger
# is beyond the grab-release range
if not distance(thumb_position,
finger.position) < GRAB_RELEASE_DISTANCE:
# Set state and stop checking the other finger
hand.set_states(grabbed=False)
break
# If both fingers are within the grab-release range
else:
# Set state, and collect hand in the grabbing-hands list
grabbing.append(hand)
hand.set_states(grabbed=True)
# If both hands are grabbing
try:
# Get hands separately
left_hand, right_hand = grabbing
# Color thumbs and hide other fingers, so it won't confuse the user
left_hand.thumb.color = right_hand.thumb.color = COLOR_ROTATE_PINCH_OKAY
left_hand.hide_all('thumb')
right_hand.hide_all('thumb')
# Get the thumb positions
ltp = left_hand.thumb.position
rtp = right_hand.thumb.position
# Get essential information about the current state
curr_grab_vector = Vec3.from_line(ltp[0], ltp[1], ltp[2],
rtp[0], rtp[1], rtp[2])
curr_grab_length = curr_grab_vector.length
curr_grab_vector = curr_grab_vector.normalize()
# If this grab is part of a previous grab-cycle
try:
rotation = Matrix(tuple(rotation_matrix_from_vectors(self._dual_grab_vector,
curr_grab_vector))).to_euler()
rotation = -rotation[0], -rotation[1], -rotation[2]
# Rotate parent object of all vertices
#self.vertex_origo.applyRotation(rotation)
self._armature_control.applyRotation(rotation)
self._armature.applyRotation(rotation)
#self._geometry.applyRotation(rotation)
# Scale the parent object
try:
scale = 1/(self._dual_grab_length/curr_grab_length)
self._zoomed_pick_distance *= scale
self.vertex_origo.worldScale = \
[old*new for old, new in zip(self.vertex_origo.worldScale, repeat(scale))]
except ZeroDivisionError:
pass
# Update geometry
self.surface.update()
# If this grab is a new grab-cycle
except TypeError:
pass
# Store current values as previous ones for the next cycle
self._dual_grab_vector = curr_grab_vector
self._dual_grab_length = curr_grab_length
self._is_dual_grabbed = True
except ValueError:
# If only one hand is grabbing
try:
hand = grabbing[0]
curr = tuple(hand.thumb.position)
prev = self._grab_position
hand.hide_all('thumb')
hand.thumb.color = COLOR_GRAB_MOVE_OKAY
# If this is a mistaken single grab (one hand released accidentally)
if self._is_dual_grabbed:
return
# If this is the first cycle of a single grab
if not self._is_grabbed:
self._is_grabbed = True
self._grab_start = {id: tuple(v.localPosition) for id, v in self.surface.selected()}
# If this grab is part of a previous grab-cycle
try:
# Calculate vector between previous
# and current thumb positions
movement = Vec3.from_line(prev[0], prev[1], prev[2],
curr[0], curr[1], curr[2])
# Move all selected vertices
for _, vertex in self.surface.selected():
vertex.applyMovement(movement)
# Update geometry
self.surface.update()
# If this grab is starting a new grab-cycle
except TypeError:
pass
# Store current position as previous one for the next cycle
self._grab_position = curr
# If none of the hands are grabbing
except IndexError:
if not self._is_picked:
self.hands.show_all()
# If this release is the end of a grab cycle
if self._is_grabbed:
start = self._grab_start
stop = {id: tuple(v.localPosition) for id, v in self.surface.selected()}
# Create events
@History.event
def move_back_vertices(direction, prefix):
surface = self.surface
# Move all selected vertices
for identifier, position in start.items():
# If opponent user is not using them
if not surface.is_locked(identifier):
surface[identifier].localPosition = position
# Update geometry
surface.update()
self.text.write('{PREFIX}Vertices moved to position'.format(PREFIX=prefix))
@History.event
def move_vertices(direction, prefix):
surface = self.surface
# Move all selected vertices
for identifier, position in stop.items():
# If opponent user is not using them
if not surface.is_locked(identifier):
surface[identifier].localPosition = position
# Update geometry
surface.update()
self.text.write('{PREFIX}Vertices moved to position'.format(PREFIX=prefix))
# Save events
self._history.push(undo=move_back_vertices,
redo=move_vertices)
self._grab_position = \
self._dual_grab_vector = \
self._dual_grab_length = None
self._is_grabbed = \
self._is_dual_grabbed = False
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_pick(self, states):
# If there is a grabbing going on
if states['grabbed']:
return
# Get local reference of this hand
hand = states['hand']
# Local reference
thumb_position = hand.thumb.position
index_position = hand.index.position
# If the index finger's distance to the thumb is within the pick-release range
if distance(thumb_position,
index_position) < PICK_RELEASE_DISTANCE:
# Local reference
surface = self.surface
# Hide non picking fingers
hand.hide_all('thumb', 'index')
# Check all vertices on the surface
for vertex in surface:
# If the vertex is within the (zoom-scaled) pick-hold range of the pinch midpoint
if distance(midpoint(thumb_position,
index_position),
vertex.worldPosition) < self._zoomed_pick_distance:
# If user is already picking
if self._is_picked:
return
# Create events
@History.event
def deselect_vertex(direction, prefix):
index = index_of_vertex(vertex.name)
try:
surface.deselect(vertex.name)
vertex.color = COLOR_GEOMETRY_DARK
except VertexLocked:
pass
self.text.write(
'{PREFIX}Vertex #{INDEX} deselected'.format(
PREFIX = prefix,
INDEX = index))
@History.event
def select_vertex(direction, prefix):
index = index_of_vertex(vertex.name)
# If the opponent user is not grabbing the vertex already
try:
surface.select(vertex.name)
vertex.color = COLOR_GEOMETRY_LITE
self.text.write(
'{PREFIX}Vertex #{INDEX} selected'.format(
PREFIX = prefix,
INDEX = index))
# If the opponent user is grabbing the vertex
except VertexLocked:
self.text.write(
'{PREFIX}Vertex #{INDEX} is locked'.format(
PREFIX = prefix,
INDEX = index))
# If vertex is already selected
except VertexAlreadySelected:
# If first call
if direction == History.NONE:
raise VertexAlreadySelected
# If undo or redo
self.text.write(
'{PREFIX}Vertex #{INDEX} selected'.format(
PREFIX = prefix,
INDEX = index))
# Try to select vertex
try:
select_vertex(History.NONE, History.NONE_PREFIX)
self._history.push(undo=deselect_vertex,
redo=select_vertex)
# If vertex has already been selected
except VertexAlreadySelected:
deselect_vertex(History.NONE, History.NONE_PREFIX)
self._history.push(undo=select_vertex,
redo=deselect_vertex)
# Set state
self._is_picked = True
# Feedback the user about the pick's state
hand.thumb.color = hand.index.color = COLOR_GRAB_PINCH_OKAY
# Stop the iterations
break
# Picked in the air
else:
hand.thumb.color = hand.index.color = COLOR_GRAB_PINCH_FAIL
# If pick is released
else:
# Show all fingers again
hand.show_all()
# Feedback the user about the pick's state
hand.thumb.color = \
hand.index.color = COLOR_GRAB_PINCH_BASE
# Set state
self._is_picked = False
#------------------------------------------------------------------------------#
application = KibuVR()
|
kitchenbudapest/vr
|
main.py
|
Python
|
gpl-3.0
| 27,035
|
[
"VisIt"
] |
4dd4f021f0e95ae1fd9c608a6212e1b5f18b5823fd00353b88706139ccda70e8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Usage: MultiLane.py folder/ video.avi data.npz interpolated_lanes.pickle
from ArgParser import parse_args
from GPSReader import GPSReader
from GPSTransforms import IMUTransforms
from MultiLaneGenerator import MultiLane
from Q50_config import LoadParameters
from VtkRenderer import VtkPointCloud, VtkBoundingBox
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.spatial import distance, KDTree
from sklearn import cluster
import sys
from transformations import euler_from_matrix
import vtk
def load_ply(ply_file):
""" Loads a ply file and returns an actor """
reader = vtk.vtkPLYReader()
reader.SetFileName(ply_file)
reader.Update()
ply_mapper = vtk.vtkPolyDataMapper()
ply_mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(ply_mapper)
return actor
def vtk_transform_from_np(np4x4):
vtk_matrix = vtk.vtkMatrix4x4()
for r in range(4):
for c in range(4):
vtk_matrix.SetElement(r, c, np4x4[r, c])
transform = vtk.vtkTransform()
transform.SetMatrix(vtk_matrix)
return transform
def get_transforms(args):
""" Gets the IMU transforms for a run """
gps_reader = GPSReader(args['gps'])
gps_data = gps_reader.getNumericData()
imu_transforms = IMUTransforms(gps_data)
return imu_transforms
def saveClusters(lanes, times, lane_idx, num_lanes):
out = {}
out['num_lanes'] = np.array(num_lanes)
for i in xrange(num_lanes):
mask = lanes[:, lane_idx] == i
lane = lanes[mask]
time = times[mask]
lane = lane[:, :3]
shifted = np.vstack((lane[1:, :], np.zeros((1, 3))))
lane = np.hstack((lane, shifted))
out['lane' + str(i)] = lane
out['time' + str(i)] = time
np.savez('multilane_points', **out)
def saveInterp(interp, num_lanes):
out = {}
out['num_lanes'] = np.array(num_lanes)
for i in xrange(num_lanes):
out['lane' + str(i)] = interp[:,:,i]
print 'Saved multilane shifted points'
np.savez('multilane_points', **out)
class Blockworld:
def __init__(self):
self.start = 0
self.step = 5
self.end = self.step * 500
self.count = 0
self.ren = vtk.vtkRenderer()
args = parse_args(sys.argv[1], sys.argv[2])
# Transforms
self.imu_transforms = get_transforms(args)
self.trans_wrt_imu = self.imu_transforms[
self.start:self.end:self.step, 0:3, 3]
self.params = args['params']
self.lidar_params = self.params['lidar']
ml = MultiLane(sys.argv[3], sys.argv[4], 2, 2)
ml.extendLanes()
saveInterp(ml.interp, ml.rightLanes + ml.leftLanes)
ml.filterLaneMarkings()
print 'Adding filtered points'
pts = ml.lanes.copy()
raw_cloud = VtkPointCloud(pts[:, :3], pts[:, 4])
raw_actor = raw_cloud.get_vtk_cloud(zMin=0, zMax=100)
self.ren.AddActor(raw_actor)
try:
npz = np.load('cluster.npz')
print 'Loading clusters from file'
ml.lanes = npz['data']
ml.times = npz['t']
except IOError:
print 'Clustering points'
ml.clusterLanes()
ml.saveLanes('cluster.npz')
ml.sampleLanes()
print 'Adding clustered points'
clusters = ml.lanes.copy()
cluster_cloud = VtkPointCloud(clusters[:, :3], clusters[:, -2])
cluster_actor = cluster_cloud.get_vtk_cloud(zMin=0, zMax=4)
cluster_actor.GetProperty().SetPointSize(10)
self.ren.AddActor(cluster_actor)
print 'Interpolating lanes'
ml.interpolateLanes()
interp_lanes = ml.interp_lanes.copy()
interp_lanes_cloud = VtkPointCloud(interp_lanes[:, :3], interp_lanes[:, 3])
interp_lanes_actor = interp_lanes_cloud.get_vtk_cloud(zMin=0, zMax=4)
self.ren.AddActor(interp_lanes_actor)
# ml.fixMissingPoints()
# saveClusters(ml.lanes, ml.times, -1, 5)
print 'Adding car'
self.car = load_ply('../mapping/viz/gtr.ply')
self.ren.AddActor(self.car)
self.car.GetProperty().LightingOff()
print 'Rendering'
self.ren.ResetCamera()
self.win = vtk.vtkRenderWindow()
self.ren.SetBackground(0, 0, 0)
self.win.AddRenderer(self.ren)
self.win.SetSize(800, 400)
self.iren = vtk.vtkRenderWindowInteractor()
        self.iren.SetRenderWindow(self.win)
mouseInteractor = vtk.vtkInteractorStyleTrackballCamera()
self.iren.SetInteractorStyle(mouseInteractor)
self.iren.Initialize()
# Whether to write video
self.record = False
# Set up time
self.iren.AddObserver('TimerEvent', self.update)
self.timer = self.iren.CreateRepeatingTimer(100)
# Add keypress event
self.iren.AddObserver('KeyPressEvent', self.keyhandler)
self.mode = 'ahead'
self.iren.Start()
def getCameraPosition(self):
t = self.start + self.step * self.count
if self.mode == 'ahead':
position = self.imu_transforms[t, 0:3, 3]
focal_point = self.imu_transforms[t + self.step, 0:3, 3]
elif self.mode == 'behind':
# FIXME Tune this
position = self.imu_transforms[t - 0.3 * self.step, 0:3, 3]
position[2] = position[2] + 0.15
focal_point = self.imu_transforms[t + 0.2 * self.step, 0:3, 3]
focal_point[2] = focal_point[2] - 0.2
elif self.mode == 'above':
position = self.imu_transforms[
t - self.step, 0:3, 3] + np.array([0, 0, 75.0])
focal_point = self.imu_transforms[t, 0:3, 3]
elif self.mode == 'passenger':
# TODO Not sure being inside mesh works...
pass
return position, focal_point
def keyhandler(self, obj, event):
key = obj.GetKeySym()
if key == 'a':
self.mode = 'above'
elif key == 'b':
self.mode = 'behind'
elif key == 'd':
self.mode = 'ahead'
elif key == '0':
self.count = 0
else:
pass
def update(self, iren, event):
# Transform the car
t = self.start + self.step * self.count
imu_transform = self.imu_transforms[t, :,:]
transform = vtk_transform_from_np(imu_transform)
transform.RotateZ(90)
transform.Translate(-2, -3, -2)
self.car.SetUserTransform(transform)
# Set camera position
fren = iren.GetRenderWindow().GetRenderers().GetFirstRenderer()
cam = fren.GetActiveCamera()
position, focal_point = self.getCameraPosition()
cam.SetPosition(position)
cam.SetFocalPoint(focal_point)
cam.SetViewUp(0, 0, 1)
fren.ResetCameraClippingRange()
cam.SetClippingRange(0.1, 1600)
iren.GetRenderWindow().Render()
self.count += 1
if __name__ == '__main__':
blockworld = Blockworld()
|
sameeptandon/sail-car-log
|
process/MultiLane.py
|
Python
|
bsd-2-clause
| 7,094
|
[
"VTK"
] |
928acd94c2a4ec8ad68bc69dfe3e73e7a67d17e7a251392758c95734b6f10be3
|
"""Fallaxy (ansible-galaxy) plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import uuid
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
find_executable,
display,
)
from ..docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
class FallaxyProvider(CloudProvider):
"""Fallaxy plugin.
Sets up Fallaxy (ansible-galaxy) stub server for tests.
    Its source resides at: https://github.com/ansible/fallaxy-test-container
"""
DOCKER_SIMULATOR_NAME = 'fallaxy-stub'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(FallaxyProvider, self).__init__(args)
if os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
self.image = os.environ.get('ANSIBLE_FALLAXY_CONTAINER')
else:
self.image = 'quay.io/ansible/fallaxy-test-container:2.0.1'
self.container_name = ''
def filter(self, targets, exclude):
"""Filter out the tests with the necessary config and res unavailable.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker_cmd = 'docker'
docker = find_executable(docker_cmd, required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
% (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
def setup(self):
"""Setup cloud resource before delegation and reg cleanup callback."""
super(FallaxyProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_docker_run_options(self):
"""Get additional options needed when delegating tests to a container.
:rtype: list[str]
"""
return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []
def cleanup(self):
"""Clean up the resource and temporary configs files after tests."""
if self.container_name:
docker_rm(self.args, self.container_name)
super(FallaxyProvider, self).cleanup()
def _setup_dynamic(self):
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0].get('State', {}).get('Running'):
docker_rm(self.args, self.container_name)
results = []
display.info('%s Fallaxy simulator docker container.'
% ('Using the existing' if results else 'Starting a new'),
verbosity=1)
fallaxy_port = 8080
fallaxy_token = str(uuid.uuid4()).replace('-', '')
if not results:
if self.args.docker or container_id:
publish_ports = []
else:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', ':'.join((str(fallaxy_port),) * 2),
]
if not os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
docker_pull(self.args, self.image)
docker_run(
self.args,
self.image,
['-d', '--name', self.container_name, '-e', 'FALLAXY_TOKEN=%s' % fallaxy_token] + publish_ports,
)
if self.args.docker:
fallaxy_host = self.DOCKER_SIMULATOR_NAME
elif container_id:
fallaxy_host = self._get_simulator_address()
display.info('Found Fallaxy simulator container address: %s' % fallaxy_host, verbosity=1)
else:
fallaxy_host = 'localhost'
self._set_cloud_config('FALLAXY_HOST', fallaxy_host)
self._set_cloud_config('FALLAXY_PORT', str(fallaxy_port))
self._set_cloud_config('FALLAXY_TOKEN', fallaxy_token)
def _get_simulator_address(self):
results = docker_inspect(self.args, self.container_name)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def _setup_static(self):
raise NotImplementedError()
class FallaxyEnvironment(CloudEnvironment):
"""Fallaxy environment plugin.
Updates integration test environment after delegation.
"""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
fallaxy_token = self._get_cloud_config('FALLAXY_TOKEN')
fallaxy_host = self._get_cloud_config('FALLAXY_HOST')
fallaxy_port = self._get_cloud_config('FALLAXY_PORT')
return CloudEnvironmentConfig(
ansible_vars=dict(
fallaxy_token=fallaxy_token,
fallaxy_galaxy_server='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
fallaxy_ah_server='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
),
env_vars=dict(
FALLAXY_TOKEN=fallaxy_token,
FALLAXY_GALAXY_SERVER='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
FALLAXY_AH_SERVER='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
),
)
|
azaghal/ansible
|
test/lib/ansible_test/_internal/cloud/fallaxy.py
|
Python
|
gpl-3.0
| 5,719
|
[
"Galaxy"
] |
50351a92207173c7cb69a183eb180c6d17fa32471df8cb72df4e284036a70a4a
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of Mulliken population analysis (MPA) based on data parsed by cclib."""
import random
import numpy
from cclib.method.population import Population
class MPA(Population):
"""Mulliken population analysis."""
def __init__(self, *args):
super().__init__(logname="MPA", *args)
def __str__(self):
"""Return a string representation of the object."""
return "MPA of %s" % (self.data)
def __repr__(self):
"""Return a representation of the object."""
return 'MPA("%s")' % (self.data)
def calculate(self, indices=None, fupdate=0.05):
"""Perform a Mulliken population analysis."""
# Determine number of steps, and whether process involves beta orbitals.
self.logger.info("Creating attribute aoresults: [array[2]]")
nbasis = self.data.nbasis
alpha = len(self.data.mocoeffs[0])
self.aoresults = [ numpy.zeros([alpha, nbasis], "d") ]
nstep = alpha
unrestricted = (len(self.data.mocoeffs) == 2)
if unrestricted:
beta = len(self.data.mocoeffs[1])
self.aoresults.append(numpy.zeros([beta, nbasis], "d"))
nstep += beta
        # Initialize progress if available.
if self.progress:
self.progress.initialize(nstep)
step = 0
for spin in range(len(self.data.mocoeffs)):
for i in range(len(self.data.mocoeffs[spin])):
if self.progress and random.random() < fupdate:
self.progress.update(step, "Mulliken Population Analysis")
# X_{ai} = \sum_b c_{ai} c_{bi} S_{ab}
# = c_{ai} \sum_b c_{bi} S_{ab}
# = c_{ai} C(i) \cdot S(a)
# X = C(i) * [C(i) \cdot S]
# C(i) is 1xn and S is nxn, result of matrix mult is 1xn
ci = self.data.mocoeffs[spin][i]
if hasattr(self.data, "aooverlaps"):
temp = numpy.dot(ci, self.data.aooverlaps)
# handle spin-unrestricted beta case
elif hasattr(self.data, "fooverlaps2") and spin == 1:
temp = numpy.dot(ci, self.data.fooverlaps2)
elif hasattr(self.data, "fooverlaps"):
temp = numpy.dot(ci, self.data.fooverlaps)
self.aoresults[spin][i] = numpy.multiply(ci, temp).astype("d")
step += 1
if self.progress:
self.progress.update(nstep, "Done")
retval = super().partition(indices)
if not retval:
self.logger.error("Error in partitioning results")
return False
# Create array for Mulliken charges.
self.logger.info("Creating fragcharges: array[1]")
size = len(self.fragresults[0][0])
self.fragcharges = numpy.zeros([size], "d")
alpha = numpy.zeros([size], "d")
if unrestricted:
beta = numpy.zeros([size], "d")
for spin in range(len(self.fragresults)):
for i in range(self.data.homos[spin] + 1):
temp = numpy.reshape(self.fragresults[spin][i], (size,))
self.fragcharges = numpy.add(self.fragcharges, temp)
if spin == 0:
alpha = numpy.add(alpha, temp)
elif spin == 1:
beta = numpy.add(beta, temp)
if not unrestricted:
self.fragcharges = numpy.multiply(self.fragcharges, 2)
else:
self.logger.info("Creating fragspins: array[1]")
self.fragspins = numpy.subtract(alpha, beta)
return True
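# A minimal usage sketch, assuming cclib's standard ccread entry point; the
# file name "water.out" is only a placeholder:
#
#     from cclib.io import ccread
#     from cclib.method import MPA
#
#     data = ccread("water.out")    # any parsed quantum-chemistry output file
#     mpa = MPA(data)
#     if mpa.calculate():
#         print(mpa.fragcharges)    # summed Mulliken populations per fragment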
|
cclib/cclib
|
cclib/method/mpa.py
|
Python
|
bsd-3-clause
| 3,953
|
[
"cclib"
] |
16c4343f183a78ea63fca38fd5393ec8874fec11abc14ff6a7a54e9df206952e
|
#!/usr/bin/python
##############################################################
# SpineML to GENN platform independent wrapper #
# Alex Cope 2017 #
# #
# convert_script_s2g is used to manage passing a SpineML #
# model to GENN #
##############################################################
# mkdir -p from stack overflow (https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python)
import errno
import os
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
import shutil
import filecmp
# xml parser
import xml.etree.ElementTree as ET
# Parse command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", help="Set the Working Directory")
parser.add_argument("-m", help="Set the Model Directory")
parser.add_argument("-o", help="Set the Output Directory")
parser.add_argument("-e", type=int, default=None, help="Set the Experiment index to run")
parser.add_argument("-p", help="Property change options")
parser.add_argument("-d", help="Delay change options")
parser.add_argument("-c", help="Constant current options")
parser.add_argument("-t", help="Time varying current options")
args = parser.parse_args()
if args.w:
print("Passed working directory: " + args.w)
else:
print("Working directory not used")
#exit(0)
if args.m:
print("Using model directory: " + args.m)
else:
print("Model directory required")
exit(0)
if args.o:
print("Using output directory: " + args.o)
else:
print "Output directory required"
exit(0)
if args.e is not None:
print("Using experiment index: " + str(args.e))
else:
print("Experiment index required")
exit(0)
# check we have a GENN_PATH
genn_path = os.path.dirname(os.path.abspath(os.path.join(__file__, "..")))
print("GENN_PATH is " + genn_path)
# we need to check that the directories exists and if not create them
#mkdir_p(args.w)
mkdir_p(os.path.join(args.o,"model"))
in_dir = args.m
out_dir = os.path.join(args.o,"model")
# we need to process the model, we have a reference to the experiment, so we can load that and extract the model file, then load that and get the component files:
ns_el = {'sml_el': 'http://www.shef.ac.uk/SpineMLExperimentLayer'}
ns_hnl = {'sml_hnl': 'http://www.shef.ac.uk/SpineMLNetworkLayer'}
ns_lnl = {'sml_lnl': 'http://www.shef.ac.uk/SpineMLLowLevelNetworkLayer'}
# extract model file name from the experiment file
el_tree = ET.parse(os.path.join(in_dir, "experiment" + str(args.e) + ".xml"))
el_root = el_tree.getroot()
model_file_name = el_root.find("sml_el:Experiment",ns_el).find("sml_el:Model",ns_el).get("network_layer_url")
print("Using model file: " + model_file_name)
# extract component file names from model files
nl_tree = ET.parse(os.path.join(in_dir, model_file_name))
nl_root = nl_tree.getroot()
components = []
for pop in nl_root.iterfind("sml_lnl:Population",ns_lnl):
component_file_name = pop.find("sml_lnl:Neuron",ns_lnl).get("url")
if not component_file_name == "SpikeSource":
components.append(component_file_name)
for proj in pop.iterfind("sml_lnl:Projection",ns_lnl):
component_file_name = proj.find("sml_lnl:Synapse",ns_lnl).find("sml_lnl:WeightUpdate",ns_lnl).get("url")
components.append(component_file_name)
component_file_name = proj.find("sml_lnl:Synapse",ns_lnl).find("sml_lnl:PostSynapse",ns_lnl).get("url")
components.append(component_file_name)
# remove duplicates by converting to a set and back to a list
components = list(set(components))
for component in components:
print("Using component file:" + component)
if os.path.isdir(args.m) and os.path.isdir(out_dir):
for component in components:
shutil.copy(os.path.join(in_dir,component), out_dir)
shutil.copy(os.path.join(in_dir,model_file_name), out_dir)
shutil.copy(os.path.join(in_dir,"experiment" + str(args.e) + ".xml"), out_dir)
exts = ['bin']
file_names = [fn for fn in os.listdir(in_dir) if any(fn.endswith(ext) for ext in exts)]
for file_name in file_names:
shutil.copy(os.path.join(in_dir,file_name), out_dir)
else:
print("Model directory does not exist!")
exit(0)
# check for experiment, model and component changes
recompile = False
if os.path.isdir(os.path.join(out_dir, "prev")):
# do differences
if not filecmp.cmp(os.path.join(out_dir, model_file_name),os.path.join(out_dir, "prev", model_file_name)):
recompile = True
# if model does not match we may have a different model entirely so stop here
if recompile == False:
if not filecmp.cmp(os.path.join(out_dir, "experiment" + str(args.e) + ".xml"),os.path.join(out_dir, "prev", "experiment" + str(args.e) + ".xml")):
recompile = True
for component in components:
if not filecmp.cmp(os.path.join(out_dir, component),os.path.join(out_dir, "prev", component)):
recompile = True
else:
recompile = True
if recompile is True:
print("Recompiling model...")
else:
print("Model has not changed - no recompile required")
# copy the new version over
if not os.path.isdir(os.path.join(out_dir, "prev")):
mkdir_p(os.path.join(out_dir, "prev"))
for component in components:
shutil.copy(os.path.join(out_dir,component), os.path.join(out_dir,"prev"))
shutil.copy(os.path.join(out_dir,model_file_name), os.path.join(out_dir,"prev"))
shutil.copy(os.path.join(out_dir,"experiment" + str(args.e) + ".xml"), os.path.join(out_dir,"prev"))
prog = ""
if os.name == "nt":
#vcvarsall.bat or vcbuildtools.bat
# Windows only
if os.path.isfile("C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\Build\\vcvarsall.bat"):
prog = '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\Build\\vcvarsall.bat" amd64'
if os.path.isfile("C:\\Program Files (x86)\\Microsoft Visual C++ Build Tools\\vcvarsall.bat"):
prog = '"C:\\Program Files (x86)\\Microsoft Visual C++ Build Tools\\vcvarsall.bat" amd64'
if os.path.isfile("C:\\Program Files (x86)\\Microsoft Visual C++ Build Tools\\vcbuildtools.bat"):
prog = '"C:\\Program Files (x86)\\Microsoft Visual C++ Build Tools\\vcbuildtools.bat" amd64'
if prog == "":
print("Windows build config script not found")
exit(0)
print("Windows build batch = " + prog)
else:
prog = "echo NIX"
print "On Linux / OSX"
# Determine whether we should run GeNN in CPU_ONLY mode
cpu_only = (os.environ.get("GENN_SPINEML_CPU_ONLY") is not None)
# check if GeNN initial compile complete
generate_executable = None
simulate_executable = None
if os.name == "nt":
config = "Release" if cpu_only else "Release_CUDA"
generate_executable = "spineml_generator_" + config + ".exe"
simulate_executable = "spineml_simulator_Release.exe"
backend_target = "single_threaded_cpu_backend" if cpu_only else "cuda_backend"
genn_library = "genn_Release.lib"
backend_library = "genn_" + backend_target + "_Release.lib"
if not os.path.isfile(os.path.join(genn_path,"lib",genn_library)):
print("Compiling LibGeNN")
os.system(prog + "&& cd " + genn_path + "&&" + "msbuild genn.sln /verbosity:minimal /t:genn /p:Configuration=Release")
if not os.path.isfile(os.path.join(genn_path,"lib", backend_library)):
print("Compiling backend")
os.system(prog + "&& cd " + genn_path + "&&" + "msbuild genn.sln /verbosity:minimal /t:" + backend_target + " /p:Configuration=Release")
if not os.path.isfile(os.path.join(genn_path,"bin",generate_executable)):
config = "Release" if cpu_only else "Release_CUDA"
print("Compiling Generate tool")
os.system(prog + "&& cd " + genn_path + "&&" + "msbuild spineml.sln /verbosity:minimal /t:spineml_generator /p:Configuration=" + config)
if not os.path.isfile(os.path.join(genn_path,"bin",simulate_executable)):
print("Compiling Simulate tool")
os.system(prog + "&& cd " + genn_path + "&&" + "msbuild spineml.sln /verbosity:minimal /t:spineml_simulator /p:Configuration=Release")
else:
makefile = "MakefileSingleThreadedCPU" if cpu_only else "MakefileCUDA"
generate_executable = "spineml_generator_single_threaded_cpu" if cpu_only else "spineml_generator_cuda"
simulate_executable = "spineml_simulator"
if not os.path.isfile(os.path.join(genn_path,"bin", generate_executable)):
print("Compiling Generate tool")
os.system("cd " + os.path.join(genn_path,"src", "spineml", "generator") + " && make -f " + makefile)
if not os.path.isfile(os.path.join(genn_path,"bin", simulate_executable)):
print("Compiling Simulate tool")
os.system("cd " + os.path.join(genn_path,"src", "spineml", "standalone_simulator") + " && make")
# Recompile if needed
if recompile is True:
f = open(os.path.join(out_dir,"time.txt"),'w')
f.write('*Compiling...')
f.close()
os.system(prog + "&&" + os.path.join(genn_path,"bin",generate_executable) + " " + os.path.join(out_dir,"experiment" + str(args.e) + ".xml"))
f = open(os.path.join(out_dir,"time.txt"),'w')
f.write('*Running...')
f.close()
os.system(prog + "&&" + os.path.join(genn_path,"bin",simulate_executable) + " " + os.path.join(out_dir,"experiment" + str(args.e) + ".xml"))
|
genn-team/genn
|
bin/convert_script_s2g.py
|
Python
|
gpl-2.0
| 9,593
|
[
"NEURON"
] |
18e2e8295841047f735fc8841b36f493bb45a468228952054b7c62acf7ca894b
|
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list of all the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import sys
import urllib # For urllib.parse.unquote
from string import hexdigits
from operator import itemgetter
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
# Match a RFC 2047 word, looks like =?utf-8?q?someword?=
rfc2047_matcher = re.compile(r'''
=\? # literal =?
[^?]* # charset
\? # literal ?
[qQbB] # literal 'q' or 'b', case insensitive
\? # literal ?
.*? # encoded word
\?= # literal ?=
''', re.VERBOSE | re.MULTILINE)
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
syntactic_break = True
ew_combine_allowed = True
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
def startswith_fws(self):
return self[0].startswith_fws()
@property
def as_ew_allowed(self):
"""True if all top level tokens of this part may be RFC2047 encoded."""
return all(part.as_ew_allowed for part in self)
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
return _refold_parse_tree(self, policy=policy)
def pprint(self, indent=''):
print(self.ppstr(indent=indent))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=indent))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
class Phrase(TokenList):
token_type = 'phrase'
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
encode_as_ew = False
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
return str(value).replace('\\', '\\\\').replace(
'(', r'\(').replace(
')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
if x.local_part:
return x.addr_spec
else:
return quote_string(x.local_part) + x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
as_ew_allowed = False
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
as_ew_allowed = True
class NoFoldLiteral(TokenList):
token_type = 'no-fold-literal'
as_ew_allowed = False
class AddrSpec(TokenList):
token_type = 'addr-spec'
as_ew_allowed = False
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
as_ew_allowed = False
class DisplayName(Phrase):
token_type = 'display-name'
ew_combine_allowed = False
@property
def display_name(self):
res = TokenList(self)
if len(res) == 0:
return res.value
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if len(self) != 0 and quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
as_ew_allowed = False
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
as_ew_allowed = False
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
# Because the first token, the attribute (name) eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
syntactic_break = False
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = {} # Using order preserving dict from Python 3.7+
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts, key=itemgetter(0))
first_param = parts[0][1]
charset = first_param.charset
# Our arbitrary error recovery is to ignore duplicate parameters,
# to use appearance order if there are duplicate rfc 2231 parts,
# and to ignore gaps. This mimics the error recovery of get_param.
if not first_param.extended and len(parts) > 1:
if parts[1][0] == 0:
parts[1][1].defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate(s) ignored'))
parts = parts[:1]
# Else assume the *0* was missing...note that this is different
# from get_param, but we registered a defect for this earlier.
value_parts = []
i = 0
for section_number, param in parts:
if section_number != i:
# We could get fancier here and look for a complete
# duplicate extended parameter and ignore the second one
# seen. But we're not doing that. The old code didn't.
if not param.extended:
param.defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate ignored'))
continue
else:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent RFC2231 parameter numbering"))
i += 1
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
# Set this false so that the value doesn't wind up on a new line even
# if it and the parameters would fit there but not on the first line.
syntactic_break = False
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
as_ew_allowed = False
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
as_ew_allowed = False
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
as_ew_allowed = False
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
as_ew_allowed = False
class MsgID(TokenList):
token_type = 'msg-id'
as_ew_allowed = False
def fold(self, policy):
# message-id tokens may not be folded.
return str(self) + policy.linesep
class MessageID(MsgID):
token_type = 'message-id'
class Header(TokenList):
token_type = 'header'
#
# Terminal classes and instances
#
class Terminal(str):
as_ew_allowed = True
ew_combine_allowed = True
syntactic_break = True
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
def pprint(self):
print(self.__class__.__name__ + '/' + self.token_type)
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def pop_trailing_ws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def __getnewargs__(self):
return(str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
def __str__(self):
return ''
class _InvalidEwError(errors.HeaderParseError):
"""Invalid encoded word found while parsing headers."""
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
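#
# A rough worked example of the same convention (representations approximate):
#
#   atom, rest = get_atom('  foo bar')
#   str(atom)    ->  '  foo '   # string form preserves the surrounding CFWS
#   atom.value   ->  ' foo '    # whitespace tokens render as single spaces
#   rest         ->  'bar'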
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(ATOM_ENDS)))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(TOKEN_ENDS)))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(ATTRIBUTE_ENDS)))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(EXTENDED_ATTRIBUTE_ENDS)))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if (len(remstr) > 1 and
remstr[0] in hexdigits and
remstr[1] in hexdigits and
tok.count('?') < 2):
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except (ValueError, KeyError):
raise _InvalidEwError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
# Encoded words should be followed by a WS
if value and value[0] not in WSP:
ew.defects.append(errors.InvalidHeaderDefect(
"missing trailing whitespace after encoded-word"))
return ew, value
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
obs-unstruct = *((*LF *CR *(obs-utext) *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
valid_ew = True
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except _InvalidEwError:
valid_ew = False
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
# Split in the middle of an atom if there is a rfc2047 encoded word
# which does not have WSP on both sides. The defect will be registered
# the next time through the loop.
# This needs to only be performed when the encoded word is valid;
# otherwise, performing it on an invalid encoded word can cause
# the parser to go in an infinite loop.
if valid_ew and rfc2047_matcher.search(tok):
tok, *remainder = value.partition('=?')
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
def get_qp_ctext(value):
r"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII, a NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value
is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
if value and value[0] == '"':
token, value = get_qcontent(value)
bare_quoted_string.append(token)
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
If neither an atom nor a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if not value:
raise errors.HeaderParseError(
"Expected 'atom' or 'quoted-string' but found nothing.")
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
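# --- Illustrative usage (editor's annotation, not part of the original module) ---
# A small sketch of how get_word() branches between the atom form and the
# quoted-string form described in the docstring above.  It assumes this file
# is importable as email._header_value_parser; the remainders shown are
# indicative only.
#
#     from email._header_value_parser import get_word
#     token, rest = get_word('"Foo Bar" <foo@example.com>')
#     token.token_type, rest     # ('quoted-string', '<foo@example.com>')
#     token, rest = get_word('display more')
#     token.token_type, rest     # ('atom', 'more')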
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
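# --- Illustrative usage (editor's annotation, not part of the original module) ---
# Sketch of the common dot-atom branch of get_local_part(); assumes the module
# is importable as email._header_value_parser.  The remainder is indicative.
#
#     from email._header_value_parser import get_local_part
#     token, rest = get_local_part('john.doe@example.com')
#     token.token_type           # 'local-part'
#     rest                       # '@example.com'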
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
r""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
domain_literal.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
obs-domain = atom *("." atom))
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if value and value[0] == '@':
raise errors.HeaderParseError('Invalid Domain')
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"addr-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
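# --- Illustrative usage (editor's annotation, not part of the original module) ---
# Sketch of get_addr_spec() on a plain address.  Assumes the module is
# importable as email._header_value_parser; .domain is one of the convenience
# properties on the AddrSpec token list (name taken from CPython, treat it as
# an assumption here).
#
#     from email._header_value_parser import get_addr_spec
#     token, rest = get_addr_spec('fred@example.com')
#     str(token), rest           # ('fred@example.com', '')
#     token.domain               # 'example.com'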
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
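# --- Illustrative usage (editor's annotation, not part of the original module) ---
# get_mailbox() just tries name-addr first and falls back to addr-spec, so both
# spellings below produce a Mailbox token.  Assumes importability as
# email._header_value_parser; the remainders are indicative.
#
#     from email._header_value_parser import get_mailbox
#     get_mailbox('Fred Example <fred@example.com>, rest')[1]   # ', rest'
#     get_mailbox('fred@example.com, rest')[1]                  # ', rest'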
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
elif value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
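# --- Illustrative usage (editor's annotation, not part of the original module) ---
# Sketch of the whole-value entry point used for address headers (To, Cc, ...).
# Assumes importability as email._header_value_parser; the .addresses and
# .all_mailboxes accessors come from the token classes defined earlier in the
# module (names as in CPython, treat them as an assumption here).
#
#     from email._header_value_parser import get_address_list
#     alist, rest = get_address_list('fred@example.com, Jane <jane@example.org>')
#     len(alist.addresses), rest     # (2, '')  -- the whole value is consumed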
def get_no_fold_literal(value):
""" no-fold-literal = "[" *dtext "]"
"""
no_fold_literal = NoFoldLiteral()
if not value:
raise errors.HeaderParseError(
"expected no-fold-literal but found '{}'".format(value))
if value[0] != '[':
raise errors.HeaderParseError(
"expected '[' at the start of no-fold-literal "
"but found '{}'".format(value))
no_fold_literal.append(ValueTerminal('[', 'no-fold-literal-start'))
value = value[1:]
token, value = get_dtext(value)
no_fold_literal.append(token)
if not value or value[0] != ']':
raise errors.HeaderParseError(
"expected ']' at the end of no-fold-literal "
"but found '{}'".format(value))
no_fold_literal.append(ValueTerminal(']', 'no-fold-literal-end'))
return no_fold_literal, value[1:]
def get_msg_id(value):
"""msg-id = [CFWS] "<" id-left '@' id-right ">" [CFWS]
id-left = dot-atom-text / obs-id-left
id-right = dot-atom-text / no-fold-literal / obs-id-right
no-fold-literal = "[" *dtext "]"
"""
msg_id = MsgID()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
msg_id.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected msg-id but found '{}'".format(value))
msg_id.append(ValueTerminal('<', 'msg-id-start'))
value = value[1:]
# Parse id-left.
try:
token, value = get_dot_atom_text(value)
except errors.HeaderParseError:
try:
# obs-id-left is same as local-part of addr-spec.
token, value = get_obs_local_part(value)
msg_id.defects.append(errors.ObsoleteHeaderDefect(
"obsolete id-left in msg-id"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected dot-atom-text or obs-id-left"
" but found '{}'".format(value))
msg_id.append(token)
if not value or value[0] != '@':
msg_id.defects.append(errors.InvalidHeaderDefect(
"msg-id with no id-right"))
# Even though there is no id-right, if the local part
# ends with `>` let's just parse it too and return
# along with the defect.
if value and value[0] == '>':
msg_id.append(ValueTerminal('>', 'msg-id-end'))
value = value[1:]
return msg_id, value
msg_id.append(ValueTerminal('@', 'address-at-symbol'))
value = value[1:]
# Parse id-right.
try:
token, value = get_dot_atom_text(value)
except errors.HeaderParseError:
try:
token, value = get_no_fold_literal(value)
except errors.HeaderParseError as e:
try:
token, value = get_domain(value)
msg_id.defects.append(errors.ObsoleteHeaderDefect(
"obsolete id-right in msg-id"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected dot-atom-text, no-fold-literal or obs-id-right"
" but found '{}'".format(value))
msg_id.append(token)
if value and value[0] == '>':
value = value[1:]
else:
msg_id.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on msg-id"))
msg_id.append(ValueTerminal('>', 'msg-id-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
msg_id.append(token)
return msg_id, value
def parse_message_id(value):
"""message-id = "Message-ID:" msg-id CRLF
"""
message_id = MessageID()
try:
token, value = get_msg_id(value)
message_id.append(token)
except errors.HeaderParseError:
# If parsing fails, 'token' is never bound; record the defect and return
# the (empty) MessageID instead of referencing an unbound name.
message_id.defects.append(errors.InvalidHeaderDefect(
"Expected msg-id but found {!r}".format(value)))
return message_id
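# --- Illustrative usage (editor's annotation, not part of the original module) ---
# parse_message_id() consumes the full header value; a malformed value does not
# raise, it just records a defect on the returned token.  Assumes importability
# as email._header_value_parser.
#
#     from email._header_value_parser import parse_message_id
#     mid = parse_message_id('<12345@mail.example.com>')
#     str(mid)                       # '<12345@mail.example.com>'
#     parse_message_id('no brackets').defects   # contains an InvalidHeaderDefect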
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two level of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
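# --- Illustrative usage (editor's annotation, not part of the original module) ---
# The .major/.minor attributes are assigned by the code above; a bare '1.0'
# parses cleanly, while anything else accumulates defects instead of raising.
# Assumes importability as email._header_value_parser.
#
#     from email._header_value_parser import parse_mime_version
#     mv = parse_mime_version('1.0')
#     mv.major, mv.minor             # (1, 0)
#     parse_mime_version('').defects # one HeaderMissingRequiredValue defect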
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
section.defects.append(errors.InvalidHeaderDefect(
"section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
# and the 'extended-attribute' marker (the '*') , but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '"':
token = ValueTerminal('"', 'DQUOTE')
value = value[1:]
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
This is 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
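# --- Illustrative usage (editor's annotation, not part of the original module) ---
# As the docstring says, the caller strips the leading ';' first.  The .params
# accessor (the same one used by the folder further below) yields decoded
# (name, value) pairs, including reassembled RFC 2231 sections.  Assumes
# importability as email._header_value_parser; the exact decoded values are
# indicative.
#
#     from email._header_value_parser import parse_mime_parameters
#     params = parse_mime_parameters(' charset="utf-8"; format=flowed')
#     sorted(params.params)          # [('charset', 'utf-8'), ('format', 'flowed')]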
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
The maintype and substype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
# XXX: If we really want to follow the formal grammar we should make
# maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
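# --- Illustrative usage (editor's annotation, not part of the original module) ---
# The .maintype/.subtype attributes are assigned above; any parameters hang off
# the nested mime-parameters token.  Assumes importability as
# email._header_value_parser.
#
#     from email._header_value_parser import parse_content_type_header
#     ct = parse_content_type_header('text/plain; charset="utf-8"')
#     ct.maintype, ct.subtype        # ('text', 'plain')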
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
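# --- Illustrative usage (editor's annotation, not part of the original module) ---
# Same pattern as the content-type parser above; .content_disposition is
# assigned in the code just shown.  Assumes importability as
# email._header_value_parser.
#
#     from email._header_value_parser import parse_content_disposition_header
#     cd = parse_content_disposition_header('attachment; filename="report.pdf"')
#     cd.content_disposition         # 'attachment'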
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
#
# Header folding
#
# Header folding is complex, with lots of rules and corner cases. The
# following code does its best to obey the rules and handle the corner
# cases, but you can be sure there are few bugs:)
#
# This folder generally canonicalizes as it goes, preferring the stringified
# version of each token. The tokens contain information that supports the
# folder, including which tokens can be encoded in which ways.
#
# Folded text is accumulated in a simple list of strings ('lines'), each
# one of which should be less than policy.max_line_length ('maxlen').
#
def _steal_trailing_WSP_if_exists(lines):
wsp = ''
if lines and lines[-1] and lines[-1][-1] in WSP:
wsp = lines[-1][-1]
lines[-1] = lines[-1][:-1]
return wsp
def _refold_parse_tree(parse_tree, *, policy):
"""Return string of contents of parse_tree folded according to RFC rules.
"""
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or sys.maxsize
encoding = 'utf-8' if policy.utf8 else 'us-ascii'
lines = ['']
last_ew = None
wrap_as_ew_blocked = 0
want_encoding = False
end_ew_not_allowed = Terminal('', 'wrap_as_ew_blocked')
parts = list(parse_tree)
while parts:
part = parts.pop(0)
if part is end_ew_not_allowed:
wrap_as_ew_blocked -= 1
continue
tstr = str(part)
if part.token_type == 'ptext' and set(tstr) & SPECIALS:
# Encode if tstr contains special characters.
want_encoding = True
try:
tstr.encode(encoding)
charset = encoding
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# If policy.utf8 is false this should really be taken from a
# 'charset' property on the policy.
charset = 'utf-8'
want_encoding = True
if part.token_type == 'mime-parameters':
# Mime parameter folding (using RFC2231) is extra special.
_fold_mime_parameters(part, lines, maxlen, encoding)
continue
if want_encoding and not wrap_as_ew_blocked:
if not part.as_ew_allowed:
want_encoding = False
last_ew = None
if part.syntactic_break:
encoded_part = part.fold(policy=policy)[:-len(policy.linesep)]
if policy.linesep not in encoded_part:
# It fits on a single line
if len(encoded_part) > maxlen - len(lines[-1]):
# But not on this one, so start a new one.
newline = _steal_trailing_WSP_if_exists(lines)
# XXX what if encoded_part has no leading FWS?
lines.append(newline)
lines[-1] += encoded_part
continue
# Either this is not a major syntactic break, so we don't
# want it on a line by itself even if it fits, or it
# doesn't fit on a line by itself. Either way, fall through
# to unpacking the subparts and wrapping them.
if not hasattr(part, 'encode'):
# It's not a Terminal, do each piece individually.
parts = list(part) + parts
else:
# It's a terminal, wrap it as an encoded word, possibly
# combining it with previously encoded words if allowed.
last_ew = _fold_as_ew(tstr, lines, maxlen, last_ew,
part.ew_combine_allowed, charset)
want_encoding = False
continue
if len(tstr) <= maxlen - len(lines[-1]):
lines[-1] += tstr
continue
# This part is too long to fit. The RFC wants us to break at
# "major syntactic breaks", so unless we don't consider this
# to be one, check if it will fit on the next line by itself.
if (part.syntactic_break and
len(tstr) + 1 <= maxlen):
newline = _steal_trailing_WSP_if_exists(lines)
if newline or part.startswith_fws():
lines.append(newline + tstr)
last_ew = None
continue
if not hasattr(part, 'encode'):
# It's not a terminal, try folding the subparts.
newparts = list(part)
if not part.as_ew_allowed:
wrap_as_ew_blocked += 1
newparts.append(end_ew_not_allowed)
parts = newparts + parts
continue
if part.as_ew_allowed and not wrap_as_ew_blocked:
# It doesn't need CTE encoding, but encode it anyway so we can
# wrap it.
parts.insert(0, part)
want_encoding = True
continue
# We can't figure out how to wrap it, so give up.
newline = _steal_trailing_WSP_if_exists(lines)
if newline or part.startswith_fws():
lines.append(newline + tstr)
else:
# We can't fold it onto the next line either...
lines[-1] += tstr
return policy.linesep.join(lines) + policy.linesep
def _fold_as_ew(to_encode, lines, maxlen, last_ew, ew_combine_allowed, charset):
"""Fold string to_encode into lines as encoded word, combining if allowed.
Return the new value for last_ew, or None if ew_combine_allowed is False.
If there is already an encoded word in the last line of lines (indicated by
a non-None value for last_ew) and ew_combine_allowed is true, decode the
existing ew, combine it with to_encode, and re-encode. Otherwise, encode
to_encode. In either case, split to_encode as necessary so that the
encoded segments fit within maxlen.
"""
if last_ew is not None and ew_combine_allowed:
to_encode = str(
get_unstructured(lines[-1][last_ew:] + to_encode))
lines[-1] = lines[-1][:last_ew]
if to_encode[0] in WSP:
# We're joining this to non-encoded text, so don't encode
# the leading blank.
leading_wsp = to_encode[0]
to_encode = to_encode[1:]
if (len(lines[-1]) == maxlen):
lines.append(_steal_trailing_WSP_if_exists(lines))
lines[-1] += leading_wsp
trailing_wsp = ''
if to_encode[-1] in WSP:
# Likewise for the trailing space.
trailing_wsp = to_encode[-1]
to_encode = to_encode[:-1]
new_last_ew = len(lines[-1]) if last_ew is None else last_ew
encode_as = 'utf-8' if charset == 'us-ascii' else charset
# The RFC2047 chrome takes up 7 characters plus the length
# of the charset name.
chrome_len = len(encode_as) + 7
if (chrome_len + 1) >= maxlen:
raise errors.HeaderParseError(
"max_line_length is too small to fit an encoded word")
while to_encode:
remaining_space = maxlen - len(lines[-1])
text_space = remaining_space - chrome_len
if text_space <= 0:
lines.append(' ')
continue
to_encode_word = to_encode[:text_space]
encoded_word = _ew.encode(to_encode_word, charset=encode_as)
excess = len(encoded_word) - remaining_space
while excess > 0:
# Since the chunk to encode is guaranteed to fit into less than 100 characters,
# shrinking it by one at a time shouldn't take long.
to_encode_word = to_encode_word[:-1]
encoded_word = _ew.encode(to_encode_word, charset=encode_as)
excess = len(encoded_word) - remaining_space
lines[-1] += encoded_word
to_encode = to_encode[len(to_encode_word):]
if to_encode:
lines.append(' ')
new_last_ew = len(lines[-1])
lines[-1] += trailing_wsp
return new_last_ew if ew_combine_allowed else None
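# --- Worked example (editor's annotation, not part of the original module) ---
# The "7 characters of chrome" counted above are the fixed parts of an RFC 2047
# encoded word: '=?' + '?q?' (or '?b?') + '?=' is 2 + 3 + 2 = 7 characters, to
# which the charset name is added.  For utf-8 that is 7 + 5 = 12 characters of
# overhead, e.g. in '=?utf-8?q?caf=C3=A9?=' the framing costs 12 characters and
# the remaining 9 carry the Q-encoded payload.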
def _fold_mime_parameters(part, lines, maxlen, encoding):
"""Fold TokenList 'part' into the 'lines' list as mime parameters.
Using the decoded list of parameters and values, format them according to
the RFC rules, including using RFC2231 encoding if the value cannot be
expressed in 'encoding' and/or the parameter+value is too long to fit
within 'maxlen'.
"""
# Special case for RFC2231 encoding: start from decoded values and use
# RFC2231 encoding iff needed.
#
# Note that the 1 and 2s being added to the length calculations are
# accounting for the possibly-needed spaces and semicolons we'll be adding.
#
for name, value in part.params:
# XXX What if this ';' puts us over maxlen the first time through the
# loop? We should split the header value onto a newline in that case,
# but to do that we need to recognize the need earlier or reparse the
# header, so I'm going to ignore that bug for now. It'll only put us
# one character over.
if not lines[-1].rstrip().endswith(';'):
lines[-1] += ';'
charset = encoding
error_handler = 'strict'
try:
value.encode(encoding)
encoding_required = False
except UnicodeEncodeError:
encoding_required = True
if utils._has_surrogates(value):
charset = 'unknown-8bit'
error_handler = 'surrogateescape'
else:
charset = 'utf-8'
if encoding_required:
encoded_value = urllib.parse.quote(
value, safe='', errors=error_handler)
tstr = "{}*={}''{}".format(name, charset, encoded_value)
else:
tstr = '{}={}'.format(name, quote_string(value))
if len(lines[-1]) + len(tstr) + 1 < maxlen:
lines[-1] = lines[-1] + ' ' + tstr
continue
elif len(tstr) + 2 <= maxlen:
lines.append(' ' + tstr)
continue
# We need multiple sections. We are allowed to mix encoded and
# non-encoded sections, but we aren't going to. We'll encode them all.
section = 0
extra_chrome = charset + "''"
while value:
chrome_len = len(name) + len(str(section)) + 3 + len(extra_chrome)
if maxlen <= chrome_len + 3:
# We need room for the leading blank, the trailing semicolon,
# and at least one character of the value. If we don't
# have that, we'd be stuck, so in that case fall back to
# the RFC standard width.
maxlen = 78
splitpoint = maxchars = maxlen - chrome_len - 2
while True:
partial = value[:splitpoint]
encoded_value = urllib.parse.quote(
partial, safe='', errors=error_handler)
if len(encoded_value) <= maxchars:
break
splitpoint -= 1
lines.append(" {}*{}*={}{}".format(
name, section, extra_chrome, encoded_value))
extra_chrome = ''
section += 1
value = value[splitpoint:]
if value:
lines[-1] += ';'
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/email/_header_value_parser.py
|
Python
|
apache-2.0
| 106,086
|
[
"CRYSTAL"
] |
ccc40efa801902ec7b2ad226a2e33e3e50a74d544fc315667dc5fe0c4174ea5b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO-0: If just the space group is selected and not the cell:
try to find the proper cell if it is not ambiguous
(like P21212, P2122, ...),
TODO-1: Add in the CORRECT summary the Rmrgd-F, and total
overloaded refl.
TODO-2: Start multiple COLSPOT with different thresholds+ multiple IDXREF.
TODO-3: Generating plots !
"""
__version__ = "0.5.0.9"
__author__ = "Pierre Legrand (pierre.legrand \at synchrotron-soleil.fr)"
__date__ = "18-12-2013"
__copyright__ = "Copyright (c) 2006-2014 Pierre Legrand"
__license__ = "New BSD http://www.opensource.org/licenses/bsd-license.php"
import os
import sys
import re
if sys.version_info <= (2, 4, 0):
from popen2 import Popen3
else:
from subprocess import Popen, PIPE
from XOconv.pycgtypes import mat3
from XOconv.pycgtypes import vec3
from XOconv.XOconv import reciprocal, UB_to_cellParam, BusingLevy
from pointless import pointless, is_pointless_installed
from xupy import XParam, xdsInp2Param, opWriteCl, \
saveLastVersion, LP_names, xdsinp_base, \
SPGlib, Lattice, resum_scaling, \
get_BravaisToSpgs, get_number_of_processors, \
EXCLUDE_ICE_RING
import XIO
PROGNAME = os.path.split(sys.argv[0])[1]
USAGE = """
Running XDS automatically...
USAGE: %s [OPTION]... FILES
FILES is for one or multiple diffraction image files.
OPTIONS:
-h, --help
Print this help message.
-1,-2,-3,-4,-5
Go directly to a particular step:
-1: XYCOOR + INIT
-2: COLSPOT
-3: IDXREF
-4: DEFPIX + INTEGRATE
-5: CORRECT
-a, --anomal
Distinguishes Friedel pairs for scaling, strategy and completeness
statistics. Default is no anomalous contribution.
-A, --Anomal
Like -a, but also set "STRICT_ABSORPTION_CORRECTION" to True.
It usually gives better scaling statistics with redundancy > 2.
-b, --beam-center-optimize-i
Starting from the initial given values, search and optimize the beam
center coordinates (given by -x, -y or extracted from the header).
Best solution is chosen after i-score ranking.
-B, --beam-center-optimize-z
Like -b/--beam-center-optimize-i, but the best solution is chosen
after a z-score ranking.
-c, --cell
Set the expected cell.
For example: -c "79 79 38 90 90 90"
-d, --distance
Set the detector to crystal distance.
-f, --reference FILE
Defines a reference data set used during the XPLAN & CORRECT steps.
For example: -f ../ref/XDS_ASCII.HKL
-F, --first-frame
Specify the first frame to be used in the DATA_RANGE (see also -L)
-i, --xds-input
Give direct XDS Keyword input.
For example: -i "DETECTOR_DISTANCE= 167.0 JOB= IDXREF AIR= 0.002"
-I, --ice
Exclude resolution ranges where ice-rings occur (3.897, 3.669,
3.441, 2.671, 2.249, 2.249, 1.948, 1.918, 1.883, 1.721 A).
-L, --last-frame
Specify the last frame to be used in the DATA_RANGE (see also -F).
This can be useful in case of radiation damage.
-O, --oscillation
Set frame oscillation range in degree.
For example: -O 0.5
-n, --nthreads
Set the maximum number of threads to use. Default is to use the
maximum available.
For example: -n 4
-M, --orientation-matrix
Input crystal orientation matrix.
For example: -M XPARM.XDS
-p, --project
Set the project name. The default is the prefix taken from
image names. The working directory will be: xds_process_"project"
-r, --high-resolution
Set a high resolution cutoff. Default is 0 (no cutoff).
-R, --low-resolution
Set a low resolution cutoff. Default is 50 A.
-s, --spg
Set the expected space group using either the space group number
or simple string.
For example: -s 18 or -s P21212
-S, --strategy
Force to go for calculating strategy (XPLAN) and then stops.
-x, --beam-x
Set a new value for ORGX: X-coordinates (in pixels) of the
detector origin. It may be given in mm if the value is directly
ended by "mm", (e.g. -x 109.1mm).
-y, --beam-y
Set a new value for ORGY: Y-coordinates (in pixels) of the
detector origin. It may be given in mm if the value is directly
ended by "mm", (e.g. -y 106.4mm).
-W, --beam-center-swap
From the header recorded X and Y beam-center coordinate values,
try the 8 possible permutations and select the best one based on
z-score ranking. This is very useful if indexing fails, as the convention
for recording these values may not be identical from one synchrotron
to another.
-v, --verbose
Turn on verbose output.
-w, --wavelength
Set the x-ray wavelength.
--slow,
Set parameters to process more accurately.
--weak,
Set parameters to index in case of weak spots.
--brute,
Try hard to index. To be used in resistant cases.
""" % PROGNAME
FMT_HELLO = """
Diffraction Setup Parameters:\n
Detector distance: %(DETECTOR_DISTANCE)8.2f mm
X-ray wavelength: %(X_RAY_WAVELENGTH)10.4f A
Oscillation range: %(OSCILLATION_RANGE)10.4f degree\n
Beam coordinate X: %(ORGX)8.1f pixel
Y: %(ORGY)8.1f pixel
Image range: %(DATA_RANGE)11s
"""
FMT_FINAL_STAT = """
Refined Parameters and Scaling Statistics
=========================================\n
Name template %(name)s
Data range %(image_start)5d to %(image_last)5d
Space group number %(spg_num)d
symbol %(spg_sym)s
Cell parameters %(cell)s
Resolution %(LowestReso)8.2f -%(reso)6.2f\
(%(resoL).2f - %(reso).2f)
Completeness %(compl)5.1f%% (%(complL).1f%%)
I/sigma(I) %(isig)6.2f (%(isigL).2f)
Rmeas %(rmeas)6.1f%% (%(rmeasL).1f%%)
Rsym %(rsym)7.2f%% (%(rsymL).1f%%)
Multiplicity %(multiplicity)10.1f
Compared %(compar)10d (%(comparL)d)
Measured %(total)10d
Unique %(unique)10d
Rejected misfits %(misfit)10d
Wilson scaling (B/Corr) %(wilson_b)10.1f (%(wilson_corr).2f)
"""
FMT_ABSENCES = " Systematic absent reflections measured %(AbsNum)6d \
with <Iabs>/<I> = %(AbsIav).1f%%\n"
FMT_ANOMAL = """
Anomalous pairs measured %(anoNum)10d
SigAno %(anoSig)10.3f (%(anoSigL).3f)
Anomalous Correlation %(anoCorr)10.1f%% (%(anoCorrL).1f%%)
"""
STEPMARK = re.compile(r"^( [*]{5} (\w{4,}) [*]{5} )")
INTEGRATE_STEP_RE = re.compile(r" PROCESSING OF IMAGES ")
INTEGRATE_MOSAICITY_RE = re.compile(r"CRYSTAL MOSAICITY \(DEGREES\)")
INTEGRATE_STRONG_RE = re.compile(r"REFLECTIONS ACCEPTED FOR REFINEMENT")
RRF, RRI = r"[\ ]+([0-9\.]+)", r"[\ ]+([\d]+) "
SCALE_RE = re.compile(r" "+RRI+r" (\d)"+RRF+r" ....... "+4*RRI+2*RRF)
XDS_HOME = os.getenv('XDS')
def _get_omatrix(_file):
omat = []
xparm = open(_file,'r').readlines()
spgcell = xparm[7].split()
spgn = int(spgcell[0])
cell = map(float, spgcell[1:])
for line in xparm[8:]:
omat.append(map(float, line.split()))
return spgn, cell, omat
def unpack_latticefit2(lattice_string):
"From lattice_string to Lattice object."
lats = lattice_string[2:].split()
latt = Lattice((map(float, lats[3:9])), lats[1])
latt.fit = float(lats[2])
latt.character = int(lats[0])
#latt.reindexing = tuple(map(int,ss[9:]))
return latt
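# Illustrative input for unpack_latticefit2(): one entry of the IDXREF
# lattice-character table, e.g. (made-up values)
#   " *  31        aP       0.0    51.5  51.5 155.4  90.0  90.0  90.0 ..."
# which yields latt.character == 31, Bravais symbol "aP", latt.fit == 0.0 and
# the six cell parameters taken from the following columns.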
def _mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
_mkdir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
def make_xds_image_links(imagename_list, dir_name="img_links",
prefix="image", start_num=1):
"""All image names in the imagename_list are supposed to be part
    of one continuous sequence of collected images.
Todo:
- How to safely modulate PhiStart outside the [-180,180] range ?
"""
link_list = []
if dir_name not in os.listdir("."):
try:
_mkdir(dir_name)
except Exception, err:
print "Error\n", err
sys.exit(0)
#
dir_name = os.path.abspath(dir_name)
collect_im = {}
osc_ranges = []
for _image in imagename_list:
image = XIO.Image(_image)
if VERBOSE:
print _image
# How to safely modulate PhiStart outside the [-180,180] range?
if VERBOSE:
print "\tPhiStart %8.2f" % image.header['PhiStart']
if VERBOSE:
print "\tPhiWidth %8.2f" % image.header['PhiWidth']
collect_im[image.header['PhiStart']] = _image
osc_ranges.append(image.header['PhiWidth'])
if max(osc_ranges) != min(osc_ranges):
print "Error. Image list contains different oscillation range!"
sys.exit(0)
#
osc_starts = collect_im.keys()
osc_starts.sort()
for _osc in osc_starts:
_num = start_num+ (_osc-osc_starts[0])/osc_ranges[0]
link_name = os.path.join(dir_name, prefix+"_%04.0f.img" % _num)
if os.path.lexists(link_name) and os.path.islink(link_name):
if VERBOSE:
print "==> Removing existing link: %s" % link_name
os.remove(link_name)
os.symlink(os.path.abspath(collect_im[_osc]), link_name)
link_list.append(link_name)
return link_list
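# Example of the resulting layout (illustrative): with dir_name="img_links"
# and prefix="image", the frames are sorted by increasing PhiStart and exposed
# as img_links/image_0001.img, img_links/image_0002.img, ... so that XDS sees
# one continuous numbering scheme.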
class XDSLogParserException(Exception):
"""This level of exception raises a recoverable error which can be fixed.
"""
class XDSExecError(Exception):
""
class XDSLogParser:
""" Parser for the xds *.LP files.
"""
def __init__(self, filename="", run_dir="",
verbose=False, raiseErrors=True):
self.results = {}
self.info = "XDS Parser"
self.fileType = "XDS"
self.verbose = verbose
#
if not run_dir:
run_dir = "./"
self.run_dir = run_dir
#
full_filename = os.path.join(self.run_dir, filename)
#
if filename:
try:
fp = open(full_filename, "r")
self.lp = fp.read()
fp.close()
except:
raise IOError, "Can't read file: %s" % full_filename
else:
self.lp = ""
# Catch Errors:
_err = self.lp.find(" !!! ERROR " )
_err_type = None
_err_level = None
if _err != -1:
_err_msg = self.lp[_err:]
if _err_msg.count(" CANNOT READ IMAGE "):
_err_type = "Some images connot be read"
_err_level = "WARNING"
# IDXREF ERROR Messages:
elif _err_msg.count("INSUFFICIENT PERCENTAGE (<"):
_err_type = "IDXREF. Percentage of indexed"
_err_type += " reflections bellow limit.\n"
_err_level = "WARNING"
elif _err_msg.count("ERROR IN REFINE !!! RETURN"):
_err_type = "IDXREF. Can't refine cell paramters."
_err_level = "FATAL"
elif _err_msg.count("USELESS DATA SET"):
_err_type = "INTEGRATE: USELESS DATA SET."
_err_type += " Not enough images or bad diffraction ?"
_err_level = "FATAL"
elif _err_msg.count("SOLUTION IS INACCURATE"):
_err_type = "IDXREF. Solution is inaccurate.\n"
_err_level = "WARNING"
elif _err_msg.count("INSUFFICIENT NUMBER OF ACCEPTED SPOTS."):
_err_type = "IDXREF. INSUFFICIENT NUMBER OF ACCEPTED SPOTS."
_err_level = "FATAL"
elif _err_msg.count("CANNOT INDEX REFLECTIONS"):
_err_type = "IDXREF. CANNOT INDEX REFLECTIONS."
_err_level = "FATAL"
elif _err_msg.count("CANNOT CONTINUE WITH A TWO-DIMENSIONAL"):
_err_type = "IDXREF. CANNOT INDEX REFLECTIONS."
_err_level = "FATAL"
else:
print "\n %s \n" % (self.lp[_err:-1])
sys.exit()
if _err_level in ("FATAL", "ERROR") and raiseErrors:
raise XDSExecError, (_err_level, _err_type)
if self.verbose and _err != -1:
print "\n !!! %s in %s" % (_err_level, _err_type)
if full_filename.count("INIT.LP"):
self.parse_init()
elif full_filename.count("COLSPOT.LP"):
self.parse_colspot()
elif full_filename.count("IDXREF.LP"):
self.parse_idxref()
elif full_filename.count("XPLAN.LP"):
self.parse_xplan()
elif full_filename.count("DEFPIX.LP"):
self.parse_defpix()
elif full_filename.count("INTEGRATE.LP"):
self.parse_integrate()
elif full_filename.count("CORRECT.LP"):
self.parse_correct()
else:
if filename:
raise IOError, "Don't know how to parse file: %s" % \
full_filename
def get_par(self, match, limit=75, func=None, multi_line=False,
start=0, before=False, match_end=None):
"Extract parameters from XDS .LP lines."
try:
if before:
limit = start
start = self.lp.index(match)-start
else:
start = self.lp.index(match, start) + len(match)
except Exception, err:
raise err
if match_end:
end = self.lp.index(match_end, start + 1)
else:
end = start+limit
if multi_line:
_raw = self.lp[start:end].split()
else:
_raw = self.lp[start:end].splitlines()[0].split()
if not func:
for var_type in (int, float, str):
try:
var_type(_raw[0])
func = var_type
except ValueError:
pass
if func:
break
if not func:
raise ValueError, "get_par function can't process value '%s'" \
% _raw
pars = map(func, _raw)
if len(pars) == 1:
return pars[0]
else: return pars
def _get_lattices_table(self):
"Extract lattice table"
st1 = self.lp.index("LATTICE- BRAVAIS- QUALITY")
_table = self.lp[st1:st1+6000].splitlines()[3:47]
return map(unpack_latticefit2, _table)
def _get_index_origins_table(self):
"Extract origin table"
st0 = self.lp.index(" DL\n ORIGIN\n")+14
st1 = self.lp.index(" SELECTED: INDEX_ORIGIN=")-2
return map(lambda s: \
map(float, s.split()), self.lp[st0:st1].splitlines())
def parse_init(self):
"Parse INIT.LP"
rdi, gpa = self.results, self.get_par
#
rdi["background_range"] = gpa("BACKGROUND_RANGE=")
rdi["mean_gain"] = gpa("MEAN GAIN VALUE")
rdi["min_gain"] = gpa("MINIMUM GAIN VALUE IN TABLE")
rdi["max_gain"] = gpa("MAXIMUM GAIN VALUE IN TABLE")
rdi["mean_background"] = gpa("KGROUND COUNTS IN A DATA IMAGE PIXEL")
#
prp = " Looking at images %(background_range)s\n"
prp += " Mean Gain: %(mean_gain).1f\n"
prp += " Min table gain: %(min_gain).2f\n"
prp += " Max table gain: %(max_gain).2f\n"
prp += " Mean Background: %(mean_background).1f\n"
if self.verbose:
print prp % rdi
return rdi, prp
def parse_colspot(self):
"Parse COLSPOT.LP"
rdi, gpa = self.results, self.get_par
#
rdi["strong_pixels"] = gpa("EXTRACTED FROM IMAGES")
rdi["weak_spots_ignored"] = gpa("WEAK SPOTS OMITTED")
rdi["out_of_center_spots"] = gpa("SPOT MAXIMUM OUT OF CENTER")
rdi["spot_number"] = self.get_spot_number()
rdi["time"] = gpa("elapsed wall-clock time", 11)
prp = " Number of spots found: %(spot_number)10d\n"
prp += " Out of center rejected: %(out_of_center_spots)10d\n"
prp += " Weak spots rejected: %(weak_spots_ignored)10d\n"
prp += " Number of spots accepted: %(spot_number)10d\n"
if self.verbose:
print prp % rdi
return rdi, prp
def parse_idxref(self):
"Parse IDXREF.LP"
rdi, gpa = self.results, self.get_par
#
rexp1 = r".* (\d+) OUT OF\ +(\d+) SPOTS INDEXED\..*"
rexp2 = r".* QX=\ +([\d|\.]+)\ +QY=\ +([\d|\.]+)"
#if "! ERROR !" in self.lp:
# raise XDSLogParserException, "Error while parsing XDS logfile"
nis, nts = map(int, re.match(rexp1, self.lp, re.DOTALL).groups())
qx, qy = map(float, re.match(rexp2, self.lp, re.DOTALL).groups())
meanPixel = (qx+qy)/2
rdi["indexed_spots"] = nis
rdi["total_spots"] = nts
rdi["indexed_percentage"] = 100.*nis/nts
#
st0 = self.lp.index("START OF INTEGRATION *****")
st1 = "STANDARD DEVIATION OF SPOT POSITION (PIXELS)"
st2 = "STANDARD DEVIATION OF SPINDLE POSITION (DEGREES)"
st3 = "UNIT CELL PARAMETERS"
st4 = "SPACE GROUP NUMBER"
st5 = "COORDINATES (PIXELS) OF DIRECT BEAM"
st6 = "SUBTREE POPULATION\n"
#
rdi["oscillation_range"] = gpa("OSCILLATION_RANGE=")
rdi["xy_spot_position_ESD"] = gpa(st1, start=st0)
rdi["z_spot_position_ESD"] = gpa(st2, start=st0)
rdi["index_origin_table"] = self._get_index_origins_table()
rdi["lattices_table"] = self._get_lattices_table()
rdi["refined_cell"] = gpa(st3, start=st0)
rdi["refined_cell_str"] = 6*"%.2f " % \
tuple(rdi["refined_cell"])
rdi["space_group_number"] = gpa(st4, start=st0)
rdi["direct_beam_pixels"] = gpa(st5, start=st0)
rdi["direct_beam_mm"] = rdi["direct_beam_pixels"][0]*qx, \
rdi["direct_beam_pixels"][1]*qy
rdi["bmx"], rdi["bmy"] = rdi["direct_beam_mm"]
rdi["bpx"], rdi["bpy"] = rdi["direct_beam_pixels"]
subtrees = gpa(st6, multi_line=True, func=int, match_end="\n\n ")
rdi["substrees"] = [subtrees[i] for i in range(1, len(subtrees), 2)]
origin_t = rdi["index_origin_table"]
origin_n = len(origin_t)
quality_t = [x[3] for x in origin_t if x[3] < 2.]
#rdi["index_score"] = reduce(lambda a,b: a+b, quality_t)/len(quality_t)
max_ot = min(origin_n, 5)
rdi["shift_pixel"] = origin_t[0][4]
rdi["shift_mm"] = origin_t[0][4]*meanPixel
prp = """ Unit cell parameters: %(refined_cell_str)s
Space group number: %(space_group_number)s
Indexed spots: %(indexed_percentage).1f%% (%(indexed_spots)d/%(total_spots)d)
Spot prediction ESD: %(xy_spot_position_ESD).2f pixels and %(z_spot_position_ESD).2f degrees
Refined beam position (in mm): (%(bmx)9.3f, %(bmy)9.3f)
Refined beam position (in pixels): (%(bpx)9.2f, %(bpy)9.2f)
Shift in beam position: %(shift_mm)9.2f mm (%(shift_pixel).1f pixels)\n"""
prp2 = " Size of the origin index table: %(origin_n)7d\n" % vars()
ppa, ppb = "\n\tQuality: ", "\n\tShift (mm): "
ppc, ppd = "\n\tShift (pixels):", "\n\tBeam X (mm): "
ppe, ppf = "\n\tBeam Y (mm): ", "\n\tIndex Origin: "
for i in range(max_ot):
ppa += "%9.2f," % (origin_t[i][3])
ppb += "%9.2f," % (origin_t[i][4]*meanPixel)
ppc += "%9.1f," % (origin_t[i][4])
ppd += "%9.1f," % (origin_t[i][5]*qx)
ppe += "%9.1f," % (origin_t[i][6]*qy)
ppf += "%3d%3d%3d," % tuple(origin_t[i][0:3])
prp2 += " Origin ranking for the best %d solutions: " % max_ot
prp2 += ppa[:-1] + ppb[:-1] + ppc[:-1]
prp2 += ppd[:-1] + ppe[:-1] + ppf[:-1] + "\n"
#prp += " Index origin score: %.2f\n" % (rdi["index_score"])
if self.verbose == 1:
print (prp + prp2) % rdi
elif self.verbose == 2:
print prp % rdi
return rdi, prp
def parse_defpix(self):
"Parse DEFPIX.LP"
rdi, gpa = self.results, self.get_par
rdi["value_range"] = gpa("TRUSTED_DETECTOR_PIXELS= ")
prp = " Value range for trusted detector pixels: %(value_range)s"
if self.verbose:
print prp % rdi
return rdi, prp
def parse_integrate(self):
"Parse INTEGRATE.LP"
rdi, gpa = self.results, self.get_par
rdi["reflections"] = gpa("REFLECTIONS SAVED ON FILE",
start=9, func=int, before=True)
rdi["divergence"] = gpa("BEAM_DIVERGENCE_E.S.D.= ")
rdi["mosaicity"] = gpa("REFLECTING_RANGE_E.S.D.= ")
prp = "\n Number of reflection integrated: %(reflections)d\n"
prp += " Estimated divergence: %(divergence).3f\n"
prp += " Estimated mosaicity: %(mosaicity).3f\n"
if self.verbose:
print prp % rdi
return rdi, prp
def parse_xplan(self):
"Parse XPLAN.LP"
rdi, gpa = self.results, self.get_par
rdi["spacegroup"] = gpa("SPACE_GROUP_NUMBER=")
rdi["unitcell"] = 6*" %8.2f" % tuple(gpa("UNIT_CELL_CONSTANTS="))
rdi["friedels_law"] = gpa("FRIEDEL'S_LAW=")[0]
st0 = self.lp.index(72*"*")
st1 = self.lp.index(72*"*", st0+72)
st2 = self.lp.index(72*"*", st1+72)
#
prp = " Friedel's law: %(friedels_law)s\n"
prp += " Spacegroup: %(spacegroup)d\n"
prp += " Unitcell: %(unitcell)s\n"
if self.verbose:
print prp % rdi
print
print self.lp[st0:st2]
return rdi, prp
def parse_correct(self):
"Parse CORRECT.LP"
rdi, gpa = self.results, self.get_par
try:
sp1 = self.lp.index(" INPUT DATA SET")
sp2 = self.lp.index(" INTEGRATE.HKL ", sp1)
K1s, K2s = map(float, self.lp[sp1+18: sp2].split())[:2]
rdi["IoverSigmaAsympt"] = 1/((K1s*(K2s+0.0004))**0.5)
except:
try:
sp1 = self.lp.index("a b ISa") + 23
rdi["IoverSigmaAsympt"] = float(self.lp[sp1:sp1+31].split()[2])
except:
rdi["IoverSigmaAsympt"] = 0.0
print " Upper theoritical limit of I/sigma: %8.3f" % \
rdi["IoverSigmaAsympt"]
#print " Variance estimate scaling (K1, K2): %8.3f, %12.3e" % \
# (4*K1s, (K2s/4+0.0001))
rdi["RMSd_spotPosition"] = gpa("SPOT POSITION (PIXELS)")
rdi["RMSd_spindlePosition"] = gpa("SPINDLE POSITION (DEGREES)")
rdi["Mosaicity"] = gpa("CRYSTAL MOSAICITY (DEGREES)")
r = gpa(" "+"-"*74+"\n")
rdi["I_sigma"], rdi["Rsym"] = r[2], r[4]
rdi["Compared"], rdi["Total"] = r[6], r[7]
### Select Diffraction range.
sp1 = self.lp.index("RESOLUTION RANGE I/Sigma")
sp2 = self.lp.index(10*"-", sp1)
_table = self.lp[sp1:sp2].splitlines()[3:-1]
_table = [ map(float, l[:26].split()[1:3]) for l in _table ]
rdi["HighResCutoff"] = self.get_proper_resolition_range(_table)
prp = ""
if rdi["Mosaicity"]:
prp += " RMSd spot position: %(RMSd_spotPosition)19.2f pix,"
prp += "%(RMSd_spindlePosition)6.2f deg.\n"
prp += " Refined Mosaicity: %(Mosaicity)29.2f deg.\n\n"
prp += " Rsym: %(Rsym)9.1f\n"
prp += " I/sigma: %(I_sigma)9.1f\n"
if rdi["HighResCutoff"]:
prp += " Suggested high resolution cutoff: %(HighResCutoff)9.2f"
prp += "\n Compared reflections: %(Compared)d\n"
prp += " Total number of measures: %(Total)d\n"
if self.verbose:
print prp % rdi
return rdi, prp
    def get_proper_resolution_range(self, res_table):
"High res is selected when at least 3 values of I/sigma are below 1."
high_n, high_hit = [], None
for res, IoS in res_table:
if IoS < 1.:
high_n.append(res)
if not high_hit and len(high_n) == 3:
high_hit = high_n[0]
else:
high_n = []
#print "%8.3f %8.3f %s" % (res, IoS, IoS >= 1.)
if not high_hit and len(high_n) >= 1:
high_hit = high_n[0]
#print "Suggested high resolution cut-off: %.2f" % high_hit
return high_hit
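    # Worked example (illustrative numbers): for a resolution table
    #   [[4.0, 5.2], [3.5, 2.1], [3.0, 0.9], [2.8, 0.8], [2.6, 0.7], [2.4, 1.2]]
    # the first run of three consecutive shells with I/sigma < 1 starts at
    # 3.0 A, so the suggested high-resolution cutoff is 3.0.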
def run_exec_str(self, execstr):
if sys.version_info <= (2, 4, 0):
spot_file = os.popen(execstr)
outp = spot_file.read()
spot_file.close()
else:
outp = Popen([execstr], stdout=PIPE, shell=True).communicate()[0]
return outp
def get_xds_version(self):
"Get the version of XDS"
_execstr = "cd /tmp; xds_par | grep VERSION"
wc_out = self.run_exec_str(_execstr)
return wc_out.strip()[24:-12].replace(")","")
def get_spot_number(self):
"Read the number of spot directly from SPOT.XDS"
_execstr = "wc -l %s/SPOT.XDS" % self.run_dir
wc_out = self.run_exec_str(_execstr)
return int(wc_out.split()[0])
MIN_SPOT_NUMBER = 200
LATTICE_GEOMETRIC_FIT_CUTOFF = 50
FRAMES_PER_COLSPOT_SEQUENCE = 16 # number of frames per sequence in COLSPOT.
JOB_STEPS = "INIT", "COLSPOT", "IDXREF", "INTEGRATE", "CORRECT"
SPOTFILENAME = "SPOT.XDS"
class XDS:
"Main class for runing xds step by step."
def __init__(self, obj=None, link_to_images=True):
"""Constructor for the Param classes from file or string."""
#
self.link_to_images = link_to_images
self.__cancelled = 0
self.__lastOutp = 0
self.mode = []
if XDS_HOME:
self.__execfile = os.path.join(XDS_HOME,"xds_par")
else:
self.__execfile = "xds_par"
self.running = 0
self.outp = []
self.run_dir = "."
self.status = None
self.inpParam = XParam()
self.collect_dir = "./"
self.link_name_to_image = "img"
self.running_processes = []
#
if type(obj) == file:
exec obj.read() in self.inpParam.__dict__
obj.close()
if type(obj) == str:
exec obj in self.inpParam.__dict__
def set_collect_dir(self, dirname):
"Set the collect directory"
if os.path.isdir(dirname):
self.collect_dir = dirname
else:
raise XIO.XIOError, "Can't find %s directory" % dirname
def _creat_process(self, _execstr):
"Return a process with pipe redirected IO."
if sys.version_info <= (2, 4, 0):
self.wait_value = -1
return Popen3(_execstr)
else:
self.wait_value = None
return Popen(_execstr, stdin=PIPE, stdout=PIPE,
stderr=PIPE, bufsize=1, close_fds=True,
universal_newlines=True)
def cancel(self):
"Cancel the job."
self.__cancelled = 1
def get_outp(self):
"Collect the latest output."
if not self.__cancelled:
nLine = len(self.outp)
diff = nLine - self.__lastOutp
self.__lastOutp = nLine
if diff:
return "".join(self.outp[-diff:])[:-1]
else:
return ""
def run(self, run_dir=None, rsave=None, verbose=True, async=False):
"Control the runing of the xds process and parse the output."
self.__cancelled = 0
self.running = 1
self.step = 0
self.step_name = ""
self.outp = []
self.init_dir = os.getcwd()
self.async = async
if run_dir:
self.run_dir = run_dir
if not self.run_dir:
self.run_dir = "."
result = 0
if self.run_dir:
if not os.path.exists(self.run_dir):
try:
os.mkdir(self.run_dir)
except OSError, err:
raise XIO.XIOError, \
("\nSTOP! Can't create xds working directory: %s\n" % \
self.run_dir)
if os.path.isdir(self.run_dir):
os.chdir(self.run_dir)
if self.link_to_images:
if not os.path.exists(self.link_name_to_image):
os.system("ln -sf '%s' %s" % (self.collect_dir, \
self.link_name_to_image))
#os.system("ln -sf .. %s" % (self.link_name_to_image))
#else:
# raise XIO.XIOError, \
# "STOP! Can't creat link %s in working directory: %s" \
# % (self.link_name_to_image, self.run_dir)
opWriteCl("XDS.INP", "%s" % self.inpParam)
#
# self.running_processes
xdsProcess = self._creat_process(self.__execfile)
_init_parse = True
overloaded_spots = 0
while self.running:
self.status = xdsProcess.poll()
if self.status != self.wait_value:
self.running = 0
break
if self.__cancelled:
os.kill(xdsProcess.pid, 9)
break
if self.wait_value == -1:
lines = xdsProcess.fromchild.readline()
else:
lines = xdsProcess.stdout.readline()
#lines = xdsProcess.communicate()
# ilines parsing of stdout
if self.step_name == "INTEGRATE":
if _init_parse:
print " Processing Mean #Strong ",
print "Estimated Overloaded"
print " Image Range refl./image ",
print "Mosaicity reflections\n"
table_int = []
_init_parse = False
if INTEGRATE_STEP_RE.search(lines):
print lines[44:50]+" - "+lines[56:-1],
nimages = int(lines[56:-1]) - int(lines[44:50]) + 1
elif INTEGRATE_STRONG_RE.search(lines):
print "%11.0f" % (float(lines.split()[0])/nimages),
elif INTEGRATE_MOSAICITY_RE.search(lines):
print " %11.3f" % float(lines.split()[3]),
print " %11d" % overloaded_spots
overloaded_spots = 0
hit = SCALE_RE.search(lines)
if hit:
table_int = hit.groups()
overloaded_spots += int(hit.groups()[3])
sm = STEPMARK.match(lines)
if sm:
self.step += 1
self.step_name = sm.group(2)
#if VERBOSE:
if verbose:
print "\n ---> Running job: %20s\n" % self.step_name
if lines:
self.outp.append(lines)
self.step += 1
self.step_name = "FINISHED"
if self.__cancelled:
result = -1
if rsave:
saveLastVersion(LP_names)
#if VERBOSE:
# print "End of XDS run"
os.chdir(self.init_dir)
return 1
#def run_idxref_optimize(self, number_of_test=4, verbose=False):
# "Run COLSPOT + DXREF with different spot search paramters"
# min_pixels = [4, 7, 10, 15]
# strong_pixel = [11, 9, 7, 5]
def spots_resolution_cutoff(self, res_cutoff, verbose=False):
"Read the SPOT.XDS file and filter spots using a resolution cutoff."
from math import atan2, sin
import shutil
#
spotsFileName = os.path.join(self.run_dir, "SPOT.XDS")
# Save the SPOT file and open a new one
shutil.copy(spotsFileName, spotsFileName+".bck")
spots = open(spotsFileName+".bck").readlines()
newspots = open(os.path.join(self.run_dir, SPOTFILENAME),"w")
# Get parameters for the resol calculation
xo, yo = self.inpParam["ORGX"], self.inpParam["ORGY"]
rx, ry = self.inpParam["QX"], self.inpParam["QY"]
D = self.inpParam["DETECTOR_DISTANCE"]
# the resolution calculation function
resolCal = lambda s, D, xo, yo, rx, ry: \
0.5/sin(atan2(((rx*(float(s[:10]) -xo))**2 +
(ry*(float(s[10:20])-yo))**2)**0.5,D)/2.)
filtredSpots = [s for s in spots \
if resolCal(s,D,xo,yo,rx,ry) >= res_cutoff]
#
newspots.writelines(filtredSpots)
ni, nf = len(spots), len(filtredSpots)
if verbose:
print ">> Selected spots with %.2f resolution cutoff:" % \
(res_cutoff),
print "%d / %d (%.1f%%)" % (nf, ni, nf*100./ni)
newspots.close()
def run_init(self):
"Runs the 2 first steps: XYCORR and INIT"
if XDS_INPUT:
self.inpParam.mix(xdsInp2Param(inp_str=XDS_INPUT))
#self.inpParam["TRUSTED_REGION"] = [0, 1.20]
self.inpParam["JOB"] = "XYCORR", "INIT"
i1, i2 = self.inpParam["DATA_RANGE"]
#if "slow" in self.mode:
# default is min of 3 degrees or 8 images.
dPhi = self.inpParam["OSCILLATION_RANGE"]
if BRUTE:
bkgr = i1, i1+40
elif SLOW or WEAK:
bkgr = i1, min(i2, min(i1+15, i1+int(7./dPhi)))
else:
bkgr = i1, min(i2, min(i1+7, i1+int(3./dPhi)))
self.inpParam["BACKGROUND_RANGE"] = bkgr
self.run(rsave=True)
res = XDSLogParser("INIT.LP", run_dir=self.run_dir, verbose=1)
return res.results
def run_colspot(self):
"Runs the COLSPOT step."
if XDS_INPUT:
self.inpParam.mix(xdsInp2Param(inp_str=XDS_INPUT))
self.inpParam["JOB"] = "COLSPOT",
self.inpParam["MAXIMUM_NUMBER_OF_PROCESSORS"] = 1
self.inpParam["MAXIMUM_NUMBER_OF_JOBS"] = NUMBER_OF_PROCESSORS
_trial = 0
# DEFAULT=3.2 deg., SLOW=6.4 deg., FAST=1.6 deg.
dPhi = self.inpParam["OSCILLATION_RANGE"]
frames_per_colspot_sequence = FRAMES_PER_COLSPOT_SEQUENCE
if "slow" in self.mode:
frames_per_colspot_sequence = int(round(6.4/dPhi, 0))
elif "fast" in self.mode:
frames_per_colspot_sequence = int(round(1.6/dPhi, 0))
elif BRUTE:
frames_per_colspot_sequence = int(round(60./dPhi, 0))
self.inpParam["VALUE_RANGE_FOR_TRUSTED_DETECTOR_PIXELS"] = \
5000, 30000
self.inpParam["STRONG_PIXEL"] = 4.5
else:
frames_per_colspot_sequence = int(round(3.2/dPhi, 0))
if "weak" in self.mode:
self.inpParam["STRONG_PIXEL"] = 4.5
self.inpParam["MINIMUM_NUMBER_OF_PIXELS_IN_A_SPOT"] -= 1
frames_per_colspot_sequence = int(round(12.8/dPhi, 0))
# Selecting spot range(s),
# self.inpParam["SPOT_RANGE"] is set to Collect.imageRanges by the
# xds export function XIO
cfo = XIO.Collect("foo_001.bar")
cfo.imageNumbers = cfo._ranges_to_sequence(self.inpParam["SPOT_RANGE"])
#
min_fn, max_fn = self.inpParam["DATA_RANGE"]
_fpcs = frames_per_colspot_sequence
_2fpcs = 1 + 2 * frames_per_colspot_sequence
if (max_fn - min_fn + 1) >= _2fpcs:
# use two range ex: i-i+2, f-2,f
# with f at maximum 90 degre distance
max_frame = min(max_fn, min_fn + int(89./dPhi + _fpcs))
spot_ranges = ((min_fn, min_fn + _fpcs - 1),
(max_frame - _fpcs + 1, max_frame))
else:
spot_ranges = (min_fn, min(min_fn + _2fpcs - 1, max_fn)),
# Restrict to matching collected images...
self.inpParam["SPOT_RANGE"] = cfo.lookup_imageRanges(False, \
mask_range=spot_ranges)
if BRUTE:
self.inpParam["SPOT_RANGE"] = (min_fn, int(89./dPhi + _fpcs)),
self.run(rsave=True)
_rs = " Image range(s) for spot collection: "
for sub_range in self.inpParam["SPOT_RANGE"]:
_rs += (" [%d - %d]," % tuple(sub_range))
print _rs[:-1] + "\n"
res = XDSLogParser("COLSPOT.LP", run_dir=self.run_dir, verbose=1)
while res.results["spot_number"] < MIN_SPOT_NUMBER and _trial < 4:
_trial += 1
min_pixels = int(self.inpParam["MINIMUM_NUMBER_OF_PIXELS_IN_A_SPOT"])
self.inpParam["MINIMUM_NUMBER_OF_PIXELS_IN_A_SPOT"] = max(min_pixels-1, 1)
self.inpParam["STRONG_PIXEL"] -= 1.
#self.inpParam["SPOT_MAXIMUM_CENTROID"] += 1
print "Insuficiant number of spot (minimum set to %d)." % \
MIN_SPOT_NUMBER
print "Recollecting spots. Trial number %d" % _trial
self.run(rsave=True)
res = XDSLogParser("COLSPOT.LP", run_dir=self.run_dir, verbose=1)
return res.results
def run_idxref(self, beam_center_search=False, ranking_mode="ZSCORE",
beam_center_swap=False):
"Runs the IDXREF step. Can try to search for better beam_center."
res = None
test_results = []
if XDS_INPUT:
self.inpParam.mix(xdsInp2Param(inp_str=XDS_INPUT))
self.inpParam["JOB"] = "IDXREF",
        # This prevents bad spots from being included.
saved_trusted_region = self.inpParam["TRUSTED_REGION"]
if saved_trusted_region[1] > 0.98:
self.inpParam["TRUSTED_REGION"] = [0, 0.98]
self.run(rsave=True)
try:
res = XDSLogParser("IDXREF.LP", run_dir=self.run_dir, verbose=1)
except XDSExecError, err:
print " !!! ERROR in", err[1], "\n"
if err[0] == "FATAL" and not (beam_center_swap or beam_center_search):
sys.exit()
except Exception, err:
print err
sys.exit()
qx, qy = self.inpParam["QX"], self.inpParam["QY"]
dist = self.inpParam["DETECTOR_DISTANCE"]
det_x = vec3(self.inpParam["DIRECTION_OF_DETECTOR_X-AXIS"])
det_y = vec3(self.inpParam["DIRECTION_OF_DETECTOR_Y-AXIS"])
det_z = det_x.cross(det_y)
det_params = dist, det_x, det_y, det_z, qx, qy
#RD["indexed_percentage"] < 70. or \
#if beam_center_search or RD["xy_spot_position_ESD"] > 2. or \
# RD["z_spot_position_ESD"] > 2*self.inpParam["OSCILLATION_RANGE"]:
if res:
test_results.append(res.results)
if beam_center_swap:
x, y = self.inpParam["ORGX"], self.inpParam["ORGY"]
mx, my = self.inpParam["NX"] - x, self.inpParam["NY"] - y
origins = [[y, x], [mx, my], [my, mx],
[ x, my], [y, mx], [mx, y], [my, x]]
for origin in origins:
self.inpParam["ORGX"] = origin[0]
self.inpParam["ORGY"] = origin[1]
print " Testing beam coordinate: (%.2fmm, %.2fmm) = " % \
(origin[0]*qx, origin[1]*qy),
print " %.1f, %.1f" % (origin[0], origin[1])
self.run(rsave=True, verbose=False)
try:
test_results.append(XDSLogParser("IDXREF.LP",
run_dir=self.run_dir,
verbose=0, raiseErrors=True).results)
except XDSExecError, err:
print "\t\tError in", err
if beam_center_search:
RD = res.results
print " Number of possible beam coordinates: %d" % \
len(RD["index_origin_table"])
maxTestOrigin = min(60, len(RD["index_origin_table"]))
origins = RD["index_origin_table"][:maxTestOrigin]
for origin in origins:
# We first need to calculate the beam_origin from the
# beam_coordinate and beam_vector given in the table
beam = vec3(origin[7:10])
beam_origin = get_beam_origin(origin[5:7], beam, det_params)
self.inpParam["ORGX"] = beam_origin[0]
self.inpParam["ORGY"] = beam_origin[1]
self.inpParam["INCIDENT_BEAM_DIRECTION"] = tuple(beam)
#print "DEBUG: %7.1f %7.1f - %7.1f %7.1f" % \
# (coorx, coory, self.inpParam["ORGX"], self.inpParam["ORGY"])
print " Testing beam coordinate: (%.2fmm, %.2fmm) = " % \
(origin[5]*qx, origin[6]*qy),
print " %.1f, %.1f" % (origin[5], origin[6])
self.run(rsave=True, verbose=False)
try:
test_results.append(XDSLogParser("IDXREF.LP",
run_dir=self.run_dir,
verbose=0, raiseErrors=True).results)
except XDSExecError, err:
print "\t\tError in", err
if beam_center_search or beam_center_swap:
print "\n"
# Need to lookup in the results for the beam-center giving
best_index_rank = rank_indexation(test_results, ranking_mode)
#for o in origins:
# print origins.index(o), o[:-3]
best_origin = origins[best_index_rank[ranking_mode]-1]
if VERBOSE:
print best_index_rank
#fmt = "%4i%4i%4i%7.2f%7.2f%8.1f%8.1f%9.5f%9.5f%9.5f"
print "best_index_rank", best_index_rank[ranking_mode]
#print "best_origin", fmt % tuple(best_origin)
if beam_center_search:
best_beam = vec3(best_origin[7:10])
best_beam_coor = best_origin[5:7]
best_beam_orig = get_beam_origin(best_beam_coor,
best_beam, det_params)
self.inpParam["ORGX"], self.inpParam["ORGY"] = best_beam_orig
self.inpParam["INCIDENT_BEAM_DIRECTION"] = tuple(best_beam)
else:
self.inpParam["ORGX"], self.inpParam["ORGY"] = best_origin
# Running again with updated best parameters
self.run(rsave=True)
res = XDSLogParser("IDXREF.LP", run_dir=self.run_dir)
# Set back the Trusted_region to larger values.
self.inpParam["TRUSTED_REGION"] = saved_trusted_region
return res.results
def check_fileout(self, fileout):
"Checking normal terminaison."
if not os.path.exists(os.path.join(self.run_dir, fileout)):
err = "Abnormal terminaison. Can't locate file: '%s'" % fileout
print err
raise Exception(err)
    def run_xplan(self, ridx=None):
        "Runs the strategy calculation (XPLAN)."
        if XDS_INPUT:
            self.inpParam.mix(xdsInp2Param(inp_str=XDS_INPUT))
self.inpParam["MAXIMUM_NUMBER_OF_PROCESSORS"] = NUMBER_OF_PROCESSORS
self.inpParam["MAXIMUM_NUMBER_OF_JOBS"] = 1
select_strategy(ridx, self.inpParam)
print "\n Starting strategy calculation."
self.inpParam["JOB"] = "IDXREF",
self.run(rsave=True)
res = XDSLogParser("IDXREF.LP", run_dir=self.run_dir, verbose=2)
# Select just the internal circle of the detector.
self.inpParam["JOB"] = "DEFPIX", "XPLAN"
self.run(rsave=True)
res = XDSLogParser("XPLAN.LP", run_dir=self.run_dir, verbose=1)
return res.results
def run_integrate(self, image_ranges):
"Running INTEGRATE."
if BRUTE:
self.inpParam["DELPHI"] = 20.
if XDS_INPUT:
self.inpParam.mix(xdsInp2Param(inp_str=XDS_INPUT))
self.inpParam["MAXIMUM_NUMBER_OF_PROCESSORS"] = NUMBER_OF_PROCESSORS
self.inpParam["MAXIMUM_NUMBER_OF_JOBS"] = 1
if ("slow" in self.mode) or BRUTE:
self.inpParam["NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA_BETA"] = 13
self.inpParam["NUMBER_OF_PROFILE_GRID_POINTS_ALONG_GAMMA"] = 13
"Runs the 2 first steps: DEFPIX and INTEGRATE"
self.inpParam["JOB"] = "DEFPIX",
self.run(rsave=True)
res = XDSLogParser("DEFPIX.LP", run_dir=self.run_dir, verbose=1)
if len(image_ranges) >= 1:
self.inpParam["JOB"] = "INTEGRATE",
self.run(rsave=True)
res = XDSLogParser("INTEGRATE.LP", run_dir=self.run_dir, verbose=1)
self.check_fileout("INTEGRATE.HKL")
#else:
# #print "\n Error in the INTEGRATE step:"
# print "\n Image range:", image_ranges
# print " Multi-sweep integration not yet implemanted. Sorry.\n"
# sys.exit(0)
return res.results
def run_pre_correct(self):
"""Runs a first pass of CORRECT to evaluate high_res and
point group.
"""
def _get_cell(_file):
_txt_file = open(_file,'r').readlines()
if "XPARM.XDS" in _txt_file[0]:
return map(float, (_txt_file[3]).split()[1:])
else:
return map(float, (_txt_file[7]).split()[1:])
if XDS_INPUT:
self.inpParam.mix(xdsInp2Param(inp_str=XDS_INPUT))
# run pointless on INTEGRATE.HKL
if not is_pointless_installed():
print "!! Warning. Pointless program not installed."
print " -> Skipping pointless analysis."
likely_spg = [["P1", 0],]
new_cell = False
else:
print " Pointless analysis on the INTEGRATE.HKL file"
print " "+44*"="
try:
likely_spg, new_cell = pointless(dir_name=self.run_dir,
hklinp="INTEGRATE.HKL")
except:
print " -> ERROR. While running Pointless. Skipped"
likely_spg = [["P1", 0],]
new_cell = False
self.inpParam["JOB"] = "CORRECT",
if not SPG:
# run first CORRECT in P1 with the cell used for integration.
# read the cell parameters from the XPARM.XDS file
self.inpParam["SPACE_GROUP_NUMBER"] = 1
try:
xparm_file = os.path.join(self.run_dir, "XPARM.XDS")
self.inpParam["UNIT_CELL_CONSTANTS"] = _get_cell(xparm_file)
except:
os.chdir("..")
self.inpParam["UNIT_CELL_CONSTANTS"] = _get_cell(xparm_file)
# run CORRECT
self.run(rsave=True)
res = XDSLogParser("CORRECT.LP", run_dir=self.run_dir, verbose=1)
L, H = self.inpParam["INCLUDE_RESOLUTION_RANGE"]
newH = res.results["HighResCutoff"]
if newH > H and not RES_HIGH:
H = newH
if SPG:
spg_choosen = SPG
else:
spg_choosen = likely_spg[0][1]
        # Re-order the pointless cell axes in case of an orthorhombic SPG.
        spgSplit = likely_spg[0][0].split()
        # If the cell comes from pointless, it needs reordering
        # in orthorhombic cases.
if new_cell:
a, b, c, A, B, G = new_cell
if spg_choosen == 18:
if spgSplit[1] == "2":
new_cell = [b, c, a, A, B, G]
elif spgSplit[2] == "2":
new_cell = [a, c, b, A, B, G]
elif spg_choosen == 17:
if spgSplit[1] == "21":
new_cell = [b, c, a, A, B, G]
elif spgSplit[2] == "21":
new_cell = [a, c, b, A, B, G]
else:
new_cell = self.inpParam["UNIT_CELL_CONSTANTS"]
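        # Illustrative case: if Pointless reports "P 2 21 21" for space group
        # #18, spgSplit[1] == "2" and the permutation [b, c, a] moves the
        # unique two-fold axis onto c, i.e. onto the reference setting
        # P 21 21 2.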
lattice = Lattice(new_cell, symmetry=spg_choosen)
lattice.idealize()
self.inpParam["UNIT_CELL_CONSTANTS"] = lattice.cell
#reidx_mat = likely_spg[0][-1]
#new_cell = new_reidx_cell(self.inpParam["UNIT_CELL_CONSTANTS"],
return (L, H), spg_choosen
def run_correct(self, res_cut=(1000, 0), spg_num=0):
"Runs the last step: CORRECT"
if res_cut[1]:
print " -> New high resolution limit: %.2f Å" % res_cut[1]
self.inpParam["INCLUDE_RESOLUTION_RANGE"] = res_cut
if spg_num:
print " -> Using spacegroup: %s #%d" % \
(SPGlib[spg_num][1], spg_num)
lattice = Lattice(self.inpParam["UNIT_CELL_CONSTANTS"],
symmetry=spg_num)
lattice.idealize()
self.inpParam["UNIT_CELL_CONSTANTS"] = lattice.cell
self.inpParam["JOB"] = "CORRECT",
self.inpParam["SPACE_GROUP_NUMBER"] = spg_num
self.run(rsave=True)
res = XDSLogParser("CORRECT.LP", run_dir=self.run_dir, verbose=1)
s = resum_scaling(lpf=os.path.join(self.run_dir,"CORRECT.LP"))
if not s:
print "\nERROR while running CORRECT"
sys.exit()
s["image_start"], s["image_last"] = self.inpParam["DATA_RANGE"]
s["name"] = os.path.basename(self.inpParam["NAME_TEMPLATE_OF_DATA_FRAMES"])
print s.last_table
print FMT_FINAL_STAT % vars(s)
if s.absent:
print FMT_ABSENCES % vars(s)
if self.inpParam["FRIEDEL'S_LAW"] == "FALSE":
print FMT_ANOMAL % vars(s)
def run_scaleLaueGroup(self):
"""Runs the CORRECT step with reindexation for all the selected Laue
group
1 - Get the selected bravais Lattices from IDXREF
2 - Filtrate the equivalents (same geometry and reindexation)
3 - For each one of the selected lattices:
in a seperated dir,
for all the laue symmetry compatible with
the bravais lattice geometry run the CORRECT scaling
4 - Rank all the scaling from the parsing of all the CORRECT.LP
"""
return 1 #res.resutls
def rank_indexation(indexations, ranking_mode="ISCORE"):
"Rank indexations obtained using different beam-center coordinates."
best_beam_center = None
rank_items = ["indexed_percentage", "xy_spot_position_ESD",
"z_spot_position_ESD", "quality_contrast","i_score"]
rank_table = {}
for items in rank_items:
rank_table[items] = []
prp = " Indexed spots: %(indexed_percentage).1f%%"
prp += " (%(indexed_spots)d/%(total_spots)d)\n"
prp += " Spot prediction ESD: %(xy_spot_position_ESD).2f "
prp += "pixels and %(z_spot_position_ESD).2f degrees"
nind = 0
i_score = []
for indexation in indexations:
nind += 1
print " Test indexation number: %d" % nind
print prp % indexation
#
origin_t = indexation["index_origin_table"]
quality_contrast = origin_t[1][3] - origin_t[0][3]
indexation["quality_contrast"] = quality_contrast
indexation["i_score"] = indexation["indexed_percentage"]/(
2*indexation["xy_spot_position_ESD"] +
indexation["z_spot_position_ESD"]/ \
indexation["oscillation_range"])
i_score.append(indexation["i_score"])
#
for items in rank_items:
rank_table[items].append(indexation[items])
#
print " Contrast in the quality of indexation: ", quality_contrast
pp4, pp6 = "\n\tQuality: ", "\n\tShift (pixels):"
pp7, pp8 = "\n\tBeam X (pixel):", "\n\tBeam Y (pixel):"
pp9 = "\n\tIndex Origin: "
for i in range(min(len(origin_t), 5)):
pp4 += "%9.2f," % (origin_t[i][3])
pp6 += "%9.1f," % (origin_t[i][4])
pp7 += "%9.1f," % (origin_t[i][5])
pp8 += "%9.1f," % (origin_t[i][6])
pp9 += "%3d%3d%3d," % tuple(origin_t[i][0:3])
#
print pp4[:-1] + pp6[:-1] + pp7[:-1] + pp8[:-1] + pp9[:-1] + "\n"
#
z_table = {}
print "%22s: " % "Test number", " %3d"*nind % tuple(range(1, nind+1))
for item in rank_table:
isorted = rank_table[item][:]
if item in ["indexed_percentage", "quality_contrast", "i_score"]:
reverse = True
else:
reverse = False
isorted.sort(reverse=reverse)
#
rank = [isorted.index(i) + 1 for i in rank_table[item]]
print "%22s: " % item,
print " %3d"*len(rank) % tuple(rank)
z_table[item] = rank
#
z_score = []
for idq in range(len(z_table["quality_contrast"])):
z_score.append(z_table["quality_contrast"][idq] +
z_table["xy_spot_position_ESD"][idq] +
z_table["z_spot_position_ESD"][idq])
print "%22s: " % "z_score",
print " %3d"*len(z_score) % tuple(z_score)
z_best_index = z_score.index(min(z_score))
i_best_index = i_score.index(max(i_score))
best_beam_center = {}
best_beam_center["ISCORE"] = \
indexations[i_best_index]["index_origin_table"][0][5:7]
best_beam_center["ZSCORE"] = \
indexations[z_best_index]["index_origin_table"][0][5:7]
if ranking_mode == "ISCORE":
zflag, iflag = " ", "***"
else:
iflag, zflag = " ", "***"
_best = best_beam_center[ranking_mode]
fmt1 = "%s Best %s_score rank: %3d for Solution #%-3d"
fmt2 = " beamx=%7.1f beamy=%7.1f"
print
print fmt1 % (iflag, "I", 1, i_best_index+1),
print fmt2 % tuple(best_beam_center["ISCORE"])
print fmt1 % (zflag, "Z", min(z_score), z_best_index+1),
print fmt2 % tuple(best_beam_center["ZSCORE"])
return {"ISCORE":i_best_index, "ZSCORE": z_best_index}
def get_beam_origin(beam_coor, beam_vec, det_parameters):
"Calculate beam_origin from beam_coordinate."
dist, det_x, det_y, det_z, qx, qy = det_parameters
beamOx, beamOy, beamOz = beam_coor[0]*qx, beam_coor[1]*qy, beam_vec*det_z
return (beamOx - beam_vec*det_x*dist/beamOz)/qx, \
(beamOy - beam_vec*det_y*dist/beamOz)/qy
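# Sanity check (illustrative): when the incident beam is normal to the
# detector, beam_vec*det_x and beam_vec*det_y vanish, so the function returns
# the input beam coordinate unchanged, i.e. ORGX/ORGY equal the tabulated
# values.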
def new_reidx_cell(init_cell, reidx_mat):
"""Applies the reindexing card to initial cell parameters and
return a new cell"""
UB = BusingLevy(reciprocal(init_cell))
REIDX = mat3(reidx_mat)
return reciprocal(UB_to_cellParam(REIDX*UB))
#def resolution2trustedRegion(high_res, dist, beam_center, pixel_size, npixel):
# Usefull for the IDXREF stage. One can use the TRUSTED_REGION keyword to
# cut unwanted spots at low or high resolution.
# different mode can be used. Internal, external or midle.
# Internal: set the smallest RMAX radius,
# External: set the biggest RMAX radius and Midle...
#def write_autoPar(adpPar):
# ""
# link_name_to_image = "img"
# newdir = adpPar["prefix"] + "adp_process"
# #
# if not os.path.exists(newdir):
# try: os.mkdir(newdir)
# except:
# raise XIO.XIOError, \
# "STOP! Can't creat adp working directory:", newdir
# if os.path.isdir(newdir):
# img_dir = os.path.abspath(adpPar["img_dir"])
# os.chdir(newdir)
# if not os.path.exists(link_name_to_image) or \
# os.path.islink(link_name_to_image):
# os.system("ln -sf %s %s" % (img_dir, link_name_to_image))
# adpPar["img_dir"] = link_name_to_image
# #
# keys = adpPar.keys()
# keys.sort()
# paramStr = "".join(["%s = %s\n" % (k, adpPar[k]) for k in keys])
# opWriteCl("auto.par", paramStr)
# os.chdir("..")
def parse_spacegroup(spginp):
"Try to interpret spg input string from command line."
spg_found = False
try:
spg_int = int(spginp)
spg_found = True
except ValueError:
#spg_int = 0
spginp_up = spginp.upper()
for spgn in SPGlib:
if spginp_up in SPGlib[spgn]:
spg_int = spgn
spg_found = True
break
if spg_found:
if spg_int == 0:
spg_int = 1
spg_info = SPGlib[spg_int]
spg_str = " Imposed Space group: %s, number %d" % \
(spg_info[1], spg_int)
else:
raise Exception, "\nERROR: Unrecognised space group: %s\n" % spginp
return spg_int, spg_info, spg_str
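# Examples (assuming the SPGlib table lists the symbol): parse_spacegroup("18")
# and parse_spacegroup("P21212") should both resolve to space group number 18,
# while an unrecognised string raises an exception.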
def select_strategy(idxref_results, xds_par):
"Interactive session to select strategy parameters."
sel_spgn = SPG #xds_par["SPACE_GROUP_NUMBER"]
sel_ano = xds_par["FRIEDEL'S_LAW"]
#print xds_par["UNIT_CELL_CONSTANTS"]
valid_inp = False
bravais_to_spgs = get_BravaisToSpgs()
# Select LATTICE
while not valid_inp:
def_sel = 1
if sel_spgn != 0:
# choose the lattice solution according to the selected spg.
i = 0
for LAT in idxref_results["lattices_table"]:
if LAT.fit <= LATTICE_GEOMETRIC_FIT_CUTOFF:
i += 1
if sel_spgn in bravais_to_spgs[LAT.Bravais_type]:
def_sel = i
selection = raw_input("\n Select a solution number [%d]: " % def_sel)
# If the selection is not compatible with the spg, set not valid
_sel = selection.split()
selnum = 1
try:
if len(_sel) == 1:
selnum = int(_sel[0])
valid_inp = True
elif len(_sel) == 0:
selnum = def_sel
valid_inp = True
else:
raise Exception, "Invalid selection input."
except Exception, err:
print "\n ERROR. ", err
sel_lat = idxref_results["lattices_table"][selnum-1]
if sel_spgn == 0:
sel_spgn = sel_lat.symmetry_num
valid_inp = False
# Select SPACEGROUP
print " Possible spacegroup for this lattice are:\n"
for spgsymb in bravais_to_spgs[sel_lat.Bravais_type]:
print " %15s, number: %3d" % (SPGlib[spgsymb][1], spgsymb)
while not valid_inp:
selection = raw_input("\n Select the spacegroup [%s, %d]: "
% (SPGlib[sel_spgn][1], sel_spgn))
_sel = selection.split()
try:
if len(_sel) == 1:
sel_spgn, _spg_info, _spg_str = parse_spacegroup(_sel[0])
# selSpgS = _spg_info[1]
valid_inp = True
elif len(_sel) == 0:
valid_inp = True
else:
raise Exception, "Invalid selection input."
if sel_spgn not in bravais_to_spgs[sel_lat.Bravais_type]:
valid_inp = False
msg = "Inconsistant combinaison of Bravais lattice"
msg += " and spacegroup.\n For this Bravais Lattice"
msg += " (%s), spacegroup should be one of these:\n\n" % \
(sel_lat.Bravais_type)
for spgsymb in bravais_to_spgs[sel_lat.Bravais_type]:
msg += " %15s, number: %3d\n" % \
(SPGlib[spgsymb][1], spgsymb)
raise Exception, msg
except Exception, err:
print "\n ERROR. ", err
valid_inp = False
# Select ANOMALOUS
while not valid_inp:
if sel_ano == "TRUE":
txt3 = "N/y"
else:
txt3 = "Y/n"
selection = raw_input(" Anomalous [%s]: " % txt3)
try:
_ans = selection.strip()
if _ans == "":
valid_inp = True
elif _ans[0] in "Yy":
xds_par["FRIEDEL'S_LAW"] = "FALSE"
valid_inp = True
elif _ans[0] in "Nn":
xds_par["FRIEDEL'S_LAW"] = "TRUE"
valid_inp = True
else:
raise Exception, "Invalid answer [Y/N]."
except Exception, err:
print "\n ERROR. ", err
print "\n Selected cell paramters: ", sel_lat
if sel_spgn > 2:
sel_lat.idealize()
print " Idealized cell parameters: ", sel_lat.prt()
xds_par["UNIT_CELL_CONSTANTS"] = sel_lat.prt()
xds_par["SPACE_GROUP_NUMBER"] = sel_spgn
return xds_par
if __name__ == "__main__":
import getopt
short_opt = "123456aAbBc:d:f:F:i:IL:O:M:n:p:s:Sr:R:x:y:vw:WSF"
long_opt = ["anomal",
"Anomal",
"beam-x=",
"beam-y=",
"ice",
"spg=",
"strategy",
"high-resolution=",
"low-resolution=",
"last-frame",
"first-frame",
"cell=",
"distance",
"reference=",
"oscillation",
"orientation-matrix=",
"nthreads=",
"project",
"beam-center-optimize-i",
"beam-center-optimize-z",
"beam-center-swap",
"xds-input=",
"verbose",
"wavelength=",
"slow", "weak", "brute"]
if len(sys.argv) == 1:
print USAGE
sys.exit(2)
try:
opts, inputf = getopt.getopt(sys.argv[1:], short_opt, long_opt)
except getopt.GetoptError:
# print help information and exit:
print USAGE
sys.exit(2)
NUMBER_OF_PROCESSORS = min(32, get_number_of_processors())
    # Use a maximum of 32 processors per job. Change it if you want another limit.
WARNING = ""
VERBOSE = False
DEBUG = False
WEAK = False
ANOMAL = False
ICE = False
STRICT_CORR = False
BEAM_X = 0
BEAM_Y = 0
SPG = 0
STRATEGY = False
RES_HIGH = 0
DISTANCE = 0
OSCILLATION = 0
ORIENTATION_MATRIX = False
PROJECT = ""
WAVELENGTH = 0
RES_LOW = 50
FIRST_FRAME = 0
LAST_FRAME = 0
REFERENCE = False
_beam_center_optimize = False
_beam_center_ranking = "ZSCORE"
_beam_center_swap = False
CELL = ""
XDS_INPUT = ""
_beam_in_mm = False
SLOW = False
FAST = False
BRUTE = False
STEP = 1
for o, a in opts:
if o == "-v":
VERBOSE = True
if o in ("-a", "--anomal"):
ANOMAL = True
if o in ("-A", "--Anomal"):
ANOMAL = True
STRICT_CORR = True
if o in ("-I", "--ici"):
ICE = True
if o[1] in "123456":
STEP = int(o[1])
if o in ("-s", "--spg"):
SPG, _spg_info, _spg_str = parse_spacegroup(a)
if o in ("-i", "--xds-input"):
XDS_INPUT = a
if o in ("-c", "--cell"):
CELL = a
if o in ("-d", "--distance"):
DISTANCE = float(a)
if o in ("-f", "--reference"):
if os.path.isfile(a):
REFERENCE = str(a)
else:
print "\n ERROR: Can't open reference file %s." % a
print " STOP!\n"
sys.exit()
if o in ("-F", "--first-frame"):
FIRST_FRAME = int(a)
if o in ("-L", "--last-frame"):
LAST_FRAME = int(a)
if o in ("-O", "--oscillation"):
OSCILLATION = float(a)
if o in ("-M", "--orientation-matrix"):
if os.path.isfile(a):
ORIENTATION_MATRIX = str(a)
else:
print "\n ERROR: Can't open orientation matrix file %s." % a
print " STOP!\n"
sys.exit()
if o in ("-n","--nthreads"):
NUMBER_OF_PROCESSORS = int(a)
if o in ("-p", "--project"):
PROJECT = str(a)
if o in ("-S", "--strategy"):
STRATEGY = True
if o in ("-w", "--wavelength"):
WAVELENGTH = float(a)
if o in ("-r", "--high-resolution"):
RES_HIGH = float(a)
if o in ("-R", "--low-resolution"):
RES_LOW = float(a)
if o in ("-x", "--beam_x"):
if "mm" in a:
_beam_in_mm = True
a = a.replace("mm","")
BEAM_X = float(a)
if o in ("-y", "--beam_y"):
if "mm" in a:
_beam_in_mm = True
a = a.replace("mm","")
BEAM_Y = float(a)
if o in ("-b", "--beam-center-optimize-i"):
_beam_center_optimize = True
_beam_center_ranking = "ISCORE"
if o in ("-B", "--beam-center-optimize-z"):
_beam_center_optimize = True
_beam_center_ranking = "ZSCORE"
if o in ("-W", "--beam-center-swap"):
_beam_center_swap = True
if o in ("--slow"):
SLOW = True
if o in ("--brute"):
BRUTE = True
if o in ("--weak"):
WEAK = True
if o in ("-h", "--help"):
print USAGE
sys.exit()
if not inputf:
print "\nFATAL ERROR. No image file specified.\n"
sys.exit(2)
elif not os.path.isfile(inputf[0]):
print "\nFATAL ERROR. Image file %s not found.\n" % inputf[0]
sys.exit(2)
else:
# TODO cycle over input_file with try/except to avoid XIOError
_coll = XIO.Collect(inputf[0])
if not PROJECT:
newDir = "xds_process_" + _coll.prefix
else:
newDir = "xds_process_" + PROJECT
#
_linkimages = False
if not _coll.isContinuous(inputf):
print "Discontinous naming scheme, creating ling."
_linkimages = True
link_dir_name = "img_links"
inputf = make_xds_image_links(inputf,
os.path.join(newDir,link_dir_name),
"image")
#collect.setDirectory(link_dir_name)
#collect.prefix = prefix
try:
collect = XIO.Collect(inputf)
collect.interpretImage()
collect.image.info()
collect.lookup_imageRanges(forceCheck=False)
except XIO.XIOError, _mess:
print _mess
print "\nError: Can't access to file(s) %s.\nStop." % inputf
sys.exit(2)
imgDir = collect.directory
newPar = collect.export("xds")
#import pprint
#pprint.pprint(newPar)
# Update some default values defined by XIO.export_xds:
# In case no beam origin is defined, take the detector center.
if newPar["ORGX"] == 0:
newPar["ORGX"] = newPar["NX"]/2.
if newPar["ORGY"] == 0:
newPar["ORGY"] = newPar["NY"]/2.
# This is to correct the starting angle in case first image is not 1.
newPar["STARTING_ANGLE"] = newPar["STARTING_ANGLE"] - \
newPar["OSCILLATION_RANGE"]*(newPar["DATA_RANGE"][0] - 1)
newPar["STRONG_PIXEL"] = 6
newPar["RESOLUTION_SHELLS"] = 15.0, 7.0, newPar["_HIGH_RESOL_LIMIT"]
newPar["TEST_RESOLUTION_RANGE"] = 20, newPar["_HIGH_RESOL_LIMIT"]+1.5
newrun = XDS()
if _beam_in_mm:
BEAM_X = BEAM_X / newPar["QX"]
BEAM_Y = BEAM_Y / newPar["QY"]
if ANOMAL:
newPar["FRIEDEL'S_LAW"] = "FALSE"
else:
newPar["FRIEDEL'S_LAW"] = "TRUE"
if STRICT_CORR:
newPar["STRICT_ABSORPTION_CORRECTION"] = "TRUE"
if BEAM_X:
newPar["ORGX"] = BEAM_X
if BEAM_Y:
newPar["ORGY"] = BEAM_Y
if FIRST_FRAME:
newPar["DATA_RANGE"][0] = FIRST_FRAME
if LAST_FRAME:
newPar["DATA_RANGE"][1] = LAST_FRAME
if ICE:
newPar.update(EXCLUDE_ICE_RING)
if SPG and CELL:
newPar["SPACE_GROUP_NUMBER"] = SPG
newPar["UNIT_CELL_CONSTANTS"] = CELL
elif SPG and not CELL:
WARNING = " WARNING: Spacegroup is defined but not cell."
WARNING += " Waiting for indexation for setting cell."
elif CELL and not SPG:
WARNING = " WARNING: Cell is defined but not spacegroup,"
WARNING += " setting spacegroup to P1."
newPar["SPACE_GROUP_NUMBER"] = 1
newPar["UNIT_CELL_CONSTANTS"] = CELL
if DISTANCE:
newPar["DETECTOR_DISTANCE"] = DISTANCE
if REFERENCE:
if REFERENCE[0] == "/" or REFERENCE[0] == "~":
newPar["REFERENCE_DATA_SET"] = REFERENCE
else:
newPar["REFERENCE_DATA_SET"] = "../"+REFERENCE
if OSCILLATION:
newPar["OSCILLATION_RANGE"] = OSCILLATION
if ORIENTATION_MATRIX:
try:
_spg, cell, omat = _get_omatrix(ORIENTATION_MATRIX)
SPG, _spg_info, _spg_str = parse_spacegroup(_spg)
newPar["SPACE_GROUP_NUMBER"] = SPG
newPar["UNIT_CELL_CONSTANTS"] = cell
newPar["UNIT_CELL_A_AXIS"] = omat[0]
newPar["UNIT_CELL_B_AXIS"] = omat[1]
newPar["UNIT_CELL_C_AXIS"] = omat[2]
except:
print "\nERROR Can't import orientation matrix from: %s" % \
ORIENTATION_MATRIX
sys.exit()
if WAVELENGTH:
newPar["X_RAY_WAVELENGTH"] = WAVELENGTH
#if XDS_INPUT:
# newPar.update(xdsInp2Param(inp_str=XDS_INPUT))
if "_HIGH_RESOL_LIMIT" in newPar:
newPar["INCLUDE_RESOLUTION_RANGE"] = RES_LOW, \
newPar["_HIGH_RESOL_LIMIT"]
if RES_HIGH:
newPar["INCLUDE_RESOLUTION_RANGE"] = RES_LOW, RES_HIGH
if _linkimages:
collect.setDirectory(link_dir_name)
else:
collect.setDirectory(newrun.link_name_to_image)
newPar["NAME_TEMPLATE_OF_DATA_FRAMES"] = collect.xdsTemplate
if "SPECIFIC_KEYWORDS" in newPar.keys():
specific_keys = newPar["SPECIFIC_KEYWORDS"]
del newPar["SPECIFIC_KEYWORDS"]
else:
specific_keys = ""
newrun.inpParam.mix(xdsInp2Param(inp_str=xdsinp_base+specific_keys))
newrun.inpParam.mix(newPar)
newrun.set_collect_dir(os.path.abspath(imgDir))
newrun.run_dir = newDir
#print newPar
# Setting DELPHI as a fct of OSCILLATION_RANGE, MODE and NPROC
_MIN_DELPHI = 5. # in degree
_DELPHI = NUMBER_OF_PROCESSORS * newrun.inpParam["OSCILLATION_RANGE"]
while _DELPHI < _MIN_DELPHI:
_DELPHI *= 2
newrun.inpParam["DELPHI"] = _DELPHI
if SLOW:
newrun.inpParam["DELPHI"] *= 2
newrun.mode.append("slow")
if WEAK:
newrun.mode.append("weak")
#print "XDS env Variable= %s" % XDS_HOME
print "\n Simplified XDS Processing"
print "\n xds version: %18s" % XDSLogParser().get_xds_version()
print " xdsme version: %18s" % __version__
print FMT_HELLO % vars(newrun.inpParam)
print " Selected resolution range: %.2f - %.2f A" % \
newPar["INCLUDE_RESOLUTION_RANGE"]
print " Number of processors available: %3d\n" % NUMBER_OF_PROCESSORS
if WARNING:
print WARNING
if SPG:
print _spg_str
#newrun.run()
R1 = R2 = R3 = R4 = R5 = None
if STEP > 1:
print "\n Starting at step: %d (%s)\n" % (STEP, JOB_STEPS[STEP-1])
if STEP <= 1:
R1 = newrun.run_init()
if STEP <= 2:
R2 = newrun.run_colspot()
if STEP <= 3:
if RES_HIGH:
print " Applying a SPOT RESOLUTION CUTOFF: %.2f A" % RES_HIGH
# July 2013: spot resolution cutoff is now included in xds
#newrun.spots_resolution_cutoff(RES_HIGH, verbose=True)
R3 = newrun.run_idxref(_beam_center_optimize,
_beam_center_ranking,
_beam_center_swap)
if R3:
i = 0
_selected_cell = []
print " TABLE OF POSSIBLE LATTICES:\n"
print " num Symm quality mult a b c",
print " alpha beta gamma"
print " "+"-"*67
fmt_lat = "%3d) %5s %7.2f %4d %s"
for LAT in R3["lattices_table"]:
if LAT.fit <= LATTICE_GEOMETRIC_FIT_CUTOFF:
i += 1
print fmt_lat % (i, LAT.symmetry_str1,
LAT.fit, LAT.multiplicity, LAT)
# If not multiple possible solutions (like P2, or P1...)try to define
# unitcell from spacegroup.
#if _spg and not _cell:
if (len(collect.imageRanges) > 1) or STRATEGY:
newrun.run_xplan(ridx=R3)
if STEP <= 4:
R4 = newrun.run_integrate(collect.imageRanges)
if STEP <= 5:
(h, l), spgn = newrun.run_pre_correct()
newrun.run_correct((h, l), spgn)
|
jsburg/xdsme
|
XDS/XDS.py
|
Python
|
bsd-3-clause
| 73,694
|
[
"CRYSTAL"
] |
eb60c2274781c28ac3acf2c1f53099231f672e4da498ce1086bf4288d07d0aaf
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
urljoin,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id: An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all optional, but at least one of text or html must be present):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage; if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that is ongoing, as opposed to a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike the mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists that appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
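For illustration only, a minimal single-video result could look like this
(the URL and format values below are made up):
    {
        '_type': 'video',
        'id': '4234987',
        'title': 'Dancing naked mole rats',
        'formats': [{
            'format_id': 'http-720p',
            'url': 'https://example.com/videos/4234987-720p.mp4',
            'ext': 'mp4',
            'width': 1280,
            'height': 720,
        }],
    }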
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor,
though it won't disable explicit geo restriction bypass based on
the country code provided with geo_bypass_country. (experimental)
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled. (experimental)
NB: both these geo attributes are experimental and may change in future
or be completely removed.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
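# Illustration (hypothetical subclass): with
#   _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
# suitable('https://example.com/watch/4242') returns True and
# _match_id('https://example.com/watch/4242') returns '4242'.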
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass(self._GEO_COUNTRIES)
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, countries):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES.
You may also manually call it from an extractor's code if geo countries
information is not available beforehand (e.g. obtained during
extraction) or for some other reason.
"""
if not self._x_forwarded_for_ip:
country_code = self._downloader.params.get('geo_bypass_country', None)
# If there is no explicit country for geo bypass specified and
# the extractor is known to be geo restricted let's fake IP
# as X-Forwarded-For right away.
if (not country_code and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
countries):
country_code = random.choice(countries)
if country_code:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._downloader.params.get('verbose', False):
self._downloader.to_stdout(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None) and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
not self._x_forwarded_for_ip and
countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
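# Illustration (hypothetical endpoint): a subclass's _real_extract might do
#   data = self._download_json(
#       'https://example.com/api/videos/%s' % video_id, video_id,
#       note='Downloading video metadata')
#   title = data['title']
# With fatal=False, a failed download or parse yields None instead of raising.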
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
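# Illustration (hypothetical IDs and ie_key): a playlist extractor could return
#   self.playlist_result(
#       [self.url_result('https://example.com/watch/%s' % vid, ie='Example')
#        for vid in ('101', '102')],
#       playlist_id='favorites', playlist_title='My favorites')
# which yields a '_type': 'playlist' dict whose entries are '_type': 'url'
# dicts that will be resolved by the matching extractor later.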
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single pattern or a
list of patterns, returning the first matching group.
In case of failure, return a default value, report a warning, or raise a
RegexNotFoundError, depending on default and fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
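# Illustration (hypothetical pattern and webpage):
#   self._search_regex(r'data-video-id="([0-9]+)"', webpage, 'video id')
# returns the first captured group (e.g. '12345' for data-video-id="12345"),
# returns `default` when one is supplied and nothing matches, and otherwise
# raises RegexNotFoundError (or only warns when fatal=False).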
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in the params dictionary. If no such credentials
are available, look in the netrc file using the netrc_machine or
_NETRC_MACHINE value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verification;
currently this just uses the command line option.
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
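# Illustration (hypothetical markup): given
#   <meta property="og:title" content="Dancing naked mole rats">
# _og_search_title(html) would return 'Dancing naked mole rats', and
# _html_search_meta('description', html) would likewise return the content
# attribute of a matching <meta name="description" ...> tag, if any.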
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
for e in json_ld:
if e.get('@context') == 'http://schema.org':
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(e.get('name')),
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
info.update({
'url': e.get('contentUrl'),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
})
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not attrs:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
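# Illustration (hypothetical form markup): for
#   <input type="hidden" name="csrf_token" value="abc123">
#   <input type="text" name="username">
# _hidden_inputs(html) would return {'csrf_token': 'abc123'}; the visible
# text input is skipped because its type is neither 'hidden' nor 'submit'.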
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
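# Illustration (hypothetical URL): _proto_relative_url('//cdn.example.com/v.mp4')
# yields 'https://cdn.example.com/v.mp4' (or 'http://...' when the user passed
# --prefer-insecure); URLs that already carry a scheme are returned unchanged.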
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself an f4m manifest, do the recursive extraction,
# since bitrates in the parent manifest (this one) and the media_url
# manifest may differ, leading to an inability to resolve the format by
# the requested bitrate in the f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# We should try extracting formats only from master playlists [1], i.e.
# playlists that describe available qualities. On the other hand media
# playlists [2] should be returned as is since they contain just the media
# without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 2] master
# playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
# and MUST NOT appear in master playlist thus we can clearly detect media
# playlist with this criterion.
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
audio_in_video_stream = {}
last_info = {}
last_media = {}
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
media = parse_m3u8_attributes(line)
media_type = media.get('TYPE')
if media_type in ('VIDEO', 'AUDIO'):
group_id = media.get('GROUP-ID')
media_url = media.get('URI')
if media_url:
format_id = []
for v in (group_id, media.get('NAME')):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
if group_id and not audio_in_video_stream.get(group_id):
audio_in_video_stream[group_id] = False
formats.append(f)
else:
# When there is no URI in EXT-X-MEDIA let this tag's
# data be used by regular URI lines below
last_media = media
if media_type == 'AUDIO' and group_id:
audio_in_video_stream[group_id] = True
elif line.startswith('#') or not line.strip():
continue
else:
tbr = int_or_none(last_info.get('AVERAGE-BANDWIDTH') or last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
# Although the specification does not mention the NAME attribute for
# EXT-X-STREAM-INF, it may still sometimes be present
stream_name = last_info.get('NAME') or last_media.get('NAME')
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': manifest_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_info.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_info.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
f.update(parse_codecs(last_info.get('CODECS')))
if audio_in_video_stream.get(last_info.get('AUDIO')) is False and f['vcodec'] != 'none':
# TODO: update acodec for audio only formats with the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
last_info = {}
last_media = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
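# Illustration: _xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL') expands
# to './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta',
# i.e. every non-empty, non-'.' path component gets the Clark-notation
# namespace prefix that ElementTree expects.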
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We will only extract what is
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
t = representation_ms_info[template_name]
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
t = t.replace('$$', '$')  # assign the result; a bare replace() call would be a no-op
return t
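# Illustration (hypothetical template and representation_id='abc'):
# prepare_template('media', ('Number', 'Bandwidth', 'Time')) would turn
# 'seg-$RepresentationID$-$Number%05d$.m4s' into 'seg-abc-%(Number)05d.m4s',
# which can later be filled in with ordinary Python %-formatting using the
# segment number, time or bandwidth.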
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/rg3/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
'url': media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
'url': segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
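# A hypothetical illustration (not from a real manifest): with timescale 90000
# and a single S element {'t': 0, 'd': 90000, 'r': 2}, the loop above emits
# three fragments of 1.0 s each, substituting Time values 0, 90000 and 180000
# (and consecutive Number values) into the media template.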
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
fragments.append({
'url': representation_ms_info['segment_urls'][segment_index],
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({'url': initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
for fragment in f['fragments']:
fragment['url'] = urljoin(base_url, fragment['url'])
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
res = self._download_webpage_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal)
if res is False:
return []
ism, urlh = res
return self._parse_ism_formats(
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC')
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
width = int_or_none(track.get('MaxWidth'))
height = int_or_none(track.get('MaxHeight'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
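# A hypothetical illustration (not from a real manifest): with a stream
# timescale of 10000000 and a single c element {'t': 0, 'd': 20000000, 'r': 2},
# the loop above emits two fragments of 2.0 s each, substituting start_time
# values 0 and 20000000 into the track URL pattern.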
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
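# A hypothetical illustration: parse_content_type('video/mp4; codecs="avc1.64001f, mp4a.40.2"')
# extracts the mimetype 'video/mp4' and the codecs string, returning a dict that
# includes 'ext': 'mp4' plus whatever vcodec/acodec values parse_codecs derives
# from 'avc1.64001f, mp4a.40.2'.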
def _media_formats(src, cur_media_type):
full_url = absolute_url(src)
ext = determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
media_tags = [(media_tag, media_type, '')
for media_tag, media_type
in re.findall(r'(?s)(<(video|audio)[^>]*/>)', webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/rg3/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>video|audio)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
for media_tag, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
is_plain_url, formats = _media_formats(src, media_type)
if is_plain_url:
f = parse_content_type(source_attributes.get('type'))
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
url_base = self._search_regex(r'(?:https?|rtmp|rtsp)(://[^?]+)', url, 'format url')
http_base_url = 'http' + url_base
formats = []
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
http_base_url + '/manifest.mpd',
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
http_base_url + '/jwplayer.smil',
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': protocol + url_base,
'format_id': protocol,
'protocol': protocol,
})
return formats
@staticmethod
def _find_jwplayer_data(webpage):
mobj = re.search(
r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
return mobj.group('options')
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._parse_json(
self._find_jwplayer_data(webpage), video_id,
transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = []
for source in video_data['sources']:
source_url = self._proto_relative_url(source['file'])
if base_url:
source_url = compat_urlparse.urljoin(base_url, source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, this_video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, this_video_id, mpd_id=mpd_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like 1080p.
height = int_or_none(self._search_regex(
r'^(\d{3,})[pP]$', source.get('label') or '',
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
self._sort_formats(formats)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if track.get('kind') != 'captions':
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entries.append({
'id': this_video_id,
'title': video_data['title'] if require_title else video_data.get('title'),
'description': video_data.get('description'),
'thumbnail': self._proto_relative_url(video_data.get('image')),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
'formats': formats,
})
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
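# A hypothetical illustration: merging
#     {'en': [{'url': 'a.vtt'}]}
# with
#     {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]}
# yields {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]},
# since duplicate URLs within a language are dropped.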
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
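# A minimal sketch (not part of youtube-dl itself) of how a site-specific search
# extractor would plug into SearchInfoExtractor; the class name, search key and
# URLs below are hypothetical:
#
#     class ExampleSearchIE(SearchInfoExtractor):
#         _SEARCH_KEY = 'examplesearch'
#         _MAX_RESULTS = 50
#
#         def _get_n_results(self, query, n):
#             entries = [self.url_result('https://example.com/video/%d' % i)
#                        for i in range(n)]
#             return self.playlist_result(entries, playlist_title=query)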
|
Dunkas12/BeepBoopBot
|
lib/youtube_dl/extractor/common.py
|
Python
|
gpl-3.0
| 114,911
|
[
"VisIt"
] |
23457e58782876f3a1926c136abd432a9ef256eee2b45fa35914972b0c065235
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
import unittest
import bs4
import MooseDocs
from MooseDocs.common import moose_docs_file_tree
from MooseDocs.testing import MarkdownTestCase
class TestTemplate(MarkdownTestCase):
EXTENSIONS = ['MooseDocs.extensions.template', 'MooseDocs.extensions.app_syntax', 'meta']
@classmethod
def updateExtensions(cls, configs):
"""
Method to change the arguments that come from the configuration file for
specific tests. This way one can test optional arguments without permanently
changing the configuration file.
"""
configs['MooseDocs.extensions.template']['template'] = 'testing.html'
configs['MooseDocs.extensions.app_syntax']['hide']['framework'].append('/Functions')
configs['MooseDocs.extensions.app_syntax']['hide']['phase_field'].append('/ICs')
@classmethod
def setUpClass(cls):
super(TestTemplate, cls).setUpClass()
# Use BoxMarker.md to test Doxygen and Code lookups
config = dict(base='docs/content',
include=['docs/content/documentation/systems/Adaptivity/Markers/*'])
root = moose_docs_file_tree({'framework': config})
node = root.findall('/BoxMarker')[0]
cls.html = cls.parser.convert(node)
#with open(node.markdown(), 'r') as fid:
# cls.html = fid.read()
cls.soup = bs4.BeautifulSoup(cls.html, "html.parser")
def testContent(self):
self.assertIsNotNone(self.soup.find('h1'))
self.assertIn('BoxMarker', self.html)
def testDoxygen(self):
a = str(self.soup)
self.assertIsNotNone(a)
self.assertIn('classBoxMarker.html', a)
self.assertIn('Doxygen', a)
def testCode(self):
html = str(self.soup)
self.assertIn('href="https://github.com/idaholab/moose/blob/master/framework/include/'\
'markers/BoxMarker.h"', html)
self.assertIn('href="https://github.com/idaholab/moose/blob/master/framework/src/'\
'markers/BoxMarker.C"', html)
def testHidden(self):
md = '!syntax objects /Functions'
html = self.convert(md)
gold = '<a class="moose-bad-link" data-moose-disable-link-error="1" ' \
'href="/Functions/framework/ParsedVectorFunction.md">ParsedVectorFunction</a>'
self.assertIn(gold.format(MooseDocs.MOOSE_DIR.rstrip('/')), html)
def testPolycrystalICs(self):
md = '[Foo](/ICs/PolycrystalICs/index.md)'
html = self.convert(md)
gold = '<a class="moose-bad-link" href="/ICs/PolycrystalICs/index.md">'
self.assertIn(gold, html)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
liuwenf/moose
|
python/MooseDocs/tests/template/test_template.py
|
Python
|
lgpl-2.1
| 4,058
|
[
"MOOSE"
] |
b7d64fdf3589dc7eb708f8202a4ae39a4aec31e33e8c096aeedc401f516b3150
|
##Built-In Libraries##
import time
import csv
import string
##Third-Party Libraries##
import numpy as np
from PIL import Image,ImageTk
import wolframalpha
##Other TP Files##
import mnist_training as mnist #See File for Citations
import unpackData as unpack #See File for Citations
class Network(object):
def __init__(self,other): #other is a list containing the number of
#neurons per layer. A net with three inputs, a two-node hidden layer,
#and one output would be represented as [3,2,1]
self.numLayers=len(other)
self.netSize=other
self.learningRate=0.3 #learning rate is the step size for gradient
#descent. The ideal learning rate varies by net.
self.count = 0 #count iterations for auto_stop
self.biases=[np.random.randn(i,1)/32 \
for i in self.netSize[1:]]
#this initializes a 2D list of random, normally distributed values
#which represent the initial bias values for each node
self.weights=[np.random.randn(b,a)/32 \
for (a,b) in zip(self.netSize[:-1],self.netSize[1:])]
#initializes a 3D list of weights associated with each neuron.
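#Illustrative example (made-up sizes): Network([3,2,1]) creates biases with
#shapes [(2,1),(1,1)] and weights with shapes [(2,3),(1,2)], so weights[0]
#maps the 3 inputs onto the 2 hidden neurons.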
def save(self,title):
f = open("WeightsMNIST"+str(title)+".txt","wb+")
np.save(f,self.weights)
g = open("BiasesMNIST"+str(title)+".txt","wb+")
np.save(g,self.biases)
print ("Saved!")
def feedforward(self, a):
#a is the input matrix of size (n,1) where n is the number of neurons
#in the first row
for index in range(len(self.netSize)-1):
#np.dot performs matrix multiplication in 2D and regular dot
#product in 1D assuming the dimensions are correct
nextLayer=np.dot(self.weights[index],a)
#nextLayer is the weighted sum of all the previous inputs
#arranged as a vector based on how many neurons are in the
#next row
a=sigmoid(nextLayer+self.biases[index])
#the sigmoid takes in the weighted array and adds the bias and
#returns an array with the same dimensions just with modified
#values
return a
def MBGD(self,trainX,trainY,batchsize=1,test_x=None,test_y=None):
#print ("learning rate:",self.learningRate)
if not isinstance(test_x,type(None)): #check if test data is provided
before = testAccuracy(self,test_x,test_y)
combinedData = list(zip(trainX,trainY)) #combine x,y for shuffling
np.random.shuffle(combinedData) #randomly shuffle it
shuffled = list(zip(*combinedData)) #unzip/separate it
for batch in range(len(trainX)//batchsize): #goes through batches
start = batch*batchsize #start of batch interval
end = start+batchsize #end of batch interval (start-end=batchsize)
updateW = [np.zeros(w.shape) for w in
self.weights] # zero vector with
# same shape as self.weights
updateB = [np.zeros(b.shape) for b in
self.biases] # zero vector with
# same shape as self.biases
for index in range(start,end): #loop through individual batch
x = shuffled[0][index]
y = shuffled[1][index]
#entry = np.reshape(x,(1024,1))
#exit = np.reshape(y,(94,1))
gradB,gradW=self.backprop(x,y) #gradient vectors with the same
#shape as self.weights and self.biases with the gradients
#of the cost function computed by the backpropagation algorithm
updateW=[uw+gw for uw,gw in zip(updateW,gradW)]
updateB=[ub+gb for ub,gb in zip(updateB,gradB)]
for index in range(len(self.weights)): #update Weights
weights = self.weights[index]
update = (self.learningRate)*updateW[index]/batchsize
newVal=weights-update
self.weights[index]=newVal
for index in range(len(self.biases)): #update Biases
newVal=(self.biases[index]-
(self.learningRate*updateB[index]/batchsize))
self.biases[index]=newVal
if not isinstance(test_x,type(None)): #check if test data is provided
after = testAccuracy(self,test_x,test_y)
if after-before<=0: #if accuracy has gone down
self.count+=1
self.learningRate/=2 #lower the learning rate
###NOT MY CODE### (backprop) #taken from Neural Networks and Deep Learning,
#an online book by Michael Nielsen
def backprop(self, x, y):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = costDerivative(activations[-1], y) * \
sigmoidPrime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
for l in range(2, self.numLayers):
z = zs[-l]
sp = sigmoidPrime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
###^^^^^^Not My Code^^^^^^### (backprop written by Michael Nielsen)
def setNet(self):
self.weights = [np.array([[1,2],[3,2],[1,2]]),np.array([[3,2,1]])]
self.biases = [np.array([[1],[1],[1]]),np.array([[1]])]
###Math Functions###
def cost(actual,ideal): #takes in (n,1) and (n,1) arrays where actual is the
#output of the network and #ideal is the ideal result. Returns MSE
#according to the cost function
MSE=0 #MSE is Mean Squared Error
for index in range(len(actual)):
errorSq=(actual[index][0]-ideal[index][0])**2
MSE+=errorSq
MSE/=(2*len(actual)) #WLOG we can use actual, we could also use ideal
return MSE
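#Illustrative example (made-up numbers): cost([[0.8],[0.2]],[[1.0],[0.0]])
#computes ((0.8-1.0)**2 + (0.2-0.0)**2)/(2*2) = 0.02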
def costDerivative(actual,ideal):
return (actual-ideal)
from scipy.special import expit #expit is a built-in sigmoid function with high
#floating point arithmetic accuracy
def sigmoid(z):
return expit(z)
def sigmoidPrime(z):
return sigmoid(z)*(1-sigmoid(z))
###TESTING FUNCTIONS###
def testAccuracy(net,trainX,trainY):
n = len(trainY)
seen = []
count = 0
for index in range(len(trainX)):
output = np.argmax(net.feedforward(np.reshape(trainX[index],(1024,1)))) #np.reshape for non-mnist
#output = np.argmax(net.feedforward(trainX[index]))
expected = np.argmax(trainY[index])
#print ("Output:",output,"expected:",expected)
count += (output==expected)
if output==expected:
if expected not in seen:
seen.append(expected)
return count/n
def trainNet(netSize,trainX,trainY,epochs,testx=1,testy=1):
np.seterr(all="raise")
net=Network(netSize)
for i in range(epochs):
if net.count==10:
net.save(net.count-10)
print ("accuracy has gone down and up 10 times")
a=time.time()
currentAccuracy = testAccuracy(net,trainX,trainY)
if not isinstance(testx,int):
otherAccuracy = testAccuracy(net,testx,testy)
print("Accuracy before run", i, ":", currentAccuracy)
if not isinstance(testx,int):
print("Accuracy of Test Data:",otherAccuracy)
net.MBGD(trainX,trainY,100,trainX,trainY)
print ("Time after run",i,":",(time.time()-a))
return net
def testSmallDataset(): #tests on small batch (1500) saved to txt file
netsize = [1024,500,369]
trainx = 1-(loadTestingData()[0]/255)
trainy = loadTestingData()[1]
net = trainNet(netsize,trainx,trainy,100)
#print (net.feedforward(np.reshape(trainx[5],(1024,1))))
print ("Done!")
def testReal():
trnx,trny,tstx,tsty = TRAIN_X(),TRAIN_Y(),TEST_X(),TEST_Y()
netsize = [1024,200,94]
net = trainNet(netsize,trnx,trny,10,tstx,tsty)
accuracy = testAccuracy(net,tstx,tsty)
net.save("hasy")
return ("Accuracy With Test Data: "+str(accuracy))
def testExistingHasy():
trnx, trny, tstx, tsty = TRAIN_X(), TRAIN_Y(), TEST_X(), TEST_Y()
net = createNet()
for i in range(5):
print ("accuracy before run",str(i),str(testAccuracy(net,tstx,tsty)))
net.MBGD(trnx,trny,batchsize=100,test_x=tstx,test_y=tsty)
accuracy = testAccuracy(net,tstx,tsty)
net.save("hasy")
print ("Accuracy with Test Data: "+str(accuracy))
def trainExistingMnist():
net = createMNIST()
train_x = scaletestData(mnist.load_data_wrapper()[0])
train_y = mnist.load_data_wrapper()[1]
test_x = scaletestData(mnist.load_data_wrapper()[2])
test_y = mnist.load_data_wrapper()[3]
for i in range(5):
net.MBGD(train_x,train_y,batchsize=100,test_x=test_x,test_y=test_y)
print ("accuracy after run",i,testAccuracy(net,test_x,test_y))
net.save('JUSTDIDTHISTONIGHT')
print ("saved and done")
#for HASY
def loadWeightsAndBiases():
weights = np.load("WeightsMNISThasy.txt")
biases = np.load("BiasesMNISThasy.txt")
return list(weights),list(biases)
#for HASY
def createNet():
net = Network([1024,200,94])
net.weights=loadWeightsAndBiases()[0]
net.biases=loadWeightsAndBiases()[1]
return net
#for MNIST
def loadMnist():
weights = np.load("WeightsMNISTJUSTDIDTHISTONIGHT.txt")
biases = np.load("BiasesMNISTJUSTDIDTHISTONIGHT.txt")
return list(weights),list(biases)
#for MNIST
def createMNIST():
net = Network([784,100,10])
net.weights = loadMnist()[0]
net.biases = loadMnist()[1]
return net
def savePic(datax,datay,index):
arrayyy = datax[index]
ind = np.argmax(datay[index])
print ("index:",ind)
print ("done!")
def mnistTest(epochs):
netsize = [784, 100, 10]
train_x = scaletestData(mnist.load_data_wrapper()[0])
train_y = mnist.load_data_wrapper()[1]
test_x = scaletestData(mnist.load_data_wrapper()[2])
test_y = mnist.load_data_wrapper()[3]
net = trainNet(netsize,train_x,train_y,epochs,test_x,test_y)
net.save('JUSTDIDTHISTONIGHT')
print ("saved and done!")
###CREATE AND LOAD TESTING DATA###
def TRAIN_X():
x = scaletestData(unpack.loadData()[0][0])
print (x[0])
return x
def TRAIN_Y():
return unpack.loadData()[0][1]
def TEST_X():
return scaletestData(unpack.loadData()[1][0])
def TEST_Y():
return unpack.loadData()[1][1]
def TRN_X():
return setUpTestData()[0]
def TRN_Y():
return setUpTestData()[1]
def TST_X():
return setUpTestData()[2]
def TST_Y():
return setUpTestData()[3]
def saveData():
np.savetxt("debuggingX",DEBUG_X)
np.savetxt("debuggingY",DEBUG_Y)
print ("saved files")
def setUpTestData():
testx = TEST_X()
testy = TEST_Y()
trn_x,trn_y,tst_x,tst_y=[],[],[],[]
for i in range(len(testx)):
if i%10==0:
tst_x.append(testx[i])
tst_y.append(testy[i])
else:
trn_x.append(testx[i])
trn_y.append(testy[i])
return trn_x,trn_y,tst_x,tst_y
def loadTestingData():
debug_x = np.loadtxt("debuggingX")
debug_y = np.loadtxt("debuggingY")
return debug_x,debug_y
def scaletestData(x): #binarize and invert pixel values: nonzero -> 0, zero -> 1
zeros,ones = (0,0)
for i in range(len(x)):
for value in range(len(x[i])):
if x[i][value]!=0:
x[i][value]=0
zeros+=1
else:
x[i][value]=1
ones+=1
if ones > zeros:
raise Exception("Check your scaling, most of the image is 1's")
return x
###IMPORT AND IDENTIFY IMAGES###
def latexCommand(index):
line=index+1
file = open("C:/Users/Joe/Documents/S17/15-112/Term Project/HASYv2/symbols3.csv",'r')
reader = csv.reader(file)
for i,row in enumerate(reader):
if i==line:
return row[1]
print ("you fucked up")
def formatURL(input,data):
inp = str(input)
inp = inp.strip()
inp = inp.replace("+","%2B")
inp = inp.replace(" ","+")
inp+="%3F"
inp+="&width="
inp+=str(data.width//2-data.margin)
return inp
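#Illustrative example (made-up values): with data.width=800 and data.margin=20,
#formatURL("2+2",data) returns "2%2B2%3F&width=380"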
#Wolfram Alpha API Key obtained from wolframalpha.com
#Used the Wolfram Alpha module which can be pip installed via 'wolframalpha'
import urllib
def wolframAlpha(input,data):
appID = "4YUQ4H-EUKJ63VXG2"
url = 'http://api.wolframalpha.com/v1/simple?appid=4YUQ4H-EUKJ63VXG2&i='
query = formatURL(input,data)
url+=query
#Following syntax loosely taken from
# "http://stackoverflow.com/questions/40911170/ \n
# python-how-to-read-an-image-from-a-url
try:
image = Image.open(urllib.request.urlopen(url))
image.save("wolframTemp.gif","gif")
return 1 #to differentiate between returning None
except:
print ("Connect to the internet or enter a valid query")
return None
###GRAPHICS AND GUI###
# mouseEventsDemo.py
# TAKEN AND MODIFIED FROM 15-112 WEBSITE #
from tkinter import (Tk,ALL,PhotoImage,Canvas,simpledialog,messagebox,
Frame,Label,Entry,NW,CENTER)
###BUTTONS AND WINDOWS###
class Window1(simpledialog.Dialog): #taken from the 15-112 website
def body(self, master):
self.modalResult = None
Label(master, text="Correct Symbol \n \
(press OK without entering anything \n \
if the digit is already correct):").grid(row=0)
self.e1 = Entry(master)
self.e1.grid(row=0, column=1)
return self.e1 # initial focus
def apply(self):
first = self.e1.get()
self.modalResult = (first)
def showDialog(data): #taken from the 15-112 website
dialog = Window1(data.root)
return dialog.modalResult
class Button(object):
def __init__(self,x0,y0,x,y,fgcolor,bgcolor,data,text,textcolor="black"):
self.x = x0
self.y = y0
self.dims=(x0-x//2,y0-y//2,x0+x//2,y0+y//2)
self.normalColor=fgcolor
self.clickedColor=bgcolor
self.textColor=textcolor
self.text=text
self.data = data
def drawNormal(self,canvas):
canvas.create_rectangle(self.dims,fill=self.normalColor)
canvas.create_text(self.x,self.y,text = self.text,fill=self.textColor,
font=("Bradley Hand ITC",14,"bold"),justify=CENTER)
def drawClicked(self,canvas):
canvas.create_rectangle(self.dims,fill=self.clickedColor)
canvas.create_text(self.x, self.y, text=self.text, fill=self.textColor)
def inBoundaries(self,x,y):
x0,y0,x1,y1=self.dims
if x<=x1 and x>=x0 and y<=y1 and y>=y0:
return True
return False
def drawImage(self,canvas):
image = self.data.eraserImage
canvas.create_image(self.x,self.y,image=image)
###MODEL###
def startButton(data):
data.splash = False
data.draw = True
data.erase = False
def classifyButton(data):
classifySymbols(data)
convertClassification(data)
def clearButton(data):
data.symbol = set()
data.classification = []
data.characters = []
def correctButton(data):
train_x,train_y=[],[]
findBoundaries(data)
for (symbol,i) in zip(data.characters,list(range(len(data.characters)))):
(left, top, right, bottom) = symbol
mnist = resizeImagetoSquare(left, top, right, bottom, data)[0]
hasy = resizeImagetoSquare(left, top, right, bottom, data)[1]
classification = data.classification[i]
data.highlight = classification
try:
correct = str(showDialog(data))
data.highlight = ""
if correct == "" or None:
continue
data.classification[i] = correct
onlineLearning(data,hasy,mnist,correct)
except:
continue
convertClassification(data)
def solveButton(data):
if wolframAlpha(data.printable,data) == None:
return None
wolframAlpha(data.printable,data)
showWolframAlpha(data)
data.solved = True
data.draw = False
def backButton(data):
data.splash=True
data.about=False
data.draw = False
data.symbol = set()
data.classification = []
data.characters = []
data.erase = False
def aboutButton(data):
data.about=True
data.splash=False
def solveBackButton(data):
clearButton(data)
data.solved = False
data.draw = True
def eraseButton(data):
data.erase = not(data.erase)
if data.erase == False:
data.eraserImage = PhotoImage(file="eraser.gif")
else:
data.eraserImage = PhotoImage(file="chalk.gif")
def make2dList(rows,cols): #makes 2d list- similar to 15-112 website
return [[0 for i in range(cols)] for i in range(rows)]
def findBoundaries(data):
#positions of every symbol drawn
rows = data.height//data.squaresize
cols = data.width//data.squaresize
data.image = make2dList(rows,cols)
imageList = data.image
#create 2D list of 1's and 0's modeling the entire image
for position in data.symbol:
row,col = position[1],position[0]
imageList[row][col] = 1
left,right,top,bottom = -1,-1,-1,-1
intermediate = -2 #helps determine where the left edge is in the middle
#of the page
# find the top-most and bottom-most rows
for row in range(rows):
for col in range(cols):
if imageList[row][col]==1:
if top==-1:
top = row #saves highest row
bottom = row #saves lowest row
# find left-most boundary (lowest x-val):
for col in range(len(imageList[0])):
count = 0
for row in range(rows):
if imageList[row][col]==1:
count+=1
#saves the right-most column in which there is a black pixel
right = col
if left==-1 or left==intermediate:
#saves the left-most column in which there is a black pixel
left = col
if count==0 and right>left:
boundaries = (left, top, right + 1, bottom + 1)
if boundaries not in data.characters:
data.characters.append(boundaries)
intermediate = left
return data.characters
def resizeImagetoSquare(left,top,right,bottom,data):
image = make2dList((bottom-top),(right-left))
for i in range(left,right):
for j in range(top,bottom):
image[j-top][i-left] = data.image[j][i]
#Remove white space at top and bottom
for row in image:
if 1 not in row:
image.remove(row)
#convert to image
image = Image.fromarray(np.array(image))
hasyimage = image
##For MNIST##
imageCenter = image.resize((20,20))
imarr = np.array(imageCenter)
mnistImage = np.zeros((28,28))
mnistImage[4:24,4:24] = imarr
##For HASY##
hasyimage = hasyimage.resize((32,32))
hasyimage = np.array(hasyimage)
return np.reshape(mnistImage,(784,1)),np.reshape(hasyimage,(1024,1))
def findSquare(x,y,data):
squaresize = data.squaresize
i = x//squaresize
j = y//squaresize
if (i,j) not in data.symbol:
data.symbol.add((i,j))
if (i+1,j) not in data.symbol:
data.symbol.add((i+1,j))
if (i,j+1) not in data.symbol:
data.symbol.add((i,j+1))
if (i+1,j+1) not in data.symbol:
data.symbol.add((i+1,j+1))
def findErase(x,y,data):
squaresize = data.squaresize
i = x // squaresize
j = y // squaresize
if (i, j) in data.symbol:
data.symbol.remove((i, j))
if (i + 1, j) in data.symbol:
data.symbol.remove((i + 1, j))
if (i, j + 1) in data.symbol:
data.symbol.remove((i, j + 1))
if (i + 1, j + 1) in data.symbol:
data.symbol.remove((i + 1, j + 1))
def init(data):
#Misc.
data.margin = 20
data.printable=""
data.buttonColor = "grey"
data.splash = True
data.about = False
data.solved = False
data.draw = False
data.erase = False
data.symbol = set()
data.size = 110
data.squaresize = data.height // data.size
data.image = []
data.classification = []
data.highlight = ""
data.net = createNet()
data.net2 = createMNIST()
data.characters = [] # list containing tuples of the (left,top,right,bottom)
#Images
data.pic = PhotoImage(file="background.gif") #taken from www.123rf.com
data.drawpic= PhotoImage(file="blackboard.gif") #taken from www.123RF.com
data.eraserImage = PhotoImage(file="eraser.gif") #taken from www.123rf.com
data.aboutpic = PhotoImage(file="about_screen.gif") #created on my own
#Buttons
data.startButton = Button(data.width//3,3*data.height//4,90,50,
data.buttonColor,"white",data,"Start")
data.classifyButton = Button(data.width//3-5,5*data.height//6,90,50,
data.buttonColor,"white",data,"Classify")
data.clearButton = Button(5+2*data.width//3,5*data.height//6,90,50,
data.buttonColor,"white",data,"Clear")
data.correctButton = Button(data.width//3-5,5*data.height//6,90,50,
data.buttonColor,"white",data,"Correct It")
data.solveButton = Button(data.width//2,5*data.height//6,140,50,
data.buttonColor,"white",data,"Send To \n Wolfram Alpha!")
data.backButton = Button(45,20,90,40,data.buttonColor,"white",data,"Back")
data.aboutButton = Button(2*data.width//3,3*data.height//4,90,50,
data.buttonColor,"white",data,"About")
data.solveBackButton = Button(45,20,90,40,data.buttonColor,'white',
data,"Back")
data.eraseButton = Button(data.width-45,20,90,40,data.buttonColor,"white",
data,"shouln't ever be displayed")
def classifySymbols(data):
findBoundaries(data)
for symbol in data.characters:
(left, top, right, bottom) = symbol
mnist = resizeImagetoSquare(left, top, right, bottom, data)[0]
hasy = resizeImagetoSquare(left, top, right, bottom, data)[1]
hasy2 = data.net.feedforward(hasy)
mnist2 = data.net2.feedforward(mnist)
exclusions = [0,78, 79, 80,87]
for i in exclusions:
hasy2[i][0] = 0
has = np.amax(hasy2)
mnst = (np.amax(mnist2))*1.05
if has >= mnst:
line = np.argmax(hasy2)
data.classification.append(str(latexCommand(line)))
else:
index = np.argmax(mnist2)
data.classification.append(str(index))
def onlineLearning(data,hasyarray,mnistarray,inp):
inputy = str(inp)
if inputy in string.digits:
net = data.net2
array = mnistarray
index = int(inp)
title = 'JUSTDIDTHISTONIGHT'
else:
net = data.net
array = hasyarray
index = findIndex(inp)
title = 'hasy'
testY = make2dList(net.netSize[2],1)
testY[int(index)][0] = 1
net.MBGD([array],[testY])
net.save(title)
def findIndex(input):
file = open(
"C:/Users/Joe/Documents/S17/15-112/Term Project/HASYv2/symbols3.csv", 'r')
reader = csv.reader(file)
for i, row in enumerate(reader):
if str(input).lower()==str(row[1]).lower():
return i-1
print ("Symbol Name Not Recognized")
return None
def convertClassification(data):
classification = ""
for i in data.classification:
classification+=i+' '
exclusions = ["f",'m','s','l','+','-','*','/']
for index in range(1,len(classification)-1):
try:
if ((classification[index - 1] in string.ascii_letters) and \
(classification[index + 1] in string.digits) and \
(classification[index-1] not in exclusions) and \
classification[index] == " "):
classification=classification[:index]+"^"+classification[index+1:]
except:
continue
for index in range(1,len(classification)-1):
try:
if ((classification[index-1] in string.digits) and \
(classification[index+1] in string.digits) and \
classification[index]==" "):
classification = classification[:index]+classification[index+1:]
except:
continue
data.printable=classification
def showWolframAlpha(data):
im = Image.open("wolframTemp.gif")
(width,height) = im.size
h = data.height-data.margin
im1 = im.crop(box=(0,0,width,h))
im1.save("temp1.gif")
try:
im2 = im.crop(box=(0,h,width,height))
except:
im2 = im.crop(box=(0,h,width,2*h))
im2.save("temp2.gif")
data.im1 = PhotoImage(file="temp1.gif")
data.im2 = PhotoImage(file="temp2.gif")
###CONTROLLER###
def leftReleased(event, data):
setEventInfo(event, data, "leftReleased")
data.leftPosn = (event.x, event.y)
def setEventInfo(event, data, eventName):
ctrl = ((event.state & 0x0004) != 0)
shift = ((event.state & 0x0001) != 0)
msg = ""
if ctrl: msg += "ctrl-"
if shift: msg += "shift-"
msg += eventName
msg += " at " + str((event.x, event.y))
data.info = msg
def mouseMotion(event,data):
setEventInfo(event, data, "mouseMotion")
data.motionPosn = (event.x, event.y)
def leftPressed(event, data):
setEventInfo(event, data, "leftPressed")
data.leftPosn = (event.x, event.y)
if data.splash==True:
if data.startButton.inBoundaries(event.x,event.y):
startButton(data)
if data.aboutButton.inBoundaries(event.x,event.y):
aboutButton(data)
elif data.about==True:
if data.backButton.inBoundaries(event.x, event.y):
backButton(data)
elif data.draw==True:
if data.classification != []: #Already Classified
if data.correctButton.inBoundaries(event.x, event.y):
correctButton(data)
elif data.solveButton.inBoundaries(event.x,event.y):
solveButton(data)
elif data.backButton.inBoundaries(event.x, event.y):
backButton(data)
elif data.clearButton.inBoundaries(event.x, event.y):
clearButton(data)
else: #still drawing
if data.classifyButton.inBoundaries(event.x,event.y):
classifyButton(data)
elif data.backButton.inBoundaries(event.x, event.y):
backButton(data)
elif data.clearButton.inBoundaries(event.x, event.y):
clearButton(data)
elif data.eraseButton.inBoundaries(event.x,event.y):
eraseButton(data)
elif data.solved==True:
if data.solveBackButton.inBoundaries(event.x,event.y):
solveBackButton(data)
def leftMoved(event, data):
setEventInfo(event, data, "leftMoved")
data.leftPosn = (event.x, event.y)
if data.splash==False and data.erase==False:
findSquare(event.x, event.y, data)
elif data.erase==True:
findErase(event.x,event.y,data)
def timerFired(data): pass
def keyPressed(event, data): pass
###VIEW###
def createGrid(canvas,data):
width = data.width
height = data.height
squaresize = data.squaresize
for square in data.symbol:
i,j = square[0],square[1]
x0,y0,x1,y1=i*squaresize,j*squaresize,(i+1)*squaresize,(j+1)*squaresize
canvas.create_rectangle(x0,y0,x1,y1,fill="white",outline = "white")
def drawSplashScreen(canvas,data):
if data.splash == True:
#background image
canvas.create_image(data.width//2,data.height//2, image=data.pic)
#above code (background image) taken from 15-112 website
data.startButton.drawNormal(canvas)
data.aboutButton.drawNormal(canvas)
elif data.about==True:
canvas.create_image(data.width//2,data.height//2,image=data.drawpic)
im = data.aboutpic
canvas.create_image(data.width//2,data.height//2+15,image=im)
def drawClassifyScreen(canvas,data):
if data.draw==True: #Draw Screen
if data.classification != []: #already classified
canvas.create_image(data.width//2,data.height//2,image=data.drawpic)
canvas.create_text(data.width//2,data.height//2,text=str(data.printable),
font=("Bradley Hand ITC", 20,"bold"),fill="white")
data.correctButton.drawNormal(canvas)
data.solveButton.drawNormal(canvas)
if data.highlight != "":
text = "Classified Symbol: "+str(data.highlight)
canvas.create_text(data.width//2,2*data.height//3,
text=text,font=("Bradley Hand ITC", 20,"bold"),fill="white")
else:
canvas.create_image(data.width//2,data.height//2,image=data.drawpic)
createGrid(canvas,data)
drawSquareBoundaries(canvas,data)
canvas.create_text(data.width//2,data.height//10,text="Write Math",
font=("Bradley Hand ITC",20,"bold"),fill="white")
data.classifyButton.drawNormal(canvas)
data.eraseButton.drawImage(canvas)
data.clearButton.drawNormal(canvas)
data.backButton.drawNormal(canvas)
elif data.splash==False: #About Screen
data.backButton.drawNormal(canvas)
def drawSolvedScreen(canvas,data):
if data.solved == True:
im1 = data.im1
im2 = data.im2
canvas.create_rectangle(0,0,data.width,data.height,fill="white")
canvas.create_image(20,20,anchor=NW,image=im1)
canvas.create_image(data.width//2,20,anchor=NW,image=im2)
data.solveBackButton.drawNormal(canvas)
def drawSquareBoundaries(canvas,data):
for positions in data.characters:
(x0,y0,x1,y1) = positions
s = data.squaresize
canvas.create_rectangle(x0*s,y0*s,x1*s,y1*s,fill="",outline="black")
def redrawAll(canvas, data):
drawSplashScreen(canvas,data)
drawClassifyScreen(canvas,data)
drawSolvedScreen(canvas,data)
####################################
# use the run function as-is
####################################
def run(width=800, height=400):
def redrawAllWrapper(canvas, data):
canvas.delete(ALL)
canvas.create_rectangle(0, 0, data.width, data.height,
fill='white', width=0)
redrawAll(canvas, data)
canvas.update()
# Note changes #1:
def mouseWrapper(mouseFn, event, canvas, data):
mouseFn(event, data)
#redrawAllWrapper(canvas, data)
def keyPressedWrapper(event, canvas, data):
keyPressed(event, data)
redrawAllWrapper(canvas, data)
def timerFiredWrapper(canvas, data):
timerFired(data)
redrawAllWrapper(canvas, data)
# pause, then call timerFired again
canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
# Set up data and call init
class Struct(object): pass
data = Struct()
data.width = width
data.height = height
data.timerDelay = 20 # milliseconds
root = Tk()
data.root = root
init(data)
# create the root and the canvas
canvas = Canvas(root, width=data.width, height=data.height)
canvas.grid()
# set up events
# Note changes #2:
root.bind("<Button-1>", lambda event:
mouseWrapper(leftPressed, event, canvas, data))
#root.bind("<Button-3>", lambda event:
#mouseWrapper(rightPressed, event, canvas, data))
canvas.bind("<Motion>", lambda event:
mouseWrapper(mouseMotion, event, canvas, data))
canvas.bind("<B1-Motion>", lambda event:
mouseWrapper(leftMoved, event, canvas, data))
#canvas.bind("<B3-Motion>", lambda event:
#mouseWrapper(rightMoved, event, canvas, data))
root.bind("<B1-ButtonRelease>", lambda event:
mouseWrapper(leftReleased, event, canvas, data))
#root.bind("<B3-ButtonRelease>", lambda event:
#mouseWrapper(rightReleased, event, canvas, data))
root.bind("<Key>", lambda event:
keyPressedWrapper(event, canvas, data))
timerFiredWrapper(canvas, data)
# and launch the app
root.mainloop() # blocks until window is closed
print("bye!")
#Overview of Citations#
#Backpropagation Algorithm-- within my Network Class there is a method called
#backprop(x,y) which was taken entirely and without modification from Michael
#Nielsen's book Neural Networks and Deep Learning. This can be found online at
#http://neuralnetworksanddeeplearning.com/chap1.html
#Several of my functions for loading data call external python files called
#'mnist_training' and 'unpackData'. These files are a mixture of my own code
#and code written by others. See the files for more detailed citations
#I use the wolframAlpha module which is a nice way of accessing the Wolfram API
#more information about the module and API can be found at www.wolframalpha.com
#My pop-up dialog class was taken with light modifications from the 15-112 website
#under miscellaneous tkinter demos
#The entirety of my graphics is built off of events_example0.py from the 15-112 website
#the run function is modified from mouse-pressed examples posted on the 15-112 website
#any media (pictures) used in this project are clearly cited in a comment
#to the right of where they are first called. (generally in init)
#In a couple locations I load or save files using code taken and modified from
#stackoverflow.com. The exact URLs of these can be found next to the usage.
#All above citations can be found next to their location in my code#
|
josephwkim/mathnotes
|
neuralNet.py
|
Python
|
mit
| 35,008
|
[
"NEURON"
] |
5725dcbfaca7eefd6f93ffb8a6697ed533964b0aea255ee0c4a2d94a484ec9c8
|
# -*- coding: utf-8 -*-
from pyaxiom.netcdf import CFDataset
class IncompleteMultidimensionalTimeseriesProfile(CFDataset):
@classmethod
def is_mine(cls, dsg):
try:
assert dsg.featureType.lower() == 'timeseriesprofile'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
assert len(dsg.z_axes()) >= 1
zvar = dsg.z_axes()[0]
assert len(zvar.dimensions) > 1
# Not ragged
o_index_vars = dsg.get_variables_by_attributes(
sample_dimension=lambda x: x is not None
)
assert len(o_index_vars) == 0
r_index_vars = dsg.get_variables_by_attributes(
instance_dimension=lambda x: x is not None
)
assert len(r_index_vars) == 0
except AssertionError:
return False
return True
def from_dataframe(self, df, variable_attributes=None, global_attributes=None):
variable_attributes = variable_attributes or {}
global_attributes = global_attributes or {}
raise NotImplementedError
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
# if df is None:
# df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows)
raise NotImplementedError
def to_dataframe(self):
raise NotImplementedError
|
axiom-data-science/pyaxiom
|
pyaxiom/netcdf/sensors/dsg/timeseriesProfile/im.py
|
Python
|
mit
| 1,478
|
[
"NetCDF"
] |
533af11ac9ebe039de044d51fc4d8186c96cd15280dc3fa73a2da192680a8100
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# functional - functionality backend helpers
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""This module contains general functions used by the modules in
the functionality dir.
"""
import os
# REJECT_UNSET is not used directly but exposed to functionality
from shared.base import requested_page
from shared.findtype import is_user
from shared.httpsclient import extract_client_cert, extract_client_openid
from shared.safeinput import validated_input, REJECT_UNSET
from shared.useradm import expire_oid_sessions
def warn_on_rejects(rejects, output_objects):
"""Helper to fill in output_objects in case of rejects"""
if rejects:
for (key, err_list) in rejects.items():
for err in err_list:
output_objects.append({'object_type': 'error_text',
'text': 'input parsing error: %s: %s: %s'
% (key, err[0], err[1])})
def merge_defaults(user_input, defaults):
"""Merge default values from defaults dict into user_input so
that any missing fields get the default value and the rest
remain untouched.
"""
for (key, val) in defaults.items():
if not user_input.has_key(key):
user_input[key] = val
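# A hypothetical illustration: merge_defaults({'path': ['/tmp']},
# {'path': ['/home'], 'flags': ['0']}) leaves 'path' untouched and adds the
# missing 'flags' key, so user_input ends up as {'path': ['/tmp'], 'flags': ['0']}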
def prefilter_input(user_arguments_dict, prefilter_map):
"""Apply filters from filter_map to user_arguments_dict values inline"""
for (key, prefilter) in prefilter_map.items():
if user_arguments_dict.has_key(key):
orig = user_arguments_dict[key]
if isinstance(orig, basestring):
res = prefilter(orig)
else:
res = [prefilter(i) for i in orig]
user_arguments_dict[key] = res
def validate_input(
user_arguments_dict,
defaults,
output_objects,
allow_rejects,
prefilter_map=None,
):
"""A wrapper used by most back end functionality"""
# always allow output_format and underscore cache-prevention dummy, we
# don't want to use unnecessary lines in all scripts to specify this
defaults['output_format'] = ['allow_me']
defaults['_'] = ['allow_me']
if prefilter_map:
prefilter_input(user_arguments_dict, prefilter_map)
(accepted, rejected) = validated_input(user_arguments_dict,
defaults)
warn_on_rejects(rejected, output_objects)
if rejected.keys() and not allow_rejects:
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Input arguments were rejected - not allowed for this script!'
})
return (False, output_objects)
return (True, accepted)
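# Typical usage sketch in a backend script (the defaults and return value names
# here are hypothetical, not part of this module):
#   defaults = {'job_id': REJECT_UNSET, 'flags': ['']}
#   (validate_status, accepted) = validate_input(
#       user_arguments_dict, defaults, output_objects, allow_rejects=False)
#   if not validate_status:
#       return (accepted, returnvalues.CLIENT_ERROR)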
def validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects,
require_user=True,
filter_values=None,
environ=None,
):
"""A wrapper used by most back end functionality - redirects to sign up
if client_id is missing.
"""
logger = configuration.logger
if environ is None:
environ = os.environ
creds_error = ''
if not client_id:
creds_error = "Invalid or missing user credentials"
elif require_user and not is_user(client_id, configuration.mig_server_home):
creds_error = "No such user (%s)" % client_id
if creds_error and not requested_page().endswith('logout.py'):
output_objects.append({'object_type': 'error_text', 'text'
: creds_error
})
# Redirect to sign-up cert page trying to guess relevant choices
signup_url = os.path.join(configuration.migserver_https_sid_url,
'cgi-sid', 'signup.py')
signup_query = ''
if not client_id:
output_objects.append(
{'object_type': 'text', 'text': '''Apparently you do not
already have access to %s, but you can sign up:''' % configuration.short_title
})
output_objects.append({'object_type': 'link', 'text': signup_url,
'destination': signup_url + signup_query})
output_objects.append(
{'object_type': 'text', 'text': '''If you already signed up and
received a user certificate you probably just need to import it in your
browser.'''})
else:
output_objects.append(
{'object_type': 'text', 'text': '''Apparently you already have
suitable credentials and just need to sign up for a local %s account on:''' % \
configuration.short_title})
if extract_client_cert(configuration, environ) is None:
# Force logout/expire session cookie here to support signup
identity = extract_client_openid(configuration, environ,
lookup_dn=False)
if identity:
logger.info("expire openid user %s" % identity)
(success, _) = expire_oid_sessions(configuration, identity)
else:
logger.info("no openid user logged in")
output_objects.append({'object_type': 'link', 'text': signup_url,
'destination': signup_url + signup_query})
return (False, output_objects)
(status, retval) = validate_input(user_arguments_dict, defaults,
output_objects, allow_rejects, filter_values)
return (status, retval)
|
heromod/migrid
|
mig/shared/functional.py
|
Python
|
gpl-2.0
| 6,295
|
[
"Brian"
] |
fa5469fa7dbad77fc33bb11750f1cc2aab647f95ad3afca99f40cbc2c581b8a3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestFreetypeTextMapperBigger(vtk.test.Testing.vtkTest):
def testFreetypeTextMapperBigger(self):
currentFontSize = 55
defaultText = "MmNnKk @"
textColor = [246, 255, 11]
bgColor = [56, 56, 154]
for i in range(0, len(textColor)):
textColor[i] /= 255.0
bgColor[i] /= 255.0
renWin = vtk.vtkRenderWindow()
renWin.SetSize(790, 450)
ren = vtk.vtkRenderer()
ren.SetBackground(bgColor)
renWin.AddRenderer(ren)
families = ["Arial", "Courier", "Times"]
attributes = [[0, 0], [1, 1]] # bold, italic
def SetAttributesText(attrib):
""" Expects a list of attributes of size 2, returns a string """
s = ""
if attrib[0] != 0:
s += "b"
if attrib[1] != 0:
s += "i"
return ','.join(list(s))
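        # For example, SetAttributesText([1, 1]) returns "b,i",
        # SetAttributesText([1, 0]) returns "b", and
        # SetAttributesText([0, 0]) returns "" (no bold/italic markers).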
mapper = dict()
actor = dict()
pos = 0
for i, family in enumerate(families):
for j, attrib in enumerate(attributes):
pos += 1
txt = ""
txtAttrib = SetAttributesText(attrib)
if len(txtAttrib) != 0:
txt = family + " (" + SetAttributesText(attrib) + "): " + defaultText
else:
txt = family + ": " + defaultText
idx = ''.join(map(str, [i, j]))
mapper.update({idx:vtk.vtkTextMapper()})
mapper[idx].SetInput(txt)
tprop = mapper[idx].GetTextProperty()
eval('tprop.SetFontFamilyTo' + family + '()')
tprop.SetColor(textColor)
tprop.SetBold(attrib[0])
tprop.SetItalic(attrib[1])
tprop.SetFontSize(currentFontSize)
actor.update({idx:vtk.vtkActor2D()})
actor[idx].SetMapper(mapper[idx])
actor[idx].SetDisplayPosition(10, pos * (currentFontSize + 5))
ren.AddActor(actor[idx])
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
renWin.Render()
if __name__ == "__main__":
vtk.test.Testing.main([(TestFreetypeTextMapperBigger, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/OpenGL/Testing/Python/TestFreetypeTextMapperBigger.py
|
Python
|
gpl-3.0
| 3,056
|
[
"VTK"
] |
7cf4c143d148453d8098892a42ec47234bbe207bd35daa8fb2f8cf6c27884f87
|
from __future__ import absolute_import
import numpy as np
import os
import shutil
import tempfile
import matplotlib
matplotlib.use('Agg', warn=False)
from matplotlib.pyplot import Artist, savefig, clf, cm
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib.testing.compare import compare_images
from numpy import cos, sin, pi
from shapely.geometry import Polygon, LineString, Point
from six.moves import xrange
from .util import unittest
from geopandas import GeoSeries, GeoDataFrame, read_file
# If set to True, generate images rather than perform tests (all tests will pass!)
GENERATE_BASELINE = False
BASELINE_DIR = os.path.join(os.path.dirname(__file__), 'baseline_images', 'test_plotting')
TRAVIS = bool(os.environ.get('TRAVIS', False))
class PlotTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
return
def tearDown(self):
shutil.rmtree(self.tempdir)
return
def _compare_images(self, ax, filename, tol=10):
""" Helper method to do the comparisons """
assert isinstance(ax, Artist)
if GENERATE_BASELINE:
savefig(os.path.join(BASELINE_DIR, filename))
savefig(os.path.join(self.tempdir, filename))
err = compare_images(os.path.join(BASELINE_DIR, filename),
os.path.join(self.tempdir, filename),
tol, in_decorator=True)
if err:
raise ImageComparisonFailure('images not close: %(actual)s '
'vs. %(expected)s '
'(RMS %(rms).3f)' % err)
def test_poly_plot(self):
""" Test plotting a simple series of polygons """
clf()
filename = 'poly_plot.png'
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(1, 0), (2, 0), (2, 1)])
polys = GeoSeries([t1, t2])
ax = polys.plot()
self._compare_images(ax=ax, filename=filename)
def test_point_plot(self):
""" Test plotting a simple series of points """
clf()
filename = 'points_plot.png'
N = 10
points = GeoSeries(Point(i, i) for i in xrange(N))
ax = points.plot()
self._compare_images(ax=ax, filename=filename)
def test_line_plot(self):
""" Test plotting a simple series of lines """
clf()
filename = 'lines_plot.png'
N = 10
lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])
ax = lines.plot()
self._compare_images(ax=ax, filename=filename)
@unittest.skipIf(TRAVIS, 'Skip on Travis (fails even though it passes locally)')
def test_plot_GeoDataFrame_with_kwargs(self):
"""
Test plotting a simple GeoDataFrame consisting of a series of polygons
with increasing values using various extra kwargs.
"""
clf()
filename = 'poly_plot_with_kwargs.png'
ts = np.linspace(0, 2*pi, 10, endpoint=False)
# Build GeoDataFrame from a series of triangles wrapping around in a ring
# and a second column containing a list of increasing values.
r1 = 1.0 # radius of inner ring boundary
r2 = 1.5 # radius of outer ring boundary
def make_triangle(t0, t1):
return Polygon([(r1*cos(t0), r1*sin(t0)),
(r2*cos(t0), r2*sin(t0)),
(r1*cos(t1), r1*sin(t1))])
polys = GeoSeries([make_triangle(t0, t1) for t0, t1 in zip(ts, ts[1:])])
values = np.arange(len(polys))
df = GeoDataFrame({'geometry': polys, 'values': values})
# Plot the GeoDataFrame using various keyword arguments to see if they are honoured
ax = df.plot(column='values', colormap=cm.RdBu, vmin=+2, vmax=None, figsize=(8, 4))
self._compare_images(ax=ax, filename=filename)
class TestPySALPlotting(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
import pysal as ps
except ImportError:
raise unittest.SkipTest("PySAL is not installed")
pth = ps.examples.get_path("columbus.shp")
cls.tracts = read_file(pth)
def test_legend(self):
ax = self.tracts.plot(column='CRIME', scheme='QUANTILES', k=3,
colormap='OrRd', legend=True)
labels = [t.get_text() for t in ax.get_legend().get_texts()]
expected = [u'0.00 - 26.07', u'26.07 - 41.97', u'41.97 - 68.89']
self.assertEqual(labels, expected)
if __name__ == '__main__':
unittest.main()
|
perrygeo/geopandas
|
tests/test_plotting.py
|
Python
|
bsd-3-clause
| 4,605
|
[
"COLUMBUS"
] |
79128b4208a2f6b8ef0e3a0be562259f71954cc2568466a957886a4f4c1692af
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm
from zerver.lib.actions import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.models import (
get_realm, get_prereg_user_by_email, get_user_profile_by_email,
PreregistrationUser, Realm, RealmAlias, Recipient,
    Referral, ScheduledJob, UserProfile, UserMessage,
    Stream, Subscription
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from six.moves import urllib
from six.moves import range
import six
from typing import Any, Dict, List, Text
import os
class PublicURLTest(ZulipTestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
# e.g. self.client_post(url) if method is "post"
response = getattr(self, method)(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
denmark_stream_id = Stream.objects.get(name='Denmark').id
        get_urls = {200: ["/accounts/home/", "/accounts/login/",
"/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/json/streams/%d/members" % (denmark_stream_id,),
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent"],
}
        # Add all files in 'templates/zerver/help' directory (except for 'main.html',
        # 'index.md' and the 'include' directory) to the `get_urls[200]` list.
for doc in os.listdir('./templates/zerver/help'):
if doc not in {'main.html', 'index.md', 'include'}:
get_urls[200].append('/help/' + os.path.splitext(doc)[0]) # Strip the extension.
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/exists",
"/json/subscriptions/property",
"/json/fetch_api_key",
"/json/users/me/pointer",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("client_get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("client_post", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("client_put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(get_realm("zulip"), stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test@zulip.com", "test")
user_profile = get_user_profile_by_email("test@zulip.com")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("hamlet@zulip.com", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = 'hamlet@zulip.com'
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password.', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
with queries_captured() as queries:
self.register("test@zulip.com", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test@zulip.com", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("hamlet@zulip.com")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test@zulip.com", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams):
# type: (str, List[Text]) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
# type: (List[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bob@zulip.com']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client_post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test@zulip.com", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("hamlet@zulip.com")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "hamlet@zulip.com",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="hamlet@zulip.com"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("hamlet@zulip.com")
existing = ["hamlet@zulip.com", "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_refer_friend(self):
# type: () -> None
self.login("hamlet@zulip.com")
user = get_user_profile_by_email('hamlet@zulip.com')
user.invites_granted = 1
user.invites_used = 0
user.save()
invitee = "alice-test@zulip.com"
result = self.client_post('/json/refer_friend', dict(email=invitee))
self.assert_json_success(result)
# verify this works
Referral.objects.get(user_profile=user, email=invitee)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.invites_used, 1)
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
current_user_email = "hamlet@zulip.com"
self.login(current_user_email)
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
data = {"email": invitee, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=0),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
email_jobs_to_deliver = ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=datetime.datetime.utcnow())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
self.assertTrue(send_email_job(job))
self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.COMMUNITY)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_with_subdomain(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'mit': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
email = "newguy@zulip.com"
password = "newpassword"
realm = get_realm('zulip')
do_set_realm_default_language(realm, "de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_unique_completely_open_domain(self):
# type: () -> None
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
realm = get_realm('mit')
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_completely_open_domain_success(self):
# type: () -> None
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = False
realm.save()
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=realm)
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@zulip.com'}, realm=realm)
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=None)
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"newuser@zulip.com"],
result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
password = "test"
email = "sipbtest@mit.edu"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = 'hamlet@zulip.com'
user_2 = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
|
amyliu345/zulip
|
zerver/tests/test_signup.py
|
Python
|
apache-2.0
| 50,543
|
[
"VisIt"
] |
bedb559f13cc2096e9fdb2d069260c2444332233e8fe7c324d73f12db954c70c
|
#
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2020 Thomas Reichenbach (Fraunhofer IWM)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ase.io import Trajectory
from ase.units import GPa, kB, fs
import numpy as np
from ase.md.langevin import Langevin
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from matscipy import pressurecoupling as pc
from io import open
# Parameters
dt = 1.0 * fs # MD time step
C11 = 500.0 * GPa # material constant
M_factor = 1.0 # scaling factor for lid mass during equilibration
# 1.0 will give fast equilibration for expensive
# calculators
Pdir = 2 # index of cell axis along normal pressure is applied
P = 5.0 * GPa # target normal pressure
v = 0.0 # no sliding yet, only apply pressure
vdir = 0 # index of cell axis along sliding happens
T = 300.0 # target temperature for thermostat
# thermostat is applied in the third direction which
# is neither pressure nor sliding direction and only
# in the middle region between top and bottom.
# This makes sense for small systems which cannot have
# a dedicated thermostat region.
t_langevin = 75.0 * fs # time constant for Langevin thermostat
gamma_langevin = 1. / t_langevin # derived Langevin parameter
t_integrate = 1000.0 * fs # simulation time
steps_integrate = int(t_integrate / dt) # number of simulation steps
atoms = ASE_ATOMS_OBJECT # put a specific system here
bottom_mask = BOOLEAN_NUMPY_ARRAY_TRUE_FOR_FIXED_BOTTOM_ATOMS # depends on system
top_mask = BOOLEAN_NUMPY_ARRAY_TRUE_FOR_CONSTRAINT_TOP_ATOMS # depends on system
# save masks for sliding simulations or restart runs
np.savetxt("bottom_mask.txt", bottom_mask)
np.savetxt("top_mask.txt", top_mask)
# set up calculation:
damp = pc.FixedMassCriticalDamping(C11, M_factor)
slider = pc.SlideWithNormalPressureCuboidCell(top_mask, bottom_mask, Pdir, P, vdir, v, damp)
atoms.set_constraint(slider)
# if we start from local minimum, zero potential energy, use double temperature for
# faster temperature convergence in the beginning:
MaxwellBoltzmannDistribution(atoms, 2 * kB * T)
# clear momenta in constraint regions, otherwise lid might run away
atoms.arrays['momenta'][top_mask, :] = 0
atoms.arrays['momenta'][bottom_mask, :] = 0
calc = ASE_CALCULATOR_OBJECT # put a specific calculator here
atoms.set_calculator(calc)
# only thermalize middle region in one direction
temps = np.zeros((len(atoms), 3))
temps[slider.middle_mask, slider.Tdir] = kB * T
gammas = np.zeros((len(atoms), 3))
gammas[slider.middle_mask, slider.Tdir] = gamma_langevin
integrator = Langevin(atoms, dt, temps, gammas, fixcm=False)
trajectory = Trajectory('equilibrate_pressure.traj', 'w', atoms)
log_handle = open('log_equilibrate.txt', 'w', 1, encoding='utf-8') # 1 means line buffered
logger = pc.SlideLogger(log_handle, atoms, slider, integrator)
# log can be read using pc.SlideLog (see docstring there)
logger.write_header()
logger() # step 0
trajectory.write() # step 0
integrator.attach(logger)
integrator.attach(trajectory)
integrator.run(steps_integrate)
log_handle.close()
trajectory.close()
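# A later sliding or restart run can reload the saved masks; a minimal sketch
# (np.savetxt stores them as floats, so cast back to bool):
#   bottom_mask = np.loadtxt("bottom_mask.txt").astype(bool)
#   top_mask = np.loadtxt("top_mask.txt").astype(bool)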
|
libAtoms/matscipy
|
examples/pressure_coupling/equilibrate_pressure.py
|
Python
|
lgpl-2.1
| 3,869
|
[
"ASE",
"Matscipy"
] |
9db2c635ed3509905bffbf27d2e3fedbe44269b763a0185ee50ae43d8bd82bab
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import TypeFunctor, TypeMutator, TypeVisitor
from tvm.relay.ty import (
TypeVar,
IncompleteType,
TensorType,
FuncType,
TupleType,
TypeRelation,
RefType,
GlobalTypeVar,
TypeCall,
)
from tvm.relay.adt import TypeData
def check_visit(typ):
try:
ef = TypeFunctor()
ef.visit(typ)
assert False
except NotImplementedError:
pass
ev = TypeVisitor()
ev.visit(typ)
tvm.ir.assert_structural_equal(TypeMutator().visit(typ), typ, map_free_vars=True)
def test_type_var():
tv = TypeVar("a")
check_visit(tv)
def test_incomplete_type():
it = IncompleteType()
check_visit(it)
def test_tensor_type():
tt = TensorType([])
check_visit(tt)
def test_func_type():
tv = TypeVar("tv")
tt = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
ft = FuncType([tt], tt, type_params=[tv])
check_visit(ft)
def test_tuple_type():
tt = TupleType([TupleType([])])
check_visit(tt)
def test_type_relation():
func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
attrs = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
tp = TypeVar("tp")
tf = FuncType([], TupleType([]), [], [])
tt = TensorType([1, 2, 3], "float32")
tr = TypeRelation(func, [tp, tf, tt], 2, attrs)
check_visit(tr)
def test_ref_type():
rt = RefType(TupleType([]))
check_visit(rt)
def test_global_type_var():
gtv = GlobalTypeVar("gtv")
check_visit(gtv)
def test_type_call():
tc = TypeCall(GlobalTypeVar("tf"), [TupleType([])])
check_visit(tc)
def test_type_data():
td = TypeData(GlobalTypeVar("td"), [TypeVar("tv")], [])
check_visit(td)
if __name__ == "__main__":
test_type_var()
test_incomplete_type()
test_tensor_type()
test_func_type()
test_tuple_type()
test_type_relation()
test_ref_type()
test_global_type_var()
test_type_call()
test_type_data()
|
sxjscience/tvm
|
tests/python/relay/test_type_functor.py
|
Python
|
apache-2.0
| 2,826
|
[
"VisIt"
] |
e3992c81043f0114ba712d3f257f5b3b71e15f05d5bf03fcb5e6270d8a0379e6
|
#!/usr/bin/env python
##
## @file validateSBML.py
## @brief Validates one or more SBML files
## @author Akiya Jouraku (translated from libSBML C++ examples)
## @author Ben Bornstein
## @author Michael Hucka
##
## $Id$
## $HeadURL$
##
## This file is part of libSBML. Please visit http://sbml.org for more
## information about SBML, and the latest version of libSBML.
##
import sys
sys.path.append('installer/libsbml/lib/python/dist-packages/libsbml')
import os.path
import time
import libsbml
class validateSBML:
def __init__(self, ucheck):
self.reader = libsbml.SBMLReader()
self.ucheck = ucheck
self.numinvalid = 0
def validate(self, file):
if not os.path.exists(file):
print "[Error] %s : No such file." % (infile)
self.numinvalid += 1
return
start = time.time()
sbmlDoc = libsbml.readSBML(file)
stop = time.time()
timeRead = (stop - start)*1000
errors = sbmlDoc.getNumErrors()
seriousErrors = False
numReadErr = 0
numReadWarn = 0
errMsgRead = ""
if errors > 0:
for i in range(errors):
severity = sbmlDoc.getError(i).getSeverity()
if (severity == libsbml.LIBSBML_SEV_ERROR) or (severity == libsbml.LIBSBML_SEV_FATAL):
seriousErrors = True
numReadErr += 1
else:
numReadWarn += 1
oss = libsbml.ostringstream()
sbmlDoc.printErrors(oss)
errMsgRead = oss.str()
# If serious errors are encountered while reading an SBML document, it
# does not make sense to go on and do full consistency checking because
# the model may be nonsense in the first place.
numCCErr = 0
numCCWarn = 0
errMsgCC = ""
skipCC = False;
timeCC = 0.0
if seriousErrors:
skipCC = True;
errMsgRead += "Further consistency checking and validation aborted."
self.numinvalid += 1;
else:
sbmlDoc.setConsistencyChecks(libsbml.LIBSBML_CAT_UNITS_CONSISTENCY, self.ucheck)
start = time.time()
failures = sbmlDoc.checkConsistency()
stop = time.time()
timeCC = (stop - start)*1000
if failures > 0:
isinvalid = False;
for i in range(failures):
severity = sbmlDoc.getError(i).getSeverity()
if (severity == libsbml.LIBSBML_SEV_ERROR) or (severity == libsbml.LIBSBML_SEV_FATAL):
numCCErr += 1
isinvalid = True
else:
numCCWarn += 1
if isinvalid:
self.numinvalid += 1
oss = libsbml.ostringstream()
sbmlDoc.printErrors(oss)
errMsgCC = oss.str()
#
# print results
#
print " filename : %s" % (file)
print " file size (byte) : %d" % (os.path.getsize(file))
print " read time (ms) : %f" % (timeRead)
if not skipCC :
print " c-check time (ms) : %f" % (timeCC)
else:
print " c-check time (ms) : skipped"
print " validation error(s) : %d" % (numReadErr + numCCErr)
if not skipCC :
print " (consistency error(s)): %d" % (numCCErr)
else:
print " (consistency error(s)): skipped"
print " validation warning(s) : %d" % (numReadWarn + numCCWarn)
if not skipCC :
print " (consistency warning(s)): %d" % (numCCWarn)
else:
print " (consistency warning(s)): skipped"
if errMsgRead or errMsgCC:
print
print "===== validation error/warning messages =====\n"
if errMsgRead :
print errMsgRead
if errMsgCC :
print "*** consistency check ***\n"
print errMsgCC
def main (args):
"""usage: validateSBML.py [-u] inputfile1 [inputfile2 ...]
-u skips unit consistency check
"""
if len(args) < 2:
print main.__doc__
sys.exit(1)
elif (len(args) == 2) and (args[1] == "-u"):
print main.__doc__
sys.exit(1)
enableUnitCCheck = True
if args[1] == "-u":
enableUnitCCheck = False
validator = validateSBML(enableUnitCCheck)
fnum = 0
for i in range(1,len(args)):
if args[i] == "-u":
continue
print "---------------------------------------------------------------------------"
validator.validate(args[i])
fnum += 1
numinvalid = validator.numinvalid
print "---------------------------------------------------------------------------"
print "Validated %d files, %d valid files, %d invalid files" % (fnum, fnum - numinvalid, numinvalid)
if not enableUnitCCheck:
print "(Unit consistency checks skipped)"
if numinvalid > 0:
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
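#
# A minimal usage sketch (assuming the libsbml bindings appended to sys.path
# above are importable); the file names below are illustrative only:
#
#   python validateSBML.py model1.xml model2.xml   # full validation
#   python validateSBML.py -u model1.xml           # skip unit consistency checks
#
# Each file is reported with its read time, consistency-check time and the
# number of validation errors/warnings; the script exits with status 1 if any
# file was invalid.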
|
ejfresch/qdc
|
validateSBML.py
|
Python
|
gpl-3.0
| 4,671
|
[
"VisIt"
] |
4534de227c8256ac7551d98eafc63b29516c79e6e05e06c28f8aa28f8cfd583f
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006-2007 Lukáš Lalinský
# Copyright (C) 2007 Javier Kohen
# Copyright (C) 2008 Philipp Wolfer
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import namedtuple
from functools import reduce
from inspect import getfullargspec
import operator
import re
import unicodedata
from picard import config
from picard.metadata import (
MULTI_VALUED_JOINER,
Metadata,
)
from picard.plugin import ExtensionPoint
from picard.util import uniqify
class ScriptError(Exception):
pass
class ScriptParseError(ScriptError):
pass
class ScriptEndOfFile(ScriptParseError):
pass
class ScriptSyntaxError(ScriptParseError):
pass
class ScriptUnknownFunction(ScriptError):
pass
class ScriptText(str):
def eval(self, state):
return self
def normalize_tagname(name):
if name.startswith('_'):
return "~" + name[1:]
return name
class ScriptVariable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return '<ScriptVariable %%%s%%>' % self.name
def eval(self, state):
return state.context.get(normalize_tagname(self.name), "")
FunctionRegistryItem = namedtuple("FunctionRegistryItem",
["function", "eval_args",
"argcount"])
Bound = namedtuple("Bound", ["lower", "upper"])
class ScriptFunction(object):
def __init__(self, name, args, parser):
try:
argnum_bound = parser.functions[name].argcount
argcount = len(args)
if argnum_bound and not (argnum_bound.lower <= argcount
and (argnum_bound.upper is None
or len(args) <= argnum_bound.upper)):
raise ScriptError(
"Wrong number of arguments for $%s: Expected %s, got %i at position %i, line %i"
% (name,
str(argnum_bound.lower)
if argnum_bound.upper is None
else "%i - %i" % (argnum_bound.lower, argnum_bound.upper),
argcount,
parser._x,
parser._y))
except KeyError:
raise ScriptUnknownFunction("Unknown function '%s'" % name)
self.name = name
self.args = args
def __repr__(self):
return "<ScriptFunction $%s(%r)>" % (self.name, self.args)
def eval(self, parser):
function, eval_args, num_args = parser.functions[self.name]
if eval_args:
args = [arg.eval(parser) for arg in self.args]
else:
args = self.args
return function(parser, *args)
class ScriptExpression(list):
def eval(self, state):
result = []
for item in self:
result.append(item.eval(state))
return "".join(result)
def isidentif(ch):
return ch.isalnum() or ch == '_'
class ScriptParser(object):
r"""Tagger script parser.
Grammar:
text ::= [^$%] | '\$' | '\%' | '\(' | '\)' | '\,'
argtext ::= [^$%(),] | '\$' | '\%' | '\(' | '\)' | '\,'
identifier ::= [a-zA-Z0-9_]
variable ::= '%' identifier '%'
function ::= '$' identifier '(' (argument (',' argument)*)? ')'
expression ::= (variable | function | text)*
argument ::= (variable | function | argtext)*
"""
_function_registry = ExtensionPoint()
_cache = {}
def __raise_eof(self):
raise ScriptEndOfFile("Unexpected end of script at position %d, line %d" % (self._x, self._y))
def __raise_char(self, ch):
#line = self._text[self._line:].split("\n", 1)[0]
#cursor = " " * (self._pos - self._line - 1) + "^"
#raise ScriptSyntaxError("Unexpected character '%s' at position %d, line %d\n%s\n%s" % (ch, self._x, self._y, line, cursor))
raise ScriptSyntaxError("Unexpected character '%s' at position %d, line %d" % (ch, self._x, self._y))
def read(self):
try:
ch = self._text[self._pos]
except IndexError:
return None
else:
self._pos += 1
self._px = self._x
self._py = self._y
if ch == '\n':
self._line = self._pos
self._x = 1
self._y += 1
else:
self._x += 1
return ch
def unread(self):
self._pos -= 1
self._x = self._px
self._y = self._py
def parse_arguments(self):
results = []
while True:
result, ch = self.parse_expression(False)
results.append(result)
if ch == ')':
# Only an empty expression as first argument
# is the same as no argument given.
if len(results) == 1 and results[0] == []:
return []
return results
def parse_function(self):
start = self._pos
while True:
ch = self.read()
if ch == '(':
name = self._text[start:self._pos-1]
if name not in self.functions:
raise ScriptUnknownFunction("Unknown function '%s'" % name)
return ScriptFunction(name, self.parse_arguments(), self)
elif ch is None:
self.__raise_eof()
elif not isidentif(ch):
self.__raise_char(ch)
def parse_variable(self):
begin = self._pos
while True:
ch = self.read()
if ch == '%':
return ScriptVariable(self._text[begin:self._pos-1])
elif ch is None:
self.__raise_eof()
elif not isidentif(ch) and ch != ':':
self.__raise_char(ch)
def parse_text(self, top):
text = []
while True:
ch = self.read()
if ch == "\\":
ch = self.read()
if ch == 'n':
text.append('\n')
elif ch == 't':
text.append('\t')
elif ch not in "$%(),\\":
self.__raise_char(ch)
else:
text.append(ch)
elif ch is None:
break
elif not top and ch == '(':
self.__raise_char(ch)
elif ch in '$%' or (not top and ch in ',)'):
self.unread()
break
else:
text.append(ch)
return ScriptText("".join(text))
def parse_expression(self, top):
tokens = ScriptExpression()
while True:
ch = self.read()
if ch is None:
if top:
break
else:
self.__raise_eof()
elif not top and ch in ',)':
break
elif ch == '$':
tokens.append(self.parse_function())
elif ch == '%':
tokens.append(self.parse_variable())
else:
self.unread()
tokens.append(self.parse_text(top))
return (tokens, ch)
def load_functions(self):
self.functions = {}
for name, item in ScriptParser._function_registry:
self.functions[name] = item
def parse(self, script, functions=False):
"""Parse the script."""
self._text = script
self._pos = 0
self._px = self._x = 1
self._py = self._y = 1
self._line = 0
if not functions:
self.load_functions()
return self.parse_expression(True)[0]
def eval(self, script, context=None, file=None):
"""Parse and evaluate the script."""
self.context = context if context is not None else Metadata()
self.file = file
self.load_functions()
key = hash(script)
if key not in ScriptParser._cache:
ScriptParser._cache[key] = self.parse(script, True)
return ScriptParser._cache[key].eval(self)
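# A minimal usage sketch (the metadata values are illustrative only):
#
#   parser = ScriptParser()
#   context = Metadata()
#   context['artist'] = 'Example Artist'
#   parser.eval("$upper(%artist%)", context)   # -> "EXAMPLE ARTIST"
#
# eval() keys the parse cache on hash(script), so re-evaluating the same
# script text skips parsing.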
def enabled_tagger_scripts_texts():
"""Returns an iterator over the enabled tagger scripts.
For each script, you'll get a tuple consisting of the script name and text"""
if not config.setting["enable_tagger_scripts"]:
return []
return [(s_name, s_text) for _s_pos, s_name, s_enabled, s_text in config.setting["list_of_scripts"] if s_enabled and s_text]
def register_script_function(function, name=None, eval_args=True,
check_argcount=True):
"""Registers a script function. If ``name`` is ``None``,
``function.__name__`` will be used.
If ``eval_args`` is ``False``, the arguments will not be evaluated before being
passed to ``function``.
If ``check_argcount`` is ``False`` the number of arguments passed to the
function will not be verified."""
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations = getfullargspec(function)
required_kwonlyargs = len(kwonlyargs)
if kwonlydefaults is not None:
required_kwonlyargs -= len(kwonlydefaults.keys())
if required_kwonlyargs:
raise TypeError("Functions with required keyword-only parameters are not supported")
args = len(args) - 1 # -1 for the parser
varargs = varargs is not None
defaults = len(defaults) if defaults else 0
argcount = Bound(args - defaults, args if not varargs else None)
if name is None:
name = function.__name__
ScriptParser._function_registry.register(function.__module__,
(name, FunctionRegistryItem(
function, eval_args,
argcount if argcount and check_argcount else False)
)
)
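# A minimal registration sketch (func_reverse is a hypothetical plugin
# function, not one of the built-ins registered below):
#
#   def func_reverse(parser, text):
#       """Returns ``text`` reversed."""
#       return text[::-1]
#
#   register_script_function(func_reverse, "reverse")
#
# With eval_args left at its default (True) the argument arrives already
# evaluated, and scripts call the function by its registered name,
# e.g. $reverse(%title%).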
def _compute_int(operation, *args):
return str(reduce(operation, map(int, args)))
def _compute_logic(operation, *args):
return operation(args)
def _get_multi_values(parser, multi, separator):
if isinstance(separator, ScriptExpression):
separator = separator.eval(parser)
if separator == MULTI_VALUED_JOINER:
# Convert ScriptExpression containing only a single variable into variable
if (isinstance(multi, ScriptExpression) and
len(multi) == 1 and
isinstance(multi[0], ScriptVariable)):
multi = multi[0]
# If a variable, return multi-values
if isinstance(multi, ScriptVariable):
return parser.context.getall(normalize_tagname(multi.name))
# Fall-back to converting to a string and splitting if haystack is an expression
# or user has overridden the separator character.
multi = multi.eval(parser)
return multi.split(separator) if separator else [multi]
def func_if(parser, _if, _then, _else=None):
"""If ``if`` is not empty, it returns ``then``, otherwise it returns ``else``."""
if _if.eval(parser):
return _then.eval(parser)
elif _else:
return _else.eval(parser)
return ''
def func_if2(parser, *args):
"""Returns first non empty argument."""
for arg in args:
arg = arg.eval(parser)
if arg:
return arg
return ''
def func_noop(parser, *args):
"""Does nothing :)"""
return ''
def func_left(parser, text, length):
"""Returns first ``num`` characters from ``text``."""
try:
return text[:int(length)]
except ValueError:
return ""
def func_right(parser, text, length):
"""Returns last ``num`` characters from ``text``."""
try:
return text[-int(length):]
except ValueError:
return ""
def func_lower(parser, text):
"""Returns ``text`` in lower case."""
return text.lower()
def func_upper(parser, text):
"""Returns ``text`` in upper case."""
return text.upper()
def func_pad(parser, text, length, char):
try:
return char * (int(length) - len(text)) + text
except ValueError:
return ""
def func_strip(parser, text):
return re.sub(r"\s+", " ", text).strip()
def func_replace(parser, text, old, new):
return text.replace(old, new)
def func_in(parser, text, needle):
if needle in text:
return "1"
else:
return ""
def func_inmulti(parser, haystack, needle, separator=MULTI_VALUED_JOINER):
"""Searches for ``needle`` in ``haystack``, supporting a list variable for
``haystack``. If a string is used instead, then a ``separator`` can be
used to split it. In both cases, it returns true if the resulting list
contains exactly ``needle`` as a member."""
needle = needle.eval(parser)
return func_in(parser, _get_multi_values(parser, haystack, separator), needle)
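# For example (illustrative values): with %artist% holding the multi-value
# ["Foo", "Bar"], $inmulti(%artist%,Bar) returns "1" because "Bar" is an exact
# member of the list, whereas plain $in would also match substrings of the
# joined string "Foo; Bar".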
def func_rreplace(parser, text, old, new):
try:
return re.sub(old, new, text)
except re.error:
return text
def func_rsearch(parser, text, pattern):
try:
match = re.search(pattern, text)
except re.error:
return ""
if match:
try:
return match.group(1)
except IndexError:
return match.group(0)
return ""
def func_num(parser, text, length):
try:
format_ = "%%0%dd" % min(int(length), 20)
except ValueError:
return ""
try:
value = int(text)
except ValueError:
value = 0
return format_ % value
def func_unset(parser, name):
"""Unsets the variable ``name``."""
name = normalize_tagname(name)
# Allow wild-card unset for certain keys
if name in ('performer:*', 'comment:*', 'lyrics:*'):
name = name[:-1]
for key in list(parser.context.keys()):
if key.startswith(name):
del parser.context[key]
return ""
try:
del parser.context[name]
except KeyError:
pass
return ""
def func_delete(parser, name):
"""
Deletes the variable ``name``.
This will unset the tag with the given name and also mark the tag for
deletion on save.
"""
parser.context.delete(normalize_tagname(name))
return ""
def func_set(parser, name, value):
"""Sets the variable ``name`` to ``value``."""
if value:
parser.context[normalize_tagname(name)] = value
else:
func_unset(parser, name)
return ""
def func_setmulti(parser, name, value, separator=MULTI_VALUED_JOINER):
"""Sets the variable ``name`` to ``value`` as a list; splitting by the passed string, or "; " otherwise."""
return func_set(parser, name, value.split(separator) if value and separator else value)
def func_get(parser, name):
"""Returns the variable ``name`` (equivalent to ``%name%``)."""
return parser.context.get(normalize_tagname(name), "")
def func_copy(parser, new, old):
"""Copies content of variable ``old`` to variable ``new``."""
new = normalize_tagname(new)
old = normalize_tagname(old)
parser.context[new] = parser.context.getall(old)[:]
return ""
def func_copymerge(parser, new, old):
"""Copies content of variable ``old`` and appends it into variable ``new``, removing duplicates. This is normally
used to merge a multi-valued variable into another, existing multi-valued variable."""
new = normalize_tagname(new)
old = normalize_tagname(old)
newvals = parser.context.getall(new)
oldvals = parser.context.getall(old)
parser.context[new] = uniqify(newvals + oldvals)
return ""
def func_trim(parser, text, char=None):
"""Trims all leading and trailing whitespaces from ``text``. The optional
second parameter specifies the character to trim."""
if char:
return text.strip(char)
else:
return text.strip()
def func_add(parser, x, y, *args):
"""Adds ``y`` to ``x``.
Can be used with an arbitrary number of arguments.
Eg: $add(x, y, z) = ((x + y) + z)
"""
try:
return _compute_int(operator.add, x, y, *args)
except ValueError:
return ""
def func_sub(parser, x, y, *args):
"""Subtracts ``y`` from ``x``.
Can be used with an arbitrary number of arguments.
Eg: $sub(x, y, z) = ((x - y) - z)
"""
try:
return _compute_int(operator.sub, x, y, *args)
except ValueError:
return ""
def func_div(parser, x, y, *args):
"""Divides ``x`` by ``y``.
Can be used with an arbitrary number of arguments.
Eg: $div(x, y, z) = ((x / y) / z)
"""
try:
return _compute_int(operator.floordiv, x, y, *args)
except ValueError:
return ""
def func_mod(parser, x, y, *args):
"""Returns the remainder of ``x`` divided by ``y``.
Can be used with an arbitrary number of arguments.
Eg: $mod(x, y, z) = ((x % y) % z)
"""
try:
return _compute_int(operator.mod, x, y, *args)
except ValueError:
return ""
def func_mul(parser, x, y, *args):
"""Multiplies ``x`` by ``y``.
Can be used with an arbitrary number of arguments.
Eg: $mul(x, y, z) = ((x * y) * z)
"""
try:
return _compute_int(operator.mul, x, y, *args)
except ValueError:
return ""
def func_or(parser, x, y, *args):
"""Returns true, if either ``x`` or ``y`` not empty.
Can be used with an arbitrary number of arguments. The result is
true if ANY of the arguments is not empty.
"""
if _compute_logic(any, x, y, *args):
return "1"
else:
return ""
def func_and(parser, x, y, *args):
"""Returns true, if both ``x`` and ``y`` are not empty.
Can be used with an arbitrary number of arguments. The result is
true if ALL of the arguments are not empty.
"""
if _compute_logic(all, x, y, *args):
return "1"
else:
return ""
def func_not(parser, x):
"""Returns true, if ``x`` is empty."""
if not x:
return "1"
else:
return ""
def func_eq(parser, x, y):
"""Returns true, if ``x`` equals ``y``."""
if x == y:
return "1"
else:
return ""
def func_ne(parser, x, y):
"""Returns true, if ``x`` not equals ``y``."""
if x != y:
return "1"
else:
return ""
def func_lt(parser, x, y):
"""Returns true, if ``x`` is lower than ``y``."""
try:
if int(x) < int(y):
return "1"
except ValueError:
pass
return ""
def func_lte(parser, x, y):
"""Returns true, if ``x`` is lower than or equals ``y``."""
try:
if int(x) <= int(y):
return "1"
except ValueError:
pass
return ""
def func_gt(parser, x, y):
"""Returns true, if ``x`` is greater than ``y``."""
try:
if int(x) > int(y):
return "1"
except ValueError:
pass
return ""
def func_gte(parser, x, y):
"""Returns true, if ``x`` is greater than or equals ``y``."""
try:
if int(x) >= int(y):
return "1"
except ValueError:
pass
return ""
def func_len(parser, text=""):
return str(len(text))
def func_lenmulti(parser, multi, separator=MULTI_VALUED_JOINER):
return func_len(parser, _get_multi_values(parser, multi, separator))
def func_performer(parser, pattern="", join=", "):
values = []
for name, value in parser.context.items():
if name.startswith("performer:") and pattern in name:
values.append(value)
return join.join(values)
def func_matchedtracks(parser, *args):
# only works in file naming scripts, always returns zero in tagging scripts
if parser.file and parser.file.parent:
return str(parser.file.parent.album.get_num_matched_tracks())
return "0"
def func_is_complete(parser):
if (parser.file and parser.file.parent
and parser.file.parent.album.is_complete()):
return "1"
return "0"
def func_firstalphachar(parser, text="", nonalpha="#"):
if len(text) == 0:
return nonalpha
firstchar = text[0]
if firstchar.isalpha():
return firstchar.upper()
else:
return nonalpha
def func_initials(parser, text=""):
return "".join(a[:1] for a in text.split(" ") if a[:1].isalpha())
def func_firstwords(parser, text, length):
try:
length = int(length)
except ValueError:
length = 0
if len(text) <= length:
return text
else:
if text[length] == ' ':
return text[:length]
return text[:length].rsplit(' ', 1)[0]
def func_startswith(parser, text, prefix):
if text.startswith(prefix):
return "1"
return "0"
def func_endswith(parser, text, suffix):
if text.endswith(suffix):
return "1"
return "0"
def func_truncate(parser, text, length):
try:
length = int(length)
except ValueError:
length = None
return text[:length].rstrip()
def func_swapprefix(parser, text, *prefixes):
"""
Moves the specified prefixes to the end of text.
If no prefix is specified 'A' and 'The' are taken as default.
"""
# Inspired by the swapprefix plugin by Philipp Wolfer.
text, prefix = _delete_prefix(parser, text, *prefixes)
if prefix != '':
return text + ', ' + prefix
return text
def func_delprefix(parser, text, *prefixes):
"""
Deletes the specified prefixes.
If no prefix is specified 'A' and 'The' are taken as default.
"""
# Inspired by the swapprefix plugin by Philipp Wolfer.
return _delete_prefix(parser, text, *prefixes)[0]
def _delete_prefix(parser, text, *prefixes):
"""
Worker function to delete the specified prefixes.
Returns the remaining string and the deleted part separately.
If no prefix is specified, 'A' and 'The' are used.
"""
# Inspired by the swapprefix plugin by Philipp Wolfer.
if not prefixes:
prefixes = ('A', 'The')
text = text.strip()
rx = '(' + r'\s+)|('.join(map(re.escape, prefixes)) + r'\s+)'
match = re.match(rx, text)
if match:
pref = match.group()
return text[len(pref):], pref.strip()
return text, ''
def func_eq_any(parser, x, *args):
"""
Return True if one string matches any of one or more other strings.
$eq_any(a,b,c ...) is functionally equivalent to $or($eq(a,b),$eq(a,c) ...)
Example: $if($eq_any(%artist%,foo,bar,baz),$set(engineer,test))
"""
# Inspired by the eq2 plugin by Brian Schweitzer.
return '1' if x in args else ''
def func_ne_all(parser, x, *args):
"""
Return True if one string matches none of one or more other strings.
$ne_all(a,b,c ...) is functionally equivalent to $and($ne(a,b),$ne(a,c) ...)
Example: $if($ne_all(%artist%,foo,bar,baz),$set(engineer,test))
"""
# Inspired by the ne2 plugin by Brian Schweitzer.
return '1' if x not in args else ''
def func_eq_all(parser, x, *args):
"""
Return True if all strings are equal.
$eq_all(a,b,c ...) is functionally equivalent to $and($eq(a,b),$eq(a,c) ...)
Example: $if($eq_all(%albumartist%,%artist%,Justin Bieber),$set(engineer,Meat Loaf))
"""
for i in args:
if x != i:
return ''
return '1'
def func_ne_any(parser, x, *args):
"""
Return True if one string does not match at least one of one or more other strings.
$ne_any(a,b,c ...) is functionally equivalent to $or($ne(a,b),$ne(a,c) ...)
Example: $if($ne_any(%albumartist%,%trackartist%,%composer%),$set(lyricist,%composer%))
"""
return func_not(parser, func_eq_all(parser, x, *args))
def func_title(parser, text):
# GPL 2.0 licensed code by Javier Kohen, Sambhav Kothari
# from https://github.com/metabrainz/picard-plugins/blob/2.0/plugins/titlecase/titlecase.py
"""
Title-case a text - capitalizes first letter of every word
like: from "Lost in the Supermarket" to "Lost In The Supermarket"
Example: $set(album,$title(%album%))
"""
if not text:
return ""
capitalized = text[0].capitalize()
capital = False
for i in range(1, len(text)):
t = text[i]
if t in "’'" and text[i-1].isalpha():
capital = False
elif iswbound(t):
capital = True
elif capital and t.isalpha():
capital = False
t = t.capitalize()
else:
capital = False
capitalized += t
return capitalized
def iswbound(char):
# GPL 2.0 licensed code by Javier Kohen, Sambhav Kothari
# from https://github.com/metabrainz/picard-plugins/blob/2.0/plugins/titlecase/titlecase.py
""" Checks whether the given character is a word boundary """
category = unicodedata.category(char)
return category == "Zs" or category == "Sk" or category[0] == "P"
register_script_function(func_if, "if", eval_args=False)
register_script_function(func_if2, "if2", eval_args=False)
register_script_function(func_noop, "noop", eval_args=False)
register_script_function(func_left, "left")
register_script_function(func_right, "right")
register_script_function(func_lower, "lower")
register_script_function(func_upper, "upper")
register_script_function(func_pad, "pad")
register_script_function(func_strip, "strip")
register_script_function(func_replace, "replace")
register_script_function(func_rreplace, "rreplace")
register_script_function(func_rsearch, "rsearch")
register_script_function(func_num, "num")
register_script_function(func_unset, "unset")
register_script_function(func_delete, "delete")
register_script_function(func_set, "set")
register_script_function(func_setmulti, "setmulti")
register_script_function(func_get, "get")
register_script_function(func_trim, "trim")
register_script_function(func_add, "add")
register_script_function(func_sub, "sub")
register_script_function(func_div, "div")
register_script_function(func_mod, "mod")
register_script_function(func_mul, "mul")
register_script_function(func_or, "or")
register_script_function(func_and, "and")
register_script_function(func_not, "not")
register_script_function(func_eq, "eq")
register_script_function(func_ne, "ne")
register_script_function(func_lt, "lt")
register_script_function(func_lte, "lte")
register_script_function(func_gt, "gt")
register_script_function(func_gte, "gte")
register_script_function(func_in, "in")
register_script_function(func_inmulti, "inmulti", eval_args=False)
register_script_function(func_copy, "copy")
register_script_function(func_copymerge, "copymerge")
register_script_function(func_len, "len")
register_script_function(func_lenmulti, "lenmulti", eval_args=False)
register_script_function(func_performer, "performer")
register_script_function(func_matchedtracks, "matchedtracks", eval_args=False)
register_script_function(func_is_complete, "is_complete")
register_script_function(func_firstalphachar, "firstalphachar")
register_script_function(func_initials, "initials")
register_script_function(func_firstwords, "firstwords")
register_script_function(func_startswith, "startswith")
register_script_function(func_endswith, "endswith")
register_script_function(func_truncate, "truncate")
register_script_function(func_swapprefix, "swapprefix", check_argcount=False)
register_script_function(func_delprefix, "delprefix", check_argcount=False)
register_script_function(func_eq_any, "eq_any", check_argcount=False)
register_script_function(func_ne_all, "ne_all", check_argcount=False)
register_script_function(func_eq_all, "eq_all", check_argcount=False)
register_script_function(func_ne_any, "ne_any", check_argcount=False)
register_script_function(func_title, "title")
|
mineo/picard
|
picard/script.py
|
Python
|
gpl-2.0
| 28,202
|
[
"Brian"
] |
77202aaa411a014c0a879dd06a5a76bf0626db2ccc2b634dc85ebf1ca5ff61ff
|
import os
import copy
import math
import functools
import numpy as np
from ddapp import transformUtils
from ddapp.asynctaskqueue import AsyncTaskQueue
from ddapp import objectmodel as om
from ddapp import visualization as vis
from ddapp import robotstate
from ddapp import segmentation
from ddapp import planplayback
from ddapp.pointpicker import PointPicker
from ddapp import vtkAll as vtk
from ddapp.simpletimer import SimpleTimer
from ddapp import affordanceupdater
from ddapp.debugVis import DebugData
from ddapp import affordanceitems
from ddapp import ikplanner
import ioUtils
class MappingDemo(object):
def __init__(self, robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepPlanner, atlasDriver, lhandDriver, rhandDriver, multisenseDriver, view, sensorJointController, planPlaybackFunction):
self.planPlaybackFunction = planPlaybackFunction
self.robotStateModel = robotStateModel
self.playbackRobotModel = playbackRobotModel
self.ikPlanner = ikPlanner
self.manipPlanner = manipPlanner
self.footstepPlanner = footstepPlanner
self.atlasDriver = atlasDriver
self.lhandDriver = lhandDriver
self.rhandDriver = rhandDriver
self.multisenseDriver = multisenseDriver
self.sensorJointController = sensorJointController
self.view = view
# live operation flags:
self.visOnly = False
self.planFromCurrentRobotState = True
useDevelopment = False
if (useDevelopment):
self.visOnly = True
self.planFromCurrentRobotState = False
self.optionalUserPromptEnabled = True
self.requiredUserPromptEnabled = True
self.plans = []
# top level switch between BDI or IHMC (locked base) and MIT (moving base and back)
self.lockBack = True
self.lockBase = True
self.constraintSet = []
self.targetSweepType = 'orientation' # gaze or orientation - but I've had problems with the gaze constraint
self.coneThresholdDegrees = 5.0 # 0 is ok for reaching but often too tight for a trajectory
self.boxLength = 0.3
# Switch between simulation/visualisation and real robot operation
def setMode(self, mode='visualization'):
'''
Switches between visualization and real robot operation.
mode='visualization'
mode='robot'
'''
if (mode == 'visualization'):
print "Setting mode to VISUALIZATION"
self.useDevelopment = True
self.visOnly = True
self.planFromCurrentRobotState = False
else:
print "Setting mode to ROBOT OPERATION"
self.useDevelopment = False
self.visOnly = False
self.planFromCurrentRobotState = True
def addPlan(self, plan):
self.plans.append(plan)
def planPostureFromDatabase(self, groupName, postureName, side='left'):
startPose = self.getPlanningStartPose()
endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, groupName, postureName, side=side)
newPlan = self.ikPlanner.computePostureGoal(startPose, endPose)
self.addPlan(newPlan)
######### Target Focused Functions ##################################################################
def spawnTargetAffordance(self):
for obj in om.getObjects():
if obj.getProperty('Name') == 'target':
om.removeFromObjectModel(obj)
targetFrame = transformUtils.frameFromPositionAndRPY([0.6,0.2,0.6],[180,0,90])
folder = om.getOrCreateContainer('affordances')
z = DebugData()
z.addLine(np.array([0,0,0]), np.array([-self.boxLength,0,0]), radius=0.02) # main bar
z.addLine(np.array([-self.boxLength,0,0]), np.array([-self.boxLength,0,self.boxLength]), radius=0.02) # main bar
z.addLine(np.array([-self.boxLength,0,self.boxLength]), np.array([0,0,self.boxLength]), radius=0.02) # main bar
z.addLine(np.array([0,0,self.boxLength]), np.array([0,0,0]), radius=0.02) # main bar
targetMesh = z.getPolyData()
self.targetAffordance = vis.showPolyData(targetMesh, 'target', color=[0.0, 1.0, 0.0], cls=affordanceitems.FrameAffordanceItem, parent=folder, alpha=0.3)
self.targetAffordance.actor.SetUserTransform(targetFrame)
self.targetFrame = vis.showFrame(targetFrame, 'target frame', parent=self.targetAffordance, visible=False, scale=0.2)
self.targetFrame = self.targetFrame.transform
params = dict(length=self.boxLength, otdf_type='target', friendly_name='target')
self.targetAffordance.setAffordanceParams(params)
self.targetAffordance.updateParamsFromActorTransform()
def drawTargetPath(self):
path = DebugData()
for i in range(1,len(self.targetPath)):
p0 = self.targetPath[i-1].GetPosition()
p1 = self.targetPath[i].GetPosition()
path.addLine ( np.array( p0 ) , np.array( p1 ), radius= 0.005)
pathMesh = path.getPolyData()
self.targetPathMesh = vis.showPolyData(pathMesh, 'target frame desired path', color=[0.0, 0.3, 1.0], parent=self.targetAffordance, alpha=0.6)
self.targetPathMesh.actor.SetUserTransform(self.targetFrame)
def resetTargetPath(self):
for obj in om.getObjects():
if obj.getProperty('Name') == 'target frame desired':
om.removeFromObjectModel(obj)
for obj in om.getObjects():
if obj.getProperty('Name') == 'target frame desired path':
om.removeFromObjectModel(obj)
def computeNextTargetFrame(self):
assert self.targetAffordance
t = transformUtils.frameFromPositionAndRPY(self.nextPosition, [0, 0, 0])
self.faceTransformLocal = transformUtils.copyFrame(t) # copy required
t.Concatenate(self.targetFrame)
self.faceFrameDesired = vis.showFrame(t, 'target frame desired', parent=self.targetAffordance, visible=False, scale=0.2)
######### Higher Level Planning Functions ##################################################################
def computeNextRoomFrame(self):
assert self.targetAffordance
t = transformUtils.frameFromPositionAndRPY(self.nextPosition, [0, 0, 0])
self.faceTransformLocal = transformUtils.copyFrame(t) # copy required
t.Concatenate(self.targetFrame)
self.faceFrameDesired = vis.showFrame(t, 'target frame desired', parent=self.targetAffordance, visible=False, scale=0.2)
def planRoomReach(self):
# A single one shot gaze-constrained reach: place xyz at goal and align y-axis of hand with x-axis of goal
self.initConstraintSet()
self.addConstraintForTargetFrame(self.startFrame, 1)
self.planTrajectory()
def getRoomSweepFrames(self, rotateHandFrame=False):
topFrame = transformUtils.frameFromPositionAndRPY([0.65,0.0,0.8],[160,0,90])
yawFrame = transformUtils.frameFromPositionAndRPY([0,0.0,0],[0,0,self.currentYawDegrees])
if rotateHandFrame:
fixHandFrame = transformUtils.frameFromPositionAndRPY([0,0.0,0],[0,-90,0])
topFrame.PreMultiply()
topFrame.Concatenate( fixHandFrame )
topFrame.PostMultiply()
topFrame.Concatenate( yawFrame )
bottomFrame = transformUtils.frameFromPositionAndRPY([0.6,0.0,0.4],[210,0,90])
yawFrame = transformUtils.frameFromPositionAndRPY([0,0.0,0],[0,0,self.currentYawDegrees])
if rotateHandFrame:
bottomFrame.PreMultiply()
bottomFrame.Concatenate( fixHandFrame )
bottomFrame.PostMultiply()
bottomFrame.Concatenate( yawFrame )
if (self.fromTop):
self.startFrame = vis.showFrame(topFrame, 'frame start', visible=False, scale=0.1,parent=self.mapFolder)
self.endFrame = vis.showFrame(bottomFrame, 'frame end', visible=False, scale=0.1,parent=self.mapFolder)
else:
self.startFrame = vis.showFrame(bottomFrame, 'frame start', visible=False, scale=0.1,parent=self.mapFolder)
self.endFrame = vis.showFrame(topFrame, 'frame end', visible=False, scale=0.1,parent=self.mapFolder)
def planRoomSweep(self):
self.initConstraintSet()
faceFrameDesired = transformUtils.frameInterpolate(self.startFrame.transform , self.endFrame.transform, 0)
vis.showFrame(faceFrameDesired, 'frame 0', visible=True, scale=0.1,parent=self.mapFolder)
self.addConstraintForTargetFrame(faceFrameDesired, 0)
faceFrameDesired = transformUtils.frameInterpolate(self.startFrame.transform , self.endFrame.transform, 1.0/3.0)
vis.showFrame(faceFrameDesired, 'frame 1', visible=True, scale=0.1,parent=self.mapFolder)
self.addConstraintForTargetFrame(faceFrameDesired, 1)
faceFrameDesired = transformUtils.frameInterpolate(self.startFrame.transform , self.endFrame.transform, 2.0/3.0)
vis.showFrame(faceFrameDesired, 'frame 2', visible=True, scale=0.1,parent=self.mapFolder)
self.addConstraintForTargetFrame(faceFrameDesired, 2)
faceFrameDesired = transformUtils.frameInterpolate(self.startFrame.transform , self.endFrame.transform, 3.0/3.0)
vis.showFrame(faceFrameDesired, 'frame 3', visible=True, scale=0.1,parent=self.mapFolder)
self.addConstraintForTargetFrame(faceFrameDesired, 3)
#self.ikPlanner.ikServer.maxDegreesPerSecond = self.speedLow
self.planTrajectory()
#self.ikPlanner.ikServer.maxDegreesPerSecond = self.speedHigh
def moveRoomSweepOnwards(self):
self.currentYawDegrees = self.currentYawDegrees - 20
self.fromTop = not self.fromTop
def planTargetReach(self):
# A single one shot gaze-constrained reach: place xyz at goal and align y-axis of hand with x-axis of goal
worldToTargetFrame = vis.updateFrame(self.targetFrame, 'gaze goal', visible=False, scale=0.2, parent=om.getOrCreateContainer('affordances'))
self.initConstraintSet()
self.addConstraintForTargetFrame(worldToTargetFrame, 1)
self.planTrajectory()
######### Lower Level Planning Functions ##################################################################
def planTrajectory(self):
self.ikPlanner.ikServer.usePointwise = False
plan = self.constraintSet.runIkTraj()
self.addPlan(plan)
def initConstraintSet(self):
# create constraint set
startPose = self.getPlanningStartPose()
startPoseName = 'gaze_plan_start'
endPoseName = 'gaze_plan_end'
self.ikPlanner.addPose(startPose, startPoseName)
self.ikPlanner.addPose(startPose, endPoseName)
self.constraintSet = ikplanner.ConstraintSet(self.ikPlanner, [], startPoseName, endPoseName)
self.constraintSet.endPose = startPose
# add body constraints
bodyConstraints = self.ikPlanner.createMovingBodyConstraints(startPoseName, lockBase=self.lockBase, lockBack=self.lockBack, lockLeftArm=self.graspingHand=='right', lockRightArm=self.graspingHand=='left')
self.constraintSet.constraints.extend(bodyConstraints)
def addConstraintForTargetFrame(self,goalFrame, t):
if (self.targetSweepType == 'orientation'):
self.appendPositionOrientationConstraintForTargetFrame(goalFrame, t)
elif (self.targetSweepType == 'gaze'):
# align the palmGazeAxis axis (on the hand) with the vector 'targetAxis' from worldToTargetFrame?
palmGazeAxis = self.ikPlanner.getPalmToHandLink(self.graspingHand).TransformVector([0,1,0])
self.appendPositionGazeConstraintForTargetFrame(goalFrame, t, targetAxis=[0.0, 0.0, 1.0], bodyAxis=palmGazeAxis)
def appendPositionGazeConstraintForTargetFrame(self, goalFrame, t, targetAxis=[-1.0, 0.0, 0.0], bodyAxis=[-1.0, 0.0, 0.0]):
gazeConstraint = self.ikPlanner.createGazeGraspConstraint(self.graspingHand, goalFrame, self.graspToHandLinkFrame, self.coneThresholdDegrees , targetAxis, bodyAxis)
gazeConstraint.tspan = [t, t]
self.constraintSet.constraints.insert(0, gazeConstraint)
positionConstraint, _ = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand, goalFrame, self.graspToHandLinkFrame)
positionConstraint.tspan = [t, t]
self.constraintSet.constraints.append(positionConstraint)
def appendPositionOrientationConstraintForTargetFrame(self, goalFrame, t):
positionConstraint, orientationConstraint = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand, goalFrame, self.graspToHandLinkFrame)
positionConstraint.tspan = [t, t]
orientationConstraint.tspan = [t, t]
self.constraintSet.constraints.append(positionConstraint)
self.constraintSet.constraints.append(orientationConstraint)
### End Planning Functions ####################################################################
########## Glue Functions #####################################################################
def printAsync(self, s):
yield
print s
def optionalUserPrompt(self, message):
if not self.optionalUserPromptEnabled:
return
yield
result = raw_input(message)
if result != 'y':
raise Exception('user abort.')
def requiredUserPrompt(self, message):
if not self.requiredUserPromptEnabled:
return
yield
result = raw_input(message)
if result != 'y':
raise Exception('user abort.')
def delay(self, delayTimeInSeconds):
yield
t = SimpleTimer()
while t.elapsed() < delayTimeInSeconds:
yield
def getEstimatedRobotStatePose(self):
return self.sensorJointController.getPose('EST_ROBOT_STATE')
def getPlanningStartPose(self):
if self.planFromCurrentRobotState:
return self.getEstimatedRobotStatePose()
else:
if self.plans:
return robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
else:
return self.getEstimatedRobotStatePose()
def playSequenceNominal(self):
assert None not in self.plans
self.planPlaybackFunction(self.plans)
def commitManipPlan(self):
self.manipPlanner.commitManipPlan(self.plans[-1])
def waitForPlanExecution(self, plan):
planElapsedTime = planplayback.PlanPlayback.getPlanElapsedTime(plan)
return self.delay(planElapsedTime + 1.0)
def animateLastPlan(self):
plan = self.plans[-1]
if not self.visOnly:
self.commitManipPlan()
return self.waitForPlanExecution(plan)
######### Nominal Plans and Execution #################################################################
####### Module for an arm to sweep out a gaze-constrained trajectory to map an area:
# t.spawnTargetAffordance(), t.planTargetSweep()
def planSequenceTargetSweep(self):
self.graspingHand = 'left'
self.planFromCurrentRobotState = False
self.plans = []
self.graspToHandLinkFrame = self.ikPlanner.newGraspToHandFrame(self.graspingHand)
self.planTargetReach()
self.nextPosition =[0,0,0]
self.targetPath = []
self.resetTargetPath()
self.computeNextTargetFrame()
self.initConstraintSet()
self.targetPath.append(self.faceTransformLocal)
pointsPerSide = 3
deltaDistance = self.targetAffordance.params.get('length') / pointsPerSide # 5cm was good
for i in xrange(pointsPerSide*0,pointsPerSide*1):
self.nextPosition[0] += -deltaDistance
self.computeNextTargetFrame()
self.addConstraintForTargetFrame(self.faceFrameDesired, i+1)
self.targetPath.append(self.faceTransformLocal)
for i in xrange(pointsPerSide*1,pointsPerSide*2):
self.nextPosition[2] += deltaDistance
self.computeNextTargetFrame()
self.addConstraintForTargetFrame(self.faceFrameDesired, i+1)
self.targetPath.append(self.faceTransformLocal)
for i in xrange(pointsPerSide*2,pointsPerSide*3):
self.nextPosition[0] += deltaDistance
self.computeNextTargetFrame()
self.addConstraintForTargetFrame(self.faceFrameDesired, i+1)
self.targetPath.append(self.faceTransformLocal)
for i in xrange(pointsPerSide*3,pointsPerSide*4):
self.nextPosition[2] += -deltaDistance
self.computeNextTargetFrame()
self.addConstraintForTargetFrame(self.faceFrameDesired, i+1)
self.targetPath.append(self.faceTransformLocal)
self.drawTargetPath()
#self.ikPlanner.ikServer.maxDegreesPerSecond = self.speedLow
self.planTrajectory()
#self.ikPlanner.ikServer.maxDegreesPerSecond = self.speedHigh
# Module to sweep the kuka arm around in a sphere - for map building
def planSequenceRoomMap(self):
self.graspingHand = 'left'
self.targetSweepType = 'orientation'
self.graspToHandLinkFrame = self.ikPlanner.newGraspToHandFrame(self.graspingHand)
self.planFromCurrentRobotState = False
self.plans = []
self.currentYawDegrees = 60
self.ikPlanner.ikServer.maxDegreesPerSecond = 10
self.nextPosition =[0,0,0]
self.targetPath = []
self.resetTargetPath()
self.fromTop = True
self.mapFolder=om.getOrCreateContainer('room mapping')
om.collapse(self.mapFolder)
# taskQueue doesn't support a while loop:
#while (self.currentYawDegrees >= -90):
# self.getRoomSweepFrames()
# self.planRoomReach()# move to next start point
# self.planRoomSweep() # reach down/up
# self.currentYawDegrees = self.currentYawDegrees - 30
# self.fromTop = not self.fromTop
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
self.getRoomSweepFrames()
self.planRoomReach()# move to next start point
self.planRoomSweep() # reach down/up
self.moveRoomSweepOnwards()
def doneIndicator(self):
print "We are done here."
def setMaxDegreesPerSecond(self, maxDeg):
self.ikPlanner.defaultIkParameters.maxDegreesPerSecond = maxDeg
def autonomousRoomMapNew(self, side='left'):
taskQueue = AsyncTaskQueue()
lowSpeed = 5
highSpeed = 30
delayTime = 3 # TODO: for potential self.delay to wait for pointclouds to be registered
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'General', 'arm up pregrasp'))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, highSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p1_up'))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, lowSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p1_down', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, highSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p2_down', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, lowSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p2_up', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, highSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p3_up', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, lowSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p3_down', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, highSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p4_down', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, lowSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p4_up', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, highSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p5_up', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, lowSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'roomMapping', 'p5_down', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(functools.partial(self.setMaxDegreesPerSecond, highSpeed))
taskQueue.addTask(functools.partial(self.planPostureFromDatabase, 'General', 'arm up pregrasp', side=side))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(self.doneIndicator)
return taskQueue
def autonomousExecuteRoomMap(self):
self.graspingHand = 'left'
self.targetSweepType = 'orientation'
self.graspToHandLinkFrame = self.ikPlanner.newGraspToHandFrame(self.graspingHand)
self.planFromCurrentRobotState = True
self.visOnly = False
self.ikPlanner.ikServer.maxDegreesPerSecond = 3#5
self.currentYawDegrees = 60
self.fromTop = True
self.mapFolder=om.getOrCreateContainer('room mapping')
taskQueue = AsyncTaskQueue()
self.addTasksToQueueSweep(taskQueue)
self.addTasksToQueueSweep(taskQueue)
self.addTasksToQueueSweep(taskQueue)
self.addTasksToQueueSweep(taskQueue)
self.addTasksToQueueSweep(taskQueue)
self.addTasksToQueueSweep(taskQueue)
self.addTasksToQueueSweep(taskQueue)
taskQueue.addTask(self.printAsync('done!'))
taskQueue.addTask(self.doneIndicator)
return taskQueue
def addTasksToQueueSweep(self, taskQueue):
taskQueue.addTask(self.getRoomSweepFrames)
taskQueue.addTask(self.planRoomReach)
taskQueue.addTask(self.optionalUserPrompt('execute reach? y/n: '))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(self.planRoomSweep)
taskQueue.addTask(self.optionalUserPrompt('execute sweep? y/n: '))
taskQueue.addTask(self.animateLastPlan)
taskQueue.addTask(self.moveRoomSweepOnwards)
return taskQueue
|
edowson/director
|
src/python/ddapp/mappingdemo.py
|
Python
|
bsd-3-clause
| 24,062
|
[
"VTK"
] |
db4110b7c67127a30e7dc6c0b79ee7ade9a0044396b95b2c78baed284f689f49
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that encode the sequence of PSI module
calls for each of the *name* values of the energy(), optimize(),
response(), and frequency() functions. *name* can be assumed lowercase by here.
"""
import os
import sys
import shutil
import subprocess
import warnings
import numpy as np
from qcelemental import constants
from psi4 import extras
from psi4 import core
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver import psifiles as psif
from psi4.driver.p4util.exceptions import ManagedMethodError, PastureRequiredError, ValidationError
#from psi4.driver.molutil import *
from psi4.driver.qcdb.basislist import corresponding_basis
# never import driver, wrappers, or aliases into this file
from .roa import run_roa
from . import proc_util
from . import empirical_dispersion
from . import dft
from . import mcscf
from . import response
from . import solvent
# ATTN NEW ADDITIONS!
# consult http://psicode.org/psi4manual/master/proc_py.html
def select_mp2(name, **kwargs):
"""Function selecting the algorithm for a MP2 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/dfmp2/detci/fnocc
# MP2_TYPE exists largely for py-side reasoning, so must manage it
# here rather than passing to c-side unprepared for validation
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference in ['RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'DFMP2']:
func = run_dfmp2
if module == 'DETCI':
core.print_out("""\nDETCI is ill-advised for method MP2 as it is available inefficiently as a """
"""byproduct of a CISD computation.\n DETCI ROHF MP2 will produce non-standard results.\n""")
if func is None:
raise ManagedMethodError(['select_mp2', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
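# A rough dispatch sketch (option values are illustrative): with REFERENCE=RHF
# and MP2_TYPE=DF, an empty QC_MODULE falls through to run_dfmp2, while
# "set qc_module occ" routes the same MP2 energy call to run_dfocc instead.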
def select_mp2_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP2 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ/dfmp2
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc_gradient
elif module in ['', 'DFMP2']:
func = run_dfmp2_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp2_gradient', name, 'MP2_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2_property(name, **kwargs):
"""Function selecting the algorithm for a MP2 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only dfmp2 for now
func = None
if reference == 'RHF':
if mtd_type == 'DF':
#if module == 'OCC':
# func = run_dfocc_property
if module in ['', 'DFMP2']:
func = run_dfmp2_property
#elif reference == 'UHF':
# if mtd_type == 'DF':
# if module in ['', 'OCC']:
# func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_mp2_property', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2(name, **kwargs):
"""Function selecting the algorithm for an OMP2 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp2', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP2 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp2_gradient', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2_property(name, **kwargs):
"""Function selecting the algorithm for an OMP2 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp2_property', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5_property(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp2p5_property', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3_property(name, **kwargs):
"""Function selecting the algorithm for an OMP3 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp3_property', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd_property(name, **kwargs):
"""Function selecting the algorithm for an OLCCD property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_olccd_property', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp3(name, **kwargs):
"""Function selecting the algorithm for a MP3 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/fnocc/detci
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI': # no default for this case
func = run_detci
elif module in ['']:
core.print_out("""\nThis method is available inefficiently as a """
"""byproduct of a CISD computation.\n Add "set """
"""qc_module detci" to input to access this route.\n""")
if func is None:
raise ManagedMethodError(['select_mp3', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
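# Illustrative sketch of driving the selector above; not part of the original
# driver, and the option value below is an assumption for demonstration only.
def _sketch_select_mp3_dispatch():
    """Probe for an MP3 route, then dispatch it through select_mp3()."""
    core.set_global_option('MP_TYPE', 'DF')   # request the density-fitted route
    try:
        select_mp3('mp3', probe=True)         # returns None if some module can handle it
    except ManagedMethodError:
        return None                           # no module covers this REFERENCE/MP_TYPE/QC_MODULE combination
    return select_mp3('mp3')                  # dispatches to run_dfocc (DF/CD) or run_occ/run_fnocc/run_detci (CONV)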
def select_mp3_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP3 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp3_gradient', name, 'MP_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3(name, **kwargs):
"""Function selecting the algorithm for an OMP3 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp3', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP3 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp3_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2p5(name, **kwargs):
"""Function selecting the algorithm for a MP2.5 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_mp2p5', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2p5_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP2.5 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp2p5_gradient', name, 'MP_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp2p5', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp2p5_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_lccd(name, **kwargs):
"""Function selecting the algorithm for a LCCD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'OCC':
func = run_occ
elif module in ['', 'FNOCC']:
func = run_cepa
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_lccd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_lccd_gradient(name, **kwargs):
"""Function selecting the algorithm for a LCCD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if all_electron:
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_lccd_gradient', name, 'CC_TYPE', mtd_type, reference, module, all_electron])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd(name, **kwargs):
"""Function selecting the algorithm for an OLCCD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_olccd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd_gradient(name, **kwargs):
"""Function selecting the algorithm for an OLCCD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_olccd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_fnoccsd(name, **kwargs):
"""Function selecting the algorithm for a FNO-CCSD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'FNOCC']:
func = run_fnocc
elif mtd_type == 'DF':
if module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module in ['', 'FNOCC']:
func = run_fnodfcc
if func is None:
raise ManagedMethodError(['select_fnoccsd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd(name, **kwargs):
"""Function selecting the algorithm for a CCSD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy/detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'FNOCC':
func = run_fnocc
elif module == 'CCT3' and extras.addons("cct3"):
import cct3
func = cct3.run_cct3
elif module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'CCT3' and extras.addons("cct3"):
import cct3
func = cct3.run_cct3
elif module in ['', 'CCENERGY']:
func = run_ccenergy
if func is None:
raise ManagedMethodError(['select_ccsd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_gradient(name, **kwargs):
"""Function selecting the algorithm for a CCSD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
if func is None:
raise ManagedMethodError(['select_ccsd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_fnoccsd_t_(name, **kwargs):
"""Function selecting the algorithm for a FNO-CCSD(T) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'FNOCC']:
func = run_fnocc
elif mtd_type == 'DF':
if module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module in ['', 'FNOCC']:
func = run_fnodfcc
if func is None:
raise ManagedMethodError(['select_fnoccsd_t_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_t_(name, **kwargs):
"""Function selecting the algorithm for a CCSD(T) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'FNOCC':
func = run_fnocc
elif module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif reference in ['UHF', 'ROHF']:
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
if func is None:
raise ManagedMethodError(['select_ccsd_t_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_t__gradient(name, **kwargs):
"""Function selecting the algorithm for a CCSD(T) gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only ccenergy
func = None
if reference in ['RHF']:
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
if func is None:
raise ManagedMethodError(['select_ccsd_t__gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_at_(name, **kwargs):
"""Function selecting the algorithm for a CCSD(AT) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_ccsd_at_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_cisd(name, **kwargs):
"""Function selecting the algorithm for a CISD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CI_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'FNOCC']:
func = run_cepa
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module in ['', 'DETCI']:
func = run_detci
if func is None:
raise ManagedMethodError(['select_cisd', name, 'CI_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp4(name, **kwargs):
"""Function selecting the algorithm for a MP4 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'FNOCC']:
func = run_fnocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI': # no default for this case
func = run_detci
elif module in ['']:
core.print_out("""\nThis method is available inefficiently as a """
"""byproduct of a CISDT computation.\n Add "set """
"""qc_module detci" to input to access this route.\n""")
if func is None:
raise ManagedMethodError(['select_mp4', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_adc2(name, **kwargs):
"""Function selecting the algorithm for ADC(2) excited state energy
call and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only adcc/adc
# TODO Actually one should do selection on a couple of other options here
# as well, e.g. adcc supports frozen-core and frozen-virtual,
# spin-specific states or spin-flip methods.
    # But as far as I (mfherbst) know, the BUILTIN ADC routine only supports
    # singlet states, without freezing any core or virtual orbitals.
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'ADCC' and extras.addons("adcc"):
func = run_adcc
elif module in ['', 'BUILTIN']:
func = run_adc
if reference == 'UHF':
if mtd_type == 'CONV':
if module in ['ADCC', ''] and extras.addons("adcc"):
func = run_adcc
    # Note: ROHF is theoretically available in adcc, but it is not fully tested,
    # so support will be added later.
if func is None:
raise ManagedMethodError(['select_adc2', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
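# Illustrative sketch of an availability check built on the probe protocol;
# not part of the original driver, and the method string is an assumption.
def _sketch_adc2_route_available():
    """Return True if select_adc2() can find a module (adcc add-on or builtin)."""
    try:
        select_adc2('adc(2)', probe=True)
        return True
    except ManagedMethodError:
        return False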
def build_disp_functor(name, restricted, **kwargs):
if core.has_option_changed("SCF", "DFT_DISPERSION_PARAMETERS"):
modified_disp_params = core.get_option("SCF", "DFT_DISPERSION_PARAMETERS")
else:
modified_disp_params = None
# Figure out functional
superfunc, disp_type = dft.build_superfunctional(name, restricted)
if disp_type:
if isinstance(name, dict):
# user dft_functional={} spec - type for lookup, dict val for param defs,
# name & citation discarded so only param matches to existing defs will print labels
_disp_functor = empirical_dispersion.EmpiricalDispersion(
name_hint='',
level_hint=disp_type["type"],
param_tweaks=disp_type["params"],
engine=kwargs.get('engine', None))
else:
# dft/*functionals.py spec - name & type for lookup, option val for param tweaks
_disp_functor = empirical_dispersion.EmpiricalDispersion(
name_hint=superfunc.name(),
level_hint=disp_type["type"],
param_tweaks=modified_disp_params,
engine=kwargs.get('engine', None))
            # [Aug 2018] there once was a breed of `disp_type` that quacked
            # like a list rather than the more common dict handled above. If
            # ever again sighted, make an issue so this code can accommodate it.
_disp_functor.print_out()
return superfunc, _disp_functor
else:
return superfunc, None
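# Illustrative sketch of using build_disp_functor() on its own; not part of the
# original driver.  The functional label and the `mol` argument are assumptions
# for demonstration only.
def _sketch_dispersion_correction(mol):
    """Return the functional name and its empirical dispersion energy (0.0 if none)."""
    superfunc, disp = build_disp_functor('b3lyp-d3bj', restricted=True)
    e_disp = disp.compute_energy(mol) if disp is not None else 0.0
    return superfunc.name(), e_disp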
def scf_wavefunction_factory(name, ref_wfn, reference, **kwargs):
"""Builds the correct (R/U/RO/CU HF/KS) wavefunction from the
provided information, sets relevant auxiliary basis sets on it,
and prepares any empirical dispersion.
"""
# Figure out functional and dispersion
superfunc, _disp_functor = build_disp_functor(name, restricted=(reference in ["RKS", "RHF"]), **kwargs)
# Build the wavefunction
core.prepare_options_for_module("SCF")
if reference in ["RHF", "RKS"]:
wfn = core.RHF(ref_wfn, superfunc)
elif reference == "ROHF":
wfn = core.ROHF(ref_wfn, superfunc)
elif reference in ["UHF", "UKS"]:
wfn = core.UHF(ref_wfn, superfunc)
elif reference == "CUHF":
wfn = core.CUHF(ref_wfn, superfunc)
else:
raise ValidationError("SCF: Unknown reference (%s) when building the Wavefunction." % reference)
if _disp_functor and _disp_functor.engine != 'nl':
wfn._disp_functor = _disp_functor
# Set the DF basis sets
if (("DF" in core.get_global_option("SCF_TYPE")) or
(core.get_option("SCF", "DF_SCF_GUESS") and (core.get_global_option("SCF_TYPE") == "DIRECT"))):
aux_basis = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=wfn.basisset().has_puream())
wfn.set_basisset("DF_BASIS_SCF", aux_basis)
else:
wfn.set_basisset("DF_BASIS_SCF", core.BasisSet.zero_ao_basis_set())
# Set the relativistic basis sets
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
decon_basis = core.BasisSet.build(wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=wfn.basisset().has_puream())
wfn.set_basisset("BASIS_RELATIVISTIC", decon_basis)
# Set the multitude of SAD basis sets
if (core.get_option("SCF", "GUESS") in ["SAD", "SADNO", "HUCKEL"]):
sad_basis_list = core.BasisSet.build(wfn.molecule(), "ORBITAL",
core.get_global_option("BASIS"),
puream=wfn.basisset().has_puream(),
return_atomlist=True)
wfn.set_sad_basissets(sad_basis_list)
if ("DF" in core.get_option("SCF", "SAD_SCF_TYPE")):
# We need to force this to spherical regardless of any user or other demands.
optstash = p4util.OptionsState(['PUREAM'])
core.set_global_option('PUREAM', True)
sad_fitting_list = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SAD",
core.get_option("SCF", "DF_BASIS_SAD"),
puream=True,
return_atomlist=True)
wfn.set_sad_fitting_basissets(sad_fitting_list)
optstash.restore()
# Deal with the EXTERN issues
if hasattr(core, "EXTERN"):
wfn.set_external_potential(core.EXTERN)
return wfn
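# Illustrative sketch of the two-step construction that scf_helper() performs
# below: build a bare Wavefunction for the active basis, then upgrade it to the
# requested R/U/RO/CU HF/KS object.  Not part of the original driver; the `mol`
# argument is an assumption for demonstration only.
def _sketch_build_scf_wavefunction(mol):
    base_wfn = core.Wavefunction.build(mol, core.get_global_option('BASIS'))
    return scf_wavefunction_factory('scf', base_wfn, core.get_option('SCF', 'REFERENCE'))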
def scf_helper(name, post_scf=True, **kwargs):
"""Function serving as helper to SCF, choosing whether to cast
up or just run SCF with a standard guess. This preserves
previous SCF options set by other procedures (e.g., SAPT
output file types for SCF).
"""
if post_scf:
name = "scf"
optstash = p4util.OptionsState(
['PUREAM'],
['BASIS'],
['QMEFP'],
['INTS_TOLERANCE'],
['DF_BASIS_SCF'],
['SCF', 'GUESS'],
['SCF', 'DF_INTS_IO'],
['SCF_TYPE'], # Hack: scope gets changed internally with the Andy trick
)
optstash2 = p4util.OptionsState(
['BASIS'],
['DF_BASIS_SCF'],
['SCF_TYPE'],
['SCF', 'DF_INTS_IO'],
)
# Make sure we grab the correctly scoped integral threshold for SCF
core.set_global_option('INTS_TOLERANCE', core.get_option('SCF', 'INTS_TOLERANCE'))
# Grab a few kwargs
use_c1 = kwargs.get('use_c1', False)
scf_molecule = kwargs.get('molecule', core.get_active_molecule())
read_orbitals = core.get_option('SCF', 'GUESS') == "READ"
do_timer = kwargs.pop("do_timer", True)
ref_wfn = kwargs.pop('ref_wfn', None)
if ref_wfn is not None:
raise ValidationError("Cannot seed an SCF calculation with a reference wavefunction ('ref_wfn' kwarg).")
# PCM needs to be run w/o symmetry
if core.get_option("SCF", "PCM"):
c1_molecule = scf_molecule.clone()
c1_molecule.reset_point_group('c1')
c1_molecule.update_geometry()
scf_molecule = c1_molecule
core.print_out(""" PCM does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n""")
# PE needs to use exactly input orientation to correspond to potfile
if core.get_option("SCF", "PE"):
c1_molecule = scf_molecule.clone()
if getattr(scf_molecule, "_initial_cartesian", None) is not None:
c1_molecule._initial_cartesian = scf_molecule._initial_cartesian.clone()
c1_molecule.set_geometry(c1_molecule._initial_cartesian)
c1_molecule.reset_point_group("c1")
c1_molecule.fix_orientation(True)
c1_molecule.fix_com(True)
c1_molecule.update_geometry()
else:
raise ValidationError("Set no_com/no_reorient/symmetry c1 by hand for PE on non-Cartesian molecules.")
scf_molecule = c1_molecule
core.print_out(""" PE does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n""")
core.print_out(""" PE geometry must align with POTFILE keyword: """
"""resetting coordinates with fixed origin and orientation.\n""")
# SCF Banner data
banner = kwargs.pop('banner', None)
bannername = name
# Did we pass in a DFT functional?
dft_func = kwargs.pop('dft_functional', None)
if dft_func is not None:
if name.lower() != "scf":
raise ValidationError("dft_functional was supplied to SCF, but method name was not SCF ('%s')" % name)
name = dft_func
bannername = name
if isinstance(name, dict):
bannername = name.get("name", "custom functional")
# Setup the timer
if do_timer:
core.tstart()
# Second-order SCF requires non-symmetric density matrix support
if core.get_option('SCF', 'SOSCF'):
proc_util.check_non_symmetric_jk_density("Second-order SCF")
# sort out cast_up settings. no need to stash these since only read, never reset
cast = False
if core.has_option_changed('SCF', 'BASIS_GUESS'):
cast = core.get_option('SCF', 'BASIS_GUESS')
if p4util.yes.match(str(cast)):
cast = True
elif p4util.no.match(str(cast)):
cast = False
if cast:
        # A user can set "BASIS_GUESS" to True; pick a matched guess basis, falling back to 3-21G
if cast is True:
guessbasis = corresponding_basis(core.get_global_option('BASIS'), 'GUESS')[0]
if guessbasis is None:
guessbasis = '3-21G' # guess of last resort
else:
guessbasis = cast
core.set_global_option('BASIS', guessbasis)
castdf = 'DF' in core.get_global_option('SCF_TYPE')
if core.has_option_changed('SCF', 'DF_BASIS_GUESS'):
castdf = core.get_option('SCF', 'DF_BASIS_GUESS')
if p4util.yes.match(str(castdf)):
castdf = True
elif p4util.no.match(str(castdf)):
castdf = False
if castdf:
core.set_global_option('SCF_TYPE', 'DF')
core.set_local_option('SCF', 'DF_INTS_IO', 'none')
# Figure out the fitting basis set
if castdf is True:
core.set_global_option('DF_BASIS_SCF', '')
elif isinstance(castdf, str):
core.set_global_option('DF_BASIS_SCF', castdf)
else:
raise ValidationError("Unexpected castdf option (%s)." % castdf)
# Switch to the guess namespace
namespace = core.IO.get_default_namespace()
guesspace = namespace + '.guess'
if namespace == '':
guesspace = 'guess'
core.IO.set_default_namespace(guesspace)
# Print some info about the guess
core.print_out('\n')
p4util.banner('Guess SCF, %s Basis' % (guessbasis))
core.print_out('\n')
# sort out broken_symmetry settings.
if 'brokensymmetry' in kwargs:
multp = scf_molecule.multiplicity()
if multp != 1:
raise ValidationError('Broken symmetry is only for singlets.')
if core.get_option('SCF', 'REFERENCE') not in ['UHF', 'UKS']:
raise ValidationError("""You must specify 'set reference uhf' to use broken symmetry.""")
do_broken = True
else:
do_broken = False
if cast and read_orbitals:
raise ValidationError("""Detected options to both cast and read orbitals""")
if cast and do_broken:
raise ValidationError("""Detected options to both cast and perform a broken symmetry computation""")
if (core.get_option('SCF', 'STABILITY_ANALYSIS') == 'FOLLOW') and (core.get_option('SCF', 'REFERENCE') != 'UHF'):
raise ValidationError("""Stability analysis root following is only available for UHF""")
# broken set-up
if do_broken:
raise ValidationError("""Broken symmetry computations are not currently enabled.""")
scf_molecule.set_multiplicity(3)
core.print_out('\n')
p4util.banner(' Computing high-spin triplet guess ')
core.print_out('\n')
# If GUESS is auto guess what it should be
if core.get_option('SCF', 'GUESS') == "AUTO":
if (scf_molecule.natom() > 1):
core.set_local_option('SCF', 'GUESS', 'SAD')
else:
core.set_local_option('SCF', 'GUESS', 'CORE')
if core.get_global_option('BASIS') in ['', '(AUTO)']:
if name in ['hf3c', 'hf-3c']:
core.set_global_option('BASIS', 'minix')
elif name in ['pbeh3c', 'pbeh-3c']:
core.set_global_option('BASIS', 'def2-msvp')
# the FIRST scf call
if cast or do_broken:
# Cast or broken are special cases
base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
core.print_out("\n ---------------------------------------------------------\n");
if banner:
core.print_out(" " + banner.center(58));
if cast:
core.print_out(" " + "SCF Castup computation".center(58));
ref_wfn = scf_wavefunction_factory(name, base_wfn, core.get_option('SCF', 'REFERENCE'), **kwargs)
core.set_legacy_wavefunction(ref_wfn)
# Compute dftd3
if hasattr(ref_wfn, "_disp_functor"):
disp_energy = ref_wfn._disp_functor.compute_energy(ref_wfn.molecule())
ref_wfn.set_variable("-D Energy", disp_energy)
ref_wfn.compute_energy()
# broken clean-up
if do_broken:
raise ValidationError("Broken Symmetry computations are temporarily disabled.")
scf_molecule.set_multiplicity(1)
core.set_local_option('SCF', 'GUESS', 'READ')
core.print_out('\n')
p4util.banner(' Computing broken symmetry solution from high-spin triplet guess ')
core.print_out('\n')
# cast clean-up
if cast:
# Move files to proper namespace
core.IO.change_file_namespace(180, guesspace, namespace)
core.IO.set_default_namespace(namespace)
optstash2.restore()
# Print the banner for the standard operation
core.print_out('\n')
p4util.banner(bannername.upper())
core.print_out('\n')
# the SECOND scf call
base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
if banner:
core.print_out("\n ---------------------------------------------------------\n");
core.print_out(" " + banner.center(58));
scf_wfn = scf_wavefunction_factory(name, base_wfn, core.get_option('SCF', 'REFERENCE'), **kwargs)
core.set_legacy_wavefunction(scf_wfn)
# The wfn from_file routine adds the npy suffix if needed, but we add it here so that
# we can use os.path.isfile to query whether the file exists before attempting to read
read_filename = scf_wfn.get_scratch_filename(180) + '.npy'
if (core.get_option('SCF', 'GUESS') == 'READ') and os.path.isfile(read_filename):
old_wfn = core.Wavefunction.from_file(read_filename)
Ca_occ = old_wfn.Ca_subset("SO", "OCC")
Cb_occ = old_wfn.Cb_subset("SO", "OCC")
if old_wfn.molecule().schoenflies_symbol() != scf_molecule.schoenflies_symbol():
raise ValidationError("Cannot compute projection of different symmetries.")
if old_wfn.basisset().name() == scf_wfn.basisset().name():
core.print_out(" Reading orbitals from file 180, no projection.\n\n")
scf_wfn.guess_Ca(Ca_occ)
scf_wfn.guess_Cb(Cb_occ)
else:
core.print_out(" Reading orbitals from file 180, projecting to new basis.\n\n")
core.print_out(" Computing basis projection from %s to %s\n\n" % (old_wfn.basisset().name(), scf_wfn.basisset().name()))
pCa = scf_wfn.basis_projection(Ca_occ, old_wfn.nalphapi(), old_wfn.basisset(), scf_wfn.basisset())
pCb = scf_wfn.basis_projection(Cb_occ, old_wfn.nbetapi(), old_wfn.basisset(), scf_wfn.basisset())
scf_wfn.guess_Ca(pCa)
scf_wfn.guess_Cb(pCb)
# Strip off headers to only get R, RO, U, CU
old_ref = old_wfn.name().replace("KS", "").replace("HF", "")
new_ref = scf_wfn.name().replace("KS", "").replace("HF", "")
if old_ref != new_ref:
scf_wfn.reset_occ_ = True
elif (core.get_option('SCF', 'GUESS') == 'READ') and not os.path.isfile(read_filename):
core.print_out(" Unable to find file 180, defaulting to SAD guess.\n")
core.set_local_option('SCF', 'GUESS', 'SAD')
sad_basis_list = core.BasisSet.build(scf_wfn.molecule(), "ORBITAL",
core.get_global_option("BASIS"),
puream=scf_wfn.basisset().has_puream(),
return_atomlist=True)
scf_wfn.set_sad_basissets(sad_basis_list)
if ("DF" in core.get_option("SCF", "SAD_SCF_TYPE")):
sad_fitting_list = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_SAD",
core.get_option("SCF", "DF_BASIS_SAD"),
puream=scf_wfn.basisset().has_puream(),
return_atomlist=True)
scf_wfn.set_sad_fitting_basissets(sad_fitting_list)
if cast:
core.print_out("\n Computing basis projection from %s to %s\n\n" % (ref_wfn.basisset().name(), base_wfn.basisset().name()))
if ref_wfn.basisset().n_ecp_core() != base_wfn.basisset().n_ecp_core():
raise ValidationError("Projecting from basis ({}) with ({}) ECP electrons to basis ({}) with ({}) ECP electrons will be a disaster. Select a compatible cast-up basis with `set guess_basis YOUR_BASIS_HERE`.".format(
ref_wfn.basisset().name(), ref_wfn.basisset().n_ecp_core(), base_wfn.basisset().name(), base_wfn.basisset().n_ecp_core()))
pCa = ref_wfn.basis_projection(ref_wfn.Ca(), ref_wfn.nalphapi(), ref_wfn.basisset(), scf_wfn.basisset())
pCb = ref_wfn.basis_projection(ref_wfn.Cb(), ref_wfn.nbetapi(), ref_wfn.basisset(), scf_wfn.basisset())
scf_wfn.guess_Ca(pCa)
scf_wfn.guess_Cb(pCb)
# Print basis set info
if core.get_option("SCF", "PRINT_BASIS"):
scf_wfn.basisset().print_detail_out()
# Compute dftd3
if hasattr(scf_wfn, "_disp_functor"):
disp_energy = scf_wfn._disp_functor.compute_energy(scf_wfn.molecule(), scf_wfn)
scf_wfn.set_variable("-D Energy", disp_energy)
# PCM preparation
if core.get_option('SCF', 'PCM'):
if core.get_option('SCF', 'PE'):
raise ValidationError("""Error: 3-layer QM/MM/PCM not implemented.\n""")
pcmsolver_parsed_fname = core.get_local_option('PCM', 'PCMSOLVER_PARSED_FNAME')
pcm_print_level = core.get_option('SCF', "PRINT")
scf_wfn.set_PCM(core.PCM(pcmsolver_parsed_fname, pcm_print_level, scf_wfn.basisset()))
# PE preparation
if core.get_option('SCF', 'PE'):
if not solvent._have_pe:
raise ModuleNotFoundError('Python module cppe not found. Solve by installing it: `conda install -c psi4 pycppe`')
# PE needs information about molecule and basis set
pol_embed_options = solvent.pol_embed.get_pe_options()
core.print_out(f""" Using potential file
{pol_embed_options["potfile"]}
for Polarizable Embedding calculation.\n""")
scf_wfn.pe_state = solvent.pol_embed.CppeInterface(
molecule=scf_molecule, options=pol_embed_options,
basisset=scf_wfn.basisset()
)
e_scf = scf_wfn.compute_energy()
for obj in [core, scf_wfn]:
for pv in ["SCF TOTAL ENERGY", "CURRENT ENERGY", "CURRENT REFERENCE ENERGY"]:
obj.set_variable(pv, e_scf)
# We always would like to print a little property information
if kwargs.get('scf_do_properties', True):
oeprop = core.OEProp(scf_wfn)
oeprop.set_title("SCF")
        # Figure out which properties to compute; always include the dipole
props = [x.upper() for x in core.get_option("SCF", "SCF_PROPERTIES")]
if "DIPOLE" not in props:
props.append("DIPOLE")
proc_util.oeprop_validator(props)
for x in props:
oeprop.add(x)
# Compute properties
oeprop.compute()
for obj in [core, scf_wfn]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# component qcvars can be retired at v1.5
for xyz in 'XYZ':
obj.set_variable('CURRENT DIPOLE ' + xyz, obj.variable('SCF DIPOLE ' + xyz))
obj.set_variable('CURRENT DIPOLE', obj.variable("SCF DIPOLE"))
# Write out MO's
if core.get_option("SCF", "PRINT_MOS"):
mowriter = core.MOWriter(scf_wfn)
mowriter.write()
# Write out a molden file
if core.get_option("SCF", "MOLDEN_WRITE"):
filename = core.get_writer_file_prefix(scf_molecule.name()) + ".molden"
dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
occa = scf_wfn.occupation_a()
        occb = scf_wfn.occupation_b()
mw = core.MoldenWriter(scf_wfn)
mw.write(filename, scf_wfn.Ca(), scf_wfn.Cb(), scf_wfn.epsilon_a(),
scf_wfn.epsilon_b(), scf_wfn.occupation_a(),
scf_wfn.occupation_b(), dovirt)
# Write out orbitals and basis; Can be disabled, e.g., for findif displacements
if kwargs.get('write_orbitals', True):
write_filename = scf_wfn.get_scratch_filename(180)
scf_wfn.to_file(write_filename)
extras.register_numpy_file(write_filename)
if do_timer:
core.tstop()
optstash.restore()
if (not use_c1) or (scf_molecule.schoenflies_symbol() == 'c1'):
return scf_wfn
else:
# C1 copy quietly
c1_optstash = p4util.OptionsState(['PRINT'])
core.set_global_option("PRINT", 0)
# If we force c1 copy the active molecule
scf_molecule.update_geometry()
core.print_out("""\n A requested method does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n\n""")
c1_molecule = scf_molecule.clone()
c1_molecule.reset_point_group('c1')
c1_molecule.fix_orientation(True)
c1_molecule.fix_com(True)
c1_molecule.update_geometry()
c1_basis = core.BasisSet.build(c1_molecule, "ORBITAL", core.get_global_option('BASIS'), quiet=True)
tmp = scf_wfn.c1_deep_copy(c1_basis)
c1_jkbasis = core.BasisSet.build(c1_molecule, "DF_BASIS_SCF",
core.get_global_option("DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'), quiet=True)
tmp.set_basisset("DF_BASIS_SCF", c1_jkbasis)
c1_optstash.restore()
return tmp
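# Illustrative sketch of requesting a cast-up (small-basis) guess through the
# options scf_helper() reads above.  Not part of the original driver; the basis
# names are assumptions for demonstration only.
def _sketch_castup_scf():
    core.set_global_option('BASIS', 'cc-pvtz')
    core.set_local_option('SCF', 'BASIS_GUESS', '3-21G')   # converge a guess in the small basis, then project up
    return scf_helper('scf', post_scf=False)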
def run_dct(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density cumulant theory calculation.
"""
if (core.get_global_option('FREEZE_CORE') == 'TRUE'):
raise ValidationError('Frozen core is not available for DCT.')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
if (core.get_global_option("DCT_TYPE") == "DF"):
core.print_out(" Constructing Basis Sets for DCT...\n\n")
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_DCT",
core.get_global_option("DF_BASIS_DCT"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_DCT", aux_basis)
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
dct_wfn = core.dct(ref_wfn)
else:
# Ensure IWL files have been written for non DF-DCT
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
dct_wfn = core.dct(ref_wfn)
return dct_wfn
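# Illustrative sketch of the auxiliary-basis pattern repeated throughout the
# run_* routines in this file: build a fitting set keyed to the orbital basis
# and attach it to the wavefunction.  Not part of the original driver; the
# choice of the SCF fitting role is just for demonstration.
def _sketch_attach_scf_fitting_basis(wfn):
    aux = core.BasisSet.build(wfn.molecule(), 'DF_BASIS_SCF',
                              core.get_option('SCF', 'DF_BASIS_SCF'),
                              'JKFIT', core.get_global_option('BASIS'),
                              puream=wfn.basisset().has_puream())
    wfn.set_basisset('DF_BASIS_SCF', aux)
    return wfn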
def run_dct_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
DCT gradient calculation.
"""
optstash = p4util.OptionsState(
['GLOBALS', 'DERTYPE'])
core.set_global_option('DERTYPE', 'FIRST')
dct_wfn = run_dct(name, **kwargs)
derivobj = core.Deriv(dct_wfn)
derivobj.set_tpdm_presorted(True)
grad = derivobj.compute()
dct_wfn.set_gradient(grad)
optstash.restore()
return dct_wfn
def run_dfocc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted or Cholesky-decomposed
(non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'DO_SCS'],
['DFOCC', 'DO_SOS'],
['DFOCC', 'READ_SCF_3INDEX'],
['DFOCC', 'CHOLESKY'],
['DFOCC', 'CC_LAMBDA'])
def set_cholesky_from(corl_type):
if corl_type == 'DF':
core.set_local_option('DFOCC', 'CHOLESKY', 'FALSE')
proc_util.check_disk_df(name.upper(), optstash)
elif corl_type == 'CD':
core.set_local_option('DFOCC', 'CHOLESKY', 'TRUE')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
optstash.add_option(['SCF_TYPE'])
core.set_global_option('SCF_TYPE', 'CD')
core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
if core.get_global_option('SCF_TYPE') != 'CD':
core.set_local_option('DFOCC', 'READ_SCF_3INDEX', 'FALSE')
else:
raise ValidationError(f"""Invalid type '{corl_type}' for DFOCC""")
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
corl_type = core.get_global_option('MP2_TYPE')
elif name in ['mp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
elif name in ['omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
corl_type = core.get_global_option('MP_TYPE')
elif name in ['mp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
elif name in ['omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
corl_type = core.get_global_option('MP_TYPE')
elif name in ['lccd', 'olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccd':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccsd':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccsd(t)':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(T)')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'ccsd(at)':
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(AT)')
corl_type = core.get_global_option('CC_TYPE')
elif name == 'dfocc':
pass
else:
raise ValidationError('Unidentified method %s' % (name))
set_cholesky_from(corl_type)
# conventional vs. optimized orbitals
if name in ['mp2', 'mp2.5', 'mp3', 'lccd',
'ccd', 'ccsd', 'ccsd(t)', 'ccsd(at)']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
if name in ["mp2.5", "mp3"] and not core.has_global_option_changed("MP_TYPE"):
core.print_out(f" Information: {name.upper()} default algorithm changed to DF in August 2020. Use `set mp_type conv` for previous behavior.\n")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if not core.get_local_option("DFOCC", "CHOLESKY"):
core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
# Shove variables into global space
if name in ['mp2', 'omp2', 'mp2.5', 'mp3', 'lccd',]:
for k, v in dfocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return dfocc_wfn
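# Illustrative sketch of reusing a converged C1 SCF reference for a DF-CCSD(T)
# energy through run_dfocc().  Not part of the original driver; the option
# values are assumptions for demonstration only.
def _sketch_dfocc_on_existing_reference():
    core.set_global_option('CC_TYPE', 'DF')                # run_dfocc maps ccsd(t) through CC_TYPE
    ref = scf_helper('scf', post_scf=False, use_c1=True)   # DFOCC requires a C1 reference wavefunction
    return run_dfocc('ccsd(t)', ref_wfn=ref)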
def run_dfocc_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted (non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['REFERENCE'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'CC_LAMBDA'],
['GLOBALS', 'DERTYPE'])
proc_util.check_disk_df(name.upper(), optstash)
if core.get_global_option('SCF_TYPE') != 'DISK_DF':
raise ValidationError('DFOCC gradients need DF-SCF reference.')
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
elif name in ['mp2.5', 'omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
elif name in ['mp3', 'omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
elif name in ['lccd', 'olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
elif name in ['ccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
elif name in ['ccsd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
elif name in ['ccsd(t)']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(T)')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
else:
raise ValidationError('Unidentified method %s' % (name))
if name in ['mp2', 'mp2.5', 'mp3', 'lccd', 'ccd', 'ccsd', 'ccsd(t)']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_global_option('DERTYPE', 'FIRST')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
if name in ["mp2.5", "mp3"] and not core.has_global_option_changed("MP_TYPE"):
core.print_out(f" Information: {name.upper()} default algorithm changed to DF in August 2020. Use `set mp_type conv` for previous behavior.\n")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
derivobj = core.Deriv(dfocc_wfn)
derivobj.compute_df("DF_BASIS_SCF", "DF_BASIS_CC")
dfocc_wfn.set_variable(f"{name.upper()} TOTAL GRADIENT", dfocc_wfn.gradient())
# Shove variables into global space
if name in ['mp2', 'mp2.5', 'mp3', 'lccd', 'ccsd', 'omp2']:
for k, v in dfocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return dfocc_wfn
def run_dfocc_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted (non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'OEPROP'])
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
elif name in ['omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
elif name in ['omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
elif name in ['olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
else:
        raise ValidationError('Unidentified method %s' % (name))
proc_util.check_disk_df(name.upper(), optstash)
if name in ['mp2']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp3', 'omp2.5', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'OEPROP', 'TRUE')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
# Shove variables into global space
    # TODO: Make other methods in DFOCC update all variables, then add them to the list. Adding them now risks setting outdated information.
if name in ['mp2', 'omp2']:
for k, v in dfocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return dfocc_wfn
def run_qchf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a density-fitted orbital-optimized MP2 computation
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DF_BASIS_SCF'],
['DIE_IF_NOT_CONVERGED'],
['MAXITER'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'QCHF'],
['DFOCC', 'E_CONVERGENCE'])
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'WFN_TYPE', 'QCHF')
core.set_local_option('DFOCC', 'QCHF', 'TRUE')
core.set_local_option('DFOCC', 'E_CONVERGENCE', 8)
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
core.set_local_option('SCF', 'DIE_IF_NOT_CONVERGED', 'FALSE')
core.set_local_option('SCF', 'MAXITER', 1)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" QCHF does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
return dfocc_wfn
def run_occ(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a conventional integral (O)MPN computation
"""
    # Stash these options so we can restore them at the end of the computation.
optstash = p4util.OptionsState(
['OCC', 'SPIN_SCALE_TYPE'],
['OCC', 'ORB_OPT'],
['OCC', 'WFN_TYPE'])
if name == 'mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'scs(n)-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCSN')
elif name == 'scs-mp2-vdw':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCSVDW')
elif name == 'sos-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOS')
elif name == 'sos-pi-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOSPI')
elif name == 'custom-scs-mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'sos-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOS')
elif name == 'custom-scs-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'custom-scs-mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'scs-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SCS')
elif name == 'sos-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'SOS')
elif name == 'custom-scs-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
elif name == 'olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
elif name == 'custom-scs-olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'CUSTOM')
else:
raise ValidationError("""Invalid method %s""" % name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
occ_wfn = core.occ(ref_wfn)
# Shove variables into global space
keep_custom_spin_scaling = core.has_option_changed("OCC", "SS_SCALE") or core.has_option_changed("OCC", "OS_SCALE")
for k, v in occ_wfn.variables().items():
        # Custom spin-component-scaling variables are meaningless if custom scalings haven't been set. Delete them.
if k.startswith("CUSTOM SCS") and not keep_custom_spin_scaling:
occ_wfn.del_variable(k)
else:
core.set_variable(k, v)
optstash.restore()
return occ_wfn
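# Note on the name handling above: the "o"-prefixed methods (omp2, omp2.5,
# omp3, olccd) turn on ORB_OPT, the scs/sos/custom-scs variants select the
# matching SPIN_SCALE_TYPE, and the underlying stem picks WFN_TYPE
# (mp2 -> OMP2, mp2.5 -> OMP2.5, mp3 -> OMP3, lccd/olccd -> OCEPA).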
def run_occ_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a conventional integral (O)MPN computation
"""
optstash = p4util.OptionsState(
['OCC', 'ORB_OPT'],
['OCC', 'WFN_TYPE'],
['OCC', 'DO_SCS'],
['OCC', 'DO_SOS'],
['GLOBALS', 'DERTYPE'])
if core.get_global_option('SCF_TYPE') in ['CD', 'DF', 'MEM_DF', 'DISK_DF']:
raise ValidationError('OCC gradients need conventional SCF reference.')
if name == 'mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'conv-omp2']:
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
else:
raise ValidationError("""Invalid method %s""" % name)
core.set_global_option('DERTYPE', 'FIRST')
# locking out SCS through explicit keyword setting
# * so that current energy must match call
# * since grads not avail for scs
core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
occ_wfn = core.occ(ref_wfn)
derivobj = core.Deriv(occ_wfn)
grad = derivobj.compute()
occ_wfn.set_gradient(grad)
occ_wfn.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
# Shove variables into global space
keep_custom_spin_scaling = core.has_option_changed("OCC", "SS_SCALE") or core.has_option_changed("OCC", "OS_SCALE")
for k, v in occ_wfn.variables().items():
        # Custom spin-component-scaling variables are meaningless if custom scalings haven't been set. Delete them.
if k.startswith("CUSTOM SCS") and not keep_custom_spin_scaling:
occ_wfn.del_variable(k)
else:
core.set_variable(k, v)
optstash.restore()
return occ_wfn
def run_scf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a self-consistent-field theory (HF & DFT) calculation.
"""
optstash_mp2 = p4util.OptionsState(
['DF_BASIS_MP2'],
['DFMP2', 'MP2_OS_SCALE'],
['DFMP2', 'MP2_SS_SCALE'])
dft_func = False
if "dft_functional" in kwargs:
dft_func = True
optstash_scf = proc_util.scf_set_reference_local(name, is_dft=dft_func)
# See if we're doing TDSCF after, keep JK if so
if sum(core.get_option("SCF", "TDSCF_STATES")) > 0:
core.set_local_option("SCF", "SAVE_JK", True)
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
scf_wfn = scf_helper(name, post_scf=False, **kwargs)
returnvalue = scf_wfn.energy()
ssuper = scf_wfn.functional()
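    # Double-hybrid functionals: when the functional carries a PT2 correlation
    # component (is_c_hybrid), a DF-MP2 calculation is run on the converged orbitals
    # and the appropriately scaled correlation energy is added to the DFT energy below.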
if ssuper.is_c_hybrid():
core.tstart()
aux_basis = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'),
puream=-1)
scf_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
if ssuper.is_c_scs_hybrid():
core.set_local_option('DFMP2', 'MP2_OS_SCALE', ssuper.c_os_alpha())
core.set_local_option('DFMP2', 'MP2_SS_SCALE', ssuper.c_ss_alpha())
dfmp2_wfn = core.dfmp2(scf_wfn)
dfmp2_wfn.compute_energy()
vdh = dfmp2_wfn.variable('CUSTOM SCS-MP2 CORRELATION ENERGY')
else:
dfmp2_wfn = core.dfmp2(scf_wfn)
dfmp2_wfn.compute_energy()
vdh = ssuper.c_alpha() * dfmp2_wfn.variable('MP2 CORRELATION ENERGY')
# remove misleading MP2 psivars computed with DFT, not HF, reference
for var in dfmp2_wfn.variables():
if var.startswith('MP2 ') and ssuper.name() not in ['MP2D']:
scf_wfn.del_variable(var)
scf_wfn.set_variable('DOUBLE-HYBRID CORRECTION ENERGY', vdh)
scf_wfn.set_variable('{} DOUBLE-HYBRID CORRECTION ENERGY'.format(ssuper.name()), vdh)
returnvalue += vdh
scf_wfn.set_variable('DFT TOTAL ENERGY', returnvalue)
for pv, pvv in scf_wfn.variables().items():
if pv.endswith('DISPERSION CORRECTION ENERGY') and pv.startswith(ssuper.name()):
fctl_plus_disp_name = pv.split()[0]
scf_wfn.set_variable(fctl_plus_disp_name + ' TOTAL ENERGY', returnvalue)
break
else:
scf_wfn.set_variable('{} TOTAL ENERGY'.format(ssuper.name()), returnvalue)
scf_wfn.set_variable('CURRENT ENERGY', returnvalue)
scf_wfn.set_energy(returnvalue)
core.print_out('\n\n')
core.print_out(' %s Energy Summary\n' % (name.upper()))
core.print_out(' ' + '-' * (15 + len(name)) + '\n')
core.print_out(' DFT Reference Energy = %22.16lf\n' % (returnvalue - vdh))
core.print_out(' Scaled MP2 Correlation = %22.16lf\n' % (vdh))
core.print_out(' @Final double-hybrid DFT total energy = %22.16lf\n\n' % (returnvalue))
core.tstop()
if ssuper.name() == 'MP2D':
for pv, pvv in dfmp2_wfn.variables().items():
scf_wfn.set_variable(pv, pvv)
# Conversely, remove DFT qcvars from MP2D
for var in scf_wfn.variables():
if 'DFT ' in var or 'DOUBLE-HYBRID ' in var:
scf_wfn.del_variable(var)
# DFT groups dispersion with SCF. Reshuffle so dispersion with MP2 for MP2D.
for pv in ['SCF TOTAL ENERGY', 'SCF ITERATION ENERGY', 'MP2 TOTAL ENERGY']:
scf_wfn.set_variable(pv, scf_wfn.variable(pv) - scf_wfn.variable('DISPERSION CORRECTION ENERGY'))
scf_wfn.set_variable('MP2D CORRELATION ENERGY', scf_wfn.variable('MP2 CORRELATION ENERGY') + scf_wfn.variable('DISPERSION CORRECTION ENERGY'))
scf_wfn.set_variable('MP2D TOTAL ENERGY', scf_wfn.variable('MP2D CORRELATION ENERGY') + scf_wfn.variable('HF TOTAL ENERGY'))
scf_wfn.set_variable('CURRENT ENERGY', scf_wfn.variable('MP2D TOTAL ENERGY'))
scf_wfn.set_variable('CURRENT CORRELATION ENERGY', scf_wfn.variable('MP2D CORRELATION ENERGY'))
scf_wfn.set_variable('CURRENT REFERENCE ENERGY', scf_wfn.variable('SCF TOTAL ENERGY'))
# Shove variables into global space
for k, v in scf_wfn.variables().items():
core.set_variable(k, v)
optstash_scf.restore()
optstash_mp2.restore()
return scf_wfn
def run_scf_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a SCF gradient calculation.
"""
dft_func = False
if "dft_functional" in kwargs:
dft_func = True
optstash = proc_util.scf_set_reference_local(name, is_dft=dft_func)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = run_scf(name, **kwargs)
if core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF']:
ref_wfn.semicanonicalize()
if hasattr(ref_wfn, "_disp_functor"):
disp_grad = ref_wfn._disp_functor.compute_gradient(ref_wfn.molecule(), ref_wfn)
ref_wfn.set_variable("-D Gradient", disp_grad)
grad = core.scfgrad(ref_wfn)
if ref_wfn.basisset().has_ECP():
core.print_out("\n\n ==> Adding ECP gradient terms (computed numerically) <==\n")
# Build a map of atom->ECP number
old_print = ref_wfn.get_print()
ref_wfn.set_print(0)
delta = 0.0001
natom = ref_wfn.molecule().natom()
mints = core.MintsHelper(ref_wfn)
ecpgradmat = core.Matrix("ECP Gradient", natom, 3)
ecpgradmat.zero()
ecpgrad = np.asarray(ecpgradmat)
Dmat = ref_wfn.Da_subset("AO")
Dmat.add(ref_wfn.Db_subset("AO"))
def displaced_energy(atom, displacement):
mints.basisset().move_atom(atom, displacement)
E = Dmat.vector_dot(mints.ao_ecp())
mints.basisset().move_atom(atom, -1*displacement)
return E
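        # Four-point central finite difference for each atom and Cartesian direction:
        #   dE/dx ~= (E(-2h) + 8 E(+h) - 8 E(-h) - E(+2h)) / (12 h), with h = delta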
for atom in range(natom):
for xyz in range(3):
transvec = core.Vector3(0.0)
transvec[xyz] += delta
# +1 displacement
Ep1 = displaced_energy(atom, 1*transvec)
# -1 displacement
Em1 = displaced_energy(atom, -1*transvec)
# +2 displacement
Ep2 = displaced_energy(atom, 2*transvec)
# -2 displacement
Em2 = displaced_energy(atom, -2*transvec)
# Evaluate
ecpgrad[atom, xyz] = (Em2 + 8*Ep1 - 8*Em1 - Ep2) / (12*delta)
ecpgradmat.symmetrize_gradient(ref_wfn.molecule())
ecpgradmat.print_atom_vector()
grad.add(ecpgradmat)
grad.print_atom_vector()
ref_wfn.set_print(old_print)
ref_wfn.set_gradient(grad)
ref_wfn.set_variable("SCF TOTAL GRADIENT", grad)
if ref_wfn.functional().needs_xc():
ref_wfn.set_variable("DFT TOTAL GRADIENT", grad) # overwritten later for DH -- TODO when DH gradients
else:
ref_wfn.set_variable("HF TOTAL GRADIENT", grad)
# Shove variables into global space
for k, v in ref_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return ref_wfn
def run_scf_hessian(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an SCF hessian calculation.
"""
optstash = proc_util.scf_set_reference_local(name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = run_scf(name, **kwargs)
badref = core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF', 'UKS']
badint = core.get_global_option('SCF_TYPE') in [ 'CD', 'OUT_OF_CORE']
if badref or badint:
        raise ValidationError("Only RHF/UHF Hessians are currently implemented; SCF_TYPE CD and OUT_OF_CORE are not supported.")
if hasattr(ref_wfn, "_disp_functor"):
disp_hess = ref_wfn._disp_functor.compute_hessian(ref_wfn.molecule(), ref_wfn)
ref_wfn.set_variable("-D Hessian", disp_hess)
H = core.scfhess(ref_wfn)
ref_wfn.set_hessian(H)
# Clearly, add some logic when the reach of this fn expands
ref_wfn.set_variable('HF TOTAL HESSIAN', H)
optstash.restore()
return ref_wfn
def run_mcscf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a multiconfigurational self-consistent-field calculation.
"""
# Make sure the molecule the user provided is the active one
mcscf_molecule = kwargs.get('molecule', core.get_active_molecule())
mcscf_molecule.update_geometry()
if 'ref_wfn' in kwargs:
raise ValidationError("It is not possible to pass run_mcscf a reference wavefunction")
new_wfn = core.Wavefunction.build(mcscf_molecule, core.get_global_option('BASIS'))
return core.mcscf(new_wfn)
def run_dfmp2_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DFMP2 gradient calculation.
"""
optstash = p4util.OptionsState(
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if "DF" not in core.get_global_option('SCF_TYPE'):
raise ValidationError('DF-MP2 gradients need DF-SCF reference.')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if ref_wfn.basisset().has_ECP():
raise ValidationError('DF-MP2 gradients with an ECP are not yet available. Use dertype=0 to select numerical gradients.')
core.tstart()
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dfmp2_wfn = core.dfmp2(ref_wfn)
grad = dfmp2_wfn.compute_gradient()
dfmp2_wfn.set_gradient(grad)
# Shove variables into global space
dfmp2_wfn.set_variable('MP2 TOTAL GRADIENT', grad)
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dfmp2_wfn
def run_dfmp2d_gradient(name, **kwargs):
"""Encode MP2-D method."""
dfmp2_wfn = run_dfmp2_gradient('mp2', **kwargs)
_, _disp_functor = build_disp_functor('MP2D', restricted=True)
disp_grad = _disp_functor.compute_gradient(dfmp2_wfn.molecule(), dfmp2_wfn)
dfmp2_wfn.gradient().add(disp_grad)
dfmp2_wfn.set_variable('MP2D CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY') + dfmp2_wfn.variable('DISPERSION CORRECTION ENERGY'))
dfmp2_wfn.set_variable('MP2D TOTAL ENERGY', dfmp2_wfn.variable('MP2D CORRELATION ENERGY') + dfmp2_wfn.variable('HF TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2D TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2D CORRELATION ENERGY'))
# Shove variables into global space
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
return dfmp2_wfn
def run_ccenergy(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a CCSD, CC2, and CC3 calculation.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'])
if name == 'ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD')
core.set_local_option('CCSORT', 'WFN', 'CCSD')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD')
core.set_local_option('CCENERGY', 'WFN', 'CCSD')
elif name == 'ccsd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD_T')
core.set_local_option('CCSORT', 'WFN', 'CCSD_T')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD_T')
core.set_local_option('CCENERGY', 'WFN', 'CCSD_T')
elif name == 'ccsd(at)':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD_AT')
core.set_local_option('CCSORT', 'WFN', 'CCSD_AT')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD_AT')
core.set_local_option('CCENERGY', 'WFN', 'CCSD_AT')
core.set_local_option('CCHBAR', 'WFN', 'CCSD_AT')
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_AT')
elif name == 'cc2':
core.set_local_option('TRANSQT2', 'WFN', 'CC2')
core.set_local_option('CCSORT', 'WFN', 'CC2')
core.set_local_option('CCTRANSORT', 'WFN', 'CC2')
core.set_local_option('CCENERGY', 'WFN', 'CC2')
elif name == 'cc3':
core.set_local_option('TRANSQT2', 'WFN', 'CC3')
core.set_local_option('CCSORT', 'WFN', 'CC3')
core.set_local_option('CCTRANSORT', 'WFN', 'CC3')
core.set_local_option('CCENERGY', 'WFN', 'CC3')
elif name == 'eom-cc2':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC2')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCTRANSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC2')
elif name == 'eom-ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CCSD')
core.set_local_option('CCSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCTRANSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CCSD')
# Call a plain energy('ccenergy') and have full control over options, incl. wfn
elif name == 'ccenergy':
pass
# Bypass routine scf if user did something special to get it to converge
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_global_option("CC_TYPE") == "DF":
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
# Obtain semicanonical orbitals
if (core.get_option('SCF', 'REFERENCE') == 'ROHF') and \
((name in ['ccsd(t)', 'ccsd(at)', 'cc2', 'cc3', 'eom-cc2', 'eom-cc3']) or
core.get_option('CCTRANSORT', 'SEMICANONICAL')):
ref_wfn.semicanonicalize()
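    # Integral transformation/sorting: the combined CCTRANSORT module, or, when
    # RUN_CCTRANSORT is disabled, the legacy TRANSQT2/CCSORT pair from the optional
    # 'pasture' add-ons.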
if core.get_global_option('RUN_CCTRANSORT'):
core.cctransort(ref_wfn)
else:
try:
from psi4.driver.pasture import addins
addins.ccsort_transqt2(ref_wfn)
except:
raise PastureRequiredError("RUN_CCTRANSORT")
ccwfn = core.ccenergy(ref_wfn)
if core.get_global_option('PE'):
ccwfn.pe_state = ref_wfn.pe_state
if name == 'ccsd(at)':
core.cchbar(ref_wfn)
core.cclambda(ref_wfn)
optstash.restore()
return ccwfn
def run_ccenergy_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a CCSD and CCSD(T) gradient calculation.
"""
optstash = p4util.OptionsState(
['GLOBALS', 'DERTYPE'],
['CCLAMBDA', 'WFN'],
['CCDENSITY', 'WFN'])
core.set_global_option('DERTYPE', 'FIRST')
if core.get_global_option('FREEZE_CORE') == 'TRUE':
raise ValidationError('Frozen core is not available for the CC gradients.')
ccwfn = run_ccenergy(name, **kwargs)
if name == 'cc2':
core.set_local_option('CCHBAR', 'WFN', 'CC2')
core.set_local_option('CCLAMBDA', 'WFN', 'CC2')
core.set_local_option('CCDENSITY', 'WFN', 'CC2')
if name == 'ccsd':
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD')
core.set_local_option('CCDENSITY', 'WFN', 'CCSD')
elif name == 'ccsd(t)':
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_T')
core.set_local_option('CCDENSITY', 'WFN', 'CCSD_T')
core.cchbar(ccwfn)
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
derivobj = core.Deriv(ccwfn)
grad = derivobj.compute()
del derivobj
ccwfn.set_gradient(grad)
ccwfn.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
core.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
core.set_variable("CURRENT GRADIENT", grad)
optstash.restore()
return ccwfn
def run_bccd(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a Brueckner CCD calculation.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'])
if name == 'bccd':
core.set_local_option('TRANSQT2', 'WFN', 'BCCD')
core.set_local_option('CCSORT', 'WFN', 'BCCD')
core.set_local_option('CCTRANSORT', 'WFN', 'BCCD')
core.set_local_option('CCENERGY', 'WFN', 'BCCD')
elif name == 'bccd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'BCCD_T')
core.set_local_option('CCSORT', 'WFN', 'BCCD_T')
core.set_local_option('CCENERGY', 'WFN', 'BCCD_T')
core.set_local_option('CCTRANSORT', 'WFN', 'BCCD_T')
core.set_local_option('CCTRIPLES', 'WFN', 'BCCD_T')
else:
raise ValidationError("proc.py:run_bccd name %s not recognized" % name)
# Bypass routine scf if user did something special to get it to converge
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Needed for (T).
if (core.get_option('SCF', 'REFERENCE') == 'ROHF'):
ref_wfn.semicanonicalize()
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
core.set_local_option('CCTRANSORT', 'DELETE_TEI', 'false')
bcc_iter_cnt = 0
if (core.get_global_option("RUN_CCTRANSORT")):
sort_func = core.cctransort
else:
try:
from psi4.driver.pasture import addins
core.set_local_option('TRANSQT2', 'DELETE_TEI', 'false')
sort_func = addins.ccsort_transqt2
except:
raise PastureRequiredError("RUN_CCTRANSORT")
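    # Brueckner iterations: re-sort the integrals with the rotated orbitals and
    # re-solve the CC equations until ccenergy reports BRUECKNER CONVERGED (the usual
    # criterion being vanishing singles amplitudes) or BCCD_MAXITER is exceeded.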
while True:
sort_func(ref_wfn)
ref_wfn = core.ccenergy(ref_wfn)
core.print_out('Brueckner convergence check: %s\n' % bool(core.variable('BRUECKNER CONVERGED')))
        if core.variable('BRUECKNER CONVERGED'):
break
if bcc_iter_cnt >= core.get_option('CCENERGY', 'BCCD_MAXITER'):
core.print_out("\n\nWarning! BCCD did not converge within the maximum number of iterations.")
core.print_out("You can increase the number of BCCD iterations by changing BCCD_MAXITER.\n\n")
break
bcc_iter_cnt += 1
if name == 'bccd(t)':
core.cctriples(ref_wfn)
optstash.restore()
return ref_wfn
def run_tdscf_excitations(wfn,**kwargs):
states = core.get_option("SCF","TDSCF_STATES")
# some sanity checks
if sum(states) == 0:
raise ValidationError("TDSCF: No states requested in TDSCF_STATES")
# unwrap 1-membered list of states, regardless of symmetry
# we will apportion states per irrep later on
if len(states) == 1:
states = states[0]
# Tie TDSCF_R_CONVERGENCE to D_CONVERGENCE in SCF reference
if core.has_option_changed('SCF', 'TDSCF_R_CONVERGENCE'):
r_convergence = core.get_option('SCF', 'TDSCF_R_CONVERGENCE')
else:
r_convergence = min(1.e-4, core.get_option('SCF', 'D_CONVERGENCE') * 1.e2)
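    # Example: with D_CONVERGENCE = 1e-8 this gives min(1e-4, 1e-8 * 1e2) = 1e-6.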
# "anonymous" return value, as we stash observables in the passed Wavefunction object internally
_ = response.scf_response.tdscf_excitations(wfn,
states=states,
triplets=core.get_option("SCF", "TDSCF_TRIPLETS"),
tda=core.get_option("SCF", "TDSCF_TDA"),
r_convergence=r_convergence,
maxiter=core.get_option("SCF", "TDSCF_MAXITER"),
guess=core.get_option("SCF", "TDSCF_GUESS"),
verbose=core.get_option("SCF", "TDSCF_PRINT"))
# Shove variables into global space
for k, v in wfn.variables().items():
core.set_variable(k, v)
return wfn
def run_tdscf_energy(name, **kwargs):
# Get a wfn in case we aren't given one
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
if name is None:
raise ValidationError("TDSCF: No reference wave function!")
else:
            # str.strip removes characters, not a prefix; drop the leading "td-" explicitly
            ref_wfn = run_scf(name[3:] if name.startswith('td-') else name, **kwargs)
return run_tdscf_excitations(ref_wfn, **kwargs)
def run_scf_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
SCF calculations. This is a simple alias to :py:func:`~proc.run_scf`
    since SCF properties are all handled through oeprop.
"""
core.tstart()
optstash = proc_util.scf_set_reference_local(name)
properties = kwargs.pop('properties')
# What response do we need?
response_list_vals = list(response.scf_response.property_dicts)
oeprop_list_vals = core.OEProp.valid_methods
oe_properties = []
linear_response = []
unknown_property = []
for prop in properties:
prop = prop.upper()
if prop in response_list_vals:
linear_response.append(prop)
elif (prop in oeprop_list_vals) or ("MULTIPOLE(" in prop):
oe_properties.append(prop)
else:
unknown_property.append(prop)
if "DIPOLE" not in oe_properties:
oe_properties.append("DIPOLE")
# Throw if we dont know what something is
if len(unknown_property):
complete_options = oeprop_list_vals + response_list_vals
alt_method_name = p4util.text.find_approximate_string_matches(unknown_property[0],
complete_options, 2)
alternatives = ""
if len(alt_method_name) > 0:
alternatives = " Did you mean? %s" % (" ".join(alt_method_name))
raise ValidationError("SCF Property: Feature '%s' is not recognized. %s" % (unknown_property[0], alternatives))
# Validate OEProp
if len(oe_properties):
proc_util.oeprop_validator(oe_properties)
if len(linear_response):
optstash_jk = p4util.OptionsState(["SAVE_JK"])
core.set_global_option("SAVE_JK", True)
# Compute the Wavefunction
scf_wfn = run_scf(name, scf_do_properties=False, do_timer=False, **kwargs)
# Run OEProp
oe = core.OEProp(scf_wfn)
oe.set_title(name.upper())
for prop in oe_properties:
oe.add(prop.upper())
oe.compute()
scf_wfn.oeprop = oe
# Always must set SCF dipole (retire components at v1.5)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for cart in ["X", "Y", "Z"]:
core.set_variable("SCF DIPOLE " + cart, core.variable(name + " DIPOLE " + cart))
core.set_variable("SCF DIPOLE", core.variable(name + " DIPOLE"))
    # Run linear response
if len(linear_response):
core.prepare_options_for_module("SCF")
ret = response.scf_response.cpscf_linear_response(scf_wfn, *linear_response,
conv_tol = core.get_global_option("SOLVER_CONVERGENCE"),
max_iter = core.get_global_option("SOLVER_MAXITER"),
print_lvl = (core.get_global_option("PRINT") + 1))
optstash_jk.restore()
core.tstop()
optstash.restore()
return scf_wfn
def run_cc_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
all CC property calculations.
"""
optstash = p4util.OptionsState(
['WFN'],
['DERTYPE'],
['ONEPDM'],
['PROPERTY'],
['CCLAMBDA', 'R_CONVERGENCE'],
['CCEOM', 'R_CONVERGENCE'],
['CCEOM', 'E_CONVERGENCE']) # yapf:disable
oneel_properties = core.OEProp.valid_methods
twoel_properties = []
response_properties = ['POLARIZABILITY', 'ROTATION', 'ROA', 'ROA_TENSOR']
excited_properties = ['OSCILLATOR_STRENGTH', 'ROTATIONAL_STRENGTH']
one = []
two = []
response = []
excited = []
invalid = []
if 'properties' in kwargs:
properties = kwargs['properties']
for prop in properties:
prop = prop.upper()
if prop in oneel_properties:
one.append(prop)
elif prop in twoel_properties:
two.append(prop)
elif prop in response_properties:
response.append(prop)
elif prop in excited_properties:
excited.append(prop)
else:
invalid.append(prop)
else:
raise ValidationError("""The "properties" keyword is required with the property() function.""")
    # People are used to requesting dipole/quadrupole and getting DIPOLE, QUADRUPOLE, MULLIKEN_CHARGES and NO_OCCUPATIONS
if ('DIPOLE' in one) or ('QUADRUPOLE' in one):
one = list(set(one + ['DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS']))
n_one = len(one)
n_two = len(two)
n_response = len(response)
n_excited = len(excited)
n_invalid = len(invalid)
if n_invalid > 0:
print("""The following properties are not currently supported: %s""" % invalid)
if n_excited > 0 and (name not in ['eom-ccsd', 'eom-cc2']):
raise ValidationError("""Excited state CC properties require EOM-CC2 or EOM-CCSD.""")
if (name in ['eom-ccsd', 'eom-cc2']) and n_response > 0:
raise ValidationError("""Cannot (yet) compute response properties for excited states.""")
    if 'ROA' in response:
# Perform distributed roa job
run_roa(name, **kwargs)
return # Don't do anything further
if (n_one > 0 or n_two > 0) and (n_response > 0):
print("""Computing both density- and response-based properties.""")
if name in ['ccsd', 'cc2', 'eom-ccsd', 'eom-cc2']:
this_name = name.upper().replace('-', '_')
core.set_global_option('WFN', this_name)
ccwfn = run_ccenergy(name, **kwargs)
core.set_global_option('WFN', this_name)
else:
raise ValidationError("""CC property name %s not recognized""" % name.upper())
# Need cchbar for everything
core.cchbar(ccwfn)
# Need ccdensity at this point only for density-based props
if n_one > 0 or n_two > 0:
if name == 'eom-ccsd':
core.set_global_option('WFN', 'EOM_CCSD')
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cceom(ccwfn)
elif name == 'eom-cc2':
core.set_global_option('WFN', 'EOM_CC2')
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cceom(ccwfn)
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
# Need ccresponse only for response-type props
if n_response > 0:
core.set_global_option('DERTYPE', 'RESPONSE')
core.cclambda(ccwfn)
for prop in response:
core.set_global_option('PROPERTY', prop)
core.ccresponse(ccwfn)
# Excited-state transition properties
if n_excited > 0:
if name == 'eom-ccsd':
core.set_global_option('WFN', 'EOM_CCSD')
elif name == 'eom-cc2':
core.set_global_option('WFN', 'EOM_CC2')
else:
raise ValidationError("""Unknown excited-state CC wave function.""")
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
# Tight convergence unnecessary for transition properties
core.set_local_option('CCLAMBDA', 'R_CONVERGENCE', 1e-4)
core.set_local_option('CCEOM', 'R_CONVERGENCE', 1e-4)
core.set_local_option('CCEOM', 'E_CONVERGENCE', 1e-5)
core.cceom(ccwfn)
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
if n_one > 0:
# call oe prop for GS density
oe = core.OEProp(ccwfn)
oe.set_title(name.upper())
for oe_name in one:
oe.add(oe_name.upper())
oe.compute()
# call oe prop for each ES density
if name.startswith('eom'):
# copy GS CC DIP/QUAD ... to CC ROOT 0 DIP/QUAD ... if we are doing multiple roots
# retire components at v1.5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if 'dipole' in one:
core.set_variable("CC ROOT 0 DIPOLE X", core.variable("CC DIPOLE X"))
core.set_variable("CC ROOT 0 DIPOLE Y", core.variable("CC DIPOLE Y"))
core.set_variable("CC ROOT 0 DIPOLE Z", core.variable("CC DIPOLE Z"))
if 'quadrupole' in one:
core.set_variable("CC ROOT 0 QUADRUPOLE XX", core.variable("CC QUADRUPOLE XX"))
core.set_variable("CC ROOT 0 QUADRUPOLE XY", core.variable("CC QUADRUPOLE XY"))
core.set_variable("CC ROOT 0 QUADRUPOLE XZ", core.variable("CC QUADRUPOLE XZ"))
core.set_variable("CC ROOT 0 QUADRUPOLE YY", core.variable("CC QUADRUPOLE YY"))
core.set_variable("CC ROOT 0 QUADRUPOLE YZ", core.variable("CC QUADRUPOLE YZ"))
core.set_variable("CC ROOT 0 QUADRUPOLE ZZ", core.variable("CC QUADRUPOLE ZZ"))
if 'dipole' in one:
core.set_variable("CC ROOT 0 DIPOLE", core.variable("CC DIPOLE"))
if 'quadrupole' in one:
core.set_variable("CC ROOT 0 QUADRUPOLE", core.variable("CC QUADRUPOLE"))
n_root = sum(core.get_global_option("ROOTS_PER_IRREP"))
for rn in range(n_root):
oe.set_title("CC ROOT {}".format(rn + 1))
Da = ccwfn.variable("CC ROOT {} Da".format(rn + 1))
oe.set_Da_so(Da)
if core.get_global_option("REFERENCE") == "UHF":
Db = ccwfn.variable("CC ROOT {} Db".format(rn + 1))
oe.set_Db_so(Db)
oe.compute()
core.set_global_option('WFN', 'SCF')
core.revoke_global_option_changed('WFN')
core.set_global_option('DERTYPE', 'NONE')
core.revoke_global_option_changed('DERTYPE')
optstash.restore()
return ccwfn
def run_dfmp2_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DFMP2 property calculation.
"""
optstash = p4util.OptionsState(
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['ONEPDM'],
['OPDM_RELAX'],
['SCF_TYPE'])
core.set_global_option('ONEPDM', 'TRUE')
core.set_global_option('OPDM_RELAX', 'TRUE')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF') # local set insufficient b/c SCF option read in DFMP2
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
    if 'DF' not in core.get_global_option('SCF_TYPE'):
raise ValidationError('DF-MP2 properties need DF-SCF reference.')
properties = kwargs.pop('properties')
proc_util.oeprop_validator(properties)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, scf_do_properties=False, use_c1=True, **kwargs) # C1 certified
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
core.tstart()
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
dfmp2_wfn = core.dfmp2(ref_wfn)
grad = dfmp2_wfn.compute_gradient()
if name == 'scs-mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
# Run OEProp
oe = core.OEProp(dfmp2_wfn)
oe.set_title(name.upper())
for prop in properties:
oe.add(prop.upper())
oe.compute()
dfmp2_wfn.oeprop = oe
# Shove variables into global space
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dfmp2_wfn
def _clean_detci(keep: bool=True):
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
cifl = core.get_option("DETCI", "CI_FILE_START")
for fl in range(cifl, cifl + 4):
if psio.open_check(fl):
psio.close(fl, keep)
def run_detci_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a configuration interaction calculation, namely FCI,
CIn, MPn, and ZAPTn, computing properties.
"""
optstash = p4util.OptionsState(
['OPDM'],
['TDM'])
# Find valid properties
valid_transition = ['TRANSITION_DIPOLE', 'TRANSITION_QUADRUPOLE']
ci_prop = []
ci_trans = []
properties = kwargs.pop('properties')
for prop in properties:
if prop.upper() in valid_transition:
ci_trans.append(prop)
else:
ci_prop.append(prop)
proc_util.oeprop_validator(ci_prop)
core.set_global_option('OPDM', 'TRUE')
if len(ci_trans):
core.set_global_option('TDM', 'TRUE')
# Compute
if name in ['mcscf', 'rasscf', 'casscf']:
ciwfn = run_detcas(name, **kwargs)
else:
ciwfn = run_detci(name, **kwargs)
# All property names are just CI
if 'CI' in name.upper():
name = 'CI'
states = core.get_global_option('avg_states')
nroots = core.get_global_option('num_roots')
if len(states) != nroots:
states = range(nroots)
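    # If AVG_STATES is inconsistent with NUM_ROOTS, fall back to treating every root
    # 0..nroots-1.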
# Run OEProp
oe = core.OEProp(ciwfn)
oe.set_title(name.upper())
for prop in ci_prop:
oe.add(prop.upper())
# Compute "the" CI density
oe.compute()
ciwfn.oeprop = oe
# If we have more than one root, compute all data
if nroots > 1:
core.print_out("\n ===> %s properties for all CI roots <=== \n\n" % name.upper())
for root in states:
oe.set_title("%s ROOT %d" % (name.upper(), root))
if ciwfn.same_a_b_dens():
oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
else:
oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
oe.set_Db_mo(ciwfn.get_opdm(root, root, "B", True))
oe.compute()
# Transition density matrices
if (nroots > 1) and len(ci_trans):
oe.clear()
for tprop in ci_trans:
oe.add(tprop.upper())
core.print_out("\n ===> %s properties for all CI transition density matrices <=== \n\n" % name.upper())
for root in states[1:]:
oe.set_title("%s ROOT %d -> ROOT %d" % (name.upper(), 0, root))
if ciwfn.same_a_b_dens():
oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
else:
oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
oe.set_Db_mo(ciwfn.get_opdm(0, root, "B", True))
oe.compute()
_clean_detci()
optstash.restore()
return ciwfn
def run_eom_cc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an EOM-CC calculation, namely EOM-CC2, EOM-CCSD, and EOM-CC3.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'],
['CCHBAR', 'WFN'],
['CCEOM', 'WFN'])
if name == 'eom-ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CCSD')
core.set_local_option('CCSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CCSD')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CCSD')
core.set_local_option('CCEOM', 'WFN', 'EOM_CCSD')
ref_wfn = run_ccenergy('ccsd', **kwargs)
elif name == 'eom-cc2':
user_ref = core.get_option('CCENERGY', 'REFERENCE')
if (user_ref != 'RHF') and (user_ref != 'UHF'):
raise ValidationError('Reference %s for EOM-CC2 is not available.' % user_ref)
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC2')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC2')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CC2')
core.set_local_option('CCEOM', 'WFN', 'EOM_CC2')
ref_wfn = run_ccenergy('cc2', **kwargs)
elif name == 'eom-cc3':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC3')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC3')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC3')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CC3')
core.set_local_option('CCEOM', 'WFN', 'EOM_CC3')
ref_wfn = run_ccenergy('cc3', **kwargs)
core.cchbar(ref_wfn)
core.cceom(ref_wfn)
optstash.restore()
return ref_wfn
# TODO ask whether all these cc modules really leave the wfn unchanged
def run_eom_cc_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an EOM-CCSD gradient calculation.
"""
optstash = p4util.OptionsState(
['CCDENSITY', 'XI'],
['CCDENSITY', 'ZETA'],
['CCLAMBDA', 'ZETA'],
['DERTYPE'],
['CCDENSITY', 'WFN'],
['CCLAMBDA', 'WFN'])
core.set_global_option('DERTYPE', 'FIRST')
if name == 'eom-ccsd':
core.set_local_option('CCLAMBDA', 'WFN', 'EOM_CCSD')
core.set_local_option('CCDENSITY', 'WFN', 'EOM_CCSD')
ref_wfn = run_eom_cc(name, **kwargs)
else:
core.print_out('DGAS: proc.py:1599 hitting an undefined sequence')
core.clean()
raise ValueError('Hit a wall in proc.py:1599')
core.set_local_option('CCLAMBDA', 'ZETA', 'FALSE')
core.set_local_option('CCDENSITY', 'ZETA', 'FALSE')
core.set_local_option('CCDENSITY', 'XI', 'TRUE')
core.cclambda(ref_wfn)
core.ccdensity(ref_wfn)
core.set_local_option('CCLAMBDA', 'ZETA', 'TRUE')
core.set_local_option('CCDENSITY', 'ZETA', 'TRUE')
core.set_local_option('CCDENSITY', 'XI', 'FALSE')
core.cclambda(ref_wfn)
core.ccdensity(ref_wfn)
derivobj = core.Deriv(ref_wfn)
grad = derivobj.compute()
ref_wfn.set_gradient(grad)
optstash.restore()
return ref_wfn
def run_adc_deprecated(*args, **kwargs):
    warnings.warn("The method 'adc' has been deprecated, please use 'adc2' instead. "
                  "The method key 'adc' will be removed in Psi4 1.6.", DeprecationWarning)
return select_adc2(*args, **kwargs)
def run_adc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an algebraic diagrammatic construction calculation.
.. caution:: Get rid of active molecule lines- should be handled in energy.
"""
if core.get_option('ADC', 'REFERENCE') != 'RHF':
raise ValidationError('ADC requires reference RHF')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
return core.adc(ref_wfn)
def run_adcc(name, **kwargs):
"""Prepare and run an ADC calculation in adcc, interpret the result and return
as a wavefunction.
"""
    # TODO Maybe it would improve readability if this function were split
    # up and the whole thing went to a separate file (like for sapt,
    # interface_cfour.py, ...).
try:
import adcc
from adcc.backends import InvalidReference
except ModuleNotFoundError:
raise ValidationError("adcc extras qc_module not available. Try installing "
"via 'pip install adcc' or 'conda install -c adcc adcc'.")
if core.get_option('ADC', 'REFERENCE') not in ["RHF", "UHF"]:
raise ValidationError('adcc requires reference RHF or UHF')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.pop('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs)
# Start timer
do_timer = kwargs.pop("do_timer", True)
if do_timer:
core.tstart()
#
# Build kwargs for adcc
#
kwargs.pop("molecule", None)
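    # When the reference is generated above, it is built in C1 (use_c1=True), so the
    # per-irrep arrays frzcpi()/frzvpi() are expected to hold a single entry.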
if ref_wfn.frzcpi()[0] > 0:
kwargs["frozen_core"] = ref_wfn.frzcpi()[0]
if ref_wfn.frzvpi()[0] > 0:
kwargs["frozen_virtual"] = ref_wfn.frzvpi()[0]
if core.get_option("ADC", "NUM_CORE_ORBITALS"):
kwargs["core_orbitals"] = core.get_option("ADC", "NUM_CORE_ORBITALS")
scf_accuracy = max(core.get_option("SCF", "E_CONVERGENCE"),
core.get_option("SCF", "D_CONVERGENCE"))
if core.get_option("ADC", "R_CONVERGENCE") < 0:
kwargs["conv_tol"] = max(100 * scf_accuracy, 1e-6)
else:
kwargs["conv_tol"] = core.get_option("ADC", "R_CONVERGENCE")
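    # A negative R_CONVERGENCE is taken to mean "not set"; e.g. an SCF converged to
    # 1e-8 in both energy and density yields max(100 * 1e-8, 1e-6) = 1e-6.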
n_roots = core.get_option('ADC', 'ROOTS_PER_IRREP')
if len(n_roots) > 1:
raise ValidationError("adcc can only deal with a single irrep.")
kwargs["n_states"] = n_roots[0]
if core.get_option("ADC", "NUM_GUESSES") > 0:
kwargs["n_guesses"] = core.get_option("ADC", "NUM_GUESSES")
if core.get_option("ADC", "MAX_NUM_VECS") > 0:
kwargs["max_subspace"] = core.get_option("ADC", "MAX_NUM_VECS")
kind = core.get_option("ADC", "KIND").lower()
if isinstance(ref_wfn, core.UHF):
if not core.has_option_changed("ADC", "KIND"):
kind = "any"
        elif kind not in ["any", "spin_flip"]:
            raise ValidationError("For UHF references the only valid values for 'KIND' are "
                                  "'SPIN_FLIP' or 'ANY', not '{}'.".format(kind.upper()))
elif not kind in ["singlet", "triplet", "any"]:
raise ValidationError("For RHF references the value '{}' for 'KIND' is "
"not supported.".format(kind.upper()))
kwargs["kind"] = kind
kwargs["max_iter"] = core.get_option("ADC", "MAXITER")
#
# Determine ADC function method from adcc to run ADC
#
adcrunner = {
"cvs-adc(1)": adcc.cvs_adc1, "cvs-adc(2)": adcc.cvs_adc2,
"cvs-adc(2)-x": adcc.cvs_adc2x, "cvs-adc(3)": adcc.cvs_adc3,
"adc(1)": adcc.adc1, "adc(2)": adcc.adc2,
"adc(2)-x": adcc.adc2x, "adc(3)": adcc.adc3,
}
if name not in adcrunner:
raise ValidationError(f"Unsupported ADC method: {name}")
if "cvs" in name and "core_orbitals" not in kwargs:
raise ValidationError("If a CVS-ADC method is requested, the NUM_CORE_ORBITALS option "
"needs to be set.")
if "core_orbitals" in kwargs and not "cvs" in name:
raise ValidationError("The NUM_CORE_ORBITALS option needs to be set to '0' or absent "
"unless a CVS ADC method is requested.")
if "cvs" in name and kwargs["kind"] in ["spin_flip"]:
raise ValidationError("Spin-flip for CVS-ADC variants is not available.")
#
# Check for unsupported options
#
for option in ["PR", "NORM_TOLERANCE", "POLE_MAXITER", "SEM_MAXITER",
"NEWTON_CONVERGENCE", "MEMORY", "CACHELEVEL", "NUM_AMPS_PRINT"]:
if core.has_option_changed("ADC", option):
raise ValidationError(f"ADC backend adcc does not support option '{option}'")
#
# Launch the rocket
#
# Copy thread setup from psi4
try:
adcc.set_n_threads(core.get_num_threads())
except AttributeError:
# Before adcc 0.13.3:
adcc.thread_pool.reinit(core.get_num_threads(), core.get_num_threads())
# Hack to direct the stream-like interface adcc expects to the string interface of Psi4 core
class CoreStream:
def write(self, text):
core.print_out(text)
core.print_out("\n" + adcc.banner(colour=False) + "\n")
try:
state = adcrunner[name](ref_wfn, **kwargs, output=CoreStream())
except InvalidReference as ex:
raise ValidationError("Cannot run adcc because the passed reference wavefunction is "
"not supported in adcc. Check Psi4 SCF parameters. adcc reports: "
"{}".format(str(ex)))
core.print_out("\n")
# TODO Should a non-converged calculation throw?
#
# Interpret results
#
# Note: This wavefunction is not consistent ... the density
# is e.g. not the proper one (i.e. not the MP(n) one)
adc_wfn = core.Wavefunction(ref_wfn.molecule(), ref_wfn.basisset())
adc_wfn.shallow_copy(ref_wfn)
adc_wfn.set_reference_wavefunction(ref_wfn)
adc_wfn.set_name(name)
adc_wfn.set_module("adcc")
# MP(3) energy for CVS-ADC(3) calculations is still a missing feature in adcc
# ... we store this variant here to be able to fall back to MP(2) energies.
is_cvs_adc3 = state.method.level >= 3 and state.ground_state.has_core_occupied_space
# Ground-state energies
mp = state.ground_state
mp_energy = mp.energy(state.method.level if not is_cvs_adc3 else 2)
mp_corr = 0.0
if state.method.level > 1:
core.print_out("Ground state energy breakdown:\n")
core.print_out(" Energy SCF {0:15.8g} [Eh]\n".format(ref_wfn.energy()))
for level in range(2, state.method.level + 1):
if level >= 3 and is_cvs_adc3:
continue
energy = mp.energy_correction(level)
mp_corr += energy
adc_wfn.set_variable(f"MP{level} correlation energy", energy)
adc_wfn.set_variable(f"MP{level} total energy", mp.energy(level))
core.print_out(f" Energy correlation MP{level} {energy:15.8g} [Eh]\n")
core.print_out(" Energy total {0:15.8g} [Eh]\n".format(mp_energy))
adc_wfn.set_variable("current correlation energy", mp_corr)
adc_wfn.set_variable("current energy", mp_energy)
# Set results of excited-states computation
# TODO Does not work: Can't use strings
# adc_wfn.set_variable("excitation kind", state.kind)
adc_wfn.set_variable("number of iterations", state.n_iter)
adc_wfn.set_variable(name + " excitation energies",
core.Matrix.from_array(state.excitation_energy.reshape(-1, 1)))
adc_wfn.set_variable("number of excited states", len(state.excitation_energy))
core.print_out("\n\n ==> Excited states summary <== \n")
core.print_out("\n" + state.describe(oscillator_strengths=False) + "\n")
# TODO Setting the excitation amplitude elements inside the wavefunction is a little
# challenging, since for each excitation vector one needs to extract the elements
# and map the indices from the adcc to the Psi4 convention. For this reason it
# is not yet done.
core.print_out("\n ==> Dominant amplitudes per state <== \n\n")
tol_ampl = core.get_option("ADC", "CUTOFF_AMPS_PRINT")
core.print_out(state.describe_amplitudes(tolerance=tol_ampl) + "\n\n")
# Shove variables into global space
for k, v in adc_wfn.variables().items():
core.set_variable(k, v)
if do_timer:
core.tstop()
adc_wfn.adcc_state = state
return adc_wfn
def run_adcc_property(name, **kwargs):
"""Run a ADC excited-states property calculation in adcc
and return the resulting properties.
"""
# TODO Things available in ADCC, but not yet implemented here:
# Export of difference and transition density matrices for all states
properties = [prop.upper() for prop in kwargs.pop('properties')]
valid_properties = ['DIPOLE', 'OSCILLATOR_STRENGTH', 'TRANSITION_DIPOLE',
'ROTATIONAL_STRENGTH']
unknown_properties = [prop for prop in properties if prop not in valid_properties]
if unknown_properties:
alternatives = ""
alt_method_name = p4util.text.find_approximate_string_matches(unknown_properties[0],
valid_properties, 2)
if alt_method_name:
alternatives = " Did you mean? " + " ".join(alt_method_name)
raise ValidationError("ADC property: Feature '{}' is not recognized. {}"
"".format(unknown_properties[0], alternatives))
# Start timer
do_timer = kwargs.pop("do_timer", True)
if do_timer:
core.tstart()
adc_wfn = run_adcc(name, do_timer=False, **kwargs)
state = adc_wfn.adcc_state
hf = state.reference_state
mp = state.ground_state
# Formats and indention
ind = " "
def format_vector(label, data):
assert data.ndim == 1
return f"{label:<40s} " + " ".join(f"{d:12.6g}" for d in data)
if "DIPOLE" in properties:
lines = ["\nGround state properties"]
lines += [ind + "Hartree-Fock (HF)"]
lines += [ind + ind + format_vector("Dipole moment (in a.u.)", hf.dipole_moment)]
if state.method.level > 1:
        lines += [ind + "Møller-Plesset 2nd order (MP2)"]
lines += [ind + ind + format_vector("Dipole moment (in a.u.)", mp.dipole_moment(2))]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i, cart in enumerate(["X", "Y", "Z"]):
# retire components at v1.5
adc_wfn.set_variable("MP2 dipole " + cart, mp.dipole_moment(2)[i])
adc_wfn.set_variable("current dipole " + cart, mp.dipole_moment(2)[i])
adc_wfn.set_variable("MP2 dipole", mp.dipole_moment(2))
adc_wfn.set_variable("current dipole", mp.dipole_moment(2))
lines += [""]
core.print_out("\n".join(lines) + "\n")
gauge = core.get_option("ADC", "GAUGE").lower()
if gauge == "velocity":
gauge_short = "VEL"
elif gauge == "length":
gauge_short = "LEN"
else:
raise ValidationError(f"Gauge {gauge} not recognised for ADC calculations.")
computed = {}
if any(prop in properties for prop in ("TRANSITION_DIPOLE", "OSCILLATOR_STRENGTH")):
data = state.transition_dipole_moment
computed["Transition dipole moment (in a.u.)"] = data
adc_wfn.set_variable(f"{name} transition dipoles", core.Matrix.from_array(data))
if "OSCILLATOR_STRENGTH" in properties:
if gauge == "velocity":
data = state.oscillator_strength_velocity.reshape(-1, 1)
else:
data = state.oscillator_strength.reshape(-1, 1)
computed[f"Oscillator strength ({gauge} gauge)"] = data
adc_wfn.set_variable(f"{name} oscillator strengths ({gauge_short})",
core.Matrix.from_array(data))
if "ROTATIONAL_STRENGTH" in properties:
data = state.rotatory_strength.reshape(-1, 1)
computed["Rotational strength (velocity gauge)"] = data
adc_wfn.set_variable(f"{name} rotational strengths (VEL)",
core.Matrix.from_array(data))
if "DIPOLE" in properties:
data = state.state_dipole_moment
computed["State dipole moment (in a.u.)"] = data
adc_wfn.set_variable(f"{name} state dipoles", core.Matrix.from_array(data))
core.print_out("\nExcited state properties:\n")
n_states = adc_wfn.variable("number of excited states")
for i in range(int(n_states)):
lines = [ind + f"Excited state {i}"]
for prop, data in sorted(computed.items()):
lines += [ind + ind + format_vector(prop, data[i])]
core.print_out("\n".join(lines) + "\n")
# Shove variables into global space
for k, v in adc_wfn.variables().items():
core.set_variable(k, v)
if do_timer:
core.tstop()
return adc_wfn
def run_detci(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a configuration interaction calculation, namely FCI,
CIn, MPn, and ZAPTn.
"""
optstash = p4util.OptionsState(
['DETCI', 'WFN'],
['DETCI', 'MAX_NUM_VECS'],
['DETCI', 'MPN_ORDER_SAVE'],
['DETCI', 'MPN'],
['DETCI', 'FCI'],
['DETCI', 'EX_LEVEL'])
if core.get_option('DETCI', 'REFERENCE') not in ['RHF', 'ROHF']:
raise ValidationError('Reference %s for DETCI is not available.' %
core.get_option('DETCI', 'REFERENCE'))
if name == 'zapt':
core.set_local_option('DETCI', 'WFN', 'ZAPTN')
level = kwargs['level']
maxnvect = int((level + 1) / 2) + (level + 1) % 2
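        # maxnvect = ceil((level + 1) / 2): by the 2n+1 rule roughly half as many stored
        # vectors as the target perturbation order are needed (interpretation).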
core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
if (level + 1) % 2:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
else:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)
elif name in ['mp', 'mp2', 'mp3', 'mp4']:
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'MPN', 'TRUE')
if name == 'mp2':
level = 2
elif name == 'mp3':
level = 3
elif name == 'mp4':
level = 4
else:
level = kwargs['level']
maxnvect = int((level + 1) / 2) + (level + 1) % 2
core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
if (level + 1) % 2:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
else:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)
elif name == 'ccsd':
# untested
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'CC', 'TRUE')
core.set_local_option('DETCI', 'CC_EX_LEVEL', 2)
elif name == 'fci':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'FCI', 'TRUE')
elif name == 'cisd':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 2)
elif name == 'cisdt':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 3)
elif name == 'cisdtq':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 4)
elif name == 'ci':
core.set_local_option('DETCI', 'WFN', 'DETCI')
level = kwargs['level']
core.set_local_option('DETCI', 'EX_LEVEL', level)
elif name == 'detci':
pass
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
ciwfn = core.detci(ref_wfn)
# Shove variables into global space
for k, v in ciwfn.variables().items():
core.set_variable(k, v)
print_nos = False
if core.get_option("DETCI", "NAT_ORBS"):
ciwfn.ci_nat_orbs()
print_nos = True
proc_util.print_ci_results(ciwfn, name.upper(), ciwfn.variable("HF TOTAL ENERGY"), ciwfn.variable("CURRENT ENERGY"), print_nos)
    core.print_out("\t\t \"A good bug is a dead bug\" \n\n")
    core.print_out("\t\t\t - Starship Troopers\n\n")
    core.print_out("\t\t \"I didn't write FORTRAN. That's the problem.\"\n\n")
    core.print_out("\t\t\t - Edward Valeev\n")
if core.get_global_option("DIPMOM") and ("mp" not in name.lower()):
# We always would like to print a little dipole information
oeprop = core.OEProp(ciwfn)
oeprop.set_title(name.upper())
oeprop.add("DIPOLE")
oeprop.compute()
ciwfn.oeprop = oeprop
# retire components in v1.5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
core.set_variable("CURRENT DIPOLE X", core.variable(name.upper() + " DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.variable(name.upper() + " DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.variable(name.upper() + " DIPOLE Z"))
core.set_variable("CURRENT DIPOLE", core.variable(name.upper() + " DIPOLE"))
ciwfn.cleanup_ci()
ciwfn.cleanup_dpd()
_clean_detci()
optstash.restore()
return ciwfn
def run_dfmp2(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted MP2 calculation.
"""
optstash = p4util.OptionsState(
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
core.tstart()
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
if core.get_global_option('REFERENCE') == "ROHF":
ref_wfn.semicanonicalize()
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dfmp2_wfn = core.dfmp2(ref_wfn)
dfmp2_wfn.compute_energy()
if name == 'scs-mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'mp2':
dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
# Shove variables into global space
for k, v in dfmp2_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
core.tstop()
return dfmp2_wfn
def run_dfep2(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a density-fitted EP2 (second-order electron propagator) calculation.
"""
core.tstart()
optstash = p4util.OptionsState(
['DF_BASIS_MP2'],
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_global_option('REFERENCE') != "RHF":
        raise ValidationError("DF-EP2 is not available for %s references." %
                              core.get_global_option('REFERENCE'))
# Build the wavefunction
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_EP2",
core.get_option("DFEP2", "DF_BASIS_EP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_EP2", aux_basis)
dfep2_wfn = core.DFEP2Wavefunction(ref_wfn)
    # Figure out what we're doing
if core.has_option_changed('DFEP2', 'EP2_ORBITALS'):
ep2_input = core.get_global_option("EP2_ORBITALS")
else:
n_ip = core.get_global_option("EP2_NUM_IP")
n_ea = core.get_global_option("EP2_NUM_EA")
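        # No explicit orbital list given: sort all alpha orbital energies across irreps,
        # pick the EP2_NUM_IP highest occupied and EP2_NUM_EA lowest virtual orbitals,
        # and map them back to per-irrep indices for the EP2 solver.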
eps = np.hstack(dfep2_wfn.epsilon_a().nph)
irrep_map = np.hstack([np.ones_like(dfep2_wfn.epsilon_a().nph[x]) * x for x in range(dfep2_wfn.nirrep())])
sort = np.argsort(eps)
ip_map = sort[dfep2_wfn.nalpha() - n_ip:dfep2_wfn.nalpha()]
ea_map = sort[dfep2_wfn.nalpha():dfep2_wfn.nalpha() + n_ea]
ep2_input = [[] for x in range(dfep2_wfn.nirrep())]
nalphapi = tuple(dfep2_wfn.nalphapi())
# Add IP info
ip_info = np.unique(irrep_map[ip_map], return_counts=True)
for irrep, cnt in zip(*ip_info):
irrep = int(irrep)
ep2_input[irrep].extend(range(nalphapi[irrep] - cnt, nalphapi[irrep]))
# Add EA info
ea_info = np.unique(irrep_map[ea_map], return_counts=True)
for irrep, cnt in zip(*ea_info):
irrep = int(irrep)
ep2_input[irrep].extend(range(nalphapi[irrep], nalphapi[irrep] + cnt))
# Compute
ret = dfep2_wfn.compute(ep2_input)
# Resort it...
ret_eps = []
for h in range(dfep2_wfn.nirrep()):
ep2_data = ret[h]
inp_data = ep2_input[h]
for i in range(len(ep2_data)):
tmp = [h, ep2_data[i][0], ep2_data[i][1], dfep2_wfn.epsilon_a().get(h, inp_data[i]), inp_data[i]]
ret_eps.append(tmp)
ret_eps.sort(key=lambda x: x[3])
h2ev = constants.hartree2ev
irrep_labels = dfep2_wfn.molecule().irrep_labels()
core.print_out(" ==> Results <==\n\n")
core.print_out(" %8s %12s %12s %8s\n" % ("Orbital", "Koopmans (eV)", "EP2 (eV)", "EP2 PS"))
core.print_out(" ----------------------------------------------\n")
for irrep, ep2, ep2_ps, kt, pos in ret_eps:
label = str(pos + 1) + irrep_labels[irrep]
core.print_out(" %8s % 12.3f % 12.3f % 6.3f\n" % (label, (kt * h2ev), (ep2 * h2ev), ep2_ps))
core.set_variable("EP2 " + label.upper() + " ENERGY", ep2)
core.print_out(" ----------------------------------------------\n\n")
# Figure out the IP and EA
sorted_vals = np.array([x[1] for x in ret_eps])
ip_vals = sorted_vals[sorted_vals < 0]
ea_vals = sorted_vals[sorted_vals > 0]
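    # Poles are classified by sign: negative EP2 values are taken as ionization
    # potentials (occupied orbitals), positive ones as electron affinities (virtuals);
    # the entries nearest the Fermi level are reported below.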
ip_value = None
ea_value = None
if len(ip_vals):
core.set_variable("EP2 IONIZATION POTENTIAL", ip_vals[-1])
core.set_variable("CURRENT ENERGY", ip_vals[-1])
if len(ea_vals):
core.set_variable("EP2 ELECTRON AFFINITY", ea_vals[0])
if core.variable("EP2 IONIZATION POTENTIAL") == 0.0:
core.set_variable("CURRENT ENERGY", ea_vals[0])
core.print_out(" EP2 has completed successfully!\n\n")
core.tstop()
return dfep2_wfn
def run_dmrgscf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a DMRG-SCF calculation.
"""
optstash = p4util.OptionsState(
['SCF_TYPE'],
['DMRG', 'DMRG_CASPT2_CALC'])
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
if 'CASPT2' in name.upper():
core.set_local_option("DMRG", "DMRG_CASPT2_CALC", True)
dmrg_wfn = core.dmrg(ref_wfn)
optstash.restore()
# Shove variables into global space
for k, v in dmrg_wfn.variables().items():
core.set_variable(k, v)
return dmrg_wfn
def run_dmrgci(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a DMRG-CI calculation.
"""
optstash = p4util.OptionsState(
['SCF_TYPE'],
['DMRG', 'DMRG_SCF_MAX_ITER'])
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
core.set_local_option('DMRG', 'DMRG_SCF_MAX_ITER', 1)
dmrg_wfn = core.dmrg(ref_wfn)
optstash.restore()
# Shove variables into global space
for k, v in dmrg_wfn.variables().items():
core.set_variable(k, v)
return dmrg_wfn
def run_psimrcc(name, **kwargs):
"""Function encoding sequence of PSI module calls for a PSIMRCC computation
using a reference from the MCSCF module
"""
mcscf_wfn = run_mcscf(name, **kwargs)
psimrcc_wfn = core.psimrcc(mcscf_wfn)
# Shove variables into global space
for k, v in psimrcc_wfn.variables().items():
core.set_variable(k, v)
return psimrcc_wfn
def run_psimrcc_scf(name, **kwargs):
"""Function encoding sequence of PSI module calls for a PSIMRCC computation
using a reference from the SCF module
"""
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
psimrcc_wfn = core.psimrcc(ref_wfn)
# Shove variables into global space
for k, v in psimrcc_wfn.variables().items():
core.set_variable(k, v)
return psimrcc_wfn
def run_sapt(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a SAPT calculation of any level.
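    A minimal usage sketch (assumes the active molecule is a two-fragment
    dimer):
    >>> energy('sapt0')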
"""
optstash = p4util.OptionsState(
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_basis = kwargs.pop('sapt_basis', 'dimer')
sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, sapt_basis)
if (core.get_option('SCF', 'REFERENCE') != 'RHF') and (name.upper() != "SAPT0"):
raise ValidationError('Only SAPT0 supports a reference different from \"reference rhf\".')
    do_delta_mp2 = name.endswith('dmp2')
# raise Exception("")
ri = core.get_global_option('SCF_TYPE')
df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
# inquire if above at all applies to dfmp2
core.IO.set_default_namespace('dimer')
core.print_out('\n')
p4util.banner('Dimer HF')
core.print_out('\n')
# Compute dimer wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.set_global_option('DF_INTS_IO', 'SAVE')
core.timer_on("SAPT: Dimer SCF")
dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.timer_off("SAPT: Dimer SCF")
if do_delta_mp2:
select_mp2(name, ref_wfn=dimer_wfn, **kwargs)
mp2_corl_interaction_e = core.variable('MP2 CORRELATION ENERGY')
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.set_global_option('DF_INTS_IO', 'LOAD')
# Compute Monomer A wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.IO.change_file_namespace(97, 'dimer', 'monomerA')
core.IO.set_default_namespace('monomerA')
core.print_out('\n')
p4util.banner('Monomer A HF')
core.print_out('\n')
core.timer_on("SAPT: Monomer A SCF")
monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
core.timer_off("SAPT: Monomer A SCF")
if do_delta_mp2:
select_mp2(name, ref_wfn=monomerA_wfn, **kwargs)
mp2_corl_interaction_e -= core.variable('MP2 CORRELATION ENERGY')
# Compute Monomer B wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
core.IO.set_default_namespace('monomerB')
core.print_out('\n')
p4util.banner('Monomer B HF')
core.print_out('\n')
core.timer_on("SAPT: Monomer B SCF")
monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
core.timer_off("SAPT: Monomer B SCF")
# Delta MP2
if do_delta_mp2:
select_mp2(name, ref_wfn=monomerB_wfn, **kwargs)
mp2_corl_interaction_e -= core.variable('MP2 CORRELATION ENERGY')
core.set_variable('SAPT MP2 CORRELATION ENERGY', mp2_corl_interaction_e)
core.set_global_option('DF_INTS_IO', df_ints_io)
if core.get_option('SCF', 'REFERENCE') == 'RHF':
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
core.IO.set_default_namespace('dimer')
core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)
if name in ['sapt0', 'ssapt0']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
elif name == 'sapt2':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
elif name in ['sapt2+', 'sapt2+dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+(3)', 'sapt2+(3)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+3', 'sapt2+3dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+(ccd)', 'sapt2+(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name in ['sapt2+(3)(ccd)', 'sapt2+(3)(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name in ['sapt2+3(ccd)', 'sapt2+3(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
# Make sure we are not going to run CPHF on ROHF, since its MO Hessian
# is not SPD
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
core.set_local_option('SAPT','COUPLED_INDUCTION',False)
core.print_out(' Coupled induction not available for ROHF.\n')
core.print_out(' Proceeding with uncoupled induction only.\n')
core.print_out(" Constructing Basis Sets for SAPT...\n\n")
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
if core.get_global_option("DF_BASIS_ELST") == "":
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
else:
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST",
core.get_global_option("DF_BASIS_ELST"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
core.print_out('\n')
p4util.banner(name.upper())
core.print_out('\n')
e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
dimer_wfn.set_module("sapt")
from psi4.driver.qcdb.psivardefs import sapt_psivars
p4util.expand_psivars(sapt_psivars())
optstash.restore()
# Make sure we got induction, otherwise replace it with uncoupled induction
which_ind = 'IND'
target_ind = 'IND'
if not core.has_variable(' '.join([name.upper(), which_ind, 'ENERGY'])):
which_ind='IND,U'
for term in ['ELST', 'EXCH', 'DISP', 'TOTAL']:
core.set_variable(' '.join(['SAPT', term, 'ENERGY']),
core.variable(' '.join([name.upper(), term, 'ENERGY'])))
# Special induction case
core.set_variable(' '.join(['SAPT', target_ind, 'ENERGY']),
core.variable(' '.join([name.upper(), which_ind, 'ENERGY'])))
core.set_variable('CURRENT ENERGY', core.variable('SAPT TOTAL ENERGY'))
return dimer_wfn
def run_sapt_ct(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a charge-transfer SAPT calculation of any level.
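    A minimal usage sketch (assumes the active molecule is a two-fragment
    dimer):
    >>> energy('sapt0-ct')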
"""
optstash = p4util.OptionsState(
['SCF_TYPE'])
if 'ref_wfn' in kwargs:
core.print_out('\nWarning! Argument ref_wfn is not valid for sapt computations\n')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")
monomerAm = sapt_dimer.extract_subsets(1)
monomerAm.set_name('monomerAm')
monomerBm = sapt_dimer.extract_subsets(2)
monomerBm.set_name('monomerBm')
if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError('SAPT requires \"reference rhf\".')
ri = core.get_global_option('SCF_TYPE')
df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
# inquire if above at all applies to dfmp2
core.IO.set_default_namespace('dimer')
core.print_out('\n')
p4util.banner('Dimer HF')
core.print_out('\n')
core.set_global_option('DF_INTS_IO', 'SAVE')
dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.set_global_option('DF_INTS_IO', 'LOAD')
if (ri == 'DF'):
core.IO.change_file_namespace(97, 'dimer', 'monomerA')
core.IO.set_default_namespace('monomerA')
core.print_out('\n')
p4util.banner('Monomer A HF (Dimer Basis)')
core.print_out('\n')
monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
if (ri == 'DF'):
core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
core.IO.set_default_namespace('monomerB')
core.print_out('\n')
p4util.banner('Monomer B HF (Dimer Basis)')
core.print_out('\n')
monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
core.set_global_option('DF_INTS_IO', df_ints_io)
core.IO.set_default_namespace('monomerAm')
core.print_out('\n')
p4util.banner('Monomer A HF (Monomer Basis)')
core.print_out('\n')
monomerAm_wfn = scf_helper('RHF', molecule=monomerAm, **kwargs)
core.IO.set_default_namespace('monomerBm')
core.print_out('\n')
p4util.banner('Monomer B HF (Monomer Basis)')
core.print_out('\n')
monomerBm_wfn = scf_helper('RHF', molecule=monomerBm, **kwargs)
core.IO.set_default_namespace('dimer')
core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)
if name == 'sapt0-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
elif name == 'sapt2-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
elif name == 'sapt2+-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
elif name == 'sapt2+(3)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
elif name == 'sapt2+3-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
elif name == 'sapt2+(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name == 'sapt2+(3)(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name == 'sapt2+3(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
core.print_out('\n')
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
if core.get_global_option("DF_BASIS_ELST") == "":
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
else:
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST",
core.get_global_option("DF_BASIS_ELST"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
core.print_out('\n')
p4util.banner('SAPT Charge Transfer')
core.print_out('\n')
core.print_out('\n')
p4util.banner('Dimer Basis SAPT')
core.print_out('\n')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
CTd = core.variable('SAPT CT ENERGY')
dimer_wfn.set_module("sapt")
core.print_out('\n')
p4util.banner('Monomer Basis SAPT')
core.print_out('\n')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerAm', 'dimer')
core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerBm', 'dimer')
e_sapt = core.sapt(dimer_wfn, monomerAm_wfn, monomerBm_wfn)
CTm = core.variable('SAPT CT ENERGY')
CT = CTd - CTm
units = (1000.0, constants.hartree2kcalmol, constants.hartree2kJmol)
core.print_out('\n\n')
core.print_out(' SAPT Charge Transfer Analysis\n')
core.print_out(' ------------------------------------------------------------------------------------------------\n')
core.print_out(' SAPT Induction (Dimer Basis) %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
tuple(CTd * u for u in units))
core.print_out(' SAPT Induction (Monomer Basis)%12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
tuple(CTm * u for u in units))
core.print_out(' SAPT Charge Transfer %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n\n' %
tuple(CT * u for u in units))
core.set_variable('SAPT CT ENERGY', CT)
optstash.restore()
return dimer_wfn
def run_fisapt(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an F/ISAPT0 computation
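    A minimal usage sketch (the 'fisapt0' method name is assumed here):
    >>> energy('fisapt0')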
"""
optstash = p4util.OptionsState(
['SCF_TYPE'])
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! FISAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer.update_geometry() # make sure since mol from wfn, kwarg, or P::e
# Shifting to C1 so we need to copy the active molecule
if sapt_dimer.schoenflies_symbol() != 'c1':
core.print_out(' FISAPT does not make use of molecular symmetry, further calculations in C1 point group.\n')
sapt_dimer = sapt_dimer.clone()
sapt_dimer.reset_point_group('c1')
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
sapt_dimer.update_geometry()
if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError('FISAPT requires \"reference rhf\".')
if ref_wfn is None:
core.timer_on("FISAPT: Dimer SCF")
ref_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.timer_off("FISAPT: Dimer SCF")
core.print_out(" Constructing Basis Sets for FISAPT...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
sapt_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"),
ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SAPT", sapt_basis)
minao = core.BasisSet.build(ref_wfn.molecule(), "BASIS",
core.get_global_option("MINAO_BASIS"))
ref_wfn.set_basisset("MINAO", minao)
fisapt_wfn = core.FISAPT(ref_wfn)
from .sapt import fisapt_proc
fisapt_wfn.compute_energy()
optstash.restore()
return ref_wfn
def run_mrcc(name, **kwargs):
"""Function that prepares environment and input files
for a calculation calling Kallay's MRCC code.
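    A rough usage sketch (assumes the external 'dmrcc' executable is on PATH;
    the 'mrccsdt'-style method names of the MRCC interface are an assumption
    here):
    >>> energy('mrccsdt')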
"""
# Check to see if we really need to run the SCF code.
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
vscf = core.variable('SCF TOTAL ENERGY')
# The parse_arbitrary_order method provides us the following information
# We require that level be provided. level is a dictionary
# of settings to be passed to core.mrcc
    if 'level' not in kwargs:
raise ValidationError('level parameter was not provided.')
level = kwargs['level']
# Fullname is the string we need to search for in iface
fullname = level['fullname']
# User can provide 'keep' to the method.
# When provided, do not delete the MRCC scratch directory.
keep = False
if 'keep' in kwargs:
keep = kwargs['keep']
# Save current directory location
current_directory = os.getcwd()
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
':' + os.environ.get('PATH'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
    # Need to move to the scratch directory, preferably into a separate directory in that location
psi_io = core.IOManager.shared_object()
os.chdir(psi_io.get_default_path())
# Make new directory specifically for mrcc
mrcc_tmpdir = 'mrcc_' + str(os.getpid())
if 'path' in kwargs:
mrcc_tmpdir = kwargs['path']
# Check to see if directory already exists, if not, create.
    if not os.path.exists(mrcc_tmpdir):
os.mkdir(mrcc_tmpdir)
# Move into the new directory
os.chdir(mrcc_tmpdir)
# Generate integrals and input file (dumps files to the current directory)
core.mrcc_generate_input(ref_wfn, level)
# Load the fort.56 file
# and dump a copy into the outfile
core.print_out('\n===== Begin fort.56 input for MRCC ======\n')
core.print_out(open('fort.56', 'r').read())
core.print_out('===== End fort.56 input for MRCC ======\n')
# Modify the environment:
    # PGI Fortran prints a warning to the screen if STOP is used
lenv['NO_STOP_MESSAGE'] = '1'
# Obtain the number of threads MRCC should use
lenv['OMP_NUM_THREADS'] = str(core.get_num_threads())
# If the user provided MRCC_OMP_NUM_THREADS set the environ to it
    if core.has_option_changed('MRCC', 'MRCC_OMP_NUM_THREADS'):
lenv['OMP_NUM_THREADS'] = str(core.get_option('MRCC', 'MRCC_OMP_NUM_THREADS'))
# Call dmrcc, directing all screen output to the output file
external_exe = 'dmrcc'
try:
retcode = subprocess.Popen([external_exe], bufsize=0, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
core.print_out('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
message = ("Program %s not found in path or execution failed: %s\n" % (external_exe, e.strerror))
raise ValidationError(message)
c4out = ''
while True:
data = retcode.stdout.readline()
if not data:
break
core.print_out(data.decode('utf-8'))
c4out += data.decode('utf-8')
# Scan iface file and grab the file energy.
ene = 0.0
for line in open('iface'):
fields = line.split()
m = fields[1]
try:
ene = float(fields[5])
if m == "MP(2)":
m = "MP2"
core.set_variable(m + ' TOTAL ENERGY', ene)
core.set_variable(m + ' CORRELATION ENERGY', ene - vscf)
except ValueError:
continue
# The last 'ene' in iface is the one the user requested.
core.set_variable('CURRENT ENERGY', ene)
core.set_variable('CURRENT CORRELATION ENERGY', ene - vscf)
# Load the iface file
iface = open('iface', 'r')
iface_contents = iface.read()
# Delete mrcc tempdir
os.chdir('..')
try:
# Delete unless we're told not to
if (keep is False and not('path' in kwargs)):
shutil.rmtree(mrcc_tmpdir)
except OSError as e:
print('Unable to remove MRCC temporary directory %s' % e, file=sys.stderr)
exit(1)
# Return to submission directory
os.chdir(current_directory)
# If we're told to keep the files or the user provided a path, do nothing.
if (keep != False or ('path' in kwargs)):
core.print_out('\nMRCC scratch files have been kept.\n')
core.print_out('They can be found in ' + mrcc_tmpdir)
# Dump iface contents to output
core.print_out('\n')
p4util.banner('Full results from MRCC')
core.print_out('\n')
core.print_out(iface_contents)
return ref_wfn
def run_fnodfcc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DF-CCSD(T) computation.
>>> set cc_type df
>>> energy('fno-ccsd(t)')
"""
kwargs = p4util.kwargs_lower(kwargs)
# stash user options
optstash = p4util.OptionsState(
['FNOCC', 'COMPUTE_TRIPLES'],
['FNOCC', 'DFCC'],
['FNOCC', 'NAT_ORBS'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'DF_BASIS_CC'],
['SCF', 'DF_BASIS_SCF'],
['SCF', 'DF_INTS_IO'])
core.set_local_option('FNOCC', 'DFCC', True)
core.set_local_option('FNOCC', 'RUN_CEPA', False)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError(f"""Error: {name} requires 'reference rhf'.""")
def set_cholesky_from(mtd_type):
type_val = core.get_global_option(mtd_type)
if type_val == 'CD':
core.set_local_option('FNOCC', 'DF_BASIS_CC', 'CHOLESKY')
# Alter default algorithm
if not core.has_global_option_changed('SCF_TYPE'):
optstash.add_option(['SCF_TYPE'])
core.set_global_option('SCF_TYPE', 'CD')
core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
elif type_val in ['DISK_DF', 'DF']:
if core.get_option('FNOCC', 'DF_BASIS_CC') == 'CHOLESKY':
core.set_local_option('FNOCC', 'DF_BASIS_CC', '')
proc_util.check_disk_df(name.upper(), optstash)
else:
raise ValidationError("""Invalid type '%s' for DFCC""" % type_val)
# triples?
if name == 'ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
set_cholesky_from('CC_TYPE')
elif name == 'ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
set_cholesky_from('CC_TYPE')
elif name == 'fno-ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
set_cholesky_from('CC_TYPE')
elif name == 'fno-ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
set_cholesky_from('CC_TYPE')
if core.get_global_option('SCF_TYPE') not in ['CD', 'DISK_DF']:
raise ValidationError("""Invalid scf_type for DFCC.""")
# save DF or CD ints generated by SCF for use in CC
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" FNOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
core.print_out(" Constructing Basis Sets for FNOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
rel_bas = core.BasisSet.build(ref_wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset('BASIS_RELATIVISTIC',rel_bas)
fnocc_wfn = core.fnocc(ref_wfn)
# Shove variables into global space
for k, v in fnocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return fnocc_wfn
def run_fnocc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a QCISD(T), CCSD(T), MP2.5, MP3, and MP4 computation.
>>> energy('fno-ccsd(t)')
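    Other names handled by this routine include, for example:
    >>> energy('qcisd(t)')
    >>> energy('mp4')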
"""
kwargs = p4util.kwargs_lower(kwargs)
level = kwargs.get('level', 0)
# stash user options:
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['FNOCC', 'RUN_MP2'],
['FNOCC', 'RUN_MP3'],
['FNOCC', 'RUN_MP4'],
['FNOCC', 'RUN_CCSD'],
['FNOCC', 'COMPUTE_TRIPLES'],
['FNOCC', 'COMPUTE_MP4_TRIPLES'],
['FNOCC', 'DFCC'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'USE_DF_INTS'],
['FNOCC', 'NAT_ORBS'])
core.set_local_option('FNOCC', 'DFCC', False)
core.set_local_option('FNOCC', 'RUN_CEPA', False)
core.set_local_option('FNOCC', 'USE_DF_INTS', False)
# which method?
if name == 'ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
elif name == 'ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
elif name == 'fno-ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'qcisd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'qcisd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'fno-qcisd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-qcisd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'mp2':
core.set_local_option('FNOCC', 'RUN_MP2', True)
elif name == 'fno-mp3':
core.set_local_option('FNOCC', 'RUN_MP3', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-mp4':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', True)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'mp4(sdq)':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', False)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
elif name == 'fno-mp4(sdq)':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', False)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'mp3':
core.set_local_option('FNOCC', 'RUN_MP3', True)
elif name == 'mp4':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', True)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError(f"""Error: {name} requires 'reference rhf'.""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
    if not core.get_option('FNOCC', 'USE_DF_INTS'):
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
else:
core.print_out(" Constructing Basis Sets for FNOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
rel_bas = core.BasisSet.build(ref_wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset('BASIS_RELATIVISTIC',rel_bas)
fnocc_wfn = core.fnocc(ref_wfn)
# set current correlation energy and total energy. only need to treat mpn here.
if name in ["mp3", "fno-mp3"]:
fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable("MP3 TOTAL ENERGY"))
fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable("MP3 CORRELATION ENERGY"))
elif name in ["mp4(sdq)", "fno-mp4(sdq)"]:
fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable("MP4(SDQ) TOTAL ENERGY"))
fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable("MP4(SDQ) CORRELATION ENERGY"))
elif name in ["mp4", "fno-mp4"]:
fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable("MP4 TOTAL ENERGY"))
fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable("MP4 CORRELATION ENERGY"))
# Shove variables into global space
for k, v in fnocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return fnocc_wfn
def run_cepa(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a cepa-like calculation.
>>> energy('cepa(1)')
"""
kwargs = p4util.kwargs_lower(kwargs)
# save user options
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['FNOCC', 'NAT_ORBS'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'USE_DF_INTS'],
['FNOCC', 'CEPA_NO_SINGLES'])
core.set_local_option('FNOCC', 'RUN_CEPA', True)
core.set_local_option('FNOCC', 'USE_DF_INTS', False)
# what type of cepa?
if name in ['lccd', 'fno-lccd']:
cepa_level = 'cepa(0)'
core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', True)
elif name in ['cepa(0)', 'fno-cepa(0)', 'lccsd', 'fno-lccsd']:
cepa_level = 'cepa(0)'
core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', False)
elif name in ['cepa(1)', 'fno-cepa(1)']:
cepa_level = 'cepa(1)'
elif name in ['cepa(3)', 'fno-cepa(3)']:
cepa_level = 'cepa(3)'
elif name in ['acpf', 'fno-acpf']:
cepa_level = 'acpf'
elif name in ['aqcc', 'fno-aqcc']:
cepa_level = 'aqcc'
elif name in ['cisd', 'fno-cisd']:
cepa_level = 'cisd'
else:
raise ValidationError("""Error: %s not implemented\n""" % name)
core.set_local_option('FNOCC', 'CEPA_LEVEL', cepa_level.upper())
if name in ['fno-lccd', 'fno-lccsd', 'fno-cepa(0)', 'fno-cepa(1)', 'fno-cepa(3)',
'fno-acpf', 'fno-aqcc', 'fno-cisd']:
core.set_local_option('FNOCC', 'NAT_ORBS', True)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError("""Error: %s requires 'reference rhf'.""" % name)
reference = core.get_option('SCF', 'REFERENCE')
if core.get_global_option('CC_TYPE') != "CONV":
raise ValidationError("""CEPA methods from FNOCC module require 'cc_type conv'.""")
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
    if not core.get_option('FNOCC', 'USE_DF_INTS'):
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
else:
        core.print_out(" Constructing Basis Sets for FNOCC...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
fnocc_wfn = core.fnocc(ref_wfn)
# one-electron properties
if core.get_option('FNOCC', 'DIPMOM'):
if cepa_level in ['cepa(1)', 'cepa(3)']:
core.print_out("""\n Error: one-electron properties not implemented for %s\n\n""" % name)
elif core.get_option('FNOCC', 'NAT_ORBS'):
core.print_out("""\n Error: one-electron properties not implemented for %s\n\n""" % name)
else:
p4util.oeprop(fnocc_wfn, 'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS', title=cepa_level.upper())
# Shove variables into global space
for k, v in fnocc_wfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return fnocc_wfn
def run_detcas(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    determinant-based multireference wavefunctions,
namely CASSCF and RASSCF.
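    A minimal usage sketch (active-space options are assumed to be set
    elsewhere in the input):
    >>> energy('casscf')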
"""
optstash = p4util.OptionsState(
['DETCI', 'WFN'],
['SCF_TYPE'],
['ONEPDM'],
['OPDM_RELAX']
)
user_ref = core.get_option('DETCI', 'REFERENCE')
if user_ref not in ['RHF', 'ROHF']:
raise ValidationError('Reference %s for DETCI is not available.' % user_ref)
if name == 'rasscf':
core.set_local_option('DETCI', 'WFN', 'RASSCF')
elif name == 'casscf':
core.set_local_option('DETCI', 'WFN', 'CASSCF')
else:
raise ValidationError("Run DETCAS: Name %s not understood" % name)
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_optstash = p4util.OptionsState(
['SCF_TYPE'],
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['ONEPDM'],
['OPDM_RELAX']
)
# No real reason to do a conventional guess
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# If RHF get MP2 NO's
        # Why doesn't this work for conv?
if (('DF' in core.get_global_option('SCF_TYPE')) and (user_ref == 'RHF') and
(core.get_option('DETCI', 'MCSCF_TYPE') in ['DF', 'AO']) and
(core.get_option("DETCI", "MCSCF_GUESS") == "MP2")):
core.set_global_option('ONEPDM', True)
core.set_global_option('OPDM_RELAX', False)
ref_wfn = run_dfmp2_gradient(name, **kwargs)
else:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
if (core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV'):
mints = core.MintsHelper(ref_wfn.basisset())
mints.set_print(1)
mints.integrals()
ref_optstash.restore()
# The DF case
if core.get_option('DETCI', 'MCSCF_TYPE') == 'DF':
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(" Constructing Basis Sets for MCSCF...\n\n")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
# The AO case
elif core.get_option('DETCI', 'MCSCF_TYPE') == 'AO':
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DIRECT')
# The conventional case
elif core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV':
if not core.has_global_option_changed('SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'PK')
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
else:
raise ValidationError("Run DETCAS: MCSCF_TYPE %s not understood." % str(core.get_option('DETCI', 'MCSCF_TYPE')))
# Second-order SCF requires non-symmetric density matrix support
if core.get_option('DETCI', 'MCSCF_ALGORITHM') in ['AH', 'OS']:
proc_util.check_non_symmetric_jk_density("Second-order MCSCF")
ciwfn = mcscf.mcscf_solver(ref_wfn)
# We always would like to print a little dipole information
oeprop = core.OEProp(ciwfn)
oeprop.set_title(name.upper())
oeprop.add("DIPOLE")
oeprop.compute()
ciwfn.oeprop = oeprop
# retire components by v1.5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
core.set_variable("CURRENT DIPOLE X", core.variable(name.upper() + " DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.variable(name.upper() + " DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.variable(name.upper() + " DIPOLE Z"))
core.set_variable("CURRENT DIPOLE", core.variable(name.upper() + " DIPOLE"))
# Shove variables into global space
for k, v in ciwfn.variables().items():
core.set_variable(k, v)
optstash.restore()
return ciwfn
def run_efp(name, **kwargs):
"""Function encoding sequence of module calls for a pure EFP
computation (ignore any QM atoms).
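    A minimal usage sketch (assumes the active molecule carries EFP fragments):
    >>> energy('efp')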
"""
efp_molecule = kwargs.get('molecule', core.get_active_molecule())
try:
efpobj = efp_molecule.EFP
except AttributeError:
raise ValidationError("""Method 'efp' not available without EFP fragments in molecule""")
# print efp geom in [A]
core.print_out(efpobj.banner())
core.print_out(efpobj.geometry_summary(units_to_bohr=constants.bohr2angstroms))
# set options
# * 'chtr', 'qm_exch', 'qm_disp', 'qm_chtr' may be enabled in a future libefp release
efpopts = {}
for opt in ['elst', 'exch', 'ind', 'disp',
'elst_damping', 'ind_damping', 'disp_damping']:
psiopt = 'EFP_' + opt.upper()
if core.has_option_changed('EFP', psiopt):
efpopts[opt] = core.get_option('EFP', psiopt)
efpopts['qm_elst'] = False
efpopts['qm_ind'] = False
efpobj.set_opts(efpopts, label='psi', append='psi')
do_gradient = core.get_option('EFP', 'DERTYPE') == 'FIRST'
# compute and report
efpobj.compute(do_gradient=do_gradient)
core.print_out(efpobj.energy_summary(label='psi'))
ene = efpobj.get_energy(label='psi')
core.set_variable('EFP ELST ENERGY', ene['electrostatic'] + ene['charge_penetration'] + ene['electrostatic_point_charges'])
core.set_variable('EFP IND ENERGY', ene['polarization'])
core.set_variable('EFP DISP ENERGY', ene['dispersion'])
core.set_variable('EFP EXCH ENERGY', ene['exchange_repulsion'])
core.set_variable('EFP TOTAL ENERGY', ene['total'])
core.set_variable('CURRENT ENERGY', ene['total'])
if do_gradient:
core.print_out(efpobj.gradient_summary())
torq = efpobj.get_gradient()
torq = core.Matrix.from_array(np.asarray(torq).reshape(-1, 6))
core.set_variable('EFP TORQUE', torq)
return ene['total']
| ashutoshvt/psi4 | psi4/driver/procrouting/proc.py | Python | lgpl-3.0 | 192,397 | ["Psi4"] | c3d52da0a6414aa684083766a6e5fd4ba18a314fe0a48586316c9f20cf1387c5 |
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from copy import deepcopy
from datetime import timedelta
import os
import random
import time
from typing import Dict, List, Optional, Set
from swh.core.config import merge_configs
from swh.counters import get_counters
from swh.indexer.ctags import CtagsIndexer
from swh.indexer.fossology_license import FossologyLicenseIndexer
from swh.indexer.mimetype import MimetypeIndexer
from swh.indexer.storage import get_indexer_storage
from swh.indexer.storage.model import OriginIntrinsicMetadataRow
from swh.loader.git.from_disk import GitLoaderFromArchive
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_hex
from swh.model.model import (
Content,
Directory,
Origin,
OriginVisit,
OriginVisitStatus,
Snapshot,
)
from swh.search import get_search
from swh.storage import get_storage
from swh.storage.algos.dir_iterators import dir_iterator
from swh.storage.algos.snapshot import snapshot_get_latest
from swh.storage.interface import Sha1
from swh.storage.utils import now
from swh.web import config
from swh.web.browse.utils import (
_re_encode_content,
get_mimetype_and_encoding_for_content,
prepare_content_for_display,
)
from swh.web.common import archive
# Module used to initialize data that will be provided as tests input
# Base content indexer configuration
_TEST_INDEXER_BASE_CONFIG = {
"storage": {"cls": "memory"},
"objstorage": {"cls": "memory", "args": {},},
"indexer_storage": {"cls": "memory", "args": {},},
}
def random_sha1():
return hash_to_hex(bytes(random.randint(0, 255) for _ in range(20)))
def random_sha256():
return hash_to_hex(bytes(random.randint(0, 255) for _ in range(32)))
def random_blake2s256():
return hash_to_hex(bytes(random.randint(0, 255) for _ in range(32)))
def random_content():
return {
"sha1": random_sha1(),
"sha1_git": random_sha1(),
"sha256": random_sha256(),
"blake2s256": random_blake2s256(),
}
_TEST_MIMETYPE_INDEXER_CONFIG = merge_configs(
_TEST_INDEXER_BASE_CONFIG,
{
"tools": {
"name": "file",
"version": "1:5.30-1+deb9u1",
"configuration": {"type": "library", "debian-package": "python3-magic"},
}
},
)
_TEST_LICENSE_INDEXER_CONFIG = merge_configs(
_TEST_INDEXER_BASE_CONFIG,
{
"workdir": "/tmp/swh/indexer.fossology.license",
"tools": {
"name": "nomos",
"version": "3.1.0rc2-31-ga2cbb8c",
"configuration": {"command_line": "nomossa <filepath>",},
},
},
)
_TEST_CTAGS_INDEXER_CONFIG = merge_configs(
_TEST_INDEXER_BASE_CONFIG,
{
"workdir": "/tmp/swh/indexer.ctags",
"languages": {"c": "c"},
"tools": {
"name": "universal-ctags",
"version": "~git7859817b",
"configuration": {
"command_line": """ctags --fields=+lnz --sort=no --links=no """
"""--output-format=json <filepath>"""
},
},
},
)
# Lightweight git repositories that will be loaded to generate
# input data for tests
_TEST_ORIGINS = [
{
"type": "git",
"url": "https://github.com/memononen/libtess2",
"archives": ["libtess2.zip"],
"metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": (
"Game and tools oriented refactored version of GLU tessellator."
),
},
},
{
"type": "git",
"url": "https://github.com/wcoder/highlightjs-line-numbers.js",
"archives": [
"highlightjs-line-numbers.js.zip",
"highlightjs-line-numbers.js_visit2.zip",
],
"metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "Line numbering plugin for Highlight.js",
},
},
{
"type": "git",
"url": "repo_with_submodules",
"archives": ["repo_with_submodules.tgz"],
"metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "This is just a sample repository with submodules",
},
},
]
_contents = {}
def _add_extra_contents(storage, contents):
pbm_image_data = b"""P1
# PBM example
24 7
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0
0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0
0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 1 1 0
0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0
0 1 0 0 0 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"""
# add file with mimetype image/x-portable-bitmap in the archive content
pbm_content = Content.from_data(pbm_image_data)
storage.content_add([pbm_content])
contents.add(pbm_content.sha1)
INDEXER_TOOL = {
"tool_name": "swh-web tests",
"tool_version": "1.0",
"tool_configuration": {},
}
ORIGIN_METADATA_KEY = "keywords"
ORIGIN_METADATA_VALUE = "git"
ORIGIN_MASTER_REVISION = {}
def _add_origin(
storage, search, counters, origin_url, visit_type="git", snapshot_branches={}
):
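    """Register a single origin with one full visit, its snapshot and visit
    status in the in-memory storage, search index and counters used by the
    tests (helper for _init_tests_data below)."""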
storage.origin_add([Origin(url=origin_url)])
search.origin_update(
[{"url": origin_url, "has_visits": True, "visit_types": [visit_type]}]
)
counters.add("origin", [origin_url])
date = now()
visit = OriginVisit(origin=origin_url, date=date, type=visit_type)
visit = storage.origin_visit_add([visit])[0]
counters.add("origin_visit", [f"{visit.unique_key()}"])
snapshot = Snapshot.from_dict({"branches": snapshot_branches})
storage.snapshot_add([snapshot])
counters.add("snapshot", [snapshot.id])
visit_status = OriginVisitStatus(
origin=origin_url,
visit=visit.visit,
date=date + timedelta(minutes=1),
type=visit.type,
status="full",
snapshot=snapshot.id,
)
storage.origin_visit_status_add([visit_status])
counters.add("origin_visit_status", [f"{visit_status.unique_key()}"])
# Tests data initialization
def _init_tests_data():
# To hold reference to the memory storage
storage = get_storage("memory")
# Create search instance
search = get_search("memory")
search.initialize()
search.origin_update({"url": origin["url"]} for origin in _TEST_ORIGINS)
# create the counters instance
counters = get_counters("memory")
# Create indexer storage instance that will be shared by indexers
idx_storage = get_indexer_storage("memory")
# Declare a test tool for origin intrinsic metadata tests
idx_tool = idx_storage.indexer_configuration_add([INDEXER_TOOL])[0]
INDEXER_TOOL["id"] = idx_tool["id"]
# Load git repositories from archives
for origin in _TEST_ORIGINS:
for i, archive_ in enumerate(origin["archives"]):
if i > 0:
# ensure visit dates will be different when simulating
# multiple visits of an origin
time.sleep(1)
origin_repo_archive = os.path.join(
os.path.dirname(__file__), "resources/repos/%s" % archive_
)
loader = GitLoaderFromArchive(
storage, origin["url"], archive_path=origin_repo_archive,
)
result = loader.load()
assert result["status"] == "eventful"
ori = storage.origin_get([origin["url"]])[0]
origin.update(ori.to_dict()) # add an 'id' key if enabled
search.origin_update(
[{"url": origin["url"], "has_visits": True, "visit_types": ["git"]}]
)
for i in range(250):
_add_origin(
storage,
search,
counters,
origin_url=f"https://many.origins/{i+1}",
visit_type="tar",
)
sha1s: Set[Sha1] = set()
directories = set()
revisions = set()
releases = set()
snapshots = set()
content_path = {}
# Get all objects loaded into the test archive
common_metadata = {ORIGIN_METADATA_KEY: ORIGIN_METADATA_VALUE}
for origin in _TEST_ORIGINS:
snp = snapshot_get_latest(storage, origin["url"])
snapshots.add(hash_to_hex(snp.id))
for branch_name, branch_data in snp.branches.items():
target_type = branch_data.target_type.value
if target_type == "revision":
revisions.add(branch_data.target)
if b"master" in branch_name:
# Add some origin intrinsic metadata for tests
metadata = common_metadata
metadata.update(origin.get("metadata", {}))
origin_metadata = OriginIntrinsicMetadataRow(
id=origin["url"],
from_revision=branch_data.target,
indexer_configuration_id=idx_tool["id"],
metadata=metadata,
mappings=[],
)
idx_storage.origin_intrinsic_metadata_add([origin_metadata])
search.origin_update(
[{"url": origin["url"], "intrinsic_metadata": metadata}]
)
ORIGIN_MASTER_REVISION[origin["url"]] = hash_to_hex(
branch_data.target
)
elif target_type == "release":
release = storage.release_get([branch_data.target])[0]
revisions.add(release.target)
releases.add(hash_to_hex(branch_data.target))
for rev_log in storage.revision_shortlog(set(revisions)):
rev_id = rev_log[0]
revisions.add(rev_id)
for rev in storage.revision_get(revisions):
if rev is None:
continue
dir_id = rev.directory
directories.add(hash_to_hex(dir_id))
for entry in dir_iterator(storage, dir_id):
if entry["type"] == "file":
sha1s.add(entry["sha1"])
content_path[entry["sha1"]] = "/".join(
[hash_to_hex(dir_id), entry["path"].decode("utf-8")]
)
elif entry["type"] == "dir":
directories.add(hash_to_hex(entry["target"]))
_add_extra_contents(storage, sha1s)
# Get all checksums for each content
result: List[Optional[Content]] = storage.content_get(list(sha1s))
contents: List[Dict] = []
for content in result:
assert content is not None
sha1 = hash_to_hex(content.sha1)
content_metadata = {
algo: hash_to_hex(getattr(content, algo)) for algo in DEFAULT_ALGORITHMS
}
path = ""
if content.sha1 in content_path:
path = content_path[content.sha1]
cnt_data = storage.content_get_data(content.sha1)
assert cnt_data is not None
mimetype, encoding = get_mimetype_and_encoding_for_content(cnt_data)
_, _, cnt_data = _re_encode_content(mimetype, encoding, cnt_data)
content_display_data = prepare_content_for_display(cnt_data, mimetype, path)
content_metadata.update(
{
"path": path,
"mimetype": mimetype,
"encoding": encoding,
"hljs_language": content_display_data["language"],
"data": content_display_data["content_data"],
}
)
_contents[sha1] = content_metadata
contents.append(content_metadata)
# Add the empty directory to the test archive
storage.directory_add([Directory(entries=())])
# Add empty content to the test archive
storage.content_add([Content.from_data(data=b"")])
# Add fake git origin with pull request branches
_add_origin(
storage,
search,
counters,
origin_url="https://git.example.org/project",
snapshot_branches={
b"refs/heads/master": {
"target_type": "revision",
"target": next(iter(revisions)),
},
**{
f"refs/pull/{i}".encode(): {
"target_type": "revision",
"target": next(iter(revisions)),
}
for i in range(300)
},
},
)
counters.add("revision", revisions)
counters.add("release", releases)
counters.add("directory", directories)
counters.add("content", [content["sha1"] for content in contents])
# Return tests data
return {
"search": search,
"storage": storage,
"idx_storage": idx_storage,
"counters": counters,
"origins": _TEST_ORIGINS,
"contents": contents,
"directories": list(directories),
"releases": list(releases),
"revisions": list(map(hash_to_hex, revisions)),
"snapshots": list(snapshots),
"generated_checksums": set(),
}
def _init_indexers(tests_data):
# Instantiate content indexers that will be used in tests
# and force them to use the memory storages
indexers = {}
for idx_name, idx_class, idx_config in (
("mimetype_indexer", MimetypeIndexer, _TEST_MIMETYPE_INDEXER_CONFIG),
("license_indexer", FossologyLicenseIndexer, _TEST_LICENSE_INDEXER_CONFIG),
("ctags_indexer", CtagsIndexer, _TEST_CTAGS_INDEXER_CONFIG),
):
idx = idx_class(config=idx_config)
idx.storage = tests_data["storage"]
idx.objstorage = tests_data["storage"].objstorage
idx.idx_storage = tests_data["idx_storage"]
idx.register_tools(idx.config["tools"])
indexers[idx_name] = idx
return indexers
def get_content(content_sha1):
return _contents.get(content_sha1)
_tests_data = None
_current_tests_data = None
_indexer_loggers = {}
def get_tests_data(reset=False):
"""
Initialize tests data and return them in a dict.
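
    A typical call from test code might look like this (sketch only; the
    returned dict also carries the indexer instances added below):

    >>> data = get_tests_data()
    >>> storage = data["storage"]
    >>> origins = data["origins"]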
"""
global _tests_data, _current_tests_data
if _tests_data is None:
_tests_data = _init_tests_data()
indexers = _init_indexers(_tests_data)
for (name, idx) in indexers.items():
# pytest makes the loggers use a temporary file; and deepcopy
# requires serializability. So we remove them, and add them
# back after the copy.
_indexer_loggers[name] = idx.log
del idx.log
_tests_data.update(indexers)
if reset or _current_tests_data is None:
_current_tests_data = deepcopy(_tests_data)
for (name, logger) in _indexer_loggers.items():
_current_tests_data[name].log = logger
return _current_tests_data
def override_storages(storage, idx_storage, search, counters):
"""
Helper function to replace the storages from which archive data
are fetched.
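
    For example (sketch, assuming data = get_tests_data() was called first):

    >>> override_storages(
    ...     data["storage"], data["idx_storage"], data["search"], data["counters"]
    ... )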
"""
swh_config = config.get_config()
swh_config.update(
{
"storage": storage,
"indexer_storage": idx_storage,
"search": search,
"counters": counters,
}
)
archive.storage = storage
archive.idx_storage = idx_storage
archive.search = search
archive.counters = counters
| SoftwareHeritage/swh-web-ui | swh/web/tests/data.py | Python | agpl-3.0 | 15,484 | ["VisIt"] | 519c744724118458218e176a6add46bfdd3034a4aa31110291cd937a3bdc53d5 |
#
# Copyright (C) 2015 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
from numpy import linalg
def GetBestFitPlane(pts, weights=None):
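    """Return the best-fit plane through a set of 3D points as (a, b, c, d)
    with a*x + b*y + c*z + d = 0, where (a, b, c) is the unit normal taken
    from the smallest-eigenvalue eigenvector of the point covariance matrix.
    Note: as written, only weights=None is supported; the weights argument is
    accepted but not implemented.
    """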
if weights is None:
wSum = len(pts)
origin = np.sum(pts, 0)
origin /= wSum
sumXX = 0
sumXY = 0
sumXZ = 0
sumYY = 0
sumYZ = 0
sumZZ = 0
sums = np.zeros((3, 3), np.double)
for pt in pts:
dp = pt - origin
for i in range(3):
sums[i, i] += dp[i] * dp[i]
for j in range(i + 1, 3):
sums[i, j] += dp[i] * dp[j]
sums[j, i] += dp[i] * dp[j]
sums /= wSum
vals, vects = linalg.eigh(sums)
order = np.argsort(vals)
normal = vects[:, order[0]]
plane = np.zeros((4, ), np.double)
plane[:3] = normal
plane[3] = -1 * normal.dot(origin)
return plane
def PBFRD(mol, confId=-1):
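    """Return the mean absolute distance of a conformer's atoms from their
    best-fit plane (the plane-of-best-fit, PBF, descriptor); returns 0 for
    non-3D conformers.

    A rough usage sketch (the file path is illustrative only):
    >>> suppl = Chem.SDMolSupplier('molecules.sdf', removeHs=False)
    >>> values = [PBFRD(m) for m in suppl]
    """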
conf = mol.GetConformer(confId)
if not conf.Is3D():
return 0
pts = np.array([list(conf.GetAtomPosition(x)) for x in range(mol.GetNumAtoms())])
plane = GetBestFitPlane(pts)
denom = np.dot(plane[:3], plane[:3])
denom = denom**0.5
# add up the distance from the plane for each point:
res = 0.0
for pt in pts:
res += np.abs(pt.dot(plane[:3]) + plane[3])
res /= denom
res /= len(pts)
return res
if __name__ == '__main__':
suppl = Chem.SDMolSupplier('./testData/egfr.sdf', removeHs=False)
expected = open('./testData/egfr.out', 'r')
for m in suppl:
res = PBFRD(m)
inl = next(expected).strip().split()
expect = float(inl[1])
assert abs(res - expect) < 1e-4
| rvianello/rdkit | Contrib/PBF/pbf.py | Python | bsd-3-clause | 1,745 | ["RDKit"] | 1f598facc77cd13cf0c1b1d2c5dfdd636af05ea574354be6a0a68fac2f527d01 |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
'''
Created on Jan 18, 2010
@author: tw55413
'''
from progress_page import ProgressPage
class UpdateEntitiesPage(ProgressPage):
"""A progress page that updates each entity in a collection,
then flushes the entity, and informs all views that the entity
has been updated. Subclass this page and implement update_entity
to make this page do something.
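
    A minimal sketch of such a subclass (names are illustrative only)::

        class CapitalizeNamesPage(UpdateEntitiesPage):

            def update_entity(self, entity):
                entity.name = entity.name.upper()
                return entity.name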
"""
def __init__(self, collection_getter, parent):
super(UpdateEntitiesPage, self).__init__( parent )
self._wizard = parent
self._collection_getter = collection_getter
def update_entity(self, entity):
"""Implement this method to update the entities in the
collection.
:param entity: the entity that should be updated
:return: None or a string that will be displayed in the progress
screen.
"""
pass
def run(self):
from sqlalchemy.orm.session import Session
from camelot.view.remote_signals import get_signal_handler
signal_handler = get_signal_handler()
collection = list(self._collection_getter())
self.update_maximum_signal.emit( len(collection) )
for i, entity in enumerate(collection):
message = self.update_entity(entity)
Session.object_session( entity ).flush( [entity] )
signal_handler.sendEntityUpdate( self, entity )
self.update_progress_signal.emit( i, message or '')
| kurtraschke/camelot | camelot/view/wizard/pages/update_entities_page.py | Python | gpl-2.0 | 2,533 | ["VisIt"] | e4848e0ba2b1c6dce88229508bb119761a5f4387baa3828e6d6e5e2def5a05e2 |
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
#! /usr/bin/env python
#from decorator import decorator
#@decorator
# def count_calls(method, *args, **kwargs):
# """ Adds call counting to a method. """
# result = method(*args, **kwargs)
# args[0]._nbcalls += 1
# return result
def getx_from_specie(specie):
result = []
for nlep_params in specie.U:
# print "nlep_param = ", nlep_params
if nlep_params["func"] == "nlep":
if nlep_params["fitU"]:
result.append(nlep_params["U"])
elif nlep_params["func"] == "enlep":
if (nlep_params["fitU0"]):
result.append(nlep_params["U0"]) # first energy
if (nlep_params["fitU1"]):
result.append(nlep_params["U1"]) # second energy
return result
def get_range_from_specie(specie):
result = []
for nlep_params in specie.U:
# print "nlep_param = ", nlep_params
if nlep_params["func"] == "nlep":
if nlep_params["fitU"]:
result.append(nlep_params["U_range"])
elif nlep_params["func"] == "enlep":
if (nlep_params["fitU0"]):
result.append(nlep_params["U0_range"]) # first energy range
if (nlep_params["fitU1"]):
result.append(nlep_params["U1_range"]) # second energy range
return result
def set_nlep_fromx(args, i, specie):
for nlep_params in specie.U:
if nlep_params["func"] == "nlep":
if nlep_params["fitU0"]:
assert args.shape[0] > i, RuntimeError("%i > %i\n" % (args.shape[0], i))
nlep_params["U"] = args[i]
i += 1
elif nlep_params["func"] == "enlep":
if nlep_params["fitU0"]:
assert args.shape[0] > i, RuntimeError("%i > %i\n" % (args.shape[0], i))
nlep_params["U0"] = args[i] # first energy
i += 1
if nlep_params["fitU1"]:
assert args.shape[0] > i, RuntimeError("%i > %i\n" % (args.shape[0], i))
nlep_params["U1"] = args[i] # second energy
i += 1
return i
class Objective(object):
""" Objective function to optimize.
The vasp object is the one to make actual VASP calls and should be set up
prior to minimization.
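
    A rough usage sketch (all functional objects are assumed to be configured
    beforehand):

    >>> objective = Objective(vasp, dft, gw, outdir="nlep_fit", comm=comm)
    >>> eigenvalues, partial_charges, pressure, occupations = objective(objective.x)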
"""
def __init__(self, vasp, dft, gw, outdir="nlep_fit", comm=None, units=None):
from os import makedirs
from os.path import exists
from shutil import rmtree
from boost.mpi import world
from pylada.crystal import Structure
self.gw = gw
self.dft = dft
self.gw.comm = comm
self.dft.comm = comm
# since comm has changed, makes sure there are no issues with caching
self.gw.uncache()
self.dft.uncache()
self.vasp = vasp
self.system = Structure(dft.structure)
self._nbcalls = 0
self.outdir = outdir
        self.comm = comm if comm is not None else world
        self.units = units if units is not None else 1e0
self.use_syscall = True
if self.comm.rank == 0 and exists(self.outdir):
rmtree(self.outdir)
makedirs(self.outdir)
self.comm.barrier()
def _get_x0(self):
""" Returns vector of parameters from L{vasp} attribute. """
from numpy import array
result = []
for symbol, specie in self.vasp.species.items():
result += getx_from_specie(specie)
return array(result, dtype="float64") * self.units
def _set_x0(self, args):
""" Sets L{vasp} attribute from input vector. """
i = 0
args = args.copy() / self.units
for symbol, specie in self.vasp.species.items():
i = set_nlep_fromx(args, i, specie)
x = property(_get_x0, _set_x0)
""" Vector of parameters. """
def syscall_vasp(self, this_outdir):
import os
from pylada.vasp import Extract
cwd = os.getcwd()
os.chdir(this_outdir)
# print "NOW calling vasp from: ", os.getcwd()
os.system("vasp > stdout")
out = Extract(outcar="")
os.chdir(cwd)
return out
#@count_calls
def __call__(self, args):
import os
from boost.mpi import world
from pylada.vasp import files
# transfers parameters to vasp object
self.x = args
# performs calculation in new directory
# this_outdir = join(self.outdir, str(self._nbcalls)),
this_outdir = "%s/%d" % (self.outdir, self._nbcalls)
this_outdir = os.path.abspath(this_outdir)
if (not os.path.isdir(this_outdir)):
os.mkdir(this_outdir)
print("rank %d calling vasp in dir %s" % (world.rank, this_outdir))
if self.use_syscall:
out = self.vasp\
(self.system,
outdir=this_outdir,
comm=self.comm,
repat=files.minimal + files.input,
norun=True)
out = self.syscall_vasp(this_outdir)
else:
out = self.vasp\
(self.system,
outdir=this_outdir,
comm=self.comm,
repat=files.minimal + files.input)
# assert out.success,\
# RuntimeError\
# (
# "VASP calculation in %s_%i did not complete."\
# % (self.outdir, self._nbcalls)
# )
self._nbcalls += 1
# return raw values for subsequent processing
eigs = out.eigenvalues
pc = out.partial_charges
pressure = out.pressure
occs = out.occupations
return eigs, pc, pressure, occs
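# Illustrative sketch of driving code, heavily abridged: an Objective is
# built from pre-configured vasp/dft/gw calculation objects and evaluated by
# an external optimizer.  `vasp_runner`, `dft_run` and `gw_run` are
# hypothetical placeholders for objects set up elsewhere in the fitting
# scripts; the reduction to a scalar error is likewise only sketched.
#
#   objective = Objective(vasp_runner, dft_run, gw_run, outdir="nlep_fit")
#   x0 = objective.x                              # packed NLEP parameters
#   eigs, pc, pressure, occs = objective(x0)      # one VASP evaluation
#   # a wrapper would reduce these raw outputs to a residual (e.g. eigenvalue
#   # differences against GW) before handing it to scipy.optimize.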
|
pylada/pylada-light
|
src/pylada/vasp/nlep/nlep.py
|
Python
|
gpl-3.0
| 6,815
|
[
"CRYSTAL",
"VASP"
] |
fe90e31afdead9a5cbf8664557a7dff883ac9b0e7b1551309668c63ee8e7bb55
|
from p5 import *
def setup():
size(640, 360)
background(255)
def draw():
# Get a gaussian number w/ mean of 0 and standard deviation of 1.0
xloc = randomGaussian()
sd = 60
mean = width/2
xloc = (xloc * sd) + mean
noStroke()
fill(0, 10)
ellipse(xloc, height/2, 16, 16)
run()
|
croach/natureofcode.py
|
introduction/noc_i_4_gaussian.py
|
Python
|
mit
| 320
|
[
"Gaussian"
] |
e7aa54cdc52c0c3332f10e75dd9ce8cc4d1694104dd54e0c665c866bb6cc2795
|
import numpy as np
import histomicstk.utils as htk_utls
def reg_edge(im_input, im_phi, well='double', sigma=1.5, dt=1.0, mu=0.2,
lamda=1, alpha=-3, epsilon=1.5, iter=100):
"""Distance-regularized edge-based level sets.
Distance-regularization is used in this edge-based level set implementation
to avoid numerical problems requiring costly re-initialization. Provides
cost terms for boundary length, area, and regularization of the level set
function. Foreground objects are assumed to have larger intensity values
than background.
Parameters
----------
im_input : array_like
A floating-point intensity image.
im_phi : array_like
A floating-point initialization of the level-set image. Interior values
are set to -c0, and exterior values set to c0, where c0 > 0.
well : string
Choice of well function for regularization. Can be set to either
'single' or 'double' for single-well or double-well regularization, or
any other value for no regularization. Default value = 'double'.
sigma : double
Standard deviation of smoothing filter for input image im_input. Default value = 1.5.
dt : double
Time step for evolving im_phi. Default value = 1.0.
mu : double
Regularization weight for energy function. Default value = 0.2.
lamda : double
Boundary length weight for energy function. Default value = 1.0.
alpha : double
Area weight for energy function. A negative value is used to seed the
interior of the foreground objects and then evolve the boundary
outwards. A positive value assumes that the boundary begins outside the
foreground objects and collapses to their high-gradient edges.
Default value = -3.
epsilon: double
Coefficient used to smooth the Dirac and Heaviside functions. Default
value = 1.5.
iter: double
Number of iterations to evolve curve level set function over. Default
value = 100.
Returns
-------
im_phi : array_like
An intensity image where the zero level set defines object boundaries.
Can be further processed with fast marching or other methods to obtain
smooth boundaries, or simply thresholded to define the object mask.
See Also
--------
histomicstk.segmentation.nuclear.gaussian_voting
References
----------
.. [#] C. Li, C. Xu, C. Gui, M.D. Fox, "Distance Regularized Level Set
Evolution and Its Application to Image Segmentation," in IEEE
Transactions on Image Processing, vol. 19, no. 12, pp. 3243-54, 2010.
"""
import scipy.ndimage.filters as filters
# smoothed gradient of input image
sI = filters.gaussian_filter(im_input, sigma, mode='constant', cval=0)
dsI = np.gradient(sI)
G = 1/(1 + dsI[0]**2 + dsI[1]**2)
dG = np.gradient(G)
# perform regularized level-set evolutions with time step dt
for i in range(0, iter):
# fix boundary conditions
im_phi = neumann_bounds(im_phi)
# calculate gradient of level set image
dPhi = np.gradient(im_phi)
mPhi = (dPhi[0]**2 + dPhi[1]**2)**0.5 # gradient magnitude
Curve = np.gradient(dPhi[0] / (mPhi + 1e-10))[0] + \
np.gradient(dPhi[1] / (mPhi + 1e-10))[1] # divergence
# build regularization function
if well == 'single':
Reg = single_well(im_phi, Curve)
elif well == 'double':
Reg = double_well(im_phi, dPhi, mPhi, Curve, i)
else:
Reg = np.zeros(im_phi.shape)
# area and boundary-length energy function terms
iPhi = impulse(im_phi, epsilon)
Area = iPhi * G
Edge = iPhi * (dG[0] * (dPhi[0] / (mPhi + 1e-10)) +
dG[1] * (dPhi[1] / (mPhi + 1e-10))) + iPhi * G * Curve
# evolve level-set function
im_phi = im_phi + dt * (mu * Reg + lamda * Edge + alpha * Area)
# return evolved level-set function following iterations
return im_phi
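# Illustrative sketch: a minimal way to drive reg_edge, assuming `im` is a
# 2-D float image and `seed_mask` is a rough binary mask of the foreground
# objects (both are placeholders, not defined in this module):
#
#   phi0 = initialize(seed_mask, c0=2)             # signed step image
#   phi = reg_edge(im, phi0, well='double', alpha=-3, iter=100)
#   object_mask = phi < 0                          # interior of zero level set
#
# Iteration counts and weights depend on the data; the values above simply
# mirror the defaults documented in the reg_edge signature.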
def initialize(Mask, c0=2):
# initialize scaled binary-step image
Phi0 = np.zeros(Mask.shape)
Phi0[Mask > 0] = -c0
Phi0[Mask == 0] = c0
return Phi0
def single_well(Phi, Curve):
# Single-well potential function
return 4 * htk_utls.del2(Phi)-Curve
def double_well(Phi, dPhi, mPhi, Curve, i):
# Double-well potential function
SmallMask = (mPhi <= 1) & (mPhi >= 0)
LargeMask = (mPhi > 1)
P = SmallMask * np.sin(2 * np.pi * mPhi) / \
(2 * np.pi) + LargeMask * (mPhi - 1)
dP = ((P != 0) * P + (P == 0)) / ((mPhi != 0) * mPhi + (mPhi == 0))
Well = np.gradient(dP * dPhi[0] - dPhi[0])[0] + \
np.gradient(dP * dPhi[1] - dPhi[1])[1] + 4 * htk_utls.del2(Phi)
return Well
def impulse(X, Epsilon):
# Smooth dirac delta function.
# calculate smoothed impulse everywhere
Xout = (1 + np.cos(np.pi * X / Epsilon)) / (2 * Epsilon)
# zero out values |x| > Epsilon
Xout[np.absolute(X) > Epsilon] = 0
return Xout
def neumann_bounds(Phi):
# Transform input to enforce Neumann boundary conditions.
# work on the input array (modified in place)
PhiOut = Phi
# capture image size
m = Phi.shape[0]
n = Phi.shape[1]
# deal with corners
PhiOut[0, 0] = PhiOut[2, 2]
PhiOut[0, n-1] = PhiOut[2, n-3]
PhiOut[m-1, 0] = PhiOut[-3, 2]
PhiOut[m-1, n-1] = PhiOut[-3, -3]
# deal with edges
PhiOut[0, 1:-1] = PhiOut[2, 1:-1]
PhiOut[m-1, 1:-1] = PhiOut[m-3, 1:-1]
PhiOut[1:-1, 0] = PhiOut[1:-1, 2]
PhiOut[1:-1, n-1] = PhiOut[1:-1, n-3]
return PhiOut
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/segmentation/level_set/reg_edge.py
|
Python
|
apache-2.0
| 5,602
|
[
"DIRAC"
] |
d89b64feeccf75b89b0bf02afa11be4d5a3d6c44282eed87f18a20296b2c8cf9
|
# coding: utf-8
"""Parser for KumaScript used in compatibility data.
KumaScript is a macro system used on MDN:
https://github.com/mozilla/kumascript
KumaScript uses a JS-like syntax. The source is stored as pages on MDN:
https://developer.mozilla.org/en-US/docs/Template:SpecName
KumaScript can query the database, do math, and generate text using all the
power of JavaScript. It's slow, so it is rendered server-side and cached.
The unrendered version of a page can be accessed by asking for the raw version:
https://developer.mozilla.org/en-US/docs/Web/CSS/display
https://developer.mozilla.org/en-US/docs/Web/CSS/display?raw
The MDN importer needs to recognize KumaScript templates in the raw page, and:
1. For valid KumaScript, extract data and/or render HTML
2. For invalid KumaScript, generate an error
3. For unknown KumaScript, generate a different error
The Compat API will not support KumaScript.
"""
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six import text_type
from django.utils.text import get_text_list
from parsimonious.grammar import Grammar
from parsimonious.nodes import Node
from .data import Data
from .html import HTMLInterval, HTMLText, HTMLVisitor, html_grammar_source
from .utils import format_version
kumascript_grammar_source = html_grammar_source + r"""
#
# KumaScript tokens
#
kumascript = ks_esc_start ks_name ks_arglist? ks_esc_end
ks_esc_start = "{{" _
ks_name = ~r"(?P<content>[^\(\}\s]*)\s*"s
ks_arglist = ks_func_start ks_arg ks_arg_rest* ks_func_end
ks_func_start = "(" _
ks_func_arg = _ "," _
ks_func_end = _ ")" _
ks_esc_end = "}}" _
ks_arg = (double_quoted_text / single_quoted_text / ks_bare_arg)
ks_bare_arg = ~r"(?P<content>.*?(?=[,)]))"
ks_arg_rest = ks_func_arg ks_arg
#
# WhyNoSpec block
whynospec = _ whynospec_start whynospec_content whynospec_end
whynospec_start = ks_esc_start ~r"WhyNoSpecStart"s _ ks_esc_end _
whynospec_content = ~r".*?(?={{\s*WhyNoSpecEnd)"s
whynospec_end = ks_esc_start ~r"WhyNoSpecEnd"s _ ks_esc_end _
#
# Add KumaScript to text
#
text_token = whynospec / kumascript / text_item
text_item = ~r"(?P<content>(?:[^{<]|{(?!{))+)"s
"""
kumascript_grammar = Grammar(kumascript_grammar_source)
SCOPES = set((
'specification name',
'specification maturity',
'specification description',
'compatibility feature',
'compatibility support',
'footnote',
))
MDN_DOMAIN = 'https://developer.mozilla.org'
MDN_DOCS = MDN_DOMAIN + '/en-US/docs'
@python_2_unicode_compatible
class KumaScript(HTMLText):
"""A KumaScript macro."""
def __init__(self, args=None, scope=None, **kwargs):
"""Initialize components of a KumaScript macro."""
super(KumaScript, self).__init__(**kwargs)
self.args = args or []
self.scope = scope or '(unknown scope)'
def arg(self, pos):
"""Return argument, or None if not enough arguments."""
try:
return self.args[pos]
except IndexError:
return None
def __str__(self):
"""Create the programmer debug string."""
args = []
for arg in self.args:
if '"' in arg:
quote = "'"
else:
quote = '"'
args.append('{0}{1}{0}'.format(quote, arg))
if args:
argtext = '(' + ', '.join(args) + ')'
else:
argtext = ''
name = getattr(self, 'name', 'KumaScript')
return '{{{{{}{}}}}}'.format(name, argtext)
def to_html(self):
"""Convert to HTML. Default is an empty string."""
return ''
def _make_issue(self, issue_slug, **extra_kwargs):
"""Create an importer issue with standard KumaScript parameters."""
assert self.scope
kwargs = {'name': self.name, 'args': self.args, 'scope': self.scope,
'kumascript': str(self)}
kwargs.update(extra_kwargs)
return (issue_slug, self.start, self.end, kwargs)
class UnknownKumaScript(KumaScript):
"""An unknown KumaScript macro."""
def __init__(self, name, **kwargs):
"""Initialize name of an unknown KumaScript macro."""
super(UnknownKumaScript, self).__init__(**kwargs)
self.name = name
@property
def known(self):
return False
@property
def issues(self):
"""Return the list of issues with this KumaScript in this scope."""
return super(UnknownKumaScript, self).issues + [
self._make_issue('unknown_kumascript')]
class KnownKumaScript(KumaScript):
"""Base class for known KumaScript macros."""
min_args = 0
max_args = 0
arg_names = []
expected_scopes = SCOPES
def __init__(self, args=None, scope=None, **kwargs):
"""Validate arg count of a known KumaScript macro."""
super(KnownKumaScript, self).__init__(**kwargs)
self.args = args or []
self.scope = scope or '(unknown scope)'
assert self.max_args >= self.min_args
assert len(self.arg_names) == self.max_args
@property
def known(self):
return True
@property
def name(self):
return getattr(self, 'canonical_name', self.__class__.__name__)
def _validate(self):
"""Return validation issues or empty list."""
issues = []
count = len(self.args)
if count < self.min_args or count > self.max_args:
extra = {
'max': self.max_args, 'min': self.min_args, 'count': count,
'arg_names': self.arg_names}
if self.max_args == 0:
arg_spec = 'no arguments'
else:
if self.max_args == self.min_args:
arg_range = 'exactly {0} argument{1}'.format(
self.max_args, '' if self.max_args == 1 else 's')
else:
arg_range = 'between {0} and {1} arguments'.format(
self.min_args, self.max_args)
names = []
for pos, name in enumerate(self.arg_names):
if pos >= self.min_args:
names.append('[{}]'.format(name))
else:
names.append(name)
arg_spec = '{} ({})'.format(arg_range, ', '.join(names))
extra['arg_spec'] = arg_spec
if count == 1:
extra['arg_count'] = '1 argument'
else:
extra['arg_count'] = '{0} arguments'.format(count)
issues.append(self._make_issue('kumascript_wrong_args', **extra))
assert not (self.expected_scopes - SCOPES)
if self.scope not in self.expected_scopes:
expected = get_text_list(sorted(self.expected_scopes))
issues.append(self._make_issue(
'unexpected_kumascript', expected_scopes=expected))
return issues
@property
def issues(self):
return super(KumaScript, self).issues + self._validate()
class Bug(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:Bug
min_args = max_args = 1
arg_names = ['number']
canonical_name = 'bug'
expected_scopes = set(('footnote',))
def __init__(self, **kwargs):
"""
Initialize Bug.
{{bug}} macro takes 3 arguments, but only the 1-argument version is
supported.
"""
super(Bug, self).__init__(**kwargs)
self.number = self.arg(0)
def to_html(self):
return (
'<a href="https://bugzilla.mozilla.org/show_bug.cgi?id={number}">'
'bug {number}</a>').format(number=self.number)
class CompatKumaScript(KnownKumaScript):
"""Base class for KumaScript specifying a browser version."""
min_args = max_args = 1
expected_scopes = set(('compatibility support', ))
def to_html(self):
return self.version
class CompatBasicKumaScript(CompatKumaScript):
"""Base class for KumaScript specifying the actual browser version."""
def __init__(self, **kwargs):
super(CompatBasicKumaScript, self).__init__(**kwargs)
self.version = format_version(self.arg(0))
class CompatAndroid(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatAndroid
arg_names = ['AndroidVersion']
class CompatChrome(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatChrome
arg_names = ['ChromeVer']
class CompatGeckoDesktop(CompatKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoDesktop
arg_names = ['GeckoVersion']
geckoversion_to_version = {
'1': '1.0',
'1.0': '1.0',
'1.7 or earlier': '1.0',
'1.7': '1.0',
'1.8': '1.5',
'1.8.1': '2.0',
'1.9': '3.0',
'1.9.1': '3.5',
'1.9.1.4': '3.5.4',
'1.9.2': '3.6',
'1.9.2.4': '3.6.4',
'1.9.2.5': '3.6.5',
'1.9.2.9': '3.6.9',
'2': '4.0',
'2.0': '4.0',
}
def __init__(self, **kwargs):
super(CompatGeckoDesktop, self).__init__(**kwargs)
self.gecko_version = self.arg(0)
@property
def version(self):
try:
return self.geckoversion_to_version[self.gecko_version]
except KeyError:
try:
nversion = float(self.gecko_version)
except ValueError:
return None
if nversion >= 5:
return '{:1.1f}'.format(nversion)
else:
return None
@property
def issues(self):
issues = super(CompatGeckoDesktop, self).issues
if self.version is None:
issues.append(
('compatgeckodesktop_unknown', self.start, self.end,
{'version': self.gecko_version}))
return issues
class CompatGeckoFxOS(CompatKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoFxOS
max_args = 2
arg_names = ['GeckoVersion', 'VersionOverride']
def __init__(self, **kwargs):
super(CompatGeckoFxOS, self).__init__(**kwargs)
self.gecko_version = self.arg(0)
self.override = self.arg(1)
# TODO: Replace with KumaScript logic
try:
nversion = float(self.gecko_version)
except ValueError:
nversion = -1
over = self.override
self.bad_version = False
self.bad_override = False
if (0 <= nversion < 19) and over in (None, '1.0'):
self.version = '1.0'
elif (0 <= nversion < 21) and over == '1.0.1':
self.version = '1.0.1'
elif (0 <= nversion < 24) and over in ('1.1', '1.1.0', '1.1.1'):
self.version = '1.1'
elif (19 <= nversion < 27) and over in (None, '1.2'):
self.version = '1.2'
elif (27 <= nversion < 29) and over in (None, '1.3'):
self.version = '1.3'
elif (29 <= nversion < 31) and over in (None, '1.4'):
self.version = '1.4'
elif (31 <= nversion < 33) and over in (None, '2.0'):
self.version = '2.0'
elif (33 <= nversion < 35) and over in (None, '2.1'):
self.version = '2.1'
elif (35 <= nversion < 38) and over in (None, '2.2'):
self.version = '2.2'
elif (nversion < 0 or nversion >= 38):
self.version = over
self.bad_version = True
else:
self.version = over
self.bad_override = True
@property
def issues(self):
issues = super(CompatGeckoFxOS, self).issues
if self.bad_version:
issues.append(
('compatgeckofxos_unknown', self.start, self.end,
{'version': self.gecko_version}))
if self.bad_override:
issues.append(
('compatgeckofxos_override', self.start, self.end,
{'override': self.override, 'version': self.gecko_version}))
return issues
class CompatGeckoMobile(CompatKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoMobile
arg_names = ['GeckoVersion']
def __init__(self, **kwargs):
super(CompatGeckoMobile, self).__init__(**kwargs)
self.gecko_version = self.arg(0)
@property
def version(self):
nversion = self.gecko_version.split('.', 1)[0]
if nversion == '2':
return '4.0'
else:
return '{}.0'.format(nversion)
class CompatIE(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatIE
arg_names = ['IEver']
class CompatNightly(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatNightly
max_args = 1
arg_names = ['browser']
expected_scopes = set(('compatibility support',))
class CompatNo(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatNo
expected_scopes = set(('compatibility support',))
class CompatOpera(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatOpera
arg_names = ['OperaVer']
class CompatOperaMobile(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatOperaMobile
arg_names = ['OperaVer']
class CompatSafari(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatSafari
arg_names = ['SafariVer']
class CompatUnknown(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatUnknown
expected_scopes = set(('compatibility support',))
class CompatVersionUnknown(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatVersionUnknown
expected_scopes = set(('compatibility support',))
class CompatibilityTable(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatibilityTable
expected_scopes = set()
class KumaHTMLElement(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:HTMLElement
min_args = max_args = 1
arg_names = ['ElementName']
canonical_name = 'HTMLElement'
expected_scopes = set((
'compatibility feature', 'compatibility support', 'footnote',
'specification description'))
def __init__(self, **kwargs):
super(KumaHTMLElement, self).__init__(**kwargs)
self.element_name = self.arg(0)
def to_html(self):
if ' ' in self.element_name:
fmt = '<code>{}</code>'
else:
fmt = '<code><{}></code>'
return fmt.format(self.element_name)
class SpecKumaScript(KnownKumaScript):
"""Base class for Spec2 and SpecName."""
def __init__(self, data=None, **kwargs):
super(SpecKumaScript, self).__init__(**kwargs)
self.mdn_key = self.arg(0)
self.spec = None
self.data = data or Data()
if self.mdn_key:
self.spec = self.data.lookup_specification(self.mdn_key)
def to_html(self):
if self.spec:
name = self.spec.name['en']
else:
name = self.mdn_key or '(None)'
return 'specification {}'.format(name)
class Spec2(SpecKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:Spec2
min_args = max_args = 1
arg_names = ['SpecKey']
expected_scopes = set(('specification maturity',))
def _validate(self):
issues = super(Spec2, self)._validate()
if self.mdn_key and not self.spec:
issues.append(
('unknown_spec', self.start, self.end, {'key': self.mdn_key}))
return issues
class SpecName(SpecKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:SpecName
min_args = 1
max_args = 3
arg_names = ['SpecKey', 'Anchor', 'AnchorName']
expected_scopes = set(('specification name', 'specification description'))
def __init__(self, **kwargs):
super(SpecName, self).__init__(**kwargs)
self.subpath = self.arg(1)
self.section_name = self.arg(2)
if self.spec:
self.section_id = self.data.lookup_section_id(
self.spec.id, self.subpath)
else:
self.section_id = None
def _validate(self):
issues = super(SpecName, self)._validate()
if self.mdn_key and not self.spec:
issues.append(
('unknown_spec', self.start, self.end, {'key': self.mdn_key}))
if not self.mdn_key and len(self.args):
issues.append(self._make_issue('specname_blank_key'))
return issues
class CSSBox(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:cssbox
min_args = max_args = 1
arg_names = ['PropertyName']
canonical_name = 'cssbox'
expected_scopes = set()
class XRefBase(KnownKumaScript):
"""Base class for cross-reference KumaScript."""
expected_scopes = set((
'compatibility feature', 'specification description', 'footnote'))
def __init__(self, **kwargs):
super(XRefBase, self).__init__(**kwargs)
self.url = None
self.display = None
self.linked = self.scope in ('specification description', 'footnote')
def to_html(self):
"""Convert macro to link or plain text."""
assert self.display
if self.linked:
assert self.url
return '<a href="{}"><code>{}</code></a>'.format(
self.url, self.display)
else:
return '<code>{}</code>'.format(self.display)
class CSSxRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:cssxref
min_args = 1
max_args = 3
arg_names = ['APIName', 'DisplayName', 'Anchor']
canonical_name = 'cssxref'
def __init__(self, **kwargs):
super(CSSxRef, self).__init__(**kwargs)
self.api_name = self.arg(0)
self.display_name = self.arg(1)
self.anchor = self.arg(2)
self.construct_crossref(
self.api_name, self.display_name, self.anchor)
def construct_crossref(self, api_name, display_name, anchor=None):
self.url = '{}/Web/CSS/{}{}'.format(
MDN_DOCS, api_name, anchor or '')
self.display = display_name or api_name
class DeprecatedInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:deprecated_inline
canonical_name = 'deprecated_inline'
expected_scopes = set(('compatibility feature',))
class DOMEventXRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:domeventxref
min_args = max_args = 1
arg_names = ['api_name']
canonical_name = 'domeventxref'
def __init__(self, **kwargs):
"""Initialize DOMEventXRef.
Only implements the subset of domeventxref used on current pages.
"""
super(DOMEventXRef, self).__init__(**kwargs)
self.api_name = self.arg(0)
assert '()' not in self.api_name
self.url = '{}/DOM/DOM_event_reference/{}'.format(
MDN_DOCS, self.api_name)
self.display = self.api_name
class DOMException(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:exception
min_args = max_args = 1
arg_names = ['exception_id']
canonical_name = 'exception'
def __init__(self, **kwargs):
super(DOMException, self).__init__(**kwargs)
self.exception_id = self.arg(0)
self.url = '{}/Web/API/DOMException#{}'.format(
MDN_DOCS, self.exception_id)
self.display = self.exception_id
class DOMxRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:domxref
min_args = 1
max_args = 2
arg_names = ['DOMPath', 'DOMText']
canonical_name = 'domxref'
def __init__(self, **kwargs):
super(DOMxRef, self).__init__(**kwargs)
self.dom_path = self.arg(0)
self.dom_text = self.arg(1)
path = self.dom_path.replace(' ', '_').replace('()', '')
if '.' in path and '..' not in path:
path = path.replace('.', '/')
path = path[0].upper() + path[1:]
self.url = '{}/Web/API/{}'.format(MDN_DOCS, path)
self.display = self.dom_text or self.dom_path
class EmbedCompatTable(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:EmbedCompatTable
min_args = max_args = 1
arg_names = ['slug']
expected_scopes = set(('footnote',))
class Event(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:event
min_args = 1
max_args = 2
arg_names = ['api_name', 'display_name']
canonical_name = 'event'
def __init__(self, **kwargs):
super(Event, self).__init__(**kwargs)
self.api_name = self.arg(0)
self.display_name = self.arg(1)
self.url = '{}/Web/Events/{}'.format(MDN_DOCS, self.api_name)
self.display = self.display_name or self.api_name
class ExperimentalInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:experimental_inline
canonical_name = 'experimental_inline'
expected_scopes = set(('compatibility feature',))
class GeckoRelease(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:geckoRelease
min_args = max_args = 1
arg_names = ['release']
canonical_name = 'geckoRelease'
expected_scopes = set(('footnote',))
early_versions = {
'1.8': ('Firefox 1.5', 'Thunderbird 1.5', 'SeaMonkey 1.0'),
'1.8.1': ('Firefox 2', 'Thunderbird 2', 'SeaMonkey 1.1'),
'1.9': ('Firefox 3',),
'1.9.1': ('Firefox 3.5', 'Thunderbird 3.0', 'SeaMonkey 2.0'),
'1.9.1.4': ('Firefox 3.5.4',),
'1.9.2': ('Firefox 3.6', 'Thunderbird 3.1', 'Fennec 1.0'),
'1.9.2.4': ('Firefox 3.6.4',),
'1.9.2.5': ('Firefox 3.6.5',),
'1.9.2.9': ('Firefox 3.6.9',),
'2.0b2': ('Firefox 4.0b2',),
'2.0b4': ('Firefox 4.0b4',),
'2': ('Firefox 4', 'Thunderbird 3.3', 'SeaMonkey 2.1'),
'2.0': ('Firefox 4', 'Thunderbird 3.3', 'SeaMonkey 2.1'),
'2.1': ('Firefox 4 Mobile',),
}
firefoxos_name = 'Firefox OS {}'
firefoxos_versions = {
'18.0': ('1.0.1', '1.1'),
'26.0': ('1.2',),
'28.0': ('1.3',),
'30.0': ('1.4',),
'32.0': ('2.0',),
}
release_names = (
'Firefox {rnum}', 'Thunderbird {rnum}', 'SeaMonkey 2.{snum}')
def __init__(self, **kwargs):
super(GeckoRelease, self).__init__(**kwargs)
raw_version = self.arg(0)
self.gecko_version = raw_version
self.and_higher = False
if raw_version.endswith('+'):
self.gecko_version = raw_version[:-1]
self.and_higher = True
if self.gecko_version in self.early_versions:
self.releases = self.early_versions[self.gecko_version]
else:
vnum = float(self.gecko_version)
assert vnum >= 5.0
rnum = '{:.1f}'.format(vnum)
snum = int(vnum) - 3
self.releases = [
name.format(rnum=rnum, snum=snum)
for name in self.release_names]
for fxosnum in self.firefoxos_versions.get(rnum, []):
self.releases.append(self.firefoxos_name.format(fxosnum))
def to_html(self):
plus = '+' if self.and_higher else ''
return '(' + ' / '.join([rel + plus for rel in self.releases]) + ')'
class HTMLAttrXRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:htmlattrxref
min_args = 1
max_args = 2
arg_names = ['attribute', 'element']
canonical_name = 'htmlattrxref'
def __init__(self, **kwargs):
super(HTMLAttrXRef, self).__init__(**kwargs)
self.attribute = self.arg(0)
self.element = self.arg(1)
self.text = self.arg(2)
if self.element:
self.url = '{}/Web/HTML/Element/{}'.format(MDN_DOCS, self.element)
else:
self.url = '{}/Web/HTML/Global_attributes'.format(MDN_DOCS)
self.url += '#attr-' + self.attribute.lower()
self.display = self.attribute.lower()
class JSxRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:jsxref
min_args = 1
max_args = 2
arg_names = ['API name', 'display name']
canonical_name = 'jsxref'
def __init__(self, **kwargs):
"""
Initialize JSxRef.
{{jsxref}} macro can take 4 arguments, but only handling first two.
"""
super(JSxRef, self).__init__(**kwargs)
self.api_name = self.arg(0)
self.display_name = self.arg(1)
path_name = self.api_name.replace('.prototype.', '/').replace('()', '')
if path_name.startswith('Global_Objects/'):
path_name = path_name.replace('Global_Objects/', '', 1)
if '.' in path_name and '...' not in path_name:
path_name = path_name.replace('.', '/')
self.url = '{}/Web/JavaScript/Reference/Global_Objects/{}'.format(
MDN_DOCS, path_name)
self.display = self.display_name or self.api_name
class NonStandardInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:non-standard_inline
canonical_name = 'non-standard_inline'
expected_scopes = set(('compatibility feature',))
class NotStandardInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:not_standard_inline
canonical_name = 'not_standard_inline'
expected_scopes = set(('compatibility feature',))
class ObsoleteInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:obsolete_inline
canonical_name = 'obsolete_inline'
expected_scopes = set(('compatibility feature',))
class PropertyPrefix(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:property_prefix
min_args = max_args = 1
arg_names = ['Prefix']
canonical_name = 'property_prefix'
expected_scopes = set(('compatibility support',))
def __init__(self, **kwargs):
super(PropertyPrefix, self).__init__(**kwargs)
self.prefix = self.arg(0)
class WebkitBug(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:WebkitBug
min_args = max_args = 1
arg_names = ['number']
expected_scopes = set(('footnote',))
def __init__(self, **kwargs):
super(WebkitBug, self).__init__(**kwargs)
self.number = self.arg(0)
def to_html(self):
return (
'<a href="https://bugs.webkit.org/show_bug.cgi?id={number}">'
'WebKit bug {number}</a>').format(number=self.number)
class WhyNoSpecBlock(HTMLInterval):
"""Psuedo-element for {{WhyNoSpecStart}}/{{WhyNoSpecEnd}} block.
Stand-alone {{WhyNoSpecStart}} and {{WhyNoSpecEnd}} elements will be
treated as unknown kumascript.
https://developer.mozilla.org/en-US/docs/Template:WhyNoSpecStart
https://developer.mozilla.org/en-US/docs/Template:WhyNoSpecEnd
"""
expected_scopes = set()
def __init__(self, scope=None, **kwargs):
super(WhyNoSpecBlock, self).__init__(**kwargs)
self.scope = scope
def to_html(self, drop_tag=None):
return ''
class XrefCSSBase(CSSxRef):
"""Base class for xref_cssXXX macros."""
min_args = max_args = 0
arg_names = []
def __init__(self, **kwargs):
super(XrefCSSBase, self).__init__(**kwargs)
self.construct_crossref(*self.xref_args)
class XrefCSSAngle(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssangle
canonical_name = 'xref_cssangle'
xref_args = ('angle', '<angle>')
class XrefCSSColorValue(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csscolorvalue
canonical_name = 'xref_csscolorvalue'
xref_args = ('color_value', '<color>')
class XrefCSSGradient(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssgradient
canonical_name = 'xref_cssgradient'
xref_args = ('gradient', '<gradient>')
class XrefCSSImage(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssimage
canonical_name = 'xref_cssimage'
xref_args = ('image', '<image>')
class XrefCSSInteger(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssinteger
canonical_name = 'xref_cssinteger'
xref_args = ('integer', '<integer>')
class XrefCSSLength(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csslength
canonical_name = 'xref_csslength'
xref_args = ('length', '<length>')
class XrefCSSNumber(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssnumber
canonical_name = 'xref_cssnumber'
xref_args = ('number', '<number>')
class XrefCSSPercentage(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csspercentage
canonical_name = 'xref_csspercentage'
xref_args = ('percentage', '<percentage>')
class XrefCSSString(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssstring
canonical_name = 'xref_cssstring'
xref_args = ('string', '<string>')
class XrefCSSVisual(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssvisual
canonical_name = 'xref_cssvisual'
xref_args = ('Media/Visual', '<visual>')
class BaseKumaVisitor(HTMLVisitor):
"""Extract HTML structure from a MDN Kuma raw fragment.
Extracts KumaScript, with special handling if it is known.
"""
scope = None
def __init__(self, **kwargs):
super(BaseKumaVisitor, self).__init__(**kwargs)
self._kumascript_proper_names = None
def _visit_multi_block(self, node, children):
"""Visit a 1-or-more block of tokens."""
assert children
tokens = self.flatten(children)
assert tokens
for token in tokens:
assert isinstance(token, HTMLInterval)
return tokens
def flatten(self, nested_list):
result = []
for item in nested_list:
if isinstance(item, list):
result.extend(self.flatten(item))
else:
result.append(item)
return result
def _visit_multi_token(self, node, children):
"""Visit a single HTMLInterval or list of HTMLIntervals."""
assert len(children) == 1
item = children[0]
if isinstance(item, HTMLInterval):
return item
else:
for subitem in item:
assert isinstance(subitem, HTMLInterval), subitem
if len(item) == 1:
return item[0]
else:
return item
visit_html_block = _visit_multi_block
visit_html_element = _visit_multi_token
visit_text_block = _visit_multi_block
visit_text_token = _visit_multi_token
known_kumascript = {
'Bug': Bug,
'CompatAndroid': CompatAndroid,
'CompatChrome': CompatChrome,
'CompatGeckoDesktop': CompatGeckoDesktop,
'CompatGeckoFxOS': CompatGeckoFxOS,
'CompatGeckoMobile': CompatGeckoMobile,
'CompatIE': CompatIE,
'CompatNightly': CompatNightly,
'CompatNo': CompatNo,
'CompatOpera': CompatOpera,
'CompatOperaMobile': CompatOperaMobile,
'CompatSafari': CompatSafari,
'CompatUnknown': CompatUnknown,
'CompatVersionUnknown': CompatVersionUnknown,
'CompatibilityTable': CompatibilityTable,
'EmbedCompatTable': EmbedCompatTable,
'HTMLElement': KumaHTMLElement,
'Spec2': Spec2,
'SpecName': SpecName,
'WebkitBug': WebkitBug,
'cssbox': CSSBox,
'cssxref': CSSxRef,
'deprecated_inline': DeprecatedInline,
'domeventxref': DOMEventXRef,
'domxref': DOMxRef,
'event': Event,
'exception': DOMException,
'experimental_inline': ExperimentalInline,
'geckoRelease': GeckoRelease,
'htmlattrxref': HTMLAttrXRef,
'jsxref': JSxRef,
'non-standard_inline': NonStandardInline,
'not_standard_inline': NotStandardInline,
'obsolete_inline': ObsoleteInline,
'property_prefix': PropertyPrefix,
'xref_cssangle': XrefCSSAngle,
'xref_csscolorvalue': XrefCSSColorValue,
'xref_cssgradient': XrefCSSGradient,
'xref_cssimage': XrefCSSImage,
'xref_cssinteger': XrefCSSInteger,
'xref_csslength': XrefCSSLength,
'xref_cssnumber': XrefCSSNumber,
'xref_csspercentage': XrefCSSPercentage,
'xref_cssstring': XrefCSSString,
'xref_cssvisual': XrefCSSVisual,
}
def _kumascript_lookup(self, name):
"""
Get the proper name and class for a KumaScript name.
MDN does case-insensitive matching of KumaScript names.
"""
if self._kumascript_proper_names is None:
self._kumascript_proper_names = {}
for k in self.known_kumascript.keys():
self._kumascript_proper_names[k.lower()] = k
proper_name = self._kumascript_proper_names.get(name.lower())
return self.known_kumascript.get(proper_name)
def visit_kumascript(self, node, children):
"""Process a KumaScript macro."""
esc0, name, arglist, esc1 = children
assert isinstance(name, text_type), type(name)
if isinstance(arglist, Node):
assert arglist.start == arglist.end
args = []
else:
assert isinstance(arglist, list), type(arglist)
assert len(arglist) == 1
args = arglist[0]
assert isinstance(args, list), type(args)
if args == ['']:
args = []
ks_cls = self._kumascript_lookup(name)
init_args = {'args': args, 'scope': self.scope}
if ks_cls is None:
ks_cls = UnknownKumaScript
init_args['name'] = name
if issubclass(ks_cls, SpecKumaScript):
init_args['data'] = self.data
return self.process(ks_cls, node, **init_args)
visit_ks_name = HTMLVisitor._visit_content
def visit_ks_arglist(self, node, children):
f0, arg0, argrest, f1 = children
args = [arg0]
if isinstance(argrest, Node):
# No additional args
assert argrest.start == argrest.end
else:
for _, arg in argrest:
args.append(arg)
# Force to strings
arglist = []
for arg in args:
if arg is None:
arglist.append('')
else:
arglist.append(text_type(arg))
return arglist
def visit_ks_arg(self, node, children):
assert isinstance(children, list)
assert len(children) == 1
item = children[0]
assert isinstance(item, text_type)
return item or None
visit_ks_bare_arg = HTMLVisitor._visit_content
def visit_whynospec(self, node, children):
return self.process(WhyNoSpecBlock, node, scope=self.scope)
class KumaVisitor(BaseKumaVisitor):
"""Extract HTML structure from a MDN Kuma raw fragment.
Include extra policy for scraping pages for the importer:
- Converts <span>content</span> to "content", with issues
- Validate and cleanup <a> tags
- Keeps <div id="foo">, for detecting compat divs
- Keeps <td colspan=# rowspan=#>, for detecting spanning compat cells
- Keeps <th colspan=#>, for detecting spanning compat headers
- Keeps <h2 id="id" name="name">, for warning on mismatch
- Raises issues on all other attributes
"""
_default_attribute_actions = {None: 'ban'}
def visit_a_open(self, node, children):
"""Validate and cleanup <a> open tags."""
actions = self._default_attribute_actions.copy()
actions['href'] = 'must'
actions['title'] = 'drop'
actions['class'] = 'keep'
converted = self._visit_open(node, children, actions)
# Convert relative links to absolute links
attrs = converted.attributes.attrs
if 'href' in attrs:
href = attrs['href'].value
if href and href[0] == '/':
attrs['href'].value = MDN_DOMAIN + href
# Drop class attribute, warning if unexpected
if 'class' in attrs:
class_attr = attrs.pop('class')
for value in class_attr.value.split():
if value in ('external', 'external-icon'):
pass
else:
self.add_issue(
'unexpected_attribute', class_attr, node_type='a',
ident='class', value=value,
expected='the attribute href')
return converted
def visit_div_open(self, node, children):
"""Retain id attribute of <div> tags."""
actions = self._default_attribute_actions.copy()
actions['id'] = 'keep'
return self._visit_open(node, children, actions)
def visit_td_open(self, node, children):
"""Retain colspan and rowspan attributes of <td> tags."""
actions = self._default_attribute_actions.copy()
actions['colspan'] = 'keep'
actions['rowspan'] = 'keep'
return self._visit_open(node, children, actions)
def visit_th_open(self, node, children):
"""Retain colspan attribute of <th> tags."""
actions = self._default_attribute_actions.copy()
actions['colspan'] = 'keep'
return self._visit_open(node, children, actions)
def _visit_hn_open(self, node, children, actions=None, **kwargs):
"""Retain id and name attributes of <h#> tags."""
actions = self._default_attribute_actions.copy()
actions['id'] = 'keep'
actions['name'] = 'keep'
return self._visit_open(node, children, actions, **kwargs)
visit_h1_open = _visit_hn_open
visit_h2_open = _visit_hn_open
visit_h3_open = _visit_hn_open
visit_h4_open = _visit_hn_open
visit_h5_open = _visit_hn_open
visit_h6_open = _visit_hn_open
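# Illustrative sketch, heavily hedged: the importer is expected to parse a raw
# MDN fragment with the parsimonious grammar and walk it with KumaVisitor.
# The construction and visit entry point below follow generic parsimonious
# NodeVisitor conventions and are assumptions, not taken from this module.
#
#   raw = '{{CompatGeckoDesktop("1.9.2")}}'
#   tree = kumascript_grammar['kumascript'].parse(raw)
#   visitor = KumaVisitor()
#   visitor.scope = 'compatibility support'
#   macro = visitor.visit(tree)
#   # macro.version -> '3.6'; macro.issues lists any importer warnings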
|
jwhitlock/web-platform-compat
|
mdn/kumascript.py
|
Python
|
mpl-2.0
| 38,117
|
[
"VisIt"
] |
ce932df802017404e502071a3bfc7fe32641d5bd4130519319e7f9d2d6835d23
|
#!/usr/bin/env python
import numpy as np
import argparse
import random
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
class PBTBenchmarkExample(tune.Trainable):
"""Toy PBT problem for benchmarking adaptive learning rate.
The goal is to optimize this trainable's accuracy. The accuracy increases
fastest at the optimal lr, which is a function of the current accuracy.
The optimal lr schedule for this problem is the triangle wave as follows.
Note that many lr schedules for real models also follow this shape:
best lr
^
| /\
| / \
| / \
| / \
------------> accuracy
In this problem, using PBT with a population of 2-4 is sufficient to
roughly approximate this lr schedule. Higher population sizes will yield
faster convergence. Training will not converge without PBT.
"""
def setup(self, config):
self.lr = config["lr"]
self.accuracy = 0.0 # end = 1000
def step(self):
midpoint = 100 # lr starts decreasing after acc > midpoint
q_tolerance = 3 # penalize exceeding lr by more than this multiple
noise_level = 2 # add gaussian noise to the acc increase
# triangle wave:
# - start at 0.001 @ t=0,
# - peak at 0.01 @ t=midpoint,
# - end at 0.001 @ t=midpoint * 2,
if self.accuracy < midpoint:
optimal_lr = 0.01 * self.accuracy / midpoint
else:
optimal_lr = 0.01 - 0.01 * (self.accuracy - midpoint) / midpoint
optimal_lr = min(0.01, max(0.001, optimal_lr))
# compute accuracy increase
q_err = max(self.lr, optimal_lr) / min(self.lr, optimal_lr)
if q_err < q_tolerance:
self.accuracy += (1.0 / q_err) * random.random()
elif self.lr > optimal_lr:
self.accuracy -= (q_err - q_tolerance) * random.random()
self.accuracy += noise_level * np.random.normal()
self.accuracy = max(0, self.accuracy)
return {
"mean_accuracy": self.accuracy,
"cur_lr": self.lr,
"optimal_lr": optimal_lr, # for debugging
"q_err": q_err, # for debugging
"done": self.accuracy > midpoint * 2,
}
def save_checkpoint(self, checkpoint_dir):
return {
"accuracy": self.accuracy,
"lr": self.lr,
}
def load_checkpoint(self, checkpoint):
self.accuracy = checkpoint["accuracy"]
def reset_config(self, new_config):
self.lr = new_config["lr"]
self.config = new_config
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
parser.add_argument(
"--cluster", action="store_true", help="Distribute tuning on a cluster"
)
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using " "Ray Client.",
)
args, _ = parser.parse_known_args()
if args.server_address:
ray.init(f"ray://{args.server_address}")
elif args.cluster:
ray.init(address="auto")
elif args.smoke_test:
ray.init(num_cpus=2) # force pausing to happen for test
else:
ray.init()
pbt = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=20,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: random.uniform(0.0001, 0.02),
# allow perturbations within this set of categorical values
"some_other_factor": [1, 2],
},
)
analysis = tune.run(
PBTBenchmarkExample,
name="pbt_test",
scheduler=pbt,
metric="mean_accuracy",
mode="max",
reuse_actors=True,
checkpoint_freq=20,
verbose=False,
stop={
"training_iteration": 200,
},
num_samples=8,
config={
"lr": 0.0001,
# note: this parameter is perturbed but has no effect on
# the model training in this example
"some_other_factor": 1,
},
)
print("Best hyperparameters found were: ", analysis.best_config)
|
ray-project/ray
|
python/ray/tune/examples/pbt_example.py
|
Python
|
apache-2.0
| 4,406
|
[
"Gaussian"
] |
43fc2b126db9b8f05dbfff1578cc61f978253202cca02701cca18a2092fc26d5
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import vtk
from vtk.wx.wxVTKRenderWindowInteractor import wxVTKRenderWindowInteractor
import wx
from module_kits.wx_kit import utils as wxutils
import wx.lib.agw.aui as aui
wx.aui = aui
from wx.html import HtmlWindow
class SimpleHTMLListBox(wx.HtmlListBox):
"""Simple class to emulate normal wx.ListBox (Append, Clear, GetClientData
and GetString methods) with the super-powers of the wx.HtmlListBox.
@author Charl P. Botha <http://cpbotha.net/>
"""
def __init__(self, *args, **kwargs):
wx.HtmlListBox.__init__(self, *args, **kwargs)
self.items = []
self.Clear()
def Append(self, text, data=None, refresh=True):
"""Emulates wx.ListBox Append method, except for refresh bit.
Set refresh to False if you're going to be appending bunches of
items. When you're done, call the DoRefresh() method explicitly.
"""
self.items.append((text, data))
if refresh:
self.SetItemCount(len(self.items))
self.Refresh()
def DoRefresh(self):
"""To be used after adding large amounts of items with Append and
refresh=False.
"""
self.SetItemCount(len(self.items))
self.Refresh()
def Clear(self):
del self.items[:]
self.SetSelection(-1)
self.SetItemCount(0)
def GetClientData(self, n):
if n >= 0 and n < len(self.items):
return self.items[n][1]
else:
return None
def GetCount(self):
return len(self.items)
def GetSelections(self):
"""Return list of selected indices just like the wx.ListBox.
"""
# coded up this method purely to see if we could use SimpleHTMLListBox also
# for the module categories thingy
sels = []
item, cookie = self.GetFirstSelected()
while item != wx.NOT_FOUND:
sels.append(item)
# ... process item ...
item = self.GetNextSelected(cookie)
return sels
def GetString(self, n):
if n >= 0 and n < len(self.items):
return self.items[n][0]
else:
return None
def OnGetItem(self, n):
try:
return '<font size=-1>%s</font>' % (self.items[n][0],)
except IndexError:
return ''
class ProgressStatusBar(wx.StatusBar):
"""
StatusBar with progress gauge embedded.
Code adapted from wxPython demo.py | CustomStatusBar.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
# This status bar has three fields
self.SetFieldsCount(3)
# Sets the three fields to be relative widths to each other.
# status message gets the most room, then memory counter, then progress bar
self.SetStatusWidths([-4, -1, -2])
self.sizeChanged = False
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_IDLE, self.OnIdle)
# This will fall into field 1 (the second field)
# check the Reposition method to see how this is positioned
self.gauge = wx.Gauge(self, -1, 100)
self.gauge.SetValue(50)
# set the initial position of the gauge
self.Reposition()
def OnSize(self, evt):
self.Reposition() # for normal size events
# Set a flag so the idle time handler will also do the repositioning.
# It is done this way to get around a buglet where GetFieldRect is not
# accurate during the EVT_SIZE resulting from a frame maximize.
self.sizeChanged = True
def OnIdle(self, evt):
if self.sizeChanged:
self.Reposition()
# reposition the gauge
def Reposition(self):
border = 3
rect = self.GetFieldRect(2)
self.gauge.SetPosition((rect.x+border, rect.y+border))
self.gauge.SetSize((rect.width-border*2, rect.height-border*2))
self.sizeChanged = False
class MainWXFrame(wx.Frame):
"""Class for building main user interface frame.
All event handling and other intelligence should be elsewhere.
"""
def __init__(self, parent, id=-1, title="", pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE |
wx.SUNKEN_BORDER |
wx.CLIP_CHILDREN):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
# tell FrameManager to manage this frame
self._mgr = wx.aui.AuiManager()
self._mgr.SetManagedWindow(self)
self._make_menu()
# statusbar
self.statusbar = ProgressStatusBar(self)
self.SetStatusBar(self.statusbar)
self.SetMinSize(wx.Size(400, 300))
# could make toolbars here
# now we need to add panes
# on GTK, this sequence is flipped! search panel is at the
# bottom, module list at the top.
sp = self._create_module_search_panel()
self._mgr.AddPane(
sp,
wx.aui.AuiPaneInfo().Name('module_search').
Caption('Module Search and Categories').Left().Position(0).
MinSize(sp.GetSize()).
CloseButton(False))
# a little trick I found in the PyAUI source code. This will make
# sure that the pane is as low (small y) as it can be
p = self._mgr.GetPane('module_search')
p.dock_proportion = 0
self.module_list = self._create_module_list()
self._mgr.AddPane(
self.module_list,
wx.aui.AuiPaneInfo().Name('module_list').Caption('Module List').
Left().CloseButton(False))
##################################################################
# setup VTK rendering pipeline for the graph editor
self._rwi, self._ren = self._create_graph_canvas()
self._mgr.AddPane(
self._rwi,
wx.aui.AuiPaneInfo().Name('graph_canvas').
Caption('Graph Canvas').CenterPane())
##################################################################
# these two also get swapped on GTK
self._mgr.AddPane(
self._create_documentation_window(),
wx.aui.AuiPaneInfo().Name('doc_window').
Caption('Module Help').Bottom().CloseButton(False))
self._mgr.AddPane(
self._create_log_window(),
wx.aui.AuiPaneInfo().Name('log_window').
Caption('Log Messages').Bottom().CloseButton(False))
self._mgr.Update()
# save this perspective
self.perspective_default = self._mgr.SavePerspective()
wx.EVT_MENU(self, self.window_default_view_id,
lambda e: self._mgr.LoadPerspective(
self.perspective_default) and self._mgr.Update())
def close(self):
self._ren.RemoveAllViewProps()
del self._ren
self._rwi.GetRenderWindow().Finalize()
self._rwi.SetRenderWindow(None)
del self._rwi
self.Destroy()
def _create_documentation_window(self):
self.doc_window = HtmlWindow(self, -1, size=(200,80))
fsa = wxutils.create_html_font_size_array()
self.doc_window.SetFonts("", "", fsa)
return self.doc_window
def _create_graph_canvas(self):
rwi = wxVTKRenderWindowInteractor(self, -1,
size=(400,400))
# we have to call this, else moving a modal dialogue over the
# graph editor will result in trails. Usually, a wxVTKRWI
# refuses to render if its top-level parent is disabled. This
# is to stop VTK pipeline updates whilst wx.SafeYield() is
# being called. In _this_ case, the VTK pipeline is safe, so
# we can disable this check.
rwi.SetRenderWhenDisabled(True)
ren = vtk.vtkRenderer()
rwi.GetRenderWindow().AddRenderer(ren)
rw = rwi.GetRenderWindow()
rw.SetLineSmoothing(1)
rw.SetPointSmoothing(1)
# PolygonSmoothing is not really necessary for the GraphEditor
# (yet), and on a GeForce 4600 Ti on Linux with driver version
# 1.0-9639, you can see triangle lines bisecting quads. Not
# a nice artifact, so I've disabled this for now.
#rw.SetPolygonSmoothing(1)
return (rwi, ren)
def _create_log_window(self):
tc = wx.TextCtrl(
self, -1, "", size=(200, 80),
style=wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
self.message_log_text_ctrl = tc
return tc
def _create_module_list(self):
self.module_list_box = SimpleHTMLListBox(
self, -1, size=(200,200),
style=wx.LB_SINGLE|wx.LB_NEEDED_SB)
return self.module_list_box
def _create_module_search_panel(self):
search_panel = wx.Panel(self, -1)
self.search = wx.SearchCtrl(search_panel, size=(200,-1), style=wx.TE_PROCESS_ENTER)
self.search.ShowSearchButton(1)
self.search.ShowCancelButton(1)
self.module_cats_choice = wx.Choice(search_panel,-1, size=(200,-1))
tl_sizer = wx.BoxSizer(wx.VERTICAL)
# option=0 so it doesn't fill vertically
tl_sizer.Add(self.search, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, 4)
tl_sizer.Add(self.module_cats_choice, 0, wx.EXPAND|wx.ALL, 4)
search_panel.SetAutoLayout(True)
search_panel.SetSizer(tl_sizer)
search_panel.GetSizer().Fit(search_panel)
search_panel.GetSizer().SetSizeHints(search_panel)
return search_panel
def _create_progress_panel(self):
progress_panel = wx.Panel(self, -1)#, size=wx.Size(100, 50))
self.progress_text = wx.StaticText(progress_panel, -1, "...")
self.progress_text.SetFont(
wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.progress_gauge = wx.Gauge(progress_panel, -1, 100)
self.progress_gauge.SetValue(50)
#self.progress_gauge.SetBackgroundColour(wx.Colour(50, 50, 204))
tl_sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer = wx.BoxSizer(wx.VERTICAL)
# these are in a vertical sizer, so expand will make them draw
# out horizontally as well
sizer.Add(self.progress_text, 0, wx.EXPAND | wx.BOTTOM, 4)
sizer.Add(self.progress_gauge, 0, wx.EXPAND)
tl_sizer.Add(sizer, 1, wx.EXPAND | wx.ALL, 7)
#sizer.SetMinSize((100, 50))
progress_panel.SetAutoLayout(True)
progress_panel.SetSizer(tl_sizer)
progress_panel.GetSizer().Fit(progress_panel)
progress_panel.GetSizer().SetSizeHints(progress_panel)
return progress_panel
def _make_menu(self):
# Menu Bar
self.menubar = wx.MenuBar()
self.SetMenuBar(self.menubar)
self.fileNewId = wx.NewId()
self.fileOpenId = wx.NewId()
self.fileOpenSegmentId = wx.NewId()
self.fileSaveId = wx.NewId()
self.id_file_save_as = wx.NewId()
self.id_file_export = wx.NewId()
self.fileSaveSelectedId = wx.NewId()
self.fileExportAsDOTId = wx.NewId()
self.fileExportSelectedAsDOTId = wx.NewId()
self.fileExitId = wx.NewId()
self.window_python_shell_id = wx.NewId()
self.helpShowHelpId = wx.NewId()
self.helpAboutId = wx.NewId()
file_menu = wx.Menu()
file_menu.Append(self.fileNewId, "&New\tCtrl-N",
"Create new network.", wx.ITEM_NORMAL)
file_menu.Append(self.fileOpenId, "&Open\tCtrl-O",
"Open and load existing network.", wx.ITEM_NORMAL)
file_menu.Append(
self.fileOpenSegmentId, "Open as Se&gment\tCtrl-G",
"Open a DeVIDE network as a segment in the copy buffer.",
wx.ITEM_NORMAL)
file_menu.Append(self.fileSaveId, "&Save\tCtrl-S",
"Save the current network.", wx.ITEM_NORMAL)
file_menu.Append(self.id_file_save_as, "Save &As",
"Save the current network with a new filename.", wx.ITEM_NORMAL)
file_menu.Append(self.id_file_export, "&Export\tCtrl-E",
"Export the current network with relative filenames",
wx.ITEM_NORMAL)
file_menu.Append(self.fileSaveSelectedId,
"Save se&lected Glyphs\tCtrl-L",
"Save the selected glyphs as a network.",
wx.ITEM_NORMAL)
file_menu.AppendSeparator()
file_menu.Append(
self.fileExportAsDOTId, "Export as DOT file",
"Export the current network as a GraphViz DOT file.",
wx.ITEM_NORMAL)
file_menu.Append(self.fileExportSelectedAsDOTId,
"Export selection as DOT file",
"Export the selected glyphs as a GraphViz DOT file.",
wx.ITEM_NORMAL)
file_menu.AppendSeparator()
file_menu.Append(self.fileExitId, "E&xit\tCtrl-Q",
"Exit DeVIDE!", wx.ITEM_NORMAL)
self.menubar.Append(file_menu, "&File")
self.edit_menu = wx.Menu()
self.menubar.Append(self.edit_menu, "&Edit")
modules_menu = wx.Menu()
self.id_modules_search = wx.NewId()
modules_menu.Append(
self.id_modules_search, "Search for modules\tCtrl-F",
"Change input "
"focus to module search box.", wx.ITEM_NORMAL)
self.id_rescan_modules = wx.NewId()
modules_menu.Append(
self.id_rescan_modules, "Rescan modules", "Recheck all module "
"directories for new modules and metadata.", wx.ITEM_NORMAL)
self.id_refresh_module_kits = wx.NewId()
modules_menu.Append(
self.id_refresh_module_kits, "Refresh module kits",
"Attempt to refresh / reload all module_kits.", wx.ITEM_NORMAL)
self.menubar.Append(modules_menu, "&Modules")
self.network_menu = wx.Menu()
self.menubar.Append(self.network_menu, "&Network")
window_menu = wx.Menu()
self.window_default_view_id = wx.NewId()
window_menu.Append(
self.window_default_view_id, "Restore &default view",
"Restore default perspective / window configuration.",
wx.ITEM_NORMAL)
window_menu.Append(self.window_python_shell_id, "&Python Shell",
"Show the Python Shell interface.",
wx.ITEM_NORMAL)
self.menubar.Append(window_menu, "&Window")
help_menu = wx.Menu()
help_menu.Append(self.helpShowHelpId, "Show &Help\tF1", "",
wx.ITEM_NORMAL)
help_menu.Append(self.helpAboutId, "About", "",
wx.ITEM_NORMAL)
self.menubar.Append(help_menu, "&Help")
# Menu Bar end
def set_progress(self, percentage, message):
self.statusbar.gauge.SetValue(percentage)
self.statusbar.SetStatusText(message, 0)
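# Illustrative sketch: MainWXFrame only builds the interface; event handlers
# and the module/network logic are wired up elsewhere in DeVIDE.  A bare-bones
# way to show the frame on its own (e.g. for layout testing) would be roughly:
#
#   app = wx.App(False)
#   frame = MainWXFrame(None, title="DeVIDE")
#   frame.Show()
#   frame.set_progress(0, "ready")
#   app.MainLoop()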
|
nagyistoce/devide
|
interfaces/wx_interface/main_frame.py
|
Python
|
bsd-3-clause
| 15,352
|
[
"VTK"
] |
9e158a488434dac4028b9a62d14a8509fc3b4cab49b09c28d847c97c00e66538
|
praatscripts = {
'formants.praat':"""
form Variables
sentence filename
real nformants
real ceiling
endform
Read from file... 'filename$'
To Formant (burg)... 0 'nformants' 'ceiling' 0.025 50
frames = Get number of frames
output$ = "time"+tab$+"F1"+tab$+"B1"+tab$+"F2"+tab$+"B2"+newline$
for f from 1 to frames
t = Get time from frame number... 'f'
t$ = fixed$(t, 3)
f1 = Get value at time... 1 't' Hertz Linear
f1$ = fixed$(f1, 2)
f2 = Get value at time... 2 't' Hertz Linear
f2$ = fixed$(f2, 2)
b1 = Get bandwidth at time... 1 't' Hertz Linear
b1$ = fixed$(b1, 2)
b2 = Get bandwidth at time... 2 't' Hertz Linear
b2$ = fixed$(b2, 2)
output$ = output$+t$+tab$+f1$+tab$+b1$+tab$+f2$+tab$+b2$+newline$
endfor
echo 'output$'""",
'formant_list.praat':"""
form Variables
sentence filename
real nformants
real ceiling
endform
Read from file... 'filename$'
To Formant (burg)... 0 'nformants' 'ceiling' 0.025 50
List... 0 1 3 0 1 0 1 1""",
'extract.praat':"""
form Variables
sentence filename
real begin
real end
sentence outname
endform
Read from file... 'filename$'
Extract part... 'begin' 'end' rectangular 1.0 0
Save as WAV file... 'outname$'""",
'pitch.praat': """
form Variables
sentence filename
real timestep
endform
Read from file... 'filename$'
To Pitch (ac)... 'timestep' 75.0 15 yes 0.03 0.45 0.01 0.35 0.14 600.0
frames = Get number of frames
output$ = "Time"+tab$+"Pitch"+newline$
for f from 1 to frames
t = Get time from frame number... 'f'
t$ = fixed$(t, 3)
v = Get value in frame... 'f' Hertz
v$ = fixed$(v, 2)
output$ = output$+t$+tab$+v$+newline$
endfor
echo 'output$'""",
'intensity.praat': """
form Variables
sentence filename
real timestep
endform
Read from file... 'filename$'
To Intensity... 100 'timestep' yes
frames = Get number of frames
output$ = "time(s)"+tab$+"Intensity(dB)"+newline$
for f from 1 to frames
t = Get time from frame number... 'f'
t$ = fixed$(t, 3)
v = Get value in frame... 'f'
v$ = fixed$(v, 2)
output$ = output$+t$+tab$+v$+newline$
endfor
echo 'output$'""",
'spectroPic.praat':"""
form Variables
sentence filename
boolean formants
real nformants
real ceiling
real numBounds
text boundaries
endform
Erase all
Read from file... 'filename$'
outname$ = filename$ -".wav"+"-spectro.eps"
name$ = selected$("Sound")
dur = Get total duration
step = dur / 512
Colour... black
To Spectrogram... 0.005 'ceiling' 'step' 20 Gaussian
Paint... 0.0 0.0 0.0 0.0 100 1 50.0 6.0 0.0 1
bound$ = boundaries$
for i from 1 to numBounds
if index(bound$,",") = 0
b$ = bound$
else
b$ = left$(bound$,index(bound$,",")-1)
bound$ = right$(bound$,length(bound$)-index(bound$,","))
endif
Colour... blue
Draw line... 'b$' 0 'b$' 'ceiling'
endfor
if formants = 1
select Sound 'name$'
To Formant (burg)... 0.0 'nformants' 'ceiling' 0.025 50
Colour... red
Speckle... 0.0 0.0 'ceiling' 30 1
Draw tracks... 0.0 0.0 'ceiling' 1
endif
Save as EPS file... 'outname$'""",
'waveformPic.praat':"""
form Variables
sentence filename
real numBounds
text boundaries
endform
outname$ = filename$-".wav"+"-waveform.eps"
printline 'numBounds'
Erase all
Read from file... 'filename$'
min = Get minimum... 0.0 0.0 None
max = Get maximum... 0.0 0.0 None
Draw... 0.0 0.0 0.0 0.0 1 Curve
bound$ = boundaries$
for i from 1 to numBounds
if index(bound$,",") = 0
b$ = bound$
else
b$ = left$(bound$,index(bound$,",")-1)
bound$ = right$(bound$,length(bound$)-index(bound$,","))
endif
Colour... blue
Draw line... 'b$' 'min' 'b$' 'max'
endfor
Save as EPS file... 'outname$'"""
}
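# --- Hedged usage sketch (illustration only, not part of the original module) ---
# The praatscripts dict above only stores Praat source text; a caller still has
# to write a script to disk and run the Praat binary itself. A minimal sketch,
# assuming a `praat` executable on PATH and that the form arguments (e.g.
# filename, nformants, ceiling) are passed positionally on the command line.
# The helper name below is hypothetical.
def run_praat_script(script_key, *args):
    """Write the named script to a temporary file, run it with Praat, return stdout."""
    import os
    import subprocess
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.praat')
    try:
        with os.fdopen(fd, 'w') as handle:
            handle.write(praatscripts[script_key])
        # e.g. run_praat_script('formants.praat', 'sound.wav', 5, 5500)
        return subprocess.check_output(['praat', path] + [str(a) for a in args])
    finally:
        os.remove(path)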
|
mmcauliffe/linguistic-helper-functions
|
linghelper/phonetics/praat/scripts.py
|
Python
|
gpl-3.0
| 4,319
|
[
"Gaussian"
] |
aa85d84cba0b907a6821b600d7c926379e15a0ce3454f0c79f79ba1f3e2ffd49
|
import numpy, mlpy, time, scipy, os
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioBasicIO
import matplotlib.pyplot as plt
from scipy.spatial import distance
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.lda import LDA
import csv, os.path, sklearn, sklearn.hmm, cPickle, glob
# # # # # # # # # # # # # # #
# General utility functions #
# # # # # # # # # # # # # # #
def smoothMovingAvg(inputSignal, windowLen=11):
windowLen = int(windowLen)
if inputSignal.ndim != 1:
raise ValueError, "smoothMovingAvg only accepts 1-dimension arrays."
if inputSignal.size < windowLen:
raise ValueError, "Input vector needs to be bigger than window size."
if windowLen<3:
return inputSignal
s = numpy.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1], inputSignal, 2*inputSignal[-1]-inputSignal[-1:-windowLen:-1]]
w = numpy.ones(windowLen, 'd')
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[windowLen:-windowLen+1]
def selfSimilarityMatrix(featureVectors):
'''
This function computes the self-similarity matrix for a sequence of feature vectors.
ARGUMENTS:
- featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector
RETURNS:
- S: the self-similarity matrix (nVectors x nVectors)
'''
[nDims, nVectors] = featureVectors.shape
[featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])
featureVectors2 = featureVectors2[0].T
S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))
return S
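# Hedged illustration (not part of the original library): the self-similarity
# matrix of an (nDims x nVectors) feature sequence is square with side nVectors.
# The demo function below is hypothetical and only shows the expected call.
def _demo_selfSimilarityMatrix():
    featureVectors = numpy.random.rand(10, 40)   # 10 feature dimensions over 40 frames (synthetic)
    S = selfSimilarityMatrix(featureVectors)
    return S.shape   # (40, 40); values lie in [-1, 1] since 1 - cosine distance is used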
def flags2segs(Flags, window):
'''
ARGUMENTS:
- Flags: a sequence of class flags (per time window)
- window: window duration (in seconds)
RETURNS:
- segs: a sequence of segments' limits: segs[i, 0] is the start and segs[i, 1] the end point of the i-th segment
- classes: a sequence of class flags: class[i] is the class ID of the i-th segment
'''
preFlag = 0
curFlag = 0
numOfSegments = 0
curVal = Flags[curFlag]
segsList = []
classes = []
while (curFlag<len(Flags)-1):
stop = 0
preFlag = curFlag
preVal = curVal
while (stop==0):
curFlag = curFlag + 1
tempVal = Flags[curFlag]
if ((tempVal != curVal) | (curFlag==len(Flags)-1)): # stop
numOfSegments = numOfSegments + 1
stop = 1
curSegment = curVal
curVal = Flags[curFlag]
segsList.append((curFlag*window))
classes.append(preVal)
segs = numpy.zeros ((len(segsList),2))
for i in range(len(segsList)):
if i>0:
segs[i, 0] = segsList[i-1]
segs[i, 1] = segsList[i]
return (segs, classes)
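# Hedged illustration (not part of the original library): flags2segs turns a
# per-window label sequence into (start, end) segments plus one label per
# segment. The demo below uses made-up labels and a 1-second window.
def _demo_flags2segs():
    flags = ["speech", "speech", "speech", "speech", "music", "music", "music"]
    segs, classes = flags2segs(flags, 1.0)
    # segs is an (nSegments x 2) array of start/end times, classes the matching labels
    return segs, classes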
def segs2flags(segStart, segEnd, segLabel, winSize):
'''
This function converts segment endpoints and respective segment labels to fix-sized class labels.
ARGUMENTS:
- segStart: segment start points (in seconds)
- segEnd: segment endpoints (in seconds)
- segLabel: segment labels
- winSize: fix-sized window (in seconds)
RETURNS:
- flags: numpy array of class indices
- classNames: list of classnames (strings)
'''
flags = []
classNames = list(set(segLabel))
curPos = winSize / 2.0;
while curPos < segEnd[-1]:
for i in range(len(segStart)):
if curPos > segStart[i] and curPos <=segEnd[i]:
break;
flags.append(classNames.index(segLabel[i]))
curPos += winSize
return numpy.array(flags), classNames
def readSegmentGT(gtFile):
'''
This function reads a segmentation ground truth file, following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gtFile: the path of the CSV segment file
RETURNS:
- segStart: a numpy array of segments' start positions
- segEnd: a numpy array of segments' ending positions
- segLabel: a list of respective class labels (strings)
'''
f = open(gtFile, "rb")
reader = csv.reader(f, delimiter=',')
segStart = []; segEnd = []; segLabel = []
for row in reader:
if len(row)==3:
segStart.append(float(row[0]))
segEnd.append(float(row[1]))
#if row[2]!="other":
# segLabel.append((row[2]))
#else:
# segLabel.append("silence")
segLabel.append((row[2]))
return numpy.array(segStart), numpy.array(segEnd), segLabel
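# Hedged illustration (not part of the original library): the ground-truth file
# is a plain CSV with one "<start>,<end>,<label>" row per segment. The demo
# writes a throw-away file and reads it back.
def _demo_readSegmentGT():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.segments', delete=False)
    tmp.write("0.0,2.5,speech\n2.5,4.0,music\n")
    tmp.close()
    segStart, segEnd, segLabel = readSegmentGT(tmp.name)
    os.remove(tmp.name)
    # segStart -> [0.0, 2.5], segEnd -> [2.5, 4.0], segLabel -> ['speech', 'music']
    return segStart, segEnd, segLabel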
def plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, ONLY_EVALUATE = False):
'''
This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method.
It also computes the overall accuracy achieved by the respective method if ground-truth is available.
'''
flags = [classNames[int(f)] for f in flagsInd]
(segs, classes) = flags2segs(flags, mtStep)
minLength = min( flagsInd.shape[0], flagsIndGT.shape[0] )
if minLength>0:
accuracy = numpy.count_nonzero(flagsInd[0:minLength]==flagsIndGT[0:minLength]) / float(minLength)
else:
accuracy = -1
if not ONLY_EVALUATE:
Duration = segs[-1, 1];
SPercentages = numpy.zeros((len(classNames), 1))
Percentages = numpy.zeros((len(classNames), 1))
AvDurations = numpy.zeros((len(classNames), 1))
for iSeg in range(segs.shape[0]):
SPercentages[classNames.index(classes[iSeg])] += (segs[iSeg,1]-segs[iSeg,0])
for i in range(SPercentages.shape[0]):
Percentages[i] = 100.0*SPercentages[i] / Duration
S = sum(1 for c in classes if c==classNames[i])
if S>0:
AvDurations[i] = SPercentages[i] / S
else:
AvDurations[i] = 0.0
for i in range(Percentages.shape[0]):
print classNames[i], Percentages[i], AvDurations[i]
font = {'family' : 'fantasy', 'size' : 10}
plt.rc('font', **font)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(classNames))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(range(len(flagsInd)))*mtStep+mtStep/2.0, flagsInd)
if flagsIndGT.shape[0]>0:
ax1.plot(numpy.array(range(len(flagsIndGT)))*mtStep+mtStep/2.0, flagsIndGT+0.05, '--r')
plt.xlabel("time (seconds)")
if accuracy>=0:
plt.title('Accuracy = {0:.1f}%'.format(100.0*accuracy))
ax2 = fig.add_subplot(223)
plt.title("Classes percentage durations")
ax2.axis((0, len(classNames)+1, 0, 100))
ax2.set_xticks(numpy.array(range(len(classNames)+1)))
ax2.set_xticklabels([" "] + classNames)
ax2.bar(numpy.array(range(len(classNames)))+0.5, Percentages)
ax3 = fig.add_subplot(224)
plt.title("Segment average duration per class")
ax3.axis((0, len(classNames)+1, 0, AvDurations.max()))
ax3.set_xticks(numpy.array(range(len(classNames)+1)))
ax3.set_xticklabels([" "] + classNames)
ax3.bar(numpy.array(range(len(classNames)))+0.5, AvDurations)
fig.tight_layout()
plt.show()
return accuracy
def evaluateSpeakerDiarization(flags, flagsGT):
minLength = min( flags.shape[0], flagsGT.shape[0] )
flags = flags[0:minLength]
flagsGT = flagsGT[0:minLength]
uFlags = numpy.unique(flags)
uFlagsGT = numpy.unique(flagsGT)
# compute contigency table:
cMatrix = numpy.zeros(( uFlags.shape[0], uFlagsGT.shape[0] ))
for i in range(minLength):
cMatrix[ int(numpy.nonzero(uFlags==flags[i])[0]), int(numpy.nonzero(uFlagsGT==flagsGT[i])[0]) ] += 1.0
Nc, Ns = cMatrix.shape;
N_s = numpy.sum(cMatrix,axis=0);
N_c = numpy.sum(cMatrix,axis=1);
N = numpy.sum(cMatrix);
purityCluster = numpy.zeros( (Nc,) )
puritySpeaker = numpy.zeros( (Ns,) )
# compute cluster purity:
for i in range(Nc):
purityCluster[i] = numpy.max( (cMatrix[i,:]) )/ (N_c[i]);
for j in range(Ns):
puritySpeaker[j] = numpy.max( (cMatrix[:,j]) )/ (N_s[j]);
purityClusterMean = numpy.sum(purityCluster*N_c) / N;
puritySpeakerMean = numpy.sum(puritySpeaker*N_s) / N;
return purityClusterMean, puritySpeakerMean
def trainHMM_computeStatistics(features, labels):
'''
This function computes the statistics used to train an HMM joint segmentation-classification model
using a sequence of sequential features and respective labels
ARGUMENTS:
- features: a numpy matrix of feature vectors (numOfDimensions x numOfWindows)
- labels: a numpy array of class indices (numOfWindows x 1)
RETURNS:
- startprob: matrix of prior class probabilities (numOfClasses x 1)
- transmat: transition matrix (numOfClasses x numOfClasses)
- means: means matrix (numOfDimensions x 1)
- cov: deviation matrix (numOfDimensions x 1)
'''
uLabels = numpy.unique(labels)
nComps = len(uLabels)
nFeatures = features.shape[0]
if features.shape[1] < labels.shape[0]:
print "trainHMM warning: number of short-term feature vectors must be greater or equal to the labels length!"
labels = labels[0:features.shape[1]]
# compute prior probabilities:
startprob = numpy.zeros((nComps,))
for i,u in enumerate(uLabels):
startprob[i] = numpy.count_nonzero(labels==u)
startprob = startprob / startprob.sum() # normalize prior probabilities
# compute transition matrix:
transmat = numpy.zeros((nComps, nComps))
for i in range(labels.shape[0]-1):
transmat[int(labels[i]), int(labels[i+1])] += 1;
for i in range(nComps): # normalize rows of transition matrix:
transmat[i, :] /= transmat[i, :].sum()
means = numpy.zeros((nComps, nFeatures))
for i in range(nComps):
means[i,:] = numpy.matrix(features[:,numpy.nonzero(labels==uLabels[i])[0]].mean(axis=1))
cov = numpy.zeros( (nComps, nFeatures) );
for i in range(nComps):
#cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==uLabels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used!
cov[i,:] = numpy.std(features[:,numpy.nonzero(labels==uLabels[i])[0]], axis = 1)
return startprob, transmat, means, cov
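# Hedged illustration (not part of the original library): shapes of the HMM
# statistics for a synthetic two-class, three-dimensional feature sequence.
def _demo_trainHMM_computeStatistics():
    features = numpy.random.rand(3, 6)            # 3 feature dimensions over 6 windows
    labels = numpy.array([0, 0, 0, 1, 1, 1])      # one class index per window
    startprob, transmat, means, cov = trainHMM_computeStatistics(features, labels)
    # startprob: (2,), transmat: (2, 2), means and cov: (2, 3)
    return startprob, transmat, means, cov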
def trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
'''
This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wavFile: the path of the audio filename
- gtFile: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
'''
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read ground truth data
flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to fix-sized sequence of flags
[Fs, x] = audioBasicIO.readAudioFile(wavFile); # read audio data
#F = aF.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
[F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*0.050), round(Fs*0.050)); # feature extraction
startprob, transmat, means, cov = trainHMM_computeStatistics(F, flags) # compute HMM statistics (priors, transition matrix, etc)
hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat) # hmm training
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmmModelName, "wb") # output to file
cPickle.dump(hmm, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classNames, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol = cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classNames
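# Hedged illustration (not part of the original library): train and store an HMM
# segmenter from a single annotated recording. The file names are hypothetical;
# the .segments file must follow the CSV format read by readSegmentGT.
def _demo_trainHMM_fromFile():
    return trainHMM_fromFile("recording.wav", "recording.segments", "hmmTemp", 1.0, 0.1)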
def trainHMM_fromDir(dirPath, hmmModelName, mtWin, mtStep):
'''
This function trains an HMM model for segmentation-classification using a directory where WAV files and .segments (ground-truth) files are stored
ARGUMENTS:
- dirPath: the path of the data directory
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values are stored in the hmmModelName file
'''
flagsAll = numpy.array([])
classesAll = []
for i,f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file
wavFile = f;
gtFile = f.replace('.wav', '.segments'); # open for annotated file
if not os.path.isfile(gtFile): # if current WAV file does not have annotation -> skip
continue;
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
for c in classNames: # update classnames:
if c not in classesAll:
classesAll.append(c)
[Fs, x] = audioBasicIO.readAudioFile(wavFile); # read audio data
[F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*0.050), round(Fs*0.050)); # feature extraction
lenF = F.shape[1]; lenL = len(flags); MIN = min(lenF, lenL)
F = F[:, 0:MIN]
flags = flags[0:MIN]
flagsNew = []
for j, fl in enumerate(flags): # append features and labels
flagsNew.append( classesAll.index( classNames[flags[j]] ) )
flagsAll = numpy.append(flagsAll, numpy.array(flagsNew))
if i==0:
Fall = F;
else:
Fall = numpy.concatenate((Fall, F), axis = 1)
startprob, transmat, means, cov = trainHMM_computeStatistics(Fall, flagsAll) # compute HMM statistics
hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat) # train HMM
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmmModelName, "wb") # save HMM model
cPickle.dump(hmm, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classesAll, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol = cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classesAll
def hmmSegmentation(wavFileName, hmmModelName, PLOT = False, gtFileName = ""):
[Fs, x] = audioBasicIO.readAudioFile(wavFileName); # read audio data
try:
fo = open(hmmModelName, "rb")
except IOError:
print "didn't find file"
return
try:
hmm = cPickle.load(fo)
classesAll = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
except:
fo.close()
fo.close()
#Features = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs); # feature extraction
[Features, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*0.050), round(Fs*0.050));
flagsInd = hmm.predict(Features.T) # apply model
#for i in range(len(flagsInd)):
# if classesAll[flagsInd[i]]=="silence":
# flagsInd[i]=classesAll.index("speech")
# plot results
if os.path.isfile(gtFileName):
[segStart, segEnd, segLabels] = readSegmentGT(gtFileName)
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)
flagsGTNew = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classesAll:
flagsGTNew.append( classesAll.index( classNamesGT[flagsGT[j]] ) )
else:
flagsGTNew.append( -1 )
flagsIndGT = numpy.array(flagsGTNew)
else:
flagsIndGT = numpy.array([]);
acc = plotSegmentationResults(flagsInd, flagsIndGT, classesAll, mtStep, not PLOT)
if acc>=0:
print "Overall Accuracy: {0:.2f}".format(acc)
return flagsInd, classesAll, acc
def mtFileClassification(inputFile, modelName, modelType, plotResults = False, gtFile = ""):
'''
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used, i.e. a pre-trained classifier.
ARGUMENTS:
- inputFile: path of the input WAV file
- modelName: name of the classification model
- modelType: svm or knn depending on the classifier type
- plotResults: True if results are to be plotted using matplotlib along with a set of statistics
RETURNS:
- flagsInd: a numpy array of class indices (one per mid-term window)
- classNames: a list of class names
- acc: overall accuracy (-1 if no ground-truth is available)
'''
if not os.path.isfile(modelName):
print "mtFileClassificationError: input modelType not found!"
return (-1,-1,-1)
# Load classifier:
if modelType=='svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType=='knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
if computeBEAT:
print "Model " + modelName + " contains long-term music features (beat etc) and cannot be used in segmentation"
return (-1,-1,-1)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # load input file
if Fs == -1: # could not read file
return (-1,-1,-1)
x = audioBasicIO.stereo2mono(x); # convert stereo (if) to mono
Duration = len(x) / Fs
# mid-term feature extraction:
[MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stStep));
flags = []; Ps = []; flagsInd = []
for i in range(MidTermFeatures.shape[1]): # for each feature vector (i.e. for each fix-sized segment):
curFV = (MidTermFeatures[:, i] - MEAN) / STD; # normalize current feature vector
[Result, P] = aT.classifierWrapper(Classifier, modelType, curFV) # classify vector
flagsInd.append(Result)
flags.append(classNames[int(Result)]) # update class label matrix
Ps.append(numpy.max(P)) # update probability matrix
flagsInd = numpy.array(flagsInd)
# 1-window smoothing
for i in range(1, len(flagsInd)-1):
if flagsInd[i-1]==flagsInd[i+1]:
flagsInd[i] = flagsInd[i+1]
(segs, classes) = flags2segs(flags, mtStep) # convert fix-sized flags to segments and classes
segs[-1] = len(x) / float(Fs)
# Load ground-truth:
if os.path.isfile(gtFile):
[segStartGT, segEndGT, segLabelsGT] = readSegmentGT(gtFile)
flagsGT, classNamesGT = segs2flags(segStartGT, segEndGT, segLabelsGT, mtStep)
flagsIndGT = []
for j, fl in enumerate(flagsGT): # "align" labels with GT
if classNamesGT[flagsGT[j]] in classNames:
flagsIndGT.append( classNames.index( classNamesGT[flagsGT[j]] ) )
else:
flagsIndGT.append( -1 )
flagsIndGT = numpy.array(flagsIndGT)
else:
flagsIndGT = numpy.array([])
acc = plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, not plotResults)
if acc>=0:
print "Overall Accuracy: {0:.3f}".format(acc)
return (flagsInd, classNames, acc)
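# Hedged illustration (not part of the original library): segment a recording
# with a pre-trained mid-term classifier. The file and model names are
# hypothetical; the model must have been trained with audioTrainTest first.
def _demo_mtFileClassification():
    flagsInd, classNames, acc = mtFileClassification("recording.wav", "mySVMmodel", "svm", plotResults=False)
    return flagsInd, classNames, acc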
def evaluateSegmentationClassificationDir(dirName, modelName, methodName):
flagsAll = numpy.array([])
classesAll = []
accuracys = []
for i,f in enumerate(glob.glob(dirName + os.sep + '*.wav')): # for each WAV file
wavFile = f;
print wavFile
gtFile = f.replace('.wav', '.segments'); # open for annotated file
if methodName.lower() in ["svm", "knn"]:
flagsInd, classNames, acc = mtFileClassification(wavFile, modelName, methodName, False, gtFile)
else:
flagsInd, classNames, acc = hmmSegmentation(wavFile, modelName, False, gtFile)
if acc>-1:
accuracys.append(acc)
print " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
print "Average Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).mean())
print "Median Accuracy: {0:.1f}".format(100.0*numpy.median(numpy.array(accuracys)))
print "Min Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).min())
print "Max Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).max())
def silenceRemoval(x, Fs, stWin, stStep, smoothWindow = 0.5, Weight = 0.5, plot = False):
'''
Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- Fs: sampling freq
- stWin, stStep: window size and step in seconds
- smoothWindow: (optional) smooth window (in seconds)
- Weight: (optional) weight factor (0 < Weight < 1); the higher, the more strict
- plot: (optional) True if results are to be plotted
RETURNS:
- segmentLimits: list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that
the resulting segments are (0.1 - 0.9) seconds and (1.4 - 3.0) seconds)
'''
if Weight>=1:
Weight = 0.99;
if Weight<=0:
Weight = 0.01;
# Step 1: feature extraction
x = audioBasicIO.stereo2mono(x); # convert to mono
ShortTermFeatures = aF.stFeatureExtraction(x, Fs, stWin*Fs, stStep*Fs) # extract short-term features
# Step 2: train binary SVM classifier of low vs high energy frames
EnergySt = ShortTermFeatures[1, :] # keep only the energy short-term sequence (2nd feature)
E = numpy.sort(EnergySt) # sort the energy feature values:
L1 = int(len(E)/10) # number of 10% of the total short-term windows
T1 = numpy.mean(E[0:L1]) # compute "lower" 10% energy threshold
T2 = numpy.mean(E[-L1:-1]) # compute "higher" 10% energy threshold
Class1 = ShortTermFeatures[:,numpy.where(EnergySt<T1)[0]] # get all features that correspond to low energy
Class2 = ShortTermFeatures[:,numpy.where(EnergySt>T2)[0]] # get all features that correspond to high energy
featuresSS = [Class1.T, Class2.T]; # form the binary classification task and ...
[featuresNormSS, MEANSS, STDSS] = aT.normalizeFeatures(featuresSS) # normalize and ...
SVM = aT.trainSVM(featuresNormSS, 1.0) # train the respective SVM probabilistic model (ONSET vs SILENCE)
# Step 3: compute onset probability based on the trained SVM
ProbOnset = []
for i in range(ShortTermFeatures.shape[1]): # for each frame
curFV = (ShortTermFeatures[:,i] - MEANSS) / STDSS # normalize feature vector
ProbOnset.append(SVM.pred_probability(curFV)[1]) # get SVM probability (that it belongs to the ONSET class)
ProbOnset = numpy.array(ProbOnset)
ProbOnset = smoothMovingAvg(ProbOnset, smoothWindow / stStep) # smooth probability
# Step 4A: detect onset frame indices:
ProbOnsetSorted = numpy.sort(ProbOnset) # find probability Threshold as a weighted average of top 10% and lower 10% of the values
Nt = ProbOnsetSorted.shape[0] / 10;
T = (numpy.mean( (1-Weight)*ProbOnsetSorted[0:Nt] ) + Weight*numpy.mean(ProbOnsetSorted[-Nt::]) )
MaxIdx = numpy.where(ProbOnset>T)[0]; # get the indices of the frames that satisfy the thresholding
i = 0;
timeClusters = []
segmentLimits = []
# Step 4B: group frame indices to onset segments
while i<len(MaxIdx): # for each of the detected onset indices
curCluster = [MaxIdx[i]]
if i==len(MaxIdx)-1:
break
while MaxIdx[i+1] - curCluster[-1] <= 2:
curCluster.append(MaxIdx[i+1])
i += 1
if i==len(MaxIdx)-1:
break
i += 1
timeClusters.append(curCluster)
segmentLimits.append([curCluster[0]*stStep, curCluster[-1]*stStep])
# Step 5: Post process: remove very small segments:
minDuration = 0.2;
segmentLimits2 = []
for s in segmentLimits:
if s[1] - s[0] > minDuration:
segmentLimits2.append(s)
segmentLimits = segmentLimits2;
if plot:
timeX = numpy.arange(0, x.shape[0] / float(Fs) , 1.0/Fs)
plt.subplot(2,1,1); plt.plot(timeX, x)
for s in segmentLimits:
plt.axvline(x=s[0]);
plt.axvline(x=s[1]);
plt.subplot(2,1,2); plt.plot(numpy.arange(0, ProbOnset.shape[0] * stStep, stStep), ProbOnset);
plt.title('Signal')
for s in segmentLimits:
plt.axvline(x=s[0]);
plt.axvline(x=s[1]);
plt.title('SVM Probability')
plt.show()
return segmentLimits
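# Hedged illustration (not part of the original library): read a WAV file and
# keep only the detected non-silent regions. The file name is hypothetical.
def _demo_silenceRemoval():
    [Fs, x] = audioBasicIO.readAudioFile("recording.wav")
    segments = silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow=1.0, Weight=0.3, plot=False)
    # segments is a list of [start, end] pairs in seconds, e.g. [[0.5, 3.1], [4.0, 7.2]]
    return segments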
def speakerDiarization(fileName, numOfSpeakers, mtSize = 2.0, mtStep=0.2, stWin=0.05, LDAdim = 35, PLOT = False):
'''
ARGUMENTS:
- fileName: the name of the WAV file to be analyzed
- numOfSpeakers the number of speakers (clusters) in the recording (<=0 for unknown)
- mtSize (opt) mid-term window size
- mtStep (opt) mid-term window step
- stWin (opt) short-term window size
- LDAdim (opt) LDA dimension (0 for no LDA)
- PLOT (opt) 0 for not plotting the results, 1 for plotting
'''
[Fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x);
Duration = len(x) / Fs
[Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel("data/knnSpeakerAll")
[Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel("data/knnSpeakerFemaleMale")
[MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stWin*0.5));
MidTermFeatures2 = numpy.zeros( (MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1] ) )
for i in range(MidTermFeatures.shape[1]):
curF1 = (MidTermFeatures[:,i] - MEAN1) / STD1
curF2 = (MidTermFeatures[:,i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001;
MidTermFeatures2[MidTermFeatures.shape[0]+len(classNames1)::, i] = P2 + 0.0001;
MidTermFeatures = MidTermFeatures2 # TODO
# SELECT FEATURES:
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20]; # SET 0A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100]; # SET 0B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 0C
iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53]; # SET 1A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 1B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 1C
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53]; # SET 2A
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 2B
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 2C
#iFeaturesSelect = range(100); # SET 3
#MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010
MidTermFeatures = MidTermFeatures[iFeaturesSelect,:]
(MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
numOfWindows = MidTermFeatures.shape[1]
# remove outliers:
DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
MDistancesAll = numpy.mean(DistancesAll)
iNonOutLiers = numpy.nonzero(DistancesAll < 1.2*MDistancesAll)[0]
# TODO: Combine energy threshold for outlier removal:
#EnergyMin = numpy.min(MidTermFeatures[1,:])
#EnergyMean = numpy.mean(MidTermFeatures[1,:])
#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
#iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
#print iNonOutLiers
perOutLier = (100.0*(numOfWindows-iNonOutLiers.shape[0])) / numOfWindows
MidTermFeaturesNormOr = MidTermFeaturesNorm
MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]
# LDA dimensionality reduction:
if LDAdim > 0:
#[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
# extract mid-term features with minimum step:
mtWinRatio = int(round(mtSize / stWin));
mtStepRatio = int(round(stWin / stWin));
mtFeaturesToReduce = []
numOfFeatures = len(ShortTermFeatures)
numOfStatistics = 2;
#for i in range(numOfStatistics * numOfFeatures + 1):
for i in range(numOfStatistics * numOfFeatures):
mtFeaturesToReduce.append([])
for i in range(numOfFeatures): # for each of the short-term features:
curPos = 0
N = len(ShortTermFeatures[i])
while (curPos<N):
N1 = curPos
N2 = curPos + mtWinRatio
if N2 > N:
N2 = N
curStFeatures = ShortTermFeatures[i][N1:N2]
mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
curPos += mtStepRatio
mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
mtFeaturesToReduce2 = numpy.zeros( (mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1] ) )
for i in range(mtFeaturesToReduce.shape[1]):
curF1 = (mtFeaturesToReduce[:,i] - MEAN1) / STD1
curF2 = (mtFeaturesToReduce[:,i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0]+len(classNames1), i] = P1 + 0.0001;
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001;
mtFeaturesToReduce = mtFeaturesToReduce2
mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect,:]
#mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
(mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
mtFeaturesToReduce = mtFeaturesToReduce[0].T
#DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
#MDistancesAll = numpy.mean(DistancesAll)
#iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
#mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
Labels = numpy.zeros((mtFeaturesToReduce.shape[1],));
LDAstep = 1.0
LDAstepRatio = LDAstep / stWin
#print LDAstep, LDAstepRatio
for i in range(Labels.shape[0]):
Labels[i] = int(i*stWin/LDAstepRatio);
clf = LDA(n_components=LDAdim)
clf.fit(mtFeaturesToReduce.T, Labels, tol=0.000001)
MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T
if numOfSpeakers<=0:
sRange = range(2,10)
else:
sRange = [numOfSpeakers]
clsAll = []; silAll = []; centersAll = []
for iSpeakers in sRange:
cls, means, steps = mlpy.kmeans(MidTermFeaturesNorm.T, k=iSpeakers, plus=True) # perform k-means clustering
#YDist = distance.pdist(MidTermFeaturesNorm.T, metric='euclidean')
#print distance.squareform(YDist).shape
#hc = mlpy.HCluster()
#hc.linkage(YDist)
#cls = hc.cut(14.5)
#print cls
# Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
clsAll.append(cls)
centersAll.append(means)
silA = []; silB = []
for c in range(iSpeakers): # for each speaker (i.e. for each extracted cluster)
clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
if clusterPerCent < 0.020:
silA.append(0.0)
silB.append(0.0)
else:
MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c] # get subset of feature vectors
Yt = distance.pdist(MidTermFeaturesNormTemp.T) # compute average distance between samples that belong to the cluster (a values)
silA.append(numpy.mean(Yt)*clusterPerCent)
silBs = []
for c2 in range(iSpeakers): # compute distances from samples of other clusters
if c2!=c:
clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
silBs = numpy.array(silBs)
silB.append(min(silBs)) # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
silA = numpy.array(silA);
silB = numpy.array(silB);
sil = []
for c in range(iSpeakers): # for each cluster (speaker)
sil.append( ( silB[c] - silA[c]) / (max(silB[c], silA[c])+0.00001) ) # compute silhouette
silAll.append(numpy.mean(sil)) # keep the AVERAGE SILLOUETTE
#silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
imax = numpy.argmax(silAll) # position of the maximum sillouette value
nSpeakersFinal = sRange[imax] # optimal number of clusters
# generate the final set of cluster labels
# (important: need to retrieve the outlier windows: this is achieved by giving them the value of their nearest non-outlier window)
cls = numpy.zeros((numOfWindows,))
for i in range(numOfWindows):
j = numpy.argmin(numpy.abs(i-iNonOutLiers))
cls[i] = clsAll[imax][j]
# Post-process method 1: hmm smoothing
for i in range(1):
startprob, transmat, means, cov = trainHMM_computeStatistics(MidTermFeaturesNormOr, cls)
hmm = sklearn.hmm.GaussianHMM(startprob.shape[0], "diag", startprob, transmat) # hmm training
hmm.means_ = means; hmm.covars_ = cov
cls = hmm.predict(MidTermFeaturesNormOr.T)
# Post-process method 2: median filtering:
cls = scipy.signal.medfilt(cls, 13)
cls = scipy.signal.medfilt(cls, 11)
sil = silAll[imax] # final sillouette
classNames = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];
# load ground-truth if available
gtFile = fileName.replace('.wav', '.segments'); # open for annotated file
if os.path.isfile(gtFile): # if groundturh exists
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
if PLOT:
fig = plt.figure()
if numOfSpeakers>0:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(classNames))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(range(len(cls)))*mtStep+mtStep/2.0, cls)
if os.path.isfile(gtFile):
if PLOT:
ax1.plot(numpy.array(range(len(flagsGT)))*mtStep+mtStep/2.0, flagsGT, 'r')
purityClusterMean, puritySpeakerMean = evaluateSpeakerDiarization(cls, flagsGT)
print "{0:.1f}\t{1:.1f}".format(100*purityClusterMean, 100*puritySpeakerMean)
if PLOT:
plt.title("Cluster purity: {0:.1f}% - Speaker purity: {1:.1f}%".format(100*purityClusterMean, 100*puritySpeakerMean) )
if PLOT:
plt.xlabel("time (seconds)")
#print sRange, silAll
if numOfSpeakers<=0:
plt.subplot(212)
plt.plot(sRange, silAll)
plt.xlabel("number of clusters");
plt.ylabel("average clustering's sillouette");
plt.show()
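# Hedged illustration (not part of the original library): typical call with a
# hypothetical recording and a known number of speakers; the bundled
# "data/knnSpeakerAll" and "data/knnSpeakerFemaleMale" models must be present.
def _demo_speakerDiarization():
    # prints cluster/speaker purity when a matching .segments ground-truth file
    # exists next to the WAV file; the function itself returns nothing here
    speakerDiarization("meeting.wav", numOfSpeakers=4, LDAdim=0, PLOT=False)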
def speakerDiarizationEvaluateScript(folderName, LDAs):
'''
This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth)
ARGUMENTS:
- folderName: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored
- LDAs: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folderName, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wavFile in wavFilesList:
gtFile = wavFile.replace('.wav', '.segments');
if os.path.isfile(gtFile):
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
N.append(len(list(set(segLabels))))
else:
N.append(-1)
for l in LDAs:
print "LDA = {0:d}".format(l)
for i, wavFile in enumerate(wavFilesList):
speakerDiarization(wavFile, N[i], 2.0, 0.2, 0.05, l, PLOT = False)
print
def musicThumbnailing(x, Fs, shortTermSize=1.0, shortTermStep=0.5, thumbnailSize=10.0):
'''
This function detects instances of the most representative part of a music recording, also called "music thumbnails".
A technique similar to the one proposed in [1] is used; however, a wider set of audio features is used instead of chroma features.
In particular the following steps are followed:
- Extract short-term audio features. Typical short-term window size: 1 second
- Compute the self-similarity matrix, i.e. all pairwise similarities between feature vectors
- Apply a diagonal mask as a moving average filter on the values of the self-similarity matrix.
The size of the mask is equal to the desired thumbnail length.
- Find the position of the maximum value of the new (filtered) self-similarity matrix.
The audio segments that correspond to the diagonal around that position are the selected thumbnails
ARGUMENTS:
- x: input signal
- Fs: sampling frequency
- shortTermSize: window size (in seconds)
- shortTermStep: window step (in seconds)
- thumbnailSize: desired thumbnail size (in seconds)
RETURNS:
- A1: beginning of 1st thumbnail (in seconds)
- A2: ending of 1st thumbnail (in seconds)
- B1: beginning of 2nd thumbnail (in seconds)
- B2: ending of 2nd thumbnail (in seconds)
USAGE EXAMPLE:
import audioBasicIO
[Fs, x] = audioBasicIO.readAudioFile(inputFile)
[A1, A2, B1, B2] = musicThumbnailing(x, Fs)
[1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations.
Multimedia, IEEE Transactions on, 7(1), 96-104.
'''
x = audioBasicIO.stereo2mono(x);
# feature extraction:
stFeatures = aF.stFeatureExtraction(x, Fs, Fs*shortTermSize, Fs*shortTermStep)
# self-similarity matrix
S = selfSimilarityMatrix(stFeatures)
# moving filter:
M = int(round(thumbnailSize / shortTermStep))
B = numpy.eye(M,M)
S = scipy.signal.convolve2d(S, B, 'valid')
# post-processing (remove main diagonal elements)
MIN = numpy.min(S)
for i in range(S.shape[0]):
for j in range(S.shape[1]):
if abs(i-j) < 5.0 / shortTermStep or i > j:
S[i,j] = MIN;
# find max position:
maxVal = numpy.max(S)
I = numpy.argmax(S)
[I, J] = numpy.unravel_index(S.argmax(), S.shape)
# expand:
i1 = I; i2 = I
j1 = J; j2 = J
while i2-i1<M:
if S[i1-1, j1-1] > S[i2+1,j2+1]:
i1 -= 1
j1 -= 1
else:
i2 += 1
j2 += 1
return (shortTermStep*i1, shortTermStep*i2, shortTermStep*j1, shortTermStep*j2, S)
|
bossjones/pyAudioAnalysis
|
audioSegmentation.py
|
Python
|
apache-2.0
| 38,084
|
[
"Gaussian"
] |
fa508d556be97dcc197c0c79b2e24ac69b4e37388e294d36580d15783a6419a9
|
"""
Unit tests for enrollment methods in views.py
"""
import ddt
from mock import patch
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from instructor.views.legacy import get_and_clean_student_list, send_mail_to_student
from django.core import mail
USER_COUNT = 4
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestInstructorEnrollsStudent(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check Enrollment/Unenrollment with/without auto-enrollment on activation and with/without email notification
"""
def setUp(self):
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password='test')
self.course = CourseFactory.create()
self.users = [
UserFactory.create(username="student%d" % i, email="student%d@test.com" % i)
for i in xrange(USER_COUNT)
]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
# Empty the test outbox
mail.outbox = []
def test_unenrollment_email_off(self):
"""
Do un-enrollment email off test
"""
course = self.course
# Run the Un-enroll students command
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
response = self.client.post(
url,
{
'action': 'Unenroll multiple students',
'multiple_students': 'student0@test.com student1@test.com'
}
)
# Check the page output
self.assertContains(response, '<td>student0@test.com</td>')
self.assertContains(response, '<td>student1@test.com</td>')
self.assertContains(response, '<td>un-enrolled</td>')
# Check the enrollment table
user = User.objects.get(email='student0@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
user = User.objects.get(email='student1@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_enrollment_new_student_autoenroll_on_email_off(self):
"""
Do auto-enroll on, email off test
"""
course = self.course
# Run the Enroll students command
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student1_1@test.com, student1_2@test.com', 'auto_enroll': 'on'})
# Check the page output
self.assertContains(response, '<td>student1_1@test.com</td>')
self.assertContains(response, '<td>student1_2@test.com</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment on</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
# Check the enrollmentallowed db entries
cea = CourseEnrollmentAllowed.objects.filter(email='student1_1@test.com', course_id=course.id)
self.assertEqual(1, cea[0].auto_enroll)
cea = CourseEnrollmentAllowed.objects.filter(email='student1_2@test.com', course_id=course.id)
self.assertEqual(1, cea[0].auto_enroll)
# Check there is no enrollment db entry other than for the other students
ce = CourseEnrollment.objects.filter(course_id=course.id, is_active=1)
self.assertEqual(4, len(ce))
# Create and activate student accounts with same email
self.student1 = 'student1_1@test.com'
self.password = 'bar'
self.create_account('s1_1', self.student1, self.password)
self.activate_user(self.student1)
self.student2 = 'student1_2@test.com'
self.create_account('s1_2', self.student2, self.password)
self.activate_user(self.student2)
# Check students are enrolled
user = User.objects.get(email='student1_1@test.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, course.id))
user = User.objects.get(email='student1_2@test.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, course.id))
def test_repeat_enroll(self):
"""
Try to enroll an already enrolled student
"""
course = self.course
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student0@test.com', 'auto_enroll': 'on'})
self.assertContains(response, '<td>student0@test.com</td>')
self.assertContains(response, '<td>already enrolled</td>')
def test_enrollment_new_student_autoenroll_off_email_off(self):
"""
Do auto-enroll off, email off test
"""
course = self.course
# Run the Enroll students command
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student2_1@test.com, student2_2@test.com'})
# Check the page output
self.assertContains(response, '<td>student2_1@test.com</td>')
self.assertContains(response, '<td>student2_2@test.com</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment off</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
# Check the enrollmentallowed db entries
cea = CourseEnrollmentAllowed.objects.filter(email='student2_1@test.com', course_id=course.id)
self.assertEqual(0, cea[0].auto_enroll)
cea = CourseEnrollmentAllowed.objects.filter(email='student2_2@test.com', course_id=course.id)
self.assertEqual(0, cea[0].auto_enroll)
# Check there is no enrollment db entry other than for the setup instructor and students
ce = CourseEnrollment.objects.filter(course_id=course.id, is_active=1)
self.assertEqual(4, len(ce))
# Create and activate student accounts with same email
self.student = 'student2_1@test.com'
self.password = 'bar'
self.create_account('s2_1', self.student, self.password)
self.activate_user(self.student)
self.student = 'student2_2@test.com'
self.create_account('s2_2', self.student, self.password)
self.activate_user(self.student)
# Check students are not enrolled
user = User.objects.get(email='student2_1@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
user = User.objects.get(email='student2_2@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
def test_get_and_clean_student_list(self):
"""
Clean user input test
"""
string = "abc@test.com, def@test.com ghi@test.com \n \n jkl@test.com \n mno@test.com "
cleaned_string, cleaned_string_lc = get_and_clean_student_list(string)
self.assertEqual(cleaned_string, ['abc@test.com', 'def@test.com', 'ghi@test.com', 'jkl@test.com', 'mno@test.com'])
@ddt.data('http', 'https')
def test_enrollment_email_on(self, protocol):
"""
Do email on enroll test
"""
course = self.course
# Create activated, but not enrolled, user
UserFactory.create(username="student3_0", email="student3_0@test.com", first_name='Autoenrolled')
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
params = {'action': 'Enroll multiple students', 'multiple_students': 'student3_0@test.com, student3_1@test.com, student3_2@test.com', 'auto_enroll': 'on', 'email_students': 'on'}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
# Check the page output
self.assertContains(response, '<td>student3_0@test.com</td>')
self.assertContains(response, '<td>student3_1@test.com</td>')
self.assertContains(response, '<td>student3_2@test.com</td>')
self.assertContains(response, '<td>added, email sent</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment on, email sent</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in {}'.format(course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Autoenrolled Test\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{}://edx.org/courses/{}/\n\n"
"----\nThis email was automatically sent from edx.org to Autoenrolled Test".format(
course.display_name, protocol, unicode(course.id)
)
)
self.assertEqual(
mail.outbox[1].subject,
'You have been invited to register for {}'.format(course.display_name)
)
self.assertEqual(
mail.outbox[1].body,
"Dear student,\n\nYou have been invited to join "
"{display_name} at edx.org by a member of the "
"course staff.\n\n"
"To finish your registration, please visit "
"{}://edx.org/register and fill out the registration form "
"making sure to use student3_1@test.com in the E-mail field.\n"
"Once you have registered and activated your account, you will "
"see {display_name} listed on your dashboard.\n\n"
"----\nThis email was automatically sent from edx.org to "
"student3_1@test.com".format(protocol, display_name=course.display_name)
)
def test_unenrollment_email_on(self):
"""
Do email on unenroll test
"""
course = self.course
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='student4_0@test.com', course_id=course.id)
cea.save()
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
response = self.client.post(url, {'action': 'Unenroll multiple students', 'multiple_students': 'student4_0@test.com, student2@test.com, student3@test.com', 'email_students': 'on'})
# Check the page output
self.assertContains(response, '<td>student2@test.com</td>')
self.assertContains(response, '<td>student3@test.com</td>')
self.assertContains(response, '<td>un-enrolled, email sent</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {}'.format(course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course "
"{} by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n"
"----\nThis email was automatically sent from edx.org "
"to student4_0@test.com".format(course.display_name)
)
self.assertEqual(
mail.outbox[1].subject,
'You have been un-enrolled from {}'.format(course.display_name)
)
def test_send_mail_to_student(self):
"""
Do invalid mail template test
"""
d = {'message': 'message_type_that_doesn\'t_exist'}
send_mail_ret = send_mail_to_student('student0@test.com', d)
self.assertFalse(send_mail_ret)
@ddt.data('http', 'https')
@patch('instructor.views.legacy.uses_shib')
def test_enrollment_email_on_shib_on(self, protocol, mock_uses_shib):
# Do email on enroll, shibboleth on test
course = self.course
mock_uses_shib.return_value = True
# Create activated, but not enrolled, user
UserFactory.create(username="student5_0", email="student5_0@test.com", first_name="ShibTest", last_name="Enrolled")
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
params = {'action': 'Enroll multiple students', 'multiple_students': 'student5_0@test.com, student5_1@test.com', 'auto_enroll': 'on', 'email_students': 'on'}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
# Check the page output
self.assertContains(response, '<td>student5_0@test.com</td>')
self.assertContains(response, '<td>student5_1@test.com</td>')
self.assertContains(response, '<td>added, email sent</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment on, email sent</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in {}'.format(course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear ShibTest Enrolled\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{}://edx.org/courses/{}/\n\n"
"----\nThis email was automatically sent from edx.org to ShibTest Enrolled".format(
course.display_name, protocol, unicode(course.id)
)
)
self.assertEqual(
mail.outbox[1].subject,
'You have been invited to register for {}'.format(course.display_name)
)
self.assertEqual(
mail.outbox[1].body,
"Dear student,\n\nYou have been invited to join "
"{} at edx.org by a member of the "
"course staff.\n\n"
"To access the course visit {}://edx.org/courses/{}/ and login.\n\n"
"----\nThis email was automatically sent from edx.org to "
"student5_1@test.com".format(
course.display_name, protocol, course.id
)
)
|
wwj718/ANALYSE
|
lms/djangoapps/instructor/tests/test_legacy_enrollment.py
|
Python
|
agpl-3.0
| 15,172
|
[
"VisIt"
] |
f0d9937cd67099a2e07ec2447e40ae4447bb82729d87bed1a9ee4895596936d9
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.pagination_response import PaginationResponse # noqa: F401,E501
from swagger_client.models.visit import Visit # noqa: F401,E501
class GetClientVisitsResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_response': 'PaginationResponse',
'visits': 'list[Visit]'
}
attribute_map = {
'pagination_response': 'PaginationResponse',
'visits': 'Visits'
}
def __init__(self, pagination_response=None, visits=None): # noqa: E501
"""GetClientVisitsResponse - a model defined in Swagger""" # noqa: E501
self._pagination_response = None
self._visits = None
self.discriminator = None
if pagination_response is not None:
self.pagination_response = pagination_response
if visits is not None:
self.visits = visits
@property
def pagination_response(self):
"""Gets the pagination_response of this GetClientVisitsResponse. # noqa: E501
Contains information about the pagination used. # noqa: E501
:return: The pagination_response of this GetClientVisitsResponse. # noqa: E501
:rtype: PaginationResponse
"""
return self._pagination_response
@pagination_response.setter
def pagination_response(self, pagination_response):
"""Sets the pagination_response of this GetClientVisitsResponse.
Contains information about the pagination used. # noqa: E501
:param pagination_response: The pagination_response of this GetClientVisitsResponse. # noqa: E501
:type: PaginationResponse
"""
self._pagination_response = pagination_response
@property
def visits(self):
"""Gets the visits of this GetClientVisitsResponse. # noqa: E501
Contains information about client visits. # noqa: E501
:return: The visits of this GetClientVisitsResponse. # noqa: E501
:rtype: list[Visit]
"""
return self._visits
@visits.setter
def visits(self, visits):
"""Sets the visits of this GetClientVisitsResponse.
Contains information about client visits. # noqa: E501
:param visits: The visits of this GetClientVisitsResponse. # noqa: E501
:type: list[Visit]
"""
self._visits = visits
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetClientVisitsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetClientVisitsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
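# Hedged illustration (not part of the generated model): building an instance by
# hand and serializing it. In normal use the API client deserializes the JSON
# response into this class, so the empty values below are purely for show.
def _demo_get_client_visits_response():
    response = GetClientVisitsResponse(pagination_response=None, visits=[])
    return response.to_dict()   # {'pagination_response': None, 'visits': []}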
|
mindbody/API-Examples
|
SDKs/Python/swagger_client/models/get_client_visits_response.py
|
Python
|
bsd-2-clause
| 4,569
|
[
"VisIt"
] |
7a94f2a9857353bb0b9bb77c9a2dd209d02fed3af7d318acf75d31aac43b6676
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_common_problems')
@click.argument("job_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, job_id):
"""Query inputs and jobs for common potential problems that might have resulted in job failure.
Output:
dict containing potential problems
.. note::
This method is only supported by Galaxy 19.05 or later.
"""
return ctx.gi.jobs.get_common_problems(job_id)
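# Hedged usage note (illustration only, not taken from this file): the command
# is normally reached through the parsec entry point, e.g.
#   parsec jobs get_common_problems <job_id>
# The exact group/command layout is an assumption based on this file's path.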
|
galaxy-iuc/parsec
|
parsec/commands/jobs/get_common_problems.py
|
Python
|
apache-2.0
| 557
|
[
"Galaxy"
] |
436fa497f9e6a97ff1be67273d12fc7887a280a436892e3b936086b18b044d6d
|
#Written by ChocolateBubbles,edited by RobertABT 2014
#I strongly believe the .whatever format should be universal...
import region
from mayavi import mlab
from pylab import imread
from scipy.ndimage import gaussian_filter
from stl_tools import numpy2stl
print 'Format required is as HP40 not hp40'
usrselectedcoords = raw_input("Please enter desired Ordnance Survey map reference to be used: ")
r = region.Region()
r.readgr (usrselectedcoords)
print "Generating STL file from map data..."
print "Close viewer to generate STL"
#print displaying data
s = mlab.surf(r.grid[0:]/10) # divides height data by 10
mlab.show()
filename = str('GENERATED_' + usrselectedcoords +'.stl')
numpy2stl(r.grid/10,(filename), solid=True)
print ('Done! ' + filename + ' is now ready to print!')
|
RobertABT/heightmap
|
editedstlwrite.py
|
Python
|
mit
| 781
|
[
"Mayavi"
] |
4f3316f7d0b615bfc200bcfe6930d5b2e7db6b7774ddad2eda741b363c8f1976
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
BOOLEANS_TRUE = ['y', 'yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['n', 'no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG=True
except ImportError:
HAS_SYSLOG=False
try:
# Python 2
from itertools import imap
except ImportError:
# Python 3
imap = map
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = str
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = str
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = str
try:
dict.iteritems
except AttributeError:
# Python 3
def iteritems(d):
return d.items()
else:
# Python 2
def iteritems(d):
return d.iteritems()
try:
reduce
except NameError:
# Python 3
from functools import reduce
try:
NUMBERTYPES = (int, long, float)
except NameError:
# Python 3
NUMBERTYPES = (int, float)
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
from collections import Sequence, Mapping
except ImportError:
# python2.5
Sequence = (list, tuple)
Mapping = (dict,)
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
from ansible.module_utils.six import PY2, PY3, b, binary_type, text_type, string_types
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
try:
from ast import literal_eval
except ImportError:
# a replacement for literal_eval that works with python 2.4. from:
# https://mail.python.org/pipermail/python-list/2009-September/551880.html
# which is essentially a cut/paste from an earlier (2.6) version of python's
# ast.py
from compiler import ast, parse
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.node
def _convert(node):
if isinstance(node, ast.Const) and isinstance(node.value, (basestring, int, float, long, complex)):
return node.value
elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.nodes))
elif isinstance(node, ast.List):
return list(map(_convert, node.nodes))
elif isinstance(node, ast.Dict):
return dict((_convert(k), _convert(v)) for k, v in node.items())
elif isinstance(node, ast.Name):
if node.name in _safe_names:
return _safe_names[node.name]
elif isinstance(node, ast.UnarySub):
return -_convert(node.expr)
raise ValueError('malformed string')
return _convert(node_or_string)
_literal_eval = literal_eval
# Backwards compat. There were present in basic.py before
from ansible.module_utils.pycompat24 import get_exception
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS=dict(
src = dict(),
mode = dict(type='raw'),
owner = dict(),
group = dict(),
seuser = dict(),
serole = dict(),
selevel = dict(),
setype = dict(),
follow = dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content = dict(no_log=True),
backup = dict(),
force = dict(),
remote_src = dict(), # used by assemble
regexp = dict(), # used by assemble
delimiter = dict(), # used by assemble
directory_mode = dict(), # used by copy
unsafe_writes = dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8) # file mode permission bits
EXEC_PERM_BITS = int('00111', 8) # execute permission bits
DEFAULT_PERM = int('0666', 8) # default file permission bits
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch',)
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
__subclasses__ returns only direct subclasses; this one walks down the whole class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Append all of its subclasses to the visit list and keep a reference to each discovered class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
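# Illustrative example, not part of the original: given
#   class Base(object): pass
#   class Child(Base): pass
#   class GrandChild(Child): pass
# get_all_subclasses(Base) returns [Child, GrandChild], whereas
# Base.__subclasses__() alone would return only [Child].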
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
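# Illustrative sketch, not part of the original: the pattern used by modules
# such as User looks roughly like this (class names here are hypothetical):
#   class User(object):
#       platform = 'Generic'
#       distribution = None
#       def __new__(cls, *args, **kwargs):
#           return load_platform_subclass(User, args, kwargs)
#   class FreeBsdUser(User):
#       platform = 'FreeBSD'
# Instantiating User() on FreeBSD then yields a FreeBsdUser instance.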
def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, unicode):
return d.encode(encoding)
elif isinstance(d, dict):
return dict(imap(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding)))
elif isinstance(d, list):
return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8'):
''' Recursively convert dict keys and values to unicode str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, bytes):
return unicode(d, encoding)
elif isinstance(d, dict):
return dict(imap(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding)))
elif isinstance(d, list):
return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple):
return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else:
return d
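# Illustrative example, not part of the original (Python 2 semantics):
#   json_dict_bytes_to_unicode({b'name': b'caf\xc3\xa9', b'count': 1})
#   -> {u'name': u'caf\xe9', u'count': 1}
# Keys and values are decoded recursively; non-container, non-bytes values
# such as the int pass through unchanged.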
def return_values(obj):
""" Return stringified values from datastructures. For use with removing
sensitive values pre-jsonification."""
if isinstance(obj, basestring):
if obj:
if isinstance(obj, bytes):
yield obj
else:
# Unicode objects should all convert to utf-8
# (still must deal with surrogateescape on python3)
yield obj.encode('utf-8')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield str(obj)
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
if isinstance(value, basestring):
if isinstance(value, unicode):
# This should work everywhere on python2. Need to check
# surrogateescape on python3
bytes_value = value.encode('utf-8')
value_is_unicode = True
else:
bytes_value = value
value_is_unicode = False
if bytes_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
bytes_value = bytes_value.replace(omit_me, '*' * 8)
if value_is_unicode:
value = unicode(bytes_value, 'utf-8', errors='replace')
else:
value = bytes_value
elif isinstance(value, SEQUENCETYPE):
return [remove_values(elem, no_log_strings) for elem in value]
elif isinstance(value, Mapping):
return dict((k, remove_values(v, no_log_strings)) for k, v in value.items())
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = str(value)
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
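# Illustrative example, not part of the original:
#   remove_values({'user': 'bob', 'token': 'hunter2'}, set([b'hunter2']))
#   -> {'user': 'bob', 'token': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'}
# The no_log_strings set normally comes from return_values() applied to the
# no_log parameters, so it contains utf-8 byte strings.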
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
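# Illustrative example, not part of the original:
#   heuristic_log_sanitize('https://user:secret@example.com/path')
#   -> 'https://user:********@example.com/path'
# ssh-style strings such as 'user:secret@host' are masked the same way.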
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most of the time we really want to know "Can the current user execute this
file?" This function does not tell us that; it only reports whether an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
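# Illustrative note, not part of the original: the JSON document read from
# stdin (or from the file named in sys.argv[1] when debugging) has the shape
#   {"ANSIBLE_MODULE_ARGS": {"name": "httpd", "state": "started"}}
# where the inner mapping (the argument names here are just examples) becomes
# the module's params.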
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
else:
raise AnsibleFallbackNotFound
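# Illustrative sketch, not part of the original: an argument_spec entry can
# declare an environment fallback (the names below are hypothetical):
#   argument_spec = dict(
#       api_key=dict(no_log=True, fallback=(env_fallback, ['MY_API_KEY'])),
#   )
# If 'api_key' is not passed, _set_fallbacks() calls env_fallback('MY_API_KEY')
# and uses the environment value when it is set.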
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.no_log = no_log
self.cleanup_files = []
self._debug = False
self._diff = False
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception:
e = get_exception()
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in self.argument_spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = self.params.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
if not self.no_log and self._verbosity >= 3:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
def load_file_common_arguments(self, params):
'''
Many modules deal with files; this encapsulates the common
options that the file module accepts so that they are directly
available to all modules and the code can be shared.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(path)
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(path):
path = os.path.realpath(path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc,out,err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
def _to_filesystem_str(self, path):
'''Returns filesystem path as a str, if it wasn't already.
Used in selinux interactions because it cannot accept unicode
instances, and specifying complex args in a playbook leaves
you with unicode instances. This method currently assumes
that your filesystem encoding is UTF-8.
'''
if isinstance(path, unicode):
path = path.encode("utf-8")
return path
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(self._to_filesystem_str(path), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, filename):
filename = os.path.expanduser(filename)
st = os.lstat(filename)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(self._to_filesystem_str(path),
str(':'.join(new_context)))
except OSError:
e = get_exception()
self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None):
path = os.path.expanduser(path)
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None):
path = os.path.expanduser(path)
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None):
path = os.path.expanduser(path)
path_stat = os.lstat(path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception:
e = get_exception()
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=str(e))
if mode != stat.S_IMODE(mode):
# prevent mode from carrying extra info or being an invalid long number
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = oct(prev_mode)
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = oct(mode)
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(path, mode)
else:
if not os.path.islink(path):
os.chmod(path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(path)
os.chmod(path, mode)
new_underlying_stat = os.stat(path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
except OSError:
e = get_exception()
if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise e
except Exception:
e = get_exception()
self.fail_json(path=path, msg='chmod failed', details=str(e))
path_stat = os.lstat(path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH}
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0}
}
user_perms_to_modes = {
'u': {
'r': stat.S_IRUSR,
'w': stat.S_IWUSR,
'x': stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6 },
'g': {
'r': stat.S_IRGRP,
'w': stat.S_IWGRP,
'x': stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3 },
'o': {
'r': stat.S_IROTH,
'w': stat.S_IWOTH,
'x': stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO }
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def set_file_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
if os.path.exists(path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(path)
kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
# secontext not yet supported
if os.path.islink(path):
kwargs['state'] = 'link'
elif os.path.isdir(path):
kwargs['state'] = 'directory'
elif os.stat(path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception:
e = get_exception()
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} #alias:canon
for (k,v) in self.argument_spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if type(aliases) != list:
raise Exception('internal error: aliases must be a list')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in self.params:
self.params[k] = self.params[alias]
return aliases_results
def _check_arguments(self, check_invalid_arguments):
self._syslog_facility = 'LOG_USER'
for (k,v) in list(self.params.items()):
if k == '_ansible_check_mode' and v:
self.check_mode = True
elif k == '_ansible_no_log':
self.no_log = self.boolean(v)
elif k == '_ansible_debug':
self._debug = self.boolean(v)
elif k == '_ansible_diff':
self._diff = self.boolean(v)
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif k == '_ansible_module_name':
self._name = v
elif check_invalid_arguments and k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
#clean up internal params:
if k.startswith('_ansible_'):
del self.params[k]
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [ self._count_terms([field]) for field in check ]
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self):
''' ensure all required arguments are present '''
missing = []
for (k,v) in self.argument_spec.items():
required = v.get('required', False)
if required and k not in self.params:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
def _check_required_if(self, spec):
''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
for (key, val, requirements) in spec:
missing = []
if key in self.params and self.params[key] == val:
for check in requirements:
count = self._count_terms((check,))
if count == 0:
missing.append(check)
if len(missing) > 0:
self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
def _check_argument_values(self):
''' ensure all arguments have the requested values, and there are no stray arguments '''
for (k,v) in self.argument_spec.items():
choices = v.get('choices',None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE):
if k in self.params:
if self.params[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if self.params[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
FALSEY = frozenset(BOOLEANS_FALSE)
overlap = FALSEY.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(self.params[k],) = overlap
if self.params[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
TRUTHY = frozenset(BOOLEANS_TRUE)
overlap = TRUTHY.intersection(choices)
if len(overlap) == 1:
(self.params[k],) = overlap
if self.params[k] not in choices:
choices_str=",".join([str(c) for c in choices])
msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
self.fail_json(msg=msg)
else:
self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, str, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(str, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (str, None)
return str
if re.search(r'\w\.\w+\(', str):
if include_exceptions:
return (str, None)
return str
# do not allow imports
if re.search(r'import \w+', str):
if include_exceptions:
return (str, None)
return str
try:
result = literal_eval(str)
if include_exceptions:
return (result, None)
else:
return result
except Exception:
e = get_exception()
if include_exceptions:
return (str, e)
return str
def _check_type_str(self, value):
if isinstance(value, basestring):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, basestring):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [ str(value) ]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, basestring):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
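# Illustrative examples, not part of the original:
#   self._check_type_dict('{"a": 1}')     -> {'a': 1}                  (JSON branch)
#   self._check_type_dict('a=1 b="x y"')  -> {'a': '1', 'b': 'x y'}    (key=value branch)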
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, basestring) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, basestring):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, basestring):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (unicode, bytes)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return json.dumps(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_argument_types(self):
''' ensure all arguments have the requested type '''
for (k, v) in self.argument_spec.items():
wanted = v.get('type', None)
if k not in self.params:
continue
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if self.params[k] is None:
continue
wanted = 'str'
value = self.params[k]
if value is None:
continue
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
try:
self.params[k] = type_checker(value)
except (TypeError, ValueError):
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s" % (k, type(value), wanted))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.items():
default = v.get('default', None)
if pre == True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
for k,v in self.argument_spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in self.params and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log(msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, bytes):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (bytes, unicode)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, bytes):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
passwd_keys = ['password', 'login_password']
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
elif param in passwd_keys:
log_args[param] = 'NOT_LOGGING_PASSWORD'
else:
param_val = self.params[param]
if not isinstance(param_val, basestring):
param_val = str(param_val)
elif isinstance(param_val, unicode):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = []
for arg in log_args:
arg_val = log_args[arg]
if not isinstance(arg_val, basestring):
arg_val = str(arg_val)
elif isinstance(arg_val, unicode):
arg_val = arg_val.encode('utf-8')
msg.append('%s=%s' % (arg, arg_val))
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK|os.R_OK):
raise
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK|os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s' % arg)
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None or type(arg) == bool:
return arg
if isinstance(arg, basestring):
arg = arg.lower()
if arg in BOOLEANS_TRUE:
return True
elif arg in BOOLEANS_FALSE:
return False
else:
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
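# Illustrative examples, not part of the original:
#   self.boolean('yes') -> True      self.boolean('off') -> False
#   self.boolean(1)     -> True      self.boolean(None)  -> None
# Anything outside BOOLEANS_TRUE/BOOLEANS_FALSE triggers fail_json.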
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
# Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.add_path_info(kwargs)
if not 'changed' in kwargs:
kwargs['changed'] = False
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
self.add_path_info(kwargs)
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(1)
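# Illustrative sketch, not part of the original: a minimal module built on
# this class typically finishes with one of these calls (names are made up):
#   module = AnsibleModule(argument_spec=dict(name=dict(required=True)))
#   if something_went_wrong:
#       module.fail_json(msg='could not do the thing', rc=1)
#   module.exit_json(changed=True, name=module.params['name'])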
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we cannot check via the argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
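# Illustrative examples, not part of the original (the path is hypothetical):
#   checksum = module.sha1('/tmp/downloaded.tar.gz')
#   digest   = module.digest_from_file('/tmp/downloaded.tar.gz', 'sha256')
# Both return a hex digest string, or None if the file does not exist.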
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
backupdest = ''
if os.path.exists(fn):
# backups named basename-YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
shutil.copy2(fn, backupdest)
except (shutil.Error, IOError):
e = get_exception()
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError:
e = get_exception()
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest; returns true on success.
It uses os.rename because that is an atomic operation; the rest of the function
works around limitations and corner cases and ensures the selinux context is saved if possible'''
context = None
dest_stat = None
if os.path.exists(dest):
try:
dest_stat = os.stat(dest)
os.chmod(src, dest_stat.st_mode & PERM_BITS)
os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(dest)
try:
login_name = os.getlogin()
except OSError:
# not having a tty can cause the above to fail, so
# just get the LOGNAME environment variable instead
login_name = os.environ.get('LOGNAME', None)
# if the original login_name doesn't match the currently
# logged-in user, or if the SUDO_USER environment variable
# is set, then this user has switched their credentials
switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(src, dest)
except (IOError, OSError):
e = get_exception()
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
else:
dest_dir = os.path.dirname(dest)
dest_file = os.path.basename(dest)
try:
tmp_dest = tempfile.NamedTemporaryFile(
prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
except (OSError, IOError):
e = get_exception()
self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (dest_dir, e))
try: # leaves tmp file behind when sudo and not root
if switched_user and os.getuid() != 0:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(src, tmp_dest.name)
else:
shutil.move(src, tmp_dest.name)
if self.selinux_enabled():
self.set_context_if_different(
tmp_dest.name, context, False)
try:
tmp_stat = os.stat(tmp_dest.name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
os.rename(tmp_dest.name, dest)
except (shutil.Error, OSError, IOError):
e = get_exception()
# Sadly there are some situations where we cannot ensure atomicity; only if
# the user insists and we get the appropriate error do we update the file unsafely
if unsafe_writes and e.errno == errno.EBUSY:
# TODO: issue a warning that this is an unsafe operation, but do it because the user insists
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e))
else:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
self.cleanup(tmp_dest.name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(dest, DEFAULT_PERM & ~umask)
if switched_user:
os.chown(dest, os.getuid(), os.getgid())
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kwarg environ_update: dictionary to *update* os.environ with
'''
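        # Illustrative usage sketch (not part of the original module; assumes an
        # AnsibleModule instance named `module`):
        #   rc, out, err = module.run_command(['ls', '-l', '/tmp'], check_rc=True)
        #   rc, out, err = module.run_command("echo $HOME", use_unsafe_shell=True)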
shell = False
if isinstance(args, list):
if use_unsafe_shell:
args = " ".join([pipes.quote(x) for x in args])
shell = True
        elif isinstance(args, string_types) and use_unsafe_shell:
shell = True
elif isinstance(args, string_types):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2 and isinstance(args, text_type):
args = args.encode('utf-8')
elif PY3 and isinstance(args, binary_type):
args = args.decode('utf-8', errors='surrogateescape')
args = shlex.split(args)
else:
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = prompt_regex.encode('utf-8', errors='surrogateescape')
elif PY2:
prompt_regex = prompt_regex.encode('utf-8')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
# expand things like $HOME and ~
if not shell:
args = [ os.path.expandvars(os.path.expanduser(x)) for x in args if x is not None ]
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths \
if not x.endswith('/ansible_modlib.zip') \
and not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = args.encode('utf-8')
else:
if isinstance(args, binary_type):
to_clean_args = args.decode('utf-8', errors='replace')
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in to_clean_args:
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
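        # Illustrative example (not in the original source): an args list such as
        # ['mysqldump', '--password=secret'] would be reported in clean_args as
        # "mysqldump --password=********", so the secret never reaches logs or error output.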
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if cwd and os.path.isdir(cwd):
kwargs['cwd'] = cwd
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
try:
os.chdir(cwd)
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
try:
if self._debug:
if isinstance(args, list):
running = ' '.join(args)
else:
running = args
self.log('Executing: ' + running)
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
if PY3:
errors = 'surrogateescape'
else:
errors = 'strict'
data = data.encode('utf-8', errors=errors)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
if cmd.stdout in rfd:
dat = os.read(cmd.stdout.fileno(), 9000)
stdout += dat
if dat == b(''):
rpipes.remove(cmd.stdout)
if cmd.stderr in rfd:
dat = os.read(cmd.stderr.fileno(), 9000)
stderr += dat
if dat == b(''):
rpipes.remove(cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfd) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
                elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
except:
self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def pretty_bytes(self,size):
ranges = (
(1<<70, 'ZB'),
(1<<60, 'EB'),
(1<<50, 'PB'),
(1<<40, 'TB'),
(1<<30, 'GB'),
(1<<20, 'MB'),
(1<<10, 'KB'),
(1, 'Bytes')
)
for limit, suffix in ranges:
if size >= limit:
break
return '%.2f %s' % (float(size)/ limit, suffix)
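    # Worked example (illustrative, not part of the original module): for size=1536 the
    # loop above stops at the (1<<10, 'KB') entry, so pretty_bytes returns '1.50 KB'.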
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
ramondelafuente/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 86,695
|
[
"VisIt"
] |
d2bd42f1a3ad797dd69cab805e7fb591a628b498a3bb240b36d361308eb5278a
|
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order-sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response.
freqz -- Digital filter frequency response.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
   bessel -- Bessel (no order selection available -- try buttord)
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
freqresp -- frequency response of a continuous-time LTI system.
lti -- Linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
   step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
bode -- Calculate Bode magnitude and phase data.
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
   ss2tf -- state-space to transfer function.
ss2zpk -- state-space to pole-zero-gain.
sos2zpk -- second-order-sections to zero-pole-gain.
sos2tf -- second-order-sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
"""
from __future__ import division, print_function, absolute_import
from . import sigtools
from .waveforms import *
from ._max_len_seq import max_len_seq
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2
from .spline import *
from .bsplines import *
from .cont2discrete import *
from .dltisys import *
from .filter_design import *
from .fir_filter_design import *
from .ltisys import *
from .windows import *
from .signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from .spectral import *
from .wavelets import *
from ._peak_finding import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
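# Illustrative usage sketch (not part of scipy.signal itself): design a 4th-order
# Butterworth low-pass with `butter` and apply it forwards and backwards with
# `filtfilt` for zero-phase filtering.
#   import numpy as np
#   from scipy import signal
#   b, a = signal.butter(4, 0.125)   # cutoff at 0.125 of the Nyquist frequency
#   x = np.random.randn(1000)
#   y = signal.filtfilt(b, a, x)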
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/scipy/signal/__init__.py
|
Python
|
mit
| 11,403
|
[
"Gaussian"
] |
52d4bda46d823d19b2d06fc0ef27547a5633a25eaccafc2dca21e91b36df6e8c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
colors = vtk.vtkNamedColors()
colors.SetColor("BkgColor", [26, 51, 102, 255])
parametricObjects = list()
parametricObjects.append(vtk.vtkParametricBohemianDome())
parametricObjects[-1].SetA(5.0)
parametricObjects[-1].SetB(1.0)
parametricObjects[-1].SetC(2.0)
parametricObjects.append(vtk.vtkParametricBour())
parametricObjects.append(vtk.vtkParametricCatalanMinimal())
parametricObjects.append(vtk.vtkParametricHenneberg())
parametricObjects.append(vtk.vtkParametricKuen())
parametricObjects.append(vtk.vtkParametricPluckerConoid())
parametricObjects.append(vtk.vtkParametricPseudosphere())
parametricFunctionSources = list()
renderers = list()
mappers = list()
actors = list()
textmappers = list()
textactors = list()
# Create one text property for all
textProperty = vtk.vtkTextProperty()
textProperty.SetFontSize(12)
textProperty.SetJustificationToCentered()
backProperty = vtk.vtkProperty()
backProperty.SetColor(colors.GetColor3d("Tomato"))
# Create a parametric function source, renderer, mapper, and actor
# for each object
for i in range(0, len(parametricObjects)):
parametricFunctionSources.append(
vtk.vtkParametricFunctionSource())
parametricFunctionSources[i].SetParametricFunction(parametricObjects[i])
parametricFunctionSources[i].Update()
mappers.append(vtk.vtkPolyDataMapper())
mappers[i].SetInputConnection(
parametricFunctionSources[i].GetOutputPort())
actors.append(vtk.vtkActor())
actors[i].SetMapper(mappers[i])
actors[i].GetProperty().SetColor(colors.GetColor3d("Banana"))
actors[i].GetProperty().SetSpecular(.5)
actors[i].GetProperty().SetSpecularPower(20)
actors[i].SetBackfaceProperty(backProperty)
textmappers.append(vtk.vtkTextMapper())
textmappers[i].SetInput(parametricObjects[i].GetClassName())
textmappers[i].SetTextProperty(textProperty)
textactors.append(vtk.vtkActor2D())
textactors[i].SetMapper(textmappers[i])
textactors[i].SetPosition(100, 16)
renderers.append(vtk.vtkRenderer())
renderers[i].AddActor(actors[i])
renderers[i].AddActor(textactors[i])
renderers[i].SetBackground(colors.GetColor3d("BkgColor"))
# Setup the viewports
xGridDimensions = 4
yGridDimensions = 2
rendererSize = 200
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Parametric Objects Demonstration2")
renderWindow.SetSize(rendererSize * xGridDimensions,
rendererSize * yGridDimensions)
for row in range(0, yGridDimensions):
for col in range(0, xGridDimensions):
index = row * xGridDimensions + col
# (xmin, ymin, xmax, ymax)
viewport = [float(col) / xGridDimensions,
float(yGridDimensions - (row + 1)) / yGridDimensions,
float(col + 1) / xGridDimensions,
float(yGridDimensions - row) / yGridDimensions]
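            # Worked example (illustrative): with the 4x2 grid above, row=0, col=0
            # yields viewport [0.0, 0.5, 0.25, 1.0], i.e. the top-left cell.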
if index > (len(actors) - 1):
# Add a renderer even if there is no actor.
# This makes the render window background all the same color.
ren = vtk.vtkRenderer()
ren.SetBackground(colors.GetColor3d("BkgColor"))
ren.SetViewport(viewport)
renderWindow.AddRenderer(ren)
continue
renderers[index].SetViewport(viewport)
renderers[index].ResetCamera()
renderers[index].GetActiveCamera().Azimuth(30)
renderers[index].GetActiveCamera().Elevation(-30)
renderers[index].GetActiveCamera().Zoom(0.9)
renderers[index].ResetCameraClippingRange()
renderWindow.AddRenderer(renderers[index])
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/Deprecated/GeometricObjects/ParametricObjectsDemo2.py
|
Python
|
apache-2.0
| 4,159
|
[
"VTK"
] |
ffb29f4e4c40ab5dde4b8bef368b86a6cd2d18dd2296212df7fc68630a343ea2
|
#
# Copyright (C) 2001,2002 greg Landrum and Rational Discovery LLC
#
""" Various bits and pieces for calculating descriptors
"""
from __future__ import print_function
from rdkit import RDConfig
class DescriptorCalculator:
""" abstract base class for descriptor calculators
"""
#------------
# methods used to calculate descriptors
#------------
def ShowDescriptors(self):
""" prints out a list of the descriptors
"""
print('#---------')
print('Simple:')
for desc in self.simpleList:
print(desc)
if self.compoundList:
print('#---------')
print('Compound:')
for desc in self.compoundList:
print(desc)
def GetDescriptorNames(self):
""" returns a list of the names of the descriptors this calculator generates
"""
pass
def SaveState(self,fileName):
""" Writes this calculator off to a file so that it can be easily loaded later
**Arguments**
- fileName: the name of the file to be written
"""
from rdkit.six.moves import cPickle
try:
f = open(fileName,'wb+')
except:
print('cannot open output file %s for writing'%(fileName))
return
cPickle.dump(self,f)
f.close()
def CalcDescriptors(self,what,*args,**kwargs):
pass
def __init__(self,*args,**kwargs):
""" Constructor
"""
self.simpleList = None
self.descriptorNames = None
self.compoundList = None
|
strets123/rdkit
|
rdkit/ML/Descriptors/Descriptors.py
|
Python
|
bsd-3-clause
| 1,456
|
[
"RDKit"
] |
203cd10f8ce522e22e0b594bcf835c2b6e8f2a14cf42749ed982bc2c23ce0f21
|
#
# Author: Henrique Pereira Coutada Miranda
# Run a GW calculation using Yambo
#
from __future__ import print_function
from yambopy import *
from qepy import *
yambo = 'yambo'
if not os.path.isdir('database'):
os.mkdir('database')
#check if the nscf cycle is present
if os.path.isdir('nscf/bn.save'):
print('nscf calculation found!')
else:
print('nscf calculation not found!')
exit()
#check if the SAVE folder is present
if not os.path.isdir('database/SAVE'):
print('preparing yambo database')
os.system('cd nscf/bn.save; p2y')
os.system('cd nscf/bn.save; yambo')
os.system('mv nscf/bn.save/SAVE database')
if not os.path.isdir('gw'):
os.mkdir('gw')
os.system('cp -r database/SAVE gw')
#create the yambo input file
y = YamboIn('%s -d -g n -V all'%yambo,folder='gw')
QPKrange,_ = y['QPkrange']
y['QPkrange'] = [QPKrange[:2]+[4,5],'']
y['FFTGvecs'] = [30,'Ry']
y['NGsBlkXd'] = [1,'Ry']
y['BndsRnXd'] = [[1,30],'']
y.write('gw/yambo_run.in')
print('running yambo')
os.system('cd gw; %s -F yambo_run.in -J yambo'%yambo)
|
henriquemiranda/yambopy
|
tutorial/bn/gw_bn.py
|
Python
|
bsd-3-clause
| 1,066
|
[
"Yambo"
] |
69b46858ff37a756ce4738920421ef69b69ee840baed1de6740d4551e6324fc7
|
import math
import pickle
import six
from rdkit import Chem
from numpy.testing import assert_almost_equal
from mordred import Calculator, descriptors
from nose.tools import eq_
from mordred.error import MissingValueBase
def test_pickle_calculator():
orig = Calculator(descriptors)
d0 = orig.descriptors[0]
d1 = orig.descriptors[1]
orig.register(
[
d0 + d1,
d0 - d1,
d0 * d1,
d0 // d1,
d0 % d1,
d0 ** d1,
-d0,
+d1,
abs(d0),
math.trunc(d0),
]
)
if six.PY3:
orig.register([math.ceil(d0), math.floor(d1)])
pickled = pickle.loads(pickle.dumps(orig))
mol = Chem.MolFromSmiles("c1ccccc1C(O)O")
for a, b in zip(orig.descriptors, pickled.descriptors):
yield eq_, a, b
for a, b in zip(orig(mol), pickled(mol)):
if isinstance(a, MissingValueBase):
yield eq_, a.__class__, b.__class__
else:
yield assert_almost_equal, a, b
|
mordred-descriptor/mordred
|
mordred/tests/test_pickle.py
|
Python
|
bsd-3-clause
| 1,052
|
[
"RDKit"
] |
fa46227bb4ae280db17105c69d66cf7f1d935b1d7cec15a989a0c40f8923f777
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import List
from copy import copy
from kivy.logger import Logger
from ORCA.actions.Base import cEventActionBase
from ORCA.vars.Replace import ReplaceVars
from ORCA.utils.TypeConvert import ToDic
from ORCA.Action import cAction
from ORCA.actions.ReturnCode import eReturnCode
import ORCA.Globals as Globals
__all__ = ['cEventActionsNotifications']
class cEventActionsNotifications(cEventActionBase):
""" Actions for managing notification """
def ExecuteActionSendNotification(self,oAction:cAction) -> eReturnCode:
"""
WikiDoc:Doc
WikiDoc:Context:ActionsDetails
WikiDoc:Page:Actions-SendNotification
WikiDoc:TOCTitle:noaction
= sendnotification =
Will send an ORCA internal notification
This action will not modify the error code
<div style="overflow:auto; ">
{| border=1 class="wikitable"
! align="left" | string
! align="left" | notification
! align="left" | actionpars
|-
|sendnotification
|notification string to send
|Optional: Actionpars to be submitted: Format "{'parname1':'varvalue1','parname2':'varvalue2'}"
|}</div>
WikiDoc:End
"""
uNotification:str = ReplaceVars(oAction.dActionPars.get("notification",""))
dActionPars:Dict = ToDic(ReplaceVars(oAction.dActionPars.get("actionpars","{}")))
if not isinstance(dActionPars,dict):
dActionPars = ToDic(oAction.dActionPars.get("actionpars", "{}"))
self.oEventDispatcher.LogAction(uTxt=u'SendNotification',oAction=oAction)
Globals.oNotifications.SendNotification(uNotification=uNotification,**dActionPars)
return eReturnCode.Nothing
def ExecuteActionRegisterNotification(self,oAction:cAction) -> eReturnCode:
"""
WikiDoc:Doc
WikiDoc:Context:ActionsDetails
WikiDoc:Page:Actions-RegisterNotification
WikiDoc:TOCTitle:noaction
= registernotification =
Will register an ORCA internal notification
This action will not modify the error code
<div style="overflow:auto; ">
{| border=1 class="wikitable"
! align="left" | string
! align="left" | notification
! align="left" | notifyaction
! align="left" | filterpagename
|-
|registernotification
|notification string to register
|action to be executed
        |Page filter to which the action should be applied. This can be "ALL" to execute it independent of the pagename (default), "NOPOPUP" to execute it only on non-popup pages, "POPUP" to execute it on all popup pages, or "FIRSTPAGE" to execute it only on the first shown definition page; it is not executed multiple times
|}</div>
All further parameter will passed as actions pars to the action
WikiDoc:End
"""
uPageName:str = ReplaceVars(oAction.dActionPars.get("filterpagename", ""))
self.oEventDispatcher.LogAction(uTxt=u'RegisterNotification',oAction=oAction)
if uPageName == u"ALL":
for uPageKey in Globals.oTheScreen.oScreenPages:
oCopyAction:cAction = copy(oAction)
oCopyAction.dActionPars["filterpagename"] = uPageKey
self.ExecuteActionRegisterNotification_sub(oCopyAction)
elif uPageName == u"NOPOPUP":
for uPageKey in Globals.oTheScreen.oScreenPages:
if not Globals.oTheScreen.oScreenPages[uPageKey].bIsPopUp:
oCopyAction:cAction = copy(oAction)
oCopyAction.dActionPars["filterpagename"] = uPageKey
self.ExecuteActionRegisterNotification_sub(oCopyAction)
elif uPageName == u"POPUP":
for uPageKey in Globals.oTheScreen.oScreenPages:
if Globals.oTheScreen.oScreenPages[uPageKey].bIsPopUp:
oCopyAction:cAction = copy(oAction)
oCopyAction.dActionPars["filterpagename"] = uPageKey
self.ExecuteActionRegisterNotification_sub(oCopyAction)
else:
self.ExecuteActionRegisterNotification_sub(oAction)
return eReturnCode.Nothing
def ExecuteActionRegisterNotification_sub(self,oAction:cAction) -> eReturnCode:
uNotification:str = ReplaceVars(oAction.dActionPars.get("notification",""))
uActionName:str = ReplaceVars(oAction.dActionPars.get("notifyaction",""))
uRegisterOption:str = oAction.dActionPars.get("registeroption","replace")
uFilterPageName:str = oAction.dActionPars.get("filterpagename","")
if uRegisterOption == "append":
Globals.oNotifications.RegisterNotification(uNotification=uNotification, fNotifyFunction=self.NotificationHandler, uDescription="Action:" + uActionName, bQuiet=True, **oAction.dActionPars)
else:
uKey:str = uNotification+"_"+uFilterPageName
iHash:int = Globals.oNotifications.dFilterPageNames.get(uKey,0)
if iHash != 0:
Globals.oNotifications.UnRegisterNotification_ByHash(iHash=iHash)
Globals.oNotifications.RegisterNotification(uNotification=uNotification, fNotifyFunction=self.NotificationHandler, uDescription="Action:" + uActionName, bQuiet=True, **oAction.dActionPars)
return eReturnCode.Nothing
# noinspection PyMethodMayBeStatic
def NotificationHandler(self,**kwargs):
uActionName:str = kwargs["notifyaction"]
uFilterPageName:str = kwargs.get("filterpagename","")
if uFilterPageName == "FIRSTPAGE":
uFilterPageName=Globals.oTheScreen.uFirstPageName
if uFilterPageName == "CURRENT":
uFilterPageName=Globals.oTheScreen.uCurrentPageName
if uActionName and ((uFilterPageName == Globals.oTheScreen.uCurrentPageName) or uFilterPageName==u"" ):
aActions:List[cAction]=Globals.oActions.GetActionList(uActionName = uActionName, bNoCopy = False)
if aActions is not None:
aTmpActions:List[cAction] = []
for oAction in aActions:
Globals.oEvents.CopyActionPars(dTarget=oAction.dActionPars,dSource=kwargs,uReplaceOption="donotcopyempty")
aTmpActions.append(oAction)
Logger.debug(u'Notification: Execute Action for notification: %s, Action: %s' % (kwargs["notification"], uActionName))
Globals.oEvents.ExecuteActions( aActions=aTmpActions,oParentWidget=None)
return True
else:
Logger.warning (u'Notification: Action handler not found: Notification: %s, Action: %s'%(kwargs["notification"],uActionName))
else:
pass
# Logger.debug(u'Notification: Action not for this page:%s, Action: %s ' % (kwargs["notification"], uActionName))
|
thica/ORCA-Remote
|
src/ORCA/actions/Notifications.py
|
Python
|
gpl-3.0
| 8,123
|
[
"ORCA"
] |
7f2647e966210096bd2c6c80c66ef68705aeb5a6c1d9f9264cf7c07202210ae3
|
# Copyright 2013 Julian Metzler
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This file contains functions used by more than one example script.
"""
import json
import os
import tweetpony
def authenticate():
try:
api = tweetpony.API(tweetpony.CONSUMER_KEY, tweetpony.CONSUMER_SECRET)
url = api.get_auth_url()
print "Visit this URL to obtain your verification code: %s" % url
verifier = raw_input("Input your code: ")
api.authenticate(verifier)
except tweetpony.APIError as err:
print "Oh no! You could not be authenticated. Twitter returned error #%i and said: %s" % (err.code, err.description)
else:
auth_data = {'access_token': api.access_token, 'access_token_secret': api.access_token_secret}
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".auth_data.json"), 'w') as f:
f.write(json.dumps(auth_data))
print "Hello, @%s! You have been authenticated. You can now run the other example scripts without having to authenticate every time." % api.user.screen_name
def get_api():
if not os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".auth_data.json")):
authenticate()
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".auth_data.json"), 'r') as f:
auth_data = json.loads(f.read())
try:
api = tweetpony.API(tweetpony.CONSUMER_KEY, tweetpony.CONSUMER_SECRET, auth_data['access_token'], auth_data['access_token_secret'])
except tweetpony.APIError as err:
print "Oh no! You could not be authenticated. Twitter returned error #%i and said: %s" % (err.code, err.description)
else:
return api
return False
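# Typical use from the other example scripts (illustrative note): call get_api() once;
# it re-uses .auth_data.json when present, falls back to the interactive authenticate()
# flow otherwise, and returns a ready-to-use tweetpony.API object (or False on failure).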
|
tytek2012/TweetPony
|
examples/_common.py
|
Python
|
agpl-3.0
| 2,207
|
[
"VisIt"
] |
e6da01aed55b06b103741142ddf8b39f8334955b2dd59674643e7e742978eebf
|
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCRequestHandler
class.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _dispatch(self, method, params):
if method == 'pow':
return apply(pow, params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCRequestHandler:
class MathHandler(SimpleXMLRPCRequestHandler):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return apply(func, params)
def log_message(self, format, *args):
pass # maybe do something fancy like write the messages to a file
def export_add(self, x, y):
return x + y
server = SimpleXMLRPCServer(("localhost", 8000), MathHandler)
server.serve_forever()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
import SocketServer
import BaseHTTPServer
import sys
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple XML-RPC request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
XML-RPC requests are dispatched to the _dispatch method, which
    may be overridden by subclasses. The default implementation attempts
to dispatch XML-RPC calls to the functions or instance installed
in the server.
"""
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the _dispatch method for handling.
"""
try:
# get arguments
data = self.rfile.read(int(self.headers["content-length"]))
params, method = xmlrpclib.loads(data)
# generate response
try:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
except:
# report exception back to server
response = xmlrpclib.dumps(
xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
)
else:
response = xmlrpclib.dumps(response, methodresponse=1)
except:
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1)
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
        its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called by SimpleXMLRPCServer.
"""
def resolve_dotted_attribute(obj, attr):
"""resolve_dotted_attribute(math, 'cos.__doc__') => math.cos.__doc__
Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts
with a '_'.
"""
for i in attr.split('.'):
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
func = None
try:
# check to see if a matching function has been registered
func = self.server.funcs[method]
except KeyError:
if self.server.instance is not None:
# check for a _dispatch method
if hasattr(self.server.instance, '_dispatch'):
return apply(
getattr(self.server.instance,'_dispatch'),
(method, params)
)
else:
# call instance method directly
try:
func = resolve_dotted_attribute(
self.server.instance,
method
)
except AttributeError:
pass
if func is not None:
return apply(func, params)
else:
raise Exception('method "%s" is not supported' % method)
def log_request(self, code='-', size='-'):
"""Selectively log an accepted request."""
if self.server.logRequests:
BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer):
"""Simple XML-RPC server.
Simple XML-RPC server that allows functions and a single instance
to be installed to handle requests.
"""
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=1):
self.funcs = {}
self.logRequests = logRequests
self.instance = None
SocketServer.TCPServer.__init__(self, addr, requestHandler)
def register_instance(self, instance):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
        its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called by SimpleXMLRPCServer.
If a registered function matches a XML-RPC request, then it
will be called instead of the registered instance.
"""
self.instance = instance
def register_function(self, function, name = None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
If an instance is also registered then it will only be called
if a matching function is not found.
"""
if name is None:
name = function.__name__
self.funcs[name] = function
if __name__ == '__main__':
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
|
Yinxiaoli/iros2015_folding
|
src/folding_control/src/xmlrpclib-1.0.1/SimpleXMLRPCServer.py
|
Python
|
mit
| 8,882
|
[
"Brian"
] |
2e58d86bc8df62d9aec66d537e7d8141f9a9134658129e980a2d5f73c8d89574
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '3.0.4'
# For VTR documentation support
sys.path.append(os.path.abspath('./vtr-verilog-to-routing/doc/_exts'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.autodoc',
'sphinx.ext.imgmath', # breathe
'breathe',
'symbolator_sphinx',
'sphinxcontrib.images',
'sphinxcontrib.bibtex',
'sdcdomain',
'archdomain',
'rrgraphdomain',
'recommonmark',
'sphinx_verilog_domain'
]
numfig = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SymbiFlow'
basic_filename = u'symbiflow-docs'
authors = u'SymbiFlow'
copyright = authors + u', 2019'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'symbiflow-arch-defs/third_party/**',
'prjtrellis/third_party/**',
'prjxray/third_party/**',
'prjxray/docs/db_dev_process/fuzzers/index/**',
'prjxray/docs/db_dev_process/minitests/index/**',
'vtr-verilog-to-routing/libs/EXTERNAL/**.md',
'vtr-verilog-to-routing/.github/**',
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_symbiflow_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # Specify a list of menus in the header.
    # Tuple forms:
# ('Name', 'external url or path of pages in the document', boolean, 'icon name')
#
# Third argument:
# True indicates an external link.
# False indicates path of pages in the document.
#
# Fourth argument:
# Specify the icon name.
# For details see link.
# https://material.io/icons/
'header_links' : [
('Home', 'index', False, 'home'),
("Website", "https://symbiflow.github.io", True, 'launch'),
("GitHub", "https://github.com/SymbiFlow", True, 'code')
],
# Customize css colors.
# For details see link.
# https://getmdl.io/customize/index.html
#
# Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue,
# light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo)
'primary_color': 'deep_purple',
# Values: Same as primary_color. (Default: pink)
'accent_color': 'purple',
# Customize layout.
# For details see link.
# https://getmdl.io/components/index.html#layout-section
'fixed_drawer': True,
'fixed_header': True,
'header_waterfall': True,
'header_scroll': False,
# Render title in header.
# Values: True, False (Default: False)
'show_header_title': False,
# Render title in drawer.
# Values: True, False (Default: True)
'show_drawer_title': True,
# Render footer.
# Values: True, False (Default: True)
'show_footer': True
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = None
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = today_fmt
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = basic_filename
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', basic_filename+'.tex', project,
authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', basic_filename, project,
[authors], 1)
]
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
'fontpkg': r'''
\usepackage{charter}
\usepackage[defaultsans]{lato}
\usepackage{inconsolata}
''',
'preamble': r'''
\usepackage{multicol}
''',
'maketitle': r'''
\renewcommand{\releasename}{}
\maketitle
''',
'classoptions':',openany,oneside',
'babel': r'''
\usepackage[english]{babel}
\makeatletter
\@namedef{ver@color.sty}{}
\makeatother
\usepackage{silence}
\WarningFilter{Fancyhdr}{\fancyfoot's `E' option without twoside}
'''
}
rst_prolog = """
.. role:: raw-latex(raw)
:format: latex
.. role:: raw-html(raw)
:format: html
"""
### BREATHE ###
from pathlib import Path
import subprocess
# For building doxygen only on Read the Docs see:
# https://breathe.readthedocs.io/en/latest/readthedocs.html
def doxygen_generate(log_file=None):
doxygen_cmake_build_dir = Path('../doxygen/build')
if not doxygen_cmake_build_dir.exists():
doxygen_cmake_build_dir.mkdir(parents=True, exist_ok=True)
cmd = "cd " + str(doxygen_cmake_build_dir) + "&& cmake .. && make"
else:
cmd = "cd " + str(doxygen_cmake_build_dir) + "&& make"
subprocess.call(cmd, shell=True, stderr=log_file, stdout=log_file)
doxygen_generate()
breathe_projects = {
"prjxray" : "../build/doxygen/prjxray/xml",
}
### SYMBOLATOR ###
symbolator_cmd_args = ['--transparent']
symbolator_output_format = 'svg'
### PRJXRAY FUZZERS AND MINITESTS LINKS
def prjxray_fuzzers_and_minitests_links():
cmd = "cd prjxray/docs && make links"
subprocess.call(cmd, shell=True)
prjxray_fuzzers_and_minitests_links()
|
SymbiFlow/symbiflow-docs
|
source/conf.py
|
Python
|
isc
| 10,732
|
[
"Amber"
] |
fa15b8fc4d5b31c66f3df0d7fd4453eb8eaafded560102f41d489114857285df
|
"""Used to generate polarization functions for atomic basis sets."""
import sys
import math
import traceback
import numpy as np
from ase import Atom, Atoms
from ase.data import molecules as g2
from gpaw import Calculator
from gpaw.kpoint import KPoint
from gpaw.grid_descriptor import GridDescriptor
from gpaw.spline import Spline
from gpaw.localized_functions import create_localized_functions
from gpaw.atom.all_electron import AllElectron
from gpaw.atom.configurations import configurations
from gpaw.testing.amoeba import Amoeba
from gpaw.utilities import devnull
class LinearCombination:
"""Represents a linear combination of 1D functions."""
def __init__(self, coefs, functions):
self.coefs = coefs
self.functions = functions
def __call__(self, r):
"""Evaluate function values at r, which is a numpy array."""
return sum([coef * function(r) for coef, function
in zip(self.coefs, self.functions)])
def renormalize(self, norm):
"""Divide coefficients by norm."""
self.coefs = [coef/norm for coef in self.coefs]
def gramschmidt(gd, psit_k):
"""Orthonormalize functions on grid using the Gram-Schmidt method.
Modifies the elements of psit_k such that each scalar product
< psit_k[i] | psit_k[j] > = delta[ij], where psit_k are on the grid gd"""
for k in range(len(psit_k)):
psi = psit_k[k]
for l in range(k):
phi = psit_k[l]
psit_k[k] = psi - gd.integrate(psi*phi) * phi
psi = psit_k[k]
psit_k[k] = psi / gd.integrate(psi*psi)**.5
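# Usage sketch (illustrative): given a GridDescriptor gd and wave functions psit_k on
# that grid, gramschmidt(gd, psit_k) modifies psit_k in place so that
# gd.integrate(psit_k[i] * psit_k[j]) is approximately delta[ij].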
def rotation_test():
molecule = 'NH3'
a = 7.
rcut = 5.
l = 1
from gpaw.output import plot
rotationvector = np.array([1.0, 1.0, 1.0])
angle_increment = 0.3
system = g2.molecule(molecule)
system.set_cell([a, a, a])
system.center()
calc = Calculator(h=0.27, txt=None)
system.set_calculator(calc)
pog = PolarizationOrbitalGenerator(rcut)
r = np.linspace(0., rcut, 300)
maxvalues = []
import pylab
for i in range(0, int(6.28/angle_increment)):
ascii = plot(system.positions,
system.get_atomic_numbers(),
system.get_cell().diagonal())
print ascii
print 'angle=%.03f' % (angle_increment * i)
energy = system.get_potential_energy()
center = (system.positions / system.get_cell().diagonal())[0]
orbital = pog.generate(l, calc.wfs.gd, calc.kpt_u[0].psit_nG, center)
y = orbital(r)
pylab.plot(r, y, label='%.02f' % (i * angle_increment))
maxvalues.append(max(y))
print 'Quality by orbital', #pretty(pog.optimizer.lastterms)
system.rotate(rotationvector, angle_increment)
system.center()
print max(maxvalues) - min(maxvalues)
pylab.legend()
pylab.show()
def make_dummy_reference(l, function=None, rcut=6., a=12., n=60,
dtype=float):
"""Make a mock reference wave function using a made-up radial function
as reference"""
#print 'Dummy reference: l=%d, rcut=%.02f, alpha=%.02f' % (l, rcut, alpha)
r = np.arange(0., rcut, .01)
if function is None:
function = QuasiGaussian(4., rcut)
norm = get_norm(r, function(r), l)
function.renormalize(norm)
#g = QuasiGaussian(alpha, rcut)
mcount = 2*l + 1
fcount = 1
gd = GridDescriptor((n, n, n), (a, a, a), (False, False, False))
spline = Spline(l, r[-1], function(r), points=50)
center = (.5, .5, .5)
lf = create_localized_functions([spline], gd, center, dtype=dtype)
psit_k = gd.zeros(mcount, dtype=dtype)
coef_xi = np.identity(mcount * fcount, dtype=dtype)
lf.add(psit_k, coef_xi)
return gd, psit_k, center, function
def make_dummy_kpt_reference(l, function, k_c,
rcut=6., a=10., n=60, dtype=complex):
r = np.linspace(0., rcut, 300)
mcount = 2*l + 1
fcount = 1
kcount = 1
gd = GridDescriptor((n, n, n), (a, a, a), (True, True, True))
kpt = KPoint([], gd, 1., 0, 0, 0, k_c, dtype)
spline = Spline(l, r[-1], function(r))
center = (.5, .5, .5)
lf = create_localized_functions([spline], gd, center, dtype=dtype)
lf.set_phase_factors([kpt.k_c])
psit_nG = gd.zeros(mcount, dtype=dtype)
coef_xi = np.identity(mcount * fcount, dtype=dtype)
lf.add(psit_nG, coef_xi, k=0)
kpt.psit_nG = psit_nG
print 'Number of boxes', len(lf.box_b)
print 'Phase kb factors shape', lf.phase_kb.shape
return gd, kpt, center
class CoefficientOptimizer:
"""Class for optimizing Gaussian/reference overlap.
Given matrices of scalar products s and S as returned by overlaps(),
finds the optimal set of coefficients resulting in the largest overlap.
ccount is the number of coefficients.
if fix is True, the first coefficient will be set to 1, and only the
remaining coefficients will be subject to optimization.
"""
def __init__(self, s_kmii, S_kmii, ccount, fix=False):
self.s_kmii = s_kmii
self.S_kmii = S_kmii
self.fix = fix
function = self.evaluate
self.lastterms = None
if fix:
function = self.evaluate_fixed
ccount -= 1
ones = np.ones((ccount, ccount))
diag = np.identity(ccount)
simplex = np.concatenate((np.ones((ccount,1)),
ones + .5 * diag), axis=1)
simplex = np.transpose(simplex)
self.amoeba = Amoeba(function, simplex, tolerance=1e-10)
def find_coefficients(self):
self.amoeba.optimize()
coefficients = self.amoeba.simplex[0]
if self.fix:
coefficients = [1.] + list(coefficients)
return coefficients
def evaluate_fixed(self, coef):
return self.evaluate([1.] + list(coef))
def evaluate(self, coef):
coef = np.array(coef) # complex coefficients?
terms_km = np.zeros(self.S_kmii.shape[0:2])
for i, (s_mii, S_mii) in enumerate(zip(self.s_kmii, self.S_kmii)):
for j, (s_ii, S_ii) in enumerate(zip(s_mii, S_mii)):
numerator = np.vdot(coef, np.dot(S_ii, coef))
denominator = np.vdot(coef, np.dot(s_ii, coef))
terms_km[i, j] = numerator / denominator
#print terms_km
self.lastterms = terms_km
quality = terms_km.sum()
badness = - quality
return badness
def norm_squared(r, f, l):
dr = r[1]
frl = f * r**l
assert abs(r[1] - (r[-1] - r[-2])) < 1e-10 # error if not equidistant
return sum(frl * frl * r * r * dr)
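# The sum above is a rectangle-rule approximation of the radial norm integral
# int [f(r) * r**l]**2 * r**2 dr on an equidistant grid with spacing dr
# (hence the equidistance assertion).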
def get_norm(r, f, l):
return norm_squared(r, f, l) ** .5
class PolarizationOrbitalGenerator:
"""Convenience class which generates polarization functions."""
def __init__(self, rcut, gaussians=None):
self.rcut = rcut
if gaussians is None:
gaussians = 4
if isinstance(gaussians, int):
self.r_alphas = np.linspace(1., .6 * rcut, gaussians + 1)[1:]
else: # assume it is a list of actual characteristic lengths
self.r_alphas = gaussians
self.alphas = 1. / self.r_alphas ** 2
self.s = None
self.S = None
self.optimizer = None
def generate(self, l, gd, kpt_u, spos_ac, dtype=None):
"""Generate polarization orbital."""
rcut = self.rcut
phi_i = [QuasiGaussian(alpha, rcut) for alpha in self.alphas]
r = np.arange(0, rcut, .01)
dr = r[1] # equidistant
integration_multiplier = r ** (2 * (l + 1))
for phi in phi_i:
y = phi(r)
norm = (dr * sum(y * y * integration_multiplier)) ** .5
phi.renormalize(norm)
splines = [Spline(l, r[-1], phi(r)) for phi in phi_i]
if dtype is None:
if np.any([kpt.dtype == complex for kpt in kpt_u]):
dtype = complex
else:
dtype = float
self.s, self.S = overlaps(l, gd, splines, kpt_u, spos_ac)
self.optimizer = CoefficientOptimizer(self.s, self.S, len(phi_i))
coefs = self.optimizer.find_coefficients()
self.quality = - self.optimizer.amoeba.y[0]
self.qualities = self.optimizer.lastterms
orbital = LinearCombination(coefs, phi_i)
orbital.renormalize(get_norm(r, orbital(r), l))
return orbital
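# Illustrative usage sketch (the element and the choice of l are hypothetical,
# but the calls mirror how Reference and generate() fit together below):
#
#     gen = PolarizationOrbitalGenerator(rcut=6.0, gaussians=4)
#     gd, kpt_u, spos_ac = Reference('Si', txt=None).get_reference_data()
#     phi = gen.generate(2, gd, kpt_u, spos_ac)  # e.g. a d-type polarization
#     print 'orbital quality', gen.quality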
def overlaps(l, gd, splines, kpt_u, spos_ac=((.5, .5, .5),),
txt=devnull):
"""Get scalar products of basis functions and references.
Returns the quadruple-indexed matrices s and S, where::
s = < phi | phi > ,
kmij kmi kmj
-----
\ / | ~ \ / ~ | \
S = ) ( phi | psi ) ( psi | phi )
kmij / \ mi | n / \ n | mj /
-----
n
The functions phi are taken from the given splines, whereas psit
must be on the grid represented by the GridDescriptor gd.
    Integrals are evaluated at the relative location(s) given by spos_ac.
"""
if txt == '-':
txt = sys.stdout
# XXX
spos_c = spos_ac[0]
assert len(spos_ac) == 1, str(spos_c)
mcount = 2 * l + 1
fcount = len(splines)
kcount = len(kpt_u)
bcount = kpt_u[0].psit_nG.shape[0]
dtype = kpt_u[0].dtype
print >> txt, 'Creating localized functions'
lf = create_localized_functions(splines, gd, spos_c, dtype=dtype)
k_kc = [kpt.k_c for kpt in kpt_u]
if dtype == complex:
lf.set_phase_factors(k_kc)
# make sanity checks
for kpt in kpt_u:
assert kpt.psit_nG.shape[0] == bcount # same band count for all kpts
assert [kpt.dtype for kpt in kpt_u].count(dtype) == kcount # same dtype
lvalues = [spline.get_angular_momentum_number() for spline in splines]
assert lvalues.count(l) == len(lvalues) # all l must be equal
# First we have to calculate the scalar products between
# pairs of basis functions < phi_kmi | phi_kmj >.
s_kmii = np.zeros((kcount, mcount, fcount, fcount), dtype=dtype)
coef_xi = np.identity(mcount * fcount, dtype=dtype)
#phi_miG = gd.zeros(mcount * fcount, dtype=dtype)
print >> txt, 'Calculating phi-phi products'
for kpt in kpt_u:
gramschmidt(gd, kpt.psit_nG)
normsqr = gd.integrate(np.conjugate(kpt.psit_nG) * kpt.psit_nG)
for n in range(bcount):
kpt.psit_nG[n] /= normsqr[n] ** .5
phi_nG = gd.zeros(mcount * fcount, dtype=dtype)
#for lf in lf_a:
# lf.add(phi_nG, coef_xi, k=kpt.k)
lf.add(phi_nG, coef_xi, k=kpt.k)
phi_overlaps_ii = np.zeros((fcount * mcount,
fcount * mcount), dtype=dtype)
# XXX products for different m unneeded. Bottleneck for large fcount
lf.integrate(phi_nG, phi_overlaps_ii, k=kpt.k)
#for lf in lf_a:
# # every integration will add to the result array
# lf.integrate(phi_nG, phi_overlaps_ii, k=kpt.k)
phi_overlaps_ii.shape = (fcount, mcount, fcount, mcount)
for m in range(mcount):
for i in range(fcount):
for j in range(fcount):
s_kmii[kpt.u, m, i, j] = phi_overlaps_ii[i, m, j, m]
# Now calculate scalar products between basis functions and
# reference functions < phi_kmi | psi_kn >.
overlaps_knmi = np.zeros((kcount, bcount, mcount, fcount), dtype=dtype)
print >> txt, 'Calculating phi-psi products'
for kpt in kpt_u:
        # Note: will be reshaped to (n, i, m) like its name suggests
overlaps_nim = np.zeros((bcount, mcount * fcount), dtype=dtype)
lf.integrate(kpt.psit_nG, overlaps_nim, k=kpt.k)
overlaps_nim.shape = (bcount, fcount, mcount)
overlaps_knmi[kpt.u, :, :, :] = overlaps_nim.swapaxes(1, 2)
print >> txt, 'Aligning matrices'
for k in range(kcount):
f_n = kpt_u[k].f_n
# Apply weights depending on occupation
for n in range(bcount):
# if n == bcount -1:
# w = 1.#f_n[n]
# else:
# w = 0.
overlaps_knmi[k, n, :, :] *= f_n[n]
S_kmii = np.zeros((kcount, mcount, fcount, fcount), dtype=dtype)
conj_overlaps_knmi = overlaps_knmi.conjugate()
for k in range(kcount):
for m in range(mcount):
for i in range(fcount):
for j in range(fcount):
x1 = conj_overlaps_knmi[k, :, m, i]
x2 = overlaps_knmi[k, :, m, j]
S_kmii[k, m, i, j] = (x1 * x2).sum()
assert s_kmii.shape == S_kmii.shape
return s_kmii, S_kmii
def old_overlaps(l, gd, splines, kpt_u, center=(.5, .5, .5)):
"""Get scalar products of basis functions and references.
Returns the triple-indexed matrices s and S, where::
s = < phi | phi > ,
mij mi mj
-----
\ / | ~ \ / ~ | \
S = ) ( phi | psi ) ( psi | phi )
mij / \ mi | k / \ k | mj /
-----
k
The functions phi are taken from the given splines, whereas psit
must be on the grid represented by the GridDescriptor gd.
Integrals are evaluated at the relative location given by center.
"""
raise DeprecationWarning('Use overlaps method')
# This method will be deleted, but presently we want to keep it
# for testing
assert len(kpt_u) == 1, 'This method only works for one k-point'
kpt = kpt_u[0]
psit_k = kpt.psit_nG
mcounts = [spline.get_angular_momentum_number() for spline in splines]
mcount = mcounts[0]
for mcount_i in mcounts:
assert mcount == mcount_i
mcount = 2*l + 1
fcount = len(splines)
phi_lf = create_localized_functions(splines, gd, center)
#print 'loc funcs boxes',len(phi_lf.box_b)
phi_mi = gd.zeros(fcount * mcount) # one set for each phi
coef_xi = np.identity(fcount * mcount)
phi_lf.add(phi_mi, coef_xi)
integrals = np.zeros((fcount * mcount, fcount * mcount))
phi_lf.integrate(phi_mi, integrals)
"""Integral matrix contents (assuming l==1 so there are three m-values)
--phi1-- --phi2-- --phi3-- ...
m1 m2 m3 m1 m2 m3 m1 m2 m3 ...
+---------------------------------
|
| m1| x 0 0 x 0 0
phi1 m2| 0 x 0 0 x 0 ...
| m3| 0 0 x 0 0 x
|
| m1| .
phi2 m2| .
| m3| .
. |
.
We want < phi_mi | phi_mj >, and thus only the diagonal elements of
each submatrix! For l=1 the diagonal elements are all equal, but this
is not true in general"""
# phiproducts: for each m, < phi_mi | phi_mj >
phiproducts_mij = np.zeros((mcount, fcount, fcount))
for i in range(fcount):
for j in range(fcount):
ioff = mcount * i
joff = mcount * j
submatrix_ij = integrals[ioff:ioff + mcount,joff:joff + mcount]
phiproducts_mij[:, i, j] = submatrix_ij.diagonal()
# This should be ones in submatrix diagonals and zero elsewhere
# Now calculate scalar products < phi_mi | psit_k >, where psit_k are
# solutions from reference calculation
psitcount = len(psit_k)
integrals_kim = np.zeros((psitcount, fcount * mcount))
phi_lf.integrate(psit_k, integrals_kim)
# Now psiproducts[k] is a flat list, but we want it to be a matrix with
# dimensions corresponding to f and m.
# The first three elements correspond to the same localized function
# and so on.
# What we want is one whole matrix for each m-value.
psiproducts_mik = np.zeros((mcount, fcount, psitcount))
for m in range(mcount):
for i in range(fcount):
for k in range(psitcount):
w = kpt.f_n[k] * kpt.weight
psiproducts_mik[m, i, k] = w * integrals_kim[k, mcount * i + m]
# s[mij] = < phi_mi | phi_mj >
s = np.array([phiproducts_mij])
# S[mij] = sum over k: < phi_mi | psit_k > < psit_k | phi_mj >
S = np.array([[np.dot(psiproducts_ik, np.transpose(psiproducts_ik))
for psiproducts_ik in psiproducts_mik]])
return s, S
def main():
"""Testing."""
args = sys.argv[1:]
if len(args) == 0:
args = g2.atoms
rcut = 6.
generator = PolarizationOrbitalGenerator(rcut)
import pylab
for symbol in args:
gd, psit_k, center = Reference(symbol, txt=None).get_reference_data()
psitcount = len(psit_k)
gramschmidt(gd, psit_k)
print 'Wave function count', psitcount
psit_k_norms = gd.integrate(psit_k * psit_k)
Z, states = configurations[symbol]
highest_state = states[-1]
n, l_atom, occupation, energy = highest_state
l = l_atom + 1
phi = generator.generate(l, gd, psit_k, center, dtype=float)
r = np.arange(0., rcut, .01)
norm = get_norm(r, phi(r), l)
quality = generator.quality
orbital = 'spdf'[l]
style = ['-.', '--','-',':'][l]
pylab.plot(r, phi(r) * r**l, style,
label='%s [type=%s][q=%.03f]' % (symbol, orbital, quality))
pylab.legend()
pylab.show()
def dummy_kpt_test():
l = 0
rcut = 6.
a = 5.
k_kc = [(.5, .5, .5)]#[(0., 0., 0.), (0.5, 0.5, 0.5)]
kcount = len(k_kc)
dtype = complex
r = np.arange(0., rcut, .01)
spos_ac_ref = [(0., 0., 0.)]#, (.2, .2, .2)]
spos_ac = [(0., 0., 0.), (.2, .2, .2)]
ngaussians = 4
realgaussindex = (ngaussians - 1) / 2
rchars = np.linspace(1., rcut, ngaussians)
splines = []
gaussians = [QuasiGaussian(1./rch**2., rcut) for rch in rchars]
for g in gaussians:
norm = get_norm(r, g(r), l)
g.renormalize(norm)
spline = Spline(l, r[-1], g(r))
splines.append(spline)
refgauss = gaussians[realgaussindex]
refspline = splines[realgaussindex]
gd = GridDescriptor((60, 60, 60), (a,a,a), (1,1,1))
reflf_a = [create_localized_functions([refspline], gd, spos_c, dtype=dtype)
for spos_c in spos_ac_ref]
for reflf in reflf_a:
reflf.set_phase_factors(k_kc)
kpt_u = [KPoint([], gd, 1., 0, k, k, k_c, dtype)
for k, k_c in enumerate(k_kc)]
for kpt in kpt_u:
kpt.allocate(1)
kpt.f_n[0] = 1.
psit_nG = gd.zeros(1, dtype=dtype)
coef_xi = np.identity(1, dtype=dtype)
integral = np.zeros((1, 1), dtype=dtype)
for reflf in reflf_a:
reflf.add(psit_nG, coef_xi, k=kpt.k)
reflf.integrate(psit_nG, integral, k=kpt.k)
kpt.psit_nG = psit_nG
print 'ref norm', integral
print 'calculating overlaps'
os_kmii, oS_kmii = overlaps(l, gd, splines, kpt_u,
spos_ac=spos_ac_ref)
print 'done'
lf_a = [create_localized_functions(splines, gd, spos_c, dtype=dtype)
for spos_c in spos_ac]
for lf in lf_a:
lf.set_phase_factors(k_kc)
s_kii = np.zeros((kcount, ngaussians, ngaussians), dtype=dtype)
S_kii = np.zeros((kcount, ngaussians, ngaussians), dtype=dtype)
for kpt in kpt_u:
k = kpt.k
all_integrals = np.zeros((1, ngaussians), dtype=dtype)
tempgrids = gd.zeros(ngaussians, dtype=dtype)
tempcoef_xi = np.identity(ngaussians, dtype=dtype)
for lf in lf_a:
lf.integrate(kpt.psit_nG, all_integrals, k=k)
lf.add(tempgrids, tempcoef_xi, k=k)
lf.integrate(tempgrids, s_kii[k], k=k)
print 'all <phi|psi>'
print all_integrals
conj_integrals = np.conj(all_integrals)
for i in range(ngaussians):
for j in range(ngaussians):
S_kii[k, i, j] = conj_integrals[0, i] * all_integrals[0, j]
print 'handmade s_kmii'
print s_kii
print 'handmade S_ii'
print S_kii
s_kmii = s_kii.reshape(kcount, 1, ngaussians, ngaussians)
S_kmii = S_kii.reshape(kcount, 1, ngaussians, ngaussians)
print 'matrices from overlap function'
print 's_kmii'
print os_kmii
print 'S_kmii'
print oS_kmii
optimizer = CoefficientOptimizer(s_kmii, S_kmii, ngaussians)
coefficients = optimizer.find_coefficients()
optimizer2 = CoefficientOptimizer(os_kmii, oS_kmii, ngaussians)
coefficients2 = optimizer2.find_coefficients()
print 'coefs'
print coefficients
print 'overlaps() coefs'
print coefficients2
print 'badness'
print optimizer.evaluate(coefficients)
exactsolution = [0.] * ngaussians
exactsolution[realgaussindex] = 1.
print 'badness of exact solution'
print optimizer.evaluate(exactsolution)
orbital = LinearCombination(coefficients, gaussians)
orbital2 = LinearCombination(coefficients2, gaussians)
norm = get_norm(r, orbital(r), l)
norm2 = get_norm(r, orbital2(r), l)
orbital.renormalize(norm)
orbital2.renormalize(norm2)
import pylab
pylab.plot(r, refgauss(r), label='ref')
pylab.plot(r, orbital(r), label='opt')
pylab.plot(r, orbital2(r), '--', label='auto')
pylab.legend()
pylab.show()
def dummy_kpt_test2():
l = 0
rcut = 6.
a = 5.
k_c = (0.5,0.5,0.5)
dtype=complex
r = np.arange(0., rcut, .01)
ngaussians = 8
rchars = np.linspace(1., rcut/2., ngaussians + 1)[1:]
print 'rchars',rchars
rchar_ref = rchars[ngaussians // 2]
print 'rchar ref',rchar_ref
generator = PolarizationOrbitalGenerator(rcut, gaussians=rchars)
# Set up reference system
#alpha_ref = 1 / (rcut/4.) ** 2.
alpha_ref = 1 / rchar_ref ** 2.
ref = QuasiGaussian(alpha_ref, rcut)
norm = get_norm(r, ref(r), l)
ref.renormalize(norm)
gd, kpt, center = make_dummy_kpt_reference(l, ref, k_c,
rcut, a, 40, dtype)
psit_nG = kpt.psit_nG
kpt.f_n = np.array([1.])
print 'Norm sqr', gd.integrate(psit_nG * psit_nG)
#gramschmidt(gd, psit_nG)
print 'Normalized norm sqr', gd.integrate(psit_nG * psit_nG)
quasigaussians = [QuasiGaussian(1/rchar**2., rcut) for rchar in rchars]
y = []
for g in quasigaussians:
norm = get_norm(r, g(r), l)
g.renormalize(norm)
y.append(g(r))
splines = [Spline(l, rcut, f_g) for f_g in y]
s_kmii, S_kmii = overlaps(l, gd, splines, [kpt],
spos_ac=[(.5, .5, .5)])
orbital = generator.generate(l, gd, [kpt], [center], dtype=complex)
print 'coefs'
print np.array(orbital.coefs)
print 'quality'
print generator.qualities
import pylab
pylab.plot(r, ref(r), label='ref')
pylab.plot(r, orbital(r), label='interp')
pylab.legend()
pylab.show()
def dummy_test(lmax=4, rcut=6., lmin=0): # fix args
"""Run a test using a Gaussian reference function."""
dtype = complex
generator = PolarizationOrbitalGenerator(rcut, gaussians=4)
r = np.arange(0., rcut, .01)
alpha_ref = 1. / (rcut/4.) ** 2.
import pylab
for l in range(lmin, lmax + 1):
g = QuasiGaussian(alpha_ref, rcut)
norm = get_norm(r, g(r), l)
g.renormalize(norm)
gd, psit_k, center, ref = make_dummy_reference(l, g, rcut,
dtype=dtype)
k_kc = ((0.,0.,0.), (.5,.5,.5))
kpt_u = [KPoint([], gd, 1., 0, i, i, k_c, dtype=dtype)
for i, k_c in enumerate(k_kc)]
for kpt in kpt_u:
kpt.allocate(1)
kpt.f_n = np.array([2.])
kpt.psit_nG = psit_k
phi = generator.generate(l, gd, kpt_u, [center], dtype=dtype)
pylab.figure(l)
#pylab.plot(r, ref(r)*r**l, 'g', label='ref')
pylab.plot(r, g(r)*r**l, 'b', label='g')
pylab.plot(r, phi(r)*r**l, 'r--', label='pol')
pylab.title('l=%d' % l)
pylab.legend()
pylab.show()
restart_filename = 'ref.%s.gpw'
output_filename = 'ref.%s.txt'
# XXX find a better way to do this
# Default characteristic radii when using only one gaussian
# Systems for non-dimer-forming or troublesome atoms
# 'symbol' : (g2 key, index of desired atom)
special_systems = {'Li' : ('LiF', 0),
'B' : ('BCl3', 0), # No boron dimer
'C' : ('CH4', 0), # No carbon dimer
'N' : ('NH3', 0), # double/triple bonds tend to be bad
'O' : ('H2O', 0), # O2 requires spin polarization
'F' : ('HF', 0),
'Na' : ('NaCl', 0),
'Al' : ('AlCl3', 0),
'Si' : ('SiO', 0), # No reason really.
'P' : ('PH3', 0),
'S' : ('SH2', 0), # S2 requires spin polarization
}
def get_system(symbol):
"""Get default reference formula or atomic index."""
system = special_systems.get(symbol)
if system is None:
system = (symbol + '2', 0)
return system
def get_systems(symbols=None):
if symbols is None:
symbols = g2.atoms
systems = []
for symbol in symbols:
systems.append(get_system(symbol))
return systems
class Reference:
"""Represents a reference function loaded from a file."""
def __init__(self, symbol, filename=None, index=None, txt=None):
if filename is None or filename == '-':
formula, index = get_system(symbol)
filename = restart_filename % formula
calc = Calculator(filename, txt=txt)
atoms = calc.get_atoms()
symbols = atoms.get_chemical_symbols()
if index is None:
index = symbols.index(symbol)
else:
if not symbols[index] == symbol:
raise ValueError(('Atom (%s) at specified index (%d) not of '+
'requested type (%s)') % (symbols[index],
index, symbol))
self.calc = calc
self.filename = filename
self.atoms = atoms
self.symbol = symbol
self.symbols = symbols
self.index = index
self.cell = atoms.get_cell().diagonal() # cubic cell
#self.center = atoms.positions[index]
self.spos_ac = atoms.positions / self.cell
self.gpts = calc.wfs.gd.N_c
if calc.kpt_u[0].psit_nG is None:
raise RuntimeError('No wave functions found in .gpw file')
def get_reference_data(self):
c = self.calc
for kpt in c.kpt_u:
kpt.psit_nG = kpt.psit_nG[:] # load wave functions from the file
# this is an ugly way to do it, by the way, but it probably works
# Right now we only use one nuclear position, but maybe this
# is to be changed in the future
return c.wfs.gd, c.kpt_u, self.spos_ac[self.index:self.index+1]
if __name__ == '__main__':
pass
|
qsnake/gpaw
|
gpaw/atom/polarization.py
|
Python
|
gpl-3.0
| 27,154
|
[
"ASE",
"GPAW",
"Gaussian"
] |
8b1a8699d16820d5091290e5d43dd05b152cfeb1395dd260665d06c10d226ad8
|
""" Contains unit tests of NetworkAgent module
"""
import DIRAC.AccountingSystem.Agent.NetworkAgent as module
import unittest
from mock.mock import MagicMock
__RCSID__ = "$Id$"
MQURI1 = 'mq.dirac.net::Topic::perfsonar.summary.packet-loss-rate'
MQURI2 = 'mq.dirac.net::Queue::perfsonar.summary.histogram-owdelay'
ROOT_PATH = '/Resources/Sites'
SITE1 = 'LCG.Dirac.net'
SITE2 = 'LCG.DiracToRemove.net'
SITE3 = 'VAC.DiracToAdd.org'
SITE1_HOST1 = 'perfsonar.diracold.net'
SITE1_HOST2 = 'perfsonar-to-disable.diracold.net'
SITE2_HOST1 = 'perfsonar.diractoremove.net'
SITE3_HOST1 = 'perfsonar.diractoadd.org'
INITIAL_CONFIG = \
{
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE2, SITE2_HOST1 ): 'True'
}
UPDATED_CONFIG = \
{
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'False',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE3, SITE3_HOST1 ): 'True'
}
class NetworkAgentSuccessTestCase( unittest.TestCase ):
""" Test class to check success scenarios.
"""
def setUp( self ):
# external dependencies
module.datetime = MagicMock()
# internal dependencies
module.S_ERROR = MagicMock()
module.S_OK = MagicMock()
module.gLogger = MagicMock()
module.AgentModule = MagicMock()
module.Network = MagicMock()
module.gConfig = MagicMock()
module.CSAPI = MagicMock()
module.createConsumer = MagicMock()
# prepare test object
module.NetworkAgent.__init__ = MagicMock( return_value = None )
module.NetworkAgent.am_getOption = MagicMock( return_value = 100 ) # buffer timeout
self.agent = module.NetworkAgent()
self.agent.initialize()
def test_updateNameDictionary( self ):
module.gConfig.getConfigurationTree.side_effect = [
{'OK': True, 'Value': INITIAL_CONFIG },
{'OK': True, 'Value': UPDATED_CONFIG },
]
# check if name dictionary is empty
self.assertFalse( self.agent.nameDictionary )
self.agent.updateNameDictionary()
self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE1_HOST2], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE2_HOST1], SITE2 )
self.agent.updateNameDictionary()
self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE3_HOST1], SITE3 )
    # check if hosts were removed from the dictionary
self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE1_HOST2] )
self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE2_HOST1] )
def test_agentExecute( self ):
module.NetworkAgent.am_getOption.return_value = '%s, %s' % ( MQURI1, MQURI2 )
module.gConfig.getConfigurationTree.return_value = {'OK': True, 'Value': INITIAL_CONFIG }
# first run
result = self.agent.execute()
self.assertTrue( result['OK'] )
# second run (simulate new messages)
self.agent.messagesCount += 10
result = self.agent.execute()
self.assertTrue( result['OK'] )
# third run (no new messages - restart consumers)
result = self.agent.execute()
self.assertTrue( result['OK'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( NetworkAgentSuccessTestCase )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
Andrew-McNab-UK/DIRAC
|
AccountingSystem/Agent/test/Test_NetworkAgent.py
|
Python
|
gpl-3.0
| 3,707
|
[
"DIRAC"
] |
ffca2adb8ead450208c51cd96443a6234dd4f0587f19b4ef1d017673d84a1d06
|
import numpy as np
tiny = 1e-10
import logging, sys
import myio
import pickle
'''
Copyright (c) UWM, Ali Dashti 2016 (original matlab version)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Copyright (c) Columbia University Hstau Liao 2018 (python version)
'''
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def hist_match(source, template): # by ali_m
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
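# Illustrative usage sketch (array names are hypothetical): match the
# grey-level histogram of one image to that of another.
#
#     src = np.random.rand(64, 64)
#     ref = np.random.rand(64, 64) ** 2
#     matched = hist_match(src, ref)   # same shape as src, histogram ~ ref's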
def histeq(src,thist): # by Zagurskin; does not work well,
nbr_bins = len(thist)
bins = np.linspace(0, 1, nbr_bins + 1)
# hist, bins = np.histogram(src.flatten(), nbr_bins, normed=True)
hist, bb = np.histogram(src.flatten(), bins) # nbr_bins, normed=True)
cdfsrc = hist.cumsum() # cumulative distribution function
cdfsrc = (nbr_bins * cdfsrc / cdfsrc[-1]).astype(np.uint8) # normalize
cdftint = thist.cumsum() # cumulative distribution function
cdftint = (nbr_bins * cdftint / cdftint[-1]).astype(np.uint8) # normalize
h2 = np.interp(src.flatten(), bins[:-1], cdfsrc)
h3 = np.interp(h2, cdftint, bins[:-1])
return h3
def eul_to_quat(phi, theta, psi, flip=True):
try:
assert (len(phi) > 0 and len(theta) > 0 and len(psi) > 0)
except AssertionError:
_logger.error('subroutine eul_to_quat: some Euler angles are missing')
_logger.exception('subroutine eul_to_quat: some Euler angles are missing')
raise
sys.exit(1)
zros = np.zeros(phi.shape[0])
qz = np.vstack((np.cos(phi / 2), zros, zros, -np.sin(phi / 2)))
qy = np.vstack((np.cos(theta / 2), zros, -np.sin(theta / 2), zros))
sp = np.sin(psi / 2)
if flip:
sp = -sp
qzs = np.vstack((np.cos(psi / 2), zros, zros, sp))
return (qz, qy, qzs)
def augment(q):
try:
assert (q.shape[0] > 3)
except AssertionError:
_logger.error('subroutine augment: q has wrong dimensions')
        _logger.exception('subroutine augment: q has wrong dimensions')
raise
sys.exit(1)
qc = np.vstack((-q[1, :], q[0, :], -q[3, :], q[2, :]))
q = np.hstack((q, qc))
# print q.shape
return q
def useless_loop(sizeToConOrderRatio,tauInDir,xAll,xSelect,psinums,posPaths):
ang_res = 3
for x in xSelect:
gC = xAll[1, x]
prD = xAll[0, x]
psinum2 = psinums[1, x]
psinum1 = psinums[0, x]
string = '{}gC{}_prD{}_tautotEL'.format(tauInDir,gC,prD)
data = myio.fin(string,['tautotAll','listBad'])
tautotAll = data[0]
listBad = data[1]
tau = np.zeros((len(tautotAll[0]),ang_res))
for i in xrange(ang_res):
tau[:, i] = tautotAll[i].flatten()
posPath = posPaths[x]
nS = len(posPath)
#ConOrders[x] = max(5, np.floor(nS / sizeToConOrderRatio))
#taus[x] = tau
#listBads[x] = listBad
return
def make_indeces(inputGCs):
with open(inputGCs, 'rb') as f:
param = pickle.load(f)
f.close()
GCnum = len(param['CGtot'])
prDs = len(param['CGtot'][0])
x1 = np.tile(range(prDs), (1, GCnum))
x2 = np.array([])
for i in range(GCnum):
x2 = np.append(x2, np.tile(i, (1, prDs)))
xAll = np.vstack((x1, x2)).astype(int)
xSelect = range(xAll.shape[1])
return xAll,xSelect
def interv(s):
#return np.arange(-s/2,s/2)
if s%2 == 0:
a = -s/2
b = s/2-1
else:
a = -(s-1)/2
b = (s-1)/2
return np.linspace(a,b,s)
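# Worked examples (illustrative): interv(4) gives [-2., -1., 0., 1.] and
# interv(5) gives [-2., -1., 0., 1., 2.], i.e. a zero-centred grid of s
# points with unit spacing for both even and odd s.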
def filter_fourier(inp, sigma):
# filter Gauss
nPix1 = inp.shape[1]
nPix2 = inp.shape[0]
X, Y = np.meshgrid(interv(nPix1), interv(nPix2))
'''
# nPix1 and nPix2 odd
if nPix1%2 == 0 and nPix2%2 == 0:
ab = np.arange(-(nPix2 - 1) / 2,(nPix2 - 1) / 2)
X, Y = np.meshgrid(interv(nPix1),interv(nPix2))
elif nPix1%2 == 1 && nPix2%2 == 1:
aa = np.aranage(-nPix1 / 2,nPix1 / 2 - 1)
ab = np.aranage(-nPix2 / 2,nPix2 / 2 - 1)
X, Y = np.meshgrid(aa, ab)
# nPix1 and nPix2 even
elif ~mod(nPix1, 2) && mod(nPix2, 2):
X, Y = meshgrid(-nPix1 / 2:nPix1 / 2 - 1, -(nPix2 - 1) / 2:(nPix2 - 1) / 2)
# nPix1 even and nPix2 odd
elif mod(nPix1, 2) && ~mod(nPix2, 2):
X, Y = meshgrid(-(nPix1 - 1) / 2:(nPix1 - 1) / 2, -nPix2 / 2:nPix2 / 2 - 1)
# nPix1 odd and nPix2 even
'''
Rgrid = nPix2 / 2.
Q = (1 / Rgrid) * np.sqrt(X ** 2 + Y ** 2) # Q in units of Nyquist frequency
N = 4
G = np.sqrt(1. / (1 + (Q / sigma) ** (2 * N))) # ButterWorth
# G = exp(-(log(2) / 2) * (Q / sigmaH). ^ 2);Gaussian
# Filter images in Fourier space
G = np.fft.ifftshift(G)
inp = np.real(np.fft.ifft2(G * np.fft.fft2(inp)))
return inp
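# Illustrative usage sketch (values are hypothetical): low-pass filter an
# image with the Butterworth window above, with the cutoff sigma given in
# units of the Nyquist frequency.
#
#     img = np.random.rand(128, 128)
#     smoothed = filter_fourier(img, 0.25)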
|
hstau/covar-cryo
|
covariance/util.py
|
Python
|
gpl-2.0
| 6,217
|
[
"Gaussian"
] |
0639c15853cf72b117f07e34064c8e33f4a598cb8b29eda70964bc6eefa72cf7
|
#!/usr/bin/env python
import sys
from Bio.Blast.NCBIXML import parse
def get_query_ids(blast_file_handle):
parser = parse(blast_file_handle)
records = [i for i in parser]
return [i.query for i in records if len(i.alignments)]
if __name__=='__main__':
for i in sys.argv[1:]:
with open(i) as f:
print i
print '\n'.join(get_query_ids(f))
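# Example invocation (illustrative): for one or more BLAST XML result files,
#     python blastxml_nonzero_queries.py results1.xml results2.xml
# prints each file name followed by the query IDs that produced at least one
# alignment.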
|
Serpens/small_bioinfo
|
blastxml_nonzero_queries.py
|
Python
|
gpl-3.0
| 388
|
[
"BLAST"
] |
84f4985460f2a0be007afae7d1f22a94a3a3eb133efe81a7cbfa8a41d7f7796e
|
#!/usr/bin/env python3
from pysisyphus.calculators.Gaussian16 import Gaussian16
from pysisyphus.helpers import geom_from_library, geom_from_xyz_file, do_final_hessian
# from pysisyphus.optimizers.ANCOptimizer import ANCOptimizer
from pysisyphus.optimizers.NCOptimizer import NCOptimizer
# geom = geom_from_library("azetidine_guess.xyz")
# calc = Gaussian16("HF 4-31G", pal=4)
# geom = geom_from_xyz_file("guess.xyz")
#geom = geom_from_xyz_file("guess2.xyz")
geom = geom_from_xyz_file("guess3.xyz")
calc = Gaussian16("PM6", pal=4)
# from pysisyphus.calculators.XTB import XTB
# calc = XTB(pal=4)
geom.set_calculator(calc)
# opt = ANCOptimizer(geom, dump=True)
opt_kwargs = {
"dump": True,
"hessian_init": "calc",
"freeze_modes": 200,
"max_cycles": 20,
"prefix": "frozen_"
}
opt = NCOptimizer(geom, **opt_kwargs)
opt.run()
do_final_hessian(geom)
# from pysisyphus.Geometry import Geometry
# from pysisyphus.tsoptimizers.RSIRFOptimizer import RSIRFOptimizer
# geom = Geometry(geom.atoms, geom.coords, coord_type="redund", define_prims=((20, 19),))
# geom.set_calculator(calc)
# tsopt = RSIRFOptimizer(geom, hessian_recalc=5, trust_max=0.3)
# tsopt.run()
# do_final_hessian(geom)
# from pysisyphus.irc.EulerPC import EulerPC
# geom = Geometry(geom.atoms, geom.cart_coords)
# geom.set_calculator(calc)
# irc = EulerPC(geom)
# irc.run()
|
eljost/pysisyphus
|
tests_staging/test_ancopt/test_ncopt.py
|
Python
|
gpl-3.0
| 1,357
|
[
"xTB"
] |
e319706c91865eefd4115da49af3ac307b538a0150699a293af7c702f53c158c
|
"""
Tests for serial.py.
"""
import cPickle
from cStringIO import StringIO
import gzip
import shutil
import tempfile
import unittest
from rdkit import Chem
from rdkit.Chem import AllChem
from vs_utils.utils.rdkit_utils import conformers, serial
class TestMolIO(unittest.TestCase):
"""
Base test class for molecule I/O.
"""
def setUp(self):
"""
Write SDF and SMILES molecules to temporary files.
"""
self.temp_dir = tempfile.mkdtemp()
# aspirin
self.aspirin = self._get_mol_from_smiles('CC(=O)OC1=CC=CC=C1C(=O)O',
'aspirin')
self.aspirin_h = Chem.AddHs(self.aspirin)
self.aspirin_sodium = self._get_mol_from_smiles(
'CC(=O)OC1=CC=CC=C1C(=O)[O-].[Na+]', 'aspirin sodium')
# levalbuterol (chiral)
self.levalbuterol = self._get_mol_from_smiles(
'CC(C)(C)NC[C@@H](C1=CC(=C(C=C1)O)CO)O', 'levalbuterol')
self.levalbuterol_hcl = self._get_mol_from_smiles(
'CC(C)(C)NC[C@@H](C1=CC(=C(C=C1)O)CO)O.Cl',
'levalbuterol hydrochloride')
self.ref_mols = [self.aspirin, self.levalbuterol]
self.reader = serial.MolReader(compute_2d_coords=False)
def _get_mol_from_smiles(self, smiles, name=None):
"""
Construct a molecule from a SMILES string.
Molecules loaded from SMILES strings have zero conformers, but
molecules loaded from SDF blocks are treated as 3D and have one
conformer even if coordinates are not set. This method dumps the
molecule to SDF and loads it again to obtain a molecule with one
conformer.
Parameters
----------
smiles : str
SMILES string.
name : str, optional
Molecule name.
"""
mol = Chem.MolFromSmiles(smiles)
if name is not None:
mol.SetProp('_Name', name)
AllChem.Compute2DCoords(mol) # required to preserve stereo
sdf = Chem.MolToMolBlock(mol, includeStereo=True)
mol_with_conf = Chem.MolFromMolBlock(sdf)
return mol_with_conf
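        # Clarifying note (illustrative): e.g. Chem.MolFromSmiles('CCO') has
        # GetNumConformers() == 0, whereas the molecule returned here carries
        # exactly one (2D) conformer, matching molecules parsed from SDF.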
def tearDown(self):
"""
Clean up temporary files.
"""
shutil.rmtree(self.temp_dir)
def test_guess_mol_format(self):
"""
Test MolIO.guess_mol_format.
"""
mol_formats = {
'pkl': ['test.pkl', 'test.pkl.gz', 'test.test.pkl',
'test.test.pkl.gz'],
'sdf': ['test.sdf', 'test.sdf.gz', 'test.test.sdf',
'test.test.sdf.gz'],
'smi': ['test.smi', 'test.smi.gz', 'test.can', 'test.can.gz',
'test.ism', 'test.ism.gz', 'test.test.smi',
'test.test.smi.gz']
}
for mol_format in mol_formats.keys():
for filename in mol_formats[mol_format]:
assert self.reader.guess_mol_format(filename) == mol_format
def test_close_context(self):
"""
Make sure MolIO closes files it opened.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
self.reader.open(filename)
self.reader.close()
assert self.reader.f.closed
# also test the context manager
with self.reader.open(filename):
pass
assert self.reader.f.closed
def test_not_close_other(self):
"""
Make sure MolIO doesn't close files it didn't open.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename) as f:
reader = serial.MolReader(f, mol_format='sdf')
reader.close()
assert not f.closed
# also test the context manager
with open(filename) as g:
with serial.MolReader(g, mol_format='sdf'):
pass
assert not g.closed
class TestMolReader(TestMolIO):
"""
Test MolReader.
"""
def test_read_sdf(self):
"""
Read an SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_sdf_gz(self):
"""
Read a compressed SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_smi(self):
"""
Read a SMILES file.
"""
self.aspirin.RemoveAllConformers() # SMILES are read without confs
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToSmiles(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_smi_title(self):
"""
Read a SMILES file with molecule titles.
"""
self.aspirin.RemoveAllConformers() # SMILES are read without confs
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write('{}\t{}'.format(Chem.MolToSmiles(self.aspirin), 'aspirin'))
self.reader.open(filename)
mols = self.reader.get_mols()
mol = mols.next()
assert mol.ToBinary() == self.aspirin.ToBinary()
assert mol.GetProp('_Name') == self.aspirin.GetProp('_Name')
def test_read_smi_gz(self):
"""
Read a compressed SMILES file.
"""
self.aspirin.RemoveAllConformers() # SMILES are read without confs
_, filename = tempfile.mkstemp(suffix='.smi.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
f.write(Chem.MolToSmiles(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_pickle(self):
"""
Read from a pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl', dir=self.temp_dir)
with open(filename, 'wb') as f:
cPickle.dump([self.aspirin], f, cPickle.HIGHEST_PROTOCOL)
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_pickle_gz(self):
"""
Read from a compressed pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
cPickle.dump([self.aspirin], f, cPickle.HIGHEST_PROTOCOL)
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_file_like(self):
"""
Read from a file-like object.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
with open(filename) as f:
reader = serial.MolReader(f, mol_format='sdf')
mols = reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_compressed_file_like(self):
"""
Read from a file-like object using gzip.
"""
_, filename = tempfile.mkstemp(suffix='.sdf.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
with gzip.open(filename) as f:
reader = serial.MolReader(f, mol_format='sdf')
mols = reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_multiple_sdf(self):
"""
Read a multiple-molecule SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
sdf = Chem.MolToMolBlock(mol)
f.write(sdf)
f.write('$$$$\n') # add molecule delimiter
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for i in xrange(len(mols)):
assert mols[i].ToBinary() == self.ref_mols[i].ToBinary()
def test_read_multiple_smiles(self):
"""
Read a multiple-molecule SMILES file.
"""
ref_mols = []
for mol in self.ref_mols:
mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))
ref_mols.append(mol)
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
smiles = Chem.MolToSmiles(mol)
name = mol.GetProp('_Name')
f.write('{}\t{}\n'.format(smiles, name))
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for i in xrange(len(mols)):
assert mols[i].ToBinary() == ref_mols[i].ToBinary()
def test_read_multiconformer(self):
"""
Read a multiconformer SDF file.
"""
# generate conformers
engine = conformers.ConformerGenerator(max_conformers=3,
pool_multiplier=1)
ref_mol = engine.generate_conformers(self.aspirin)
assert ref_mol.GetNumConformers() > 1
# write to disk
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for conf in ref_mol.GetConformers():
f.write(Chem.MolToMolBlock(ref_mol, confId=conf.GetId()))
f.write('$$$$\n') # add molecule delimiter
# compare
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 1
# FIXME get ToBinary test to work
# assert mols[0].ToBinary() == ref_mol.ToBinary()
assert Chem.MolToMolBlock(mols[0]) == Chem.MolToMolBlock(ref_mol)
def test_read_multiple_multiconformer(self):
"""
Read a multiconformer SDF file containing multiple molecules.
"""
# generate conformers
ref_mols = []
engine = conformers.ConformerGenerator(max_conformers=3,
pool_multiplier=1)
for mol in self.ref_mols:
expanded = engine.generate_conformers(mol)
assert expanded.GetNumConformers() > 1
ref_mols.append(expanded)
# write to disk
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in ref_mols:
for conf in mol.GetConformers():
f.write(Chem.MolToMolBlock(mol, includeStereo=1,
confId=conf.GetId()))
f.write('$$$$\n') # add molecule delimiter
# compare
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for mol, ref_mol in zip(mols, ref_mols):
# FIXME get ToBinary test to work
# assert mol.ToBinary() == ref_mol.ToBinary()
assert Chem.MolToMolBlock(
mol, includeStereo=1) == Chem.MolToMolBlock(ref_mol,
includeStereo=1)
def test_are_same_molecule(self):
"""
Test MolReader.are_same_molecule.
"""
assert self.reader.are_same_molecule(self.aspirin, self.aspirin)
assert not self.reader.are_same_molecule(self.aspirin,
self.levalbuterol)
def test_no_remove_hydrogens(self):
"""
Test hydrogen retention.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin_h))
reader = serial.MolReader(remove_hydrogens=False, remove_salts=False)
reader.open(filename)
mols = reader.get_mols()
# FIXME get ToBinary test to work
# assert mols.next().ToBinary() == self.aspirin_h.ToBinary()
assert Chem.MolToMolBlock(mols.next()) == Chem.MolToMolBlock(
self.aspirin_h)
def test_remove_hydrogens(self):
"""
Test hydrogen removal.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin_h))
reader = serial.MolReader(remove_hydrogens=True)
reader.open(filename)
mols = reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_remove_salts(self):
"""
Test salt removal.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in [self.aspirin_sodium, self.levalbuterol_hcl]:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
ref_mols = [self.aspirin_sodium, self.levalbuterol_hcl]
self.reader = serial.MolReader(remove_salts=True)
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for mol, ref_mol in zip(mols, ref_mols):
assert mol.GetNumAtoms() < ref_mol.GetNumAtoms()
desalted = self.reader.clean_mol(ref_mol)
assert mol.ToBinary() == desalted.ToBinary()
def test_no_remove_salts(self):
"""
Test salt retention.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in [self.aspirin_sodium, self.levalbuterol_hcl]:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
ref_mols = [self.aspirin_sodium, self.levalbuterol_hcl]
self.reader = serial.MolReader(remove_salts=False)
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
self.reader = serial.MolReader(remove_salts=True)
for mol, ref_mol in zip(mols, ref_mols):
assert mol.ToBinary() == ref_mol.ToBinary()
desalted = self.reader.clean_mol(ref_mol)
assert mol.GetNumAtoms() > desalted.GetNumAtoms()
def test_iterator(self):
"""
        Test MolReader.__iter__.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
self.reader.open(filename)
for i, mol in enumerate(self.reader):
assert mol.ToBinary() == self.ref_mols[i].ToBinary()
def test_context_manager(self):
"""
Test using 'with' statement to read molecules.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
with self.reader.open(filename) as reader:
for i, mol in enumerate(reader):
assert mol.ToBinary() == self.ref_mols[i].ToBinary()
def test_skip_failures(self):
"""
Test skip read failures.
"""
smiles = 'CO(C)C'
reader = serial.MolReader(StringIO(smiles), 'smi')
mols = list(reader.get_mols())
assert len(mols) == 0
def test_is_a_salt(self):
"""
Test that a molecule that _is_ a salt is not returned empty.
"""
smiles = 'C(=CC(=O)O)C(=O)O'
reader = serial.MolReader(StringIO(smiles), 'smi', remove_salts=True)
mols = list(reader.get_mols())
assert len(mols) == 1 and mols[0].GetNumAtoms()
def test_read_multiple_pickles(self):
"""
Test reading a file containing multiple pickles. This can occur if
MolWriter.write is called multiple times.
"""
_, filename = tempfile.mkstemp(suffix='.pkl', dir=self.temp_dir)
with serial.MolWriter().open(filename) as writer:
writer.write([self.aspirin])
writer.write([self.levalbuterol])
with self.reader.open(filename) as reader:
mols = list(reader)
assert len(mols) == 2
assert mols[0].ToBinary() == self.aspirin.ToBinary()
assert mols[1].ToBinary() == self.levalbuterol.ToBinary()
class TestMolWriter(TestMolIO):
"""
Test MolWriter.
"""
def setUp(self):
"""
Add writer to inherited setup.
"""
super(TestMolWriter, self).setUp()
self.writer = serial.MolWriter()
self.aspirin_sdf = Chem.MolToMolBlock(self.aspirin)
self.aspirin_smiles = Chem.MolToSmiles(self.aspirin) + '\taspirin'
def test_write_sdf(self):
"""
Write an SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data == self.aspirin_sdf + '$$$$\n'
def test_write_sdf_gz(self):
"""
Write a compressed SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf.gz', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with gzip.open(filename) as f:
data = f.read()
assert data == self.aspirin_sdf + '$$$$\n'
def test_write_smiles(self):
"""
Write a SMILES file.
"""
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
self.aspirin.RemoveAllConformers() # SMILES are read without confs
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data.strip() == self.aspirin_smiles
def test_write_smiles_gz(self):
"""
Write a compressed SMILES file.
"""
_, filename = tempfile.mkstemp(suffix='.smi.gz', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
self.aspirin.RemoveAllConformers() # SMILES are read without confs
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with gzip.open(filename) as f:
data = f.read()
assert data.strip() == self.aspirin_smiles
def test_write_pickle(self):
"""
Write a pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data == cPickle.dumps([self.aspirin],
cPickle.HIGHEST_PROTOCOL)
def test_write_pickle_gz(self):
"""
Write a compressed pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl.gz', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with gzip.open(filename) as f:
data = f.read()
assert data == cPickle.dumps([self.aspirin],
cPickle.HIGHEST_PROTOCOL)
def test_stereo_setup(self):
"""
Make sure chiral reference molecule is correct.
"""
smiles = Chem.MolToSmiles(self.levalbuterol, isomericSmiles=True)
assert '@' in smiles # check for stereochemistry flag
# check that removing stereochemistry changes the molecule
original = self.levalbuterol.ToBinary()
AllChem.RemoveStereochemistry(self.levalbuterol)
assert self.levalbuterol.ToBinary() != original
def test_stereo_sdf(self):
"""
Test stereochemistry preservation when writing to SDF.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
writer = serial.MolWriter(stereo=True)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.levalbuterol.ToBinary()
def test_stereo_smi(self):
"""
Test stereochemistry preservation when writing to SMILES.
"""
# FIXME avoid this and use self.levalbuterol.RemoveAllConformers()
ref_mol = Chem.MolFromSmiles(Chem.MolToSmiles(self.levalbuterol,
isomericSmiles=True))
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
writer = serial.MolWriter(stereo=True)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == ref_mol.ToBinary()
def test_no_stereo_sdf(self):
"""
Test stereochemistry removal when writing to SDF.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
writer = serial.MolWriter(stereo=False)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
mol = mols.next()
# make sure the written molecule differs from the reference
assert mol.ToBinary() != self.levalbuterol.ToBinary()
# check again after removing stereochemistry
AllChem.RemoveStereochemistry(self.levalbuterol)
# FIXME get ToBinary test to work
# assert mol.ToBinary() == self.levalbuterol.ToBinary()
assert Chem.MolToMolBlock(
mol, includeStereo=True) == Chem.MolToMolBlock(
self.levalbuterol, includeStereo=True)
def test_no_stereo_smiles(self):
"""
Test stereochemistry removal when writing to SMILES.
"""
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
writer = serial.MolWriter(stereo=False)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
mol = mols.next()
# make sure the written molecule differs from the reference
assert mol.ToBinary() != self.levalbuterol.ToBinary()
# check again after removing stereochemistry
AllChem.RemoveStereochemistry(self.levalbuterol)
# FIXME get ToBinary test to work
# assert mol.ToBinary() == self.levalbuterol.ToBinary()
assert Chem.MolToSmiles(mol, isomericSmiles=True) == Chem.MolToSmiles(
self.levalbuterol, isomericSmiles=True)
def test_context_manager(self):
"""
Test use of 'with' statement to write molecules.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with self.writer.open(filename) as writer:
writer.write([self.aspirin])
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data == self.aspirin_sdf + '$$$$\n'
|
rbharath/vs-utils
|
vs_utils/utils/rdkit_utils/tests/test_serial.py
|
Python
|
gpl-3.0
| 25,593
|
[
"RDKit"
] |
4bc619b10a7018ef8bed1caf7259cf3dd7778e5eb420e04d4e61da350944cdd9
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012-2015 Google, Inc.
# Copyright (c) 2013 moxian <aleftmail@inbox.ru>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 frost-nzcr4 <frost.nzcr4@jagmort.com>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2015 Harut <yes@harutune.name>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2015 Pavel Roskin <proski@gnu.org>
# Copyright (c) 2015 Mike Frysinger <vapier@gentoo.org>
# Copyright (c) 2015 Mihai Balint <balint.mihai@gmail.com>
# Copyright (c) 2015 Fabio Natali <me@fabionatali.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method are based on the Tab Nanny std module.
"""
from functools import reduce # pylint: disable=redefined-builtin
import keyword
import tokenize
import sys
import six
from six.moves import zip, map, filter # pylint: disable=redefined-builtin
from astroid import nodes
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.utils import WarningScope, OPTION_RGX
_CONTINUATION_BLOCK_OPENERS = ['elif', 'except', 'for', 'if', 'while', 'def', 'class']
_KEYWORD_TOKENS = ['assert', 'del', 'elif', 'except', 'for', 'if', 'in', 'not',
'raise', 'return', 'while', 'yield']
if sys.version_info < (3, 0):
_KEYWORD_TOKENS.append('print')
_SPACED_OPERATORS = ['==', '<', '>', '!=', '<>', '<=', '>=',
'+=', '-=', '*=', '**=', '/=', '//=', '&=', '|=', '^=',
'%=', '>>=', '<<=']
_OPENING_BRACKETS = ['(', '[', '{']
_CLOSING_BRACKETS = [')', ']', '}']
_TAB_LENGTH = 8
_EOL = frozenset([tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT])
_JUNK_TOKENS = (tokenize.COMMENT, tokenize.NL)
# Whitespace checking policy constants
_MUST = 0
_MUST_NOT = 1
_IGNORE = 2
# Whitespace checking config constants
_DICT_SEPARATOR = 'dict-separator'
_TRAILING_COMMA = 'trailing-comma'
_EMPTY_LINE = 'empty-line'
_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR, _EMPTY_LINE]
_DEFAULT_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR]
MSGS = {
'C0301': ('Line too long (%s/%s)',
'line-too-long',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s/%s)', # was W0302
'too-many-lines',
              'Used when a module has too many lines, reducing its readability.'
),
'C0303': ('Trailing whitespace',
'trailing-whitespace',
'Used when there is whitespace between the end of a line and the '
'newline.'),
'C0304': ('Final newline missing',
'missing-final-newline',
'Used when the last line in a file is missing a newline.'),
'C0305': ('Trailing newlines',
'trailing-newlines',
'Used when there are trailing blank lines in a file.'),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'bad-indentation',
'Used when an unexpected number of indentation\'s tabulations or '
'spaces has been found.'),
'C0330': ('Wrong %s indentation%s%s.\n%s%s',
'bad-continuation',
'TODO'),
'W0312': ('Found indentation with %ss instead of %ss',
'mixed-indentation',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'unnecessary-semicolon',
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'C0321': ('More than one statement on a single line',
'multiple-statements',
              'Used when more than one statement is found on the same line.',
{'scope': WarningScope.NODE}),
'C0325' : ('Unnecessary parens after %r keyword',
'superfluous-parens',
'Used when a single item in parentheses follows an if, for, or '
'other keyword.'),
'C0326': ('%s space %s %s %s\n%s',
'bad-whitespace',
('Used when a wrong number of spaces is used around an operator, '
'bracket or block opener.'),
{'old_names': [('C0323', 'no-space-after-operator'),
('C0324', 'no-space-after-comma'),
('C0322', 'no-space-before-operator')]}),
'W0332': ('Use of "l" as long integer identifier',
'lowercase-l-suffix',
'Used when a lower case "l" is used to mark a long integer. You '
              'should use an upper case "L" since the letter "l" looks too much '
'like the digit "1"',
{'maxversion': (3, 0)}),
'C0327': ('Mixed line endings LF and CRLF',
'mixed-line-endings',
'Used when there are mixed (LF and CRLF) newline signs in a file.'),
'C0328': ('Unexpected line ending format. There is \'%s\' while it should be \'%s\'.',
'unexpected-line-ending-format',
              'Used when there is a different newline than expected.'),
}
def _underline_token(token):
length = token[3][1] - token[2][1]
offset = token[2][1]
referenced_line = token[4]
# If the referenced line does not end with a newline char, fix it
if referenced_line[-1] != '\n':
referenced_line += '\n'
return referenced_line + (' ' * offset) + ('^' * length)
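# Illustrative example: for a token spanning columns 4-10 of the line
# 'x = foobar\n', _underline_token returns that line followed by
# '    ^^^^^^', i.e. carets underneath the offending token.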
def _column_distance(token1, token2):
if token1 == token2:
return 0
if token2[3] < token1[3]:
token1, token2 = token2, token1
if token1[3][0] != token2[2][0]:
return None
return token2[2][1] - token1[3][1]
def _last_token_on_line_is(tokens, line_end, token):
return (line_end > 0 and tokens.token(line_end-1) == token or
line_end > 1 and tokens.token(line_end-2) == token
and tokens.type(line_end-1) == tokenize.COMMENT)
def _token_followed_by_eol(tokens, position):
return (tokens.type(position+1) == tokenize.NL or
tokens.type(position+1) == tokenize.COMMENT and
tokens.type(position+2) == tokenize.NL)
def _get_indent_length(line):
"""Return the length of the indentation on the given token's line."""
result = 0
for char in line:
if char == ' ':
result += 1
elif char == '\t':
result += _TAB_LENGTH
else:
break
return result
def _get_indent_hint_line(bar_positions, bad_position):
"""Return a line with |s for each of the positions in the given lists."""
if not bar_positions:
return ('', '')
delta_message = ''
markers = [(pos, '|') for pos in bar_positions]
if len(markers) == 1:
# if we have only one marker we'll provide an extra hint on how to fix
expected_position = markers[0][0]
delta = abs(expected_position - bad_position)
direction = 'add' if expected_position > bad_position else 'remove'
delta_message = _CONTINUATION_HINT_MESSAGE % (
direction, delta, 's' if delta > 1 else '')
markers.append((bad_position, '^'))
markers.sort()
line = [' '] * (markers[-1][0] + 1)
for position, marker in markers:
line[position] = marker
return (''.join(line), delta_message)
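# Illustrative example: _get_indent_hint_line([4], 8) returns the marker line
# '    |   ^' together with the hint ' (remove 4 spaces)'.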
class _ContinuedIndent(object):
__slots__ = ('valid_outdent_offsets',
'valid_continuation_offsets',
'context_type',
'token',
'position')
def __init__(self,
context_type,
token,
position,
valid_outdent_offsets,
valid_continuation_offsets):
self.valid_outdent_offsets = valid_outdent_offsets
self.valid_continuation_offsets = valid_continuation_offsets
self.context_type = context_type
self.position = position
self.token = token
# The contexts for hanging indents.
# A hanging indented dictionary value after :
HANGING_DICT_VALUE = 'dict-value'
# Hanging indentation in an expression.
HANGING = 'hanging'
# Hanging indentation in a block header.
HANGING_BLOCK = 'hanging-block'
# Continued indentation inside an expression.
CONTINUED = 'continued'
# Continued indentation in a block header.
CONTINUED_BLOCK = 'continued-block'
SINGLE_LINE = 'single'
WITH_BODY = 'multi'
_CONTINUATION_MSG_PARTS = {
HANGING_DICT_VALUE: ('hanging', ' in dict value'),
HANGING: ('hanging', ''),
HANGING_BLOCK: ('hanging', ' before block'),
CONTINUED: ('continued', ''),
CONTINUED_BLOCK: ('continued', ' before block'),
}
_CONTINUATION_HINT_MESSAGE = ' (%s %d space%s)' # Ex: (remove 2 spaces)
def _Offsets(*args):
"""Valid indentation offsets for a continued line."""
return dict((a, None) for a in args)
def _BeforeBlockOffsets(single, with_body):
"""Valid alternative indent offsets for continued lines before blocks.
:param int single: Valid offset for statements on a single logical line.
:param int with_body: Valid offset for statements on several lines.
:returns: A dictionary mapping indent offsets to a string representing
        whether the indent is for a line or block.
:rtype: dict
"""
return {single: SINGLE_LINE, with_body: WITH_BODY}
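# Illustrative example: _BeforeBlockOffsets(4, 8) returns {4: 'single', 8: 'multi'},
# i.e. an indent of 4 is valid for a statement on a single logical line and 8
# for a statement spread over several lines.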
class TokenWrapper(object):
"""A wrapper for readable access to token information."""
def __init__(self, tokens):
self._tokens = tokens
def token(self, idx):
return self._tokens[idx][1]
def type(self, idx):
return self._tokens[idx][0]
def start_line(self, idx):
return self._tokens[idx][2][0]
def start_col(self, idx):
return self._tokens[idx][2][1]
def line(self, idx):
return self._tokens[idx][4]
class ContinuedLineState(object):
"""Tracker for continued indentation inside a logical line."""
def __init__(self, tokens, config):
self._line_start = -1
self._cont_stack = []
self._is_block_opener = False
self.retained_warnings = []
self._config = config
self._tokens = TokenWrapper(tokens)
@property
def has_content(self):
return bool(self._cont_stack)
@property
def _block_indent_size(self):
return len(self._config.indent_string.replace('\t', ' ' * _TAB_LENGTH))
@property
def _continuation_size(self):
return self._config.indent_after_paren
def handle_line_start(self, pos):
"""Record the first non-junk token at the start of a line."""
if self._line_start > -1:
return
self._is_block_opener = self._tokens.token(pos) in _CONTINUATION_BLOCK_OPENERS
self._line_start = pos
def next_physical_line(self):
"""Prepares the tracker for a new physical line (NL)."""
self._line_start = -1
self._is_block_opener = False
def next_logical_line(self):
"""Prepares the tracker for a new logical line (NEWLINE).
A new logical line only starts with block indentation.
"""
self.next_physical_line()
self.retained_warnings = []
self._cont_stack = []
def add_block_warning(self, token_position, state, valid_offsets):
self.retained_warnings.append((token_position, state, valid_offsets))
def get_valid_offsets(self, idx):
"""Returns the valid offsets for the token at the given position."""
# The closing brace on a dict or the 'for' in a dict comprehension may
# reset two indent levels because the dict value is ended implicitly
stack_top = -1
if self._tokens.token(idx) in ('}', 'for') and self._cont_stack[-1].token == ':':
stack_top = -2
indent = self._cont_stack[stack_top]
if self._tokens.token(idx) in _CLOSING_BRACKETS:
valid_offsets = indent.valid_outdent_offsets
else:
valid_offsets = indent.valid_continuation_offsets
return indent, valid_offsets.copy()
def _hanging_indent_after_bracket(self, bracket, position):
"""Extracts indentation information for a hanging indent."""
indentation = _get_indent_length(self._tokens.line(position))
if self._is_block_opener and self._continuation_size == self._block_indent_size:
return _ContinuedIndent(
HANGING_BLOCK,
bracket,
position,
_Offsets(indentation + self._continuation_size, indentation),
_BeforeBlockOffsets(indentation + self._continuation_size,
indentation + self._continuation_size * 2))
elif bracket == ':':
# If the dict key was on the same line as the open brace, the new
# correct indent should be relative to the key instead of the
# current indent level
paren_align = self._cont_stack[-1].valid_outdent_offsets
next_align = self._cont_stack[-1].valid_continuation_offsets.copy()
next_align_keys = list(next_align.keys())
next_align[next_align_keys[0] + self._continuation_size] = True
# Note that the continuation of
# d = {
# 'a': 'b'
# 'c'
# }
# is handled by the special-casing for hanging continued string indents.
return _ContinuedIndent(HANGING_DICT_VALUE, bracket, position, paren_align, next_align)
else:
return _ContinuedIndent(
HANGING,
bracket,
position,
_Offsets(indentation, indentation + self._continuation_size),
_Offsets(indentation + self._continuation_size))
def _continuation_inside_bracket(self, bracket, pos):
"""Extracts indentation information for a continued indent."""
indentation = _get_indent_length(self._tokens.line(pos))
token_start = self._tokens.start_col(pos)
next_token_start = self._tokens.start_col(pos + 1)
if self._is_block_opener and next_token_start - indentation == self._block_indent_size:
return _ContinuedIndent(
CONTINUED_BLOCK,
bracket,
pos,
_Offsets(token_start),
_BeforeBlockOffsets(next_token_start, next_token_start + self._continuation_size))
else:
return _ContinuedIndent(
CONTINUED,
bracket,
pos,
_Offsets(token_start),
_Offsets(next_token_start))
def pop_token(self):
self._cont_stack.pop()
def push_token(self, token, position):
"""Pushes a new token for continued indentation on the stack.
Tokens that can modify continued indentation offsets are:
* opening brackets
* 'lambda'
* : inside dictionaries
push_token relies on the caller to filter out those
interesting tokens.
:param int token: The concrete token
:param int position: The position of the token in the stream.
"""
if _token_followed_by_eol(self._tokens, position):
self._cont_stack.append(
self._hanging_indent_after_bracket(token, position))
else:
self._cont_stack.append(
self._continuation_inside_bracket(token, position))
class FormatChecker(BaseTokenChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
"""
__implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 100, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('ignore-long-lines',
{'type': 'regexp', 'metavar': '<regexp>',
'default': r'^\s*(# )?<?https?://\S+>?$',
'help': ('Regexp for a line that is allowed to be longer than '
'the limit.')}),
('single-line-if-stmt',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : ('Allow the body of an if to be on the same '
'line as the test if there is no else.')}),
('no-space-check',
{'default': ','.join(_DEFAULT_NO_SPACE_CHECK_CHOICES),
'metavar': ','.join(_NO_SPACE_CHECK_CHOICES),
'type': 'multiple_choice',
'choices': _NO_SPACE_CHECK_CHOICES,
'help': ('List of optional constructs for which whitespace '
'checking is disabled. '
'`'+ _DICT_SEPARATOR + '` is used to allow tabulation '
'in dicts, etc.: {1 : 1,\\n222: 2}. '
'`'+ _TRAILING_COMMA + '` allows a space between comma '
'and closing bracket: (a, ). '
'`'+ _EMPTY_LINE + '` allows space-only lines.')}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually '
'" " (4 spaces) or "\\t" (1 tab).'}),
('indent-after-paren',
{'type': 'int', 'metavar': '<int>', 'default': 4,
'help': 'Number of spaces of indent required inside a hanging '
' or continued line.'}),
('expected-line-ending-format',
{'type': 'choice', 'metavar': '<empty or LF or CRLF>', 'default': '',
'choices': ['', 'LF', 'CRLF'],
'help': ('Expected format of line ending, '
'e.g. empty (any line ending), LF or CRLF.')}),
)
def __init__(self, linter=None):
BaseTokenChecker.__init__(self, linter)
self._lines = None
self._visited_lines = None
self._bracket_stack = [None]
def _pop_token(self):
self._bracket_stack.pop()
self._current_line.pop_token()
def _push_token(self, token, idx):
self._bracket_stack.append(token)
self._current_line.push_token(token, idx)
def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ';'):
self.add_message('unnecessary-semicolon', line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
def process_module(self, module):
self._keywords_with_parens = set()
if 'print_function' in module.future_imports:
self._keywords_with_parens.add('print')
def _check_keyword_parentheses(self, tokens, start):
"""Check that there are not unnecessary parens after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
"""
# If the next token is not a paren, we're fine.
if self._inside_brackets(':') and tokens[start][1] == 'for':
self._pop_token()
if tokens[start+1][1] != '(':
return
found_and_or = False
depth = 0
keyword_token = tokens[start][1]
line_num = tokens[start][2][0]
for i in range(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token[0] == tokenize.NL:
return
if token[1] == '(':
depth += 1
elif token[1] == ')':
depth -= 1
if depth:
continue
# ')' can't happen after if (foo), since it would be a syntax error.
if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
tokens[i+1][0] in (tokenize.NEWLINE,
tokenize.ENDMARKER,
tokenize.COMMENT)):
# The empty tuple () is always accepted.
if i == start + 2:
return
if keyword_token == 'not':
if not found_and_or:
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token in ('return', 'yield'):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token not in self._keywords_with_parens:
if not (tokens[i+1][1] == 'in' and found_and_or):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
return
elif depth == 1:
# This is a tuple, which is always acceptable.
if token[1] == ',':
return
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
elif token[1] in ('and', 'or'):
found_and_or = True
# A yield inside an expression must always be in parentheses,
# quit early without error.
elif token[1] == 'yield':
return
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
elif token[1] == 'for':
return
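# Illustrative examples of the check above (editor's sketch, not taken from the
# pylint test suite):
#     if (foo):            -> superfluous-parens ('if')
#     return (value)       -> superfluous-parens ('return')
#     if not (a and b):    -> accepted; the parens are needed around the 'and'
#     if (a, b):           -> accepted; a tuple is always allowed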
def _opening_bracket(self, tokens, i):
self._push_token(tokens[i][1], i)
# Special case: ignore slices
if tokens[i][1] == '[' and tokens[i+1][1] == ':':
return
if (i > 0 and (tokens[i-1][0] == tokenize.NAME and
not (keyword.iskeyword(tokens[i-1][1]))
or tokens[i-1][1] in _CLOSING_BRACKETS)):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_IGNORE, _MUST_NOT))
def _closing_bracket(self, tokens, i):
if self._inside_brackets(':'):
self._pop_token()
self._pop_token()
# Special case: ignore slices
if tokens[i-1][1] == ':' and tokens[i][1] == ']':
return
policy_before = _MUST_NOT
if tokens[i][1] in _CLOSING_BRACKETS and tokens[i-1][1] == ',':
if _TRAILING_COMMA in self.config.no_space_check:
policy_before = _IGNORE
self._check_space(tokens, i, (policy_before, _IGNORE))
def _check_equals_spacing(self, tokens, i):
"""Check the spacing of a single equals sign."""
if self._inside_brackets('(') or self._inside_brackets('lambda'):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_MUST, _MUST))
def _open_lambda(self, tokens, i): # pylint:disable=unused-argument
self._push_token('lambda', i)
def _handle_colon(self, tokens, i):
# Special case: ignore slices
if self._inside_brackets('['):
return
if (self._inside_brackets('{') and
_DICT_SEPARATOR in self.config.no_space_check):
policy = (_IGNORE, _IGNORE)
else:
policy = (_MUST_NOT, _MUST)
self._check_space(tokens, i, policy)
if self._inside_brackets('lambda'):
self._pop_token()
elif self._inside_brackets('{'):
self._push_token(':', i)
def _handle_comma(self, tokens, i):
# Only require a following whitespace if this is
# not a hanging comma before a closing bracket.
if tokens[i+1][1] in _CLOSING_BRACKETS:
self._check_space(tokens, i, (_MUST_NOT, _IGNORE))
else:
self._check_space(tokens, i, (_MUST_NOT, _MUST))
if self._inside_brackets(':'):
self._pop_token()
def _check_surrounded_by_space(self, tokens, i):
"""Check that a binary operator is surrounded by exactly one space."""
self._check_space(tokens, i, (_MUST, _MUST))
def _check_space(self, tokens, i, policies):
def _policy_string(policy):
if policy == _MUST:
return 'Exactly one', 'required'
else:
return 'No', 'allowed'
def _name_construct(token):
if token[1] == ',':
return 'comma'
elif token[1] == ':':
return ':'
elif token[1] in '()[]{}':
return 'bracket'
elif token[1] in ('<', '>', '<=', '>=', '!=', '=='):
return 'comparison'
else:
if self._inside_brackets('('):
return 'keyword argument assignment'
else:
return 'assignment'
good_space = [True, True]
token = tokens[i]
pairs = [(tokens[i-1], token), (token, tokens[i+1])]
for other_idx, (policy, token_pair) in enumerate(zip(policies, pairs)):
if token_pair[other_idx][0] in _EOL or policy == _IGNORE:
continue
distance = _column_distance(*token_pair)
if distance is None:
continue
good_space[other_idx] = (
(policy == _MUST and distance == 1) or
(policy == _MUST_NOT and distance == 0))
warnings = []
if not any(good_space) and policies[0] == policies[1]:
warnings.append((policies[0], 'around'))
else:
for ok, policy, position in zip(good_space, policies, ('before', 'after')):
if not ok:
warnings.append((policy, position))
for policy, position in warnings:
construct = _name_construct(token)
count, state = _policy_string(policy)
self.add_message('bad-whitespace', line=token[2][0],
args=(count, state, position, construct,
_underline_token(token)))
def _inside_brackets(self, left):
return self._bracket_stack[-1] == left
def _prepare_token_dispatcher(self):
raw = [
(_KEYWORD_TOKENS,
self._check_keyword_parentheses),
(_OPENING_BRACKETS, self._opening_bracket),
(_CLOSING_BRACKETS, self._closing_bracket),
(['='], self._check_equals_spacing),
(_SPACED_OPERATORS, self._check_surrounded_by_space),
([','], self._handle_comma),
([':'], self._handle_colon),
(['lambda'], self._open_lambda),
]
dispatch = {}
for tokens, handler in raw:
for token in tokens:
dispatch[token] = handler
return dispatch
def process_tokens(self, tokens):
"""process tokens and search for :
_ non strict indentation (i.e. not always using the <indent> parameter as
indent unit)
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
self._bracket_stack = [None]
indents = [0]
check_equal = False
line_num = 0
self._lines = {}
self._visited_lines = {}
token_handlers = self._prepare_token_dispatcher()
self._last_line_ending = None
last_blank_line_num = 0
self._current_line = ContinuedLineState(tokens, self.config)
for idx, (tok_type, token, start, _, line) in enumerate(tokens):
if start[0] != line_num:
line_num = start[0]
# A tokenizer oddity: if an indented line contains a multi-line
# docstring, the line member of the INDENT token does not contain
# the full line; therefore we check the next token on the line.
if tok_type == tokenize.INDENT:
self.new_line(TokenWrapper(tokens), idx-1, idx+1)
else:
self.new_line(TokenWrapper(tokens), idx-1, idx)
if tok_type == tokenize.NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = True
self._process_retained_warnings(TokenWrapper(tokens), idx)
self._current_line.next_logical_line()
self._check_line_ending(token, line_num)
elif tok_type == tokenize.INDENT:
check_equal = False
self.check_indent_level(token, indents[-1]+1, line_num)
indents.append(indents[-1]+1)
elif tok_type == tokenize.DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = True
if len(indents) > 1:
del indents[-1]
elif tok_type == tokenize.NL:
if not line.strip('\r\n'):
last_blank_line_num = line_num
self._check_continued_indentation(TokenWrapper(tokens), idx+1)
self._current_line.next_physical_line()
elif tok_type != tokenize.COMMENT:
self._current_line.handle_line_start(idx)
# This is the first concrete token following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
if check_equal:
check_equal = False
self.check_indent_level(line, indents[-1], line_num)
if tok_type == tokenize.NUMBER and token.endswith('l'):
self.add_message('lowercase-l-suffix', line=line_num)
try:
handler = token_handlers[token]
except KeyError:
pass
else:
handler(tokens, idx)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
# Get the line where the too-many-lines (or its message id)
# was disabled or default to 1.
symbol = self.linter.msgs_store.check_message_id('too-many-lines')
names = (symbol.msgid, 'too-many-lines')
line = next(filter(None,
map(self.linter._pragma_lineno.get, names)), 1)
self.add_message('too-many-lines',
args=(line_num, self.config.max_module_lines),
line=line)
# See if there are any trailing lines. Do not complain about empty
# files like __init__.py markers.
if line_num == last_blank_line_num and line_num > 0:
self.add_message('trailing-newlines', line=line_num)
def _check_line_ending(self, line_ending, line_num):
# check if line endings are mixed
if self._last_line_ending is not None:
if line_ending != self._last_line_ending:
self.add_message('mixed-line-endings', line=line_num)
self._last_line_ending = line_ending
# check if line ending is as expected
expected = self.config.expected_line_ending_format
if expected:
# reduce multiple \n\n\n\n to one \n
line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "")
line_ending = 'LF' if line_ending == '\n' else 'CRLF'
if line_ending != expected:
self.add_message('unexpected-line-ending-format', args=(line_ending, expected),
line=line_num)
def _process_retained_warnings(self, tokens, current_pos):
single_line_block_stmt = not _last_token_on_line_is(tokens, current_pos, ':')
for indent_pos, state, offsets in self._current_line.retained_warnings:
block_type = offsets[tokens.start_col(indent_pos)]
hints = dict((k, v) for k, v in six.iteritems(offsets)
if v != block_type)
if single_line_block_stmt and block_type == WITH_BODY:
self._add_continuation_message(state, hints, tokens, indent_pos)
elif not single_line_block_stmt and block_type == SINGLE_LINE:
self._add_continuation_message(state, hints, tokens, indent_pos)
def _check_continued_indentation(self, tokens, next_idx):
def same_token_around_nl(token_type):
return (tokens.type(next_idx) == token_type and
tokens.type(next_idx-2) == token_type)
# Do not issue any warnings if the next line is empty.
if not self._current_line.has_content or tokens.type(next_idx) == tokenize.NL:
return
state, valid_offsets = self._current_line.get_valid_offsets(next_idx)
# Special handling for hanging comments and strings. If the last line ended
# with a comment (string) and the new line contains only a comment, the line
# may also be indented to the start of the previous token.
if same_token_around_nl(tokenize.COMMENT) or same_token_around_nl(tokenize.STRING):
valid_offsets[tokens.start_col(next_idx-2)] = True
# We can only decide if the indentation of a continued line before opening
# a new block is valid once we know whether the body of the block is on the
# same line as the block opener. Since the token processing is single-pass,
# emitting those warnings is delayed until the block opener is processed.
if (state.context_type in (HANGING_BLOCK, CONTINUED_BLOCK)
and tokens.start_col(next_idx) in valid_offsets):
self._current_line.add_block_warning(next_idx, state, valid_offsets)
elif tokens.start_col(next_idx) not in valid_offsets:
self._add_continuation_message(state, valid_offsets, tokens, next_idx)
def _add_continuation_message(self, state, offsets, tokens, position):
readable_type, readable_position = _CONTINUATION_MSG_PARTS[state.context_type]
hint_line, delta_message = _get_indent_hint_line(offsets, tokens.start_col(position))
self.add_message(
'bad-continuation',
line=tokens.start_line(position),
args=(readable_type, readable_position, delta_message,
tokens.line(position), hint_line))
@check_messages('multiple-statements')
def visit_default(self, node):
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return # XXX block visit of child nodes
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
# The line on which a finally: occurs in a try/finally
# is not directly represented in the AST. We infer it
# by taking the last line of the body and adding 1, which
# should be the line of finally:
if (isinstance(node.parent, nodes.TryFinally)
and node in node.parent.finalbody):
prev_line = node.parent.body[0].tolineno + 1
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in range(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append('')
def _check_multi_statement_line(self, node, line):
"""Check for lines containing multiple statements."""
# Do not warn about multiple nested context managers
# in with statements.
if isinstance(node, nodes.With):
return
# For try... except... finally..., the two nodes
# appear to be on the same line due to how the AST is built.
if (isinstance(node, nodes.TryExcept) and
isinstance(node.parent, nodes.TryFinally)):
return
if (isinstance(node.parent, nodes.If) and not node.parent.orelse
and self.config.single_line_if_stmt):
return
self.add_message('multiple-statements', node=node)
self._visited_lines[line] = 2
def check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
ignore_long_line = self.config.ignore_long_lines
for line in lines.splitlines(True):
if not line.endswith('\n'):
self.add_message('missing-final-newline', line=i)
else:
stripped_line = line.rstrip()
if not stripped_line and _EMPTY_LINE in self.config.no_space_check:
# allow empty lines
pass
elif line[len(stripped_line):] not in ('\n', '\r\n'):
self.add_message('trailing-whitespace', line=i)
# Don't count excess whitespace in the line length.
line = stripped_line
mobj = OPTION_RGX.search(line)
if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
line = line.split('#')[0].rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
self.add_message('line-too-long', line=i, args=(len(line), max_chars))
i += 1
def check_indent_level(self, string, expected, line_num):
"""return the indent level of the string
"""
indent = self.config.indent_string
if indent == '\\t': # \t is not interpreted in the configuration file
indent = '\t'
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ''
while string and string[0] in ' \t':
if string[0] != indent[0]:
if string[0] == '\t':
args = ('tab', 'space')
else:
args = ('space', 'tab')
self.add_message('mixed-indentation', args=args, line=line_num)
return level
suppl += string[0]
string = string[1:]
if level != expected or suppl:
i_type = 'spaces'
if indent[0] == '\t':
i_type = 'tabs'
self.add_message('bad-indentation', line=line_num,
args=(level * unit_size + len(suppl), i_type,
expected * unit_size))
def register(linter):
"""required method to auto register this checker """
linter.register_checker(FormatChecker(linter))
|
axbaretto/beam
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/checkers/format.py
|
Python
|
apache-2.0
| 41,989
|
[
"VisIt"
] |
6d83b11836c3e59d22489a784a04b08d22980fd0337f12732aaff6b9a957b508
|
from numpy import array, arange, frombuffer, load, asarray, random, \
fromstring, expand_dims, unravel_index, prod
try:
buffer
except NameError:
buffer = memoryview
from ..utils import check_spark, check_options
spark = check_spark()
def fromrdd(rdd, nrecords=None, shape=None, index=None, labels=None, dtype=None, ordered=False):
"""
Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .series import Series
from bolt.spark.array import BoltArraySpark
if index is None or dtype is None:
item = rdd.values().first()
if index is None:
index = range(len(item))
if dtype is None:
dtype = item.dtype
if nrecords is None and shape is not None:
nrecords = prod(shape[:-1])
if nrecords is None:
nrecords = rdd.count()
if shape is None:
shape = (nrecords, asarray(index).shape[0])
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=shape, dtype=dtype, split=len(shape)-1, ordered=ordered)
return Series(values, index=index, labels=labels)
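# Usage sketch (editor's addition, assumes an existing SparkContext ``sc``):
# build an RDD of (tuple key, 1d ndarray) pairs and wrap it as a Series.
#
#     rdd = sc.parallelize([((0,), array([1.0, 2.0])), ((1,), array([3.0, 4.0]))])
#     series = fromrdd(rdd, nrecords=2, index=[0, 1], dtype='float64', ordered=True)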
def fromarray(values, index=None, labels=None, npartitions=None, engine=None):
"""
Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
from .series import Series
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Series(values)
values = asarray(values)
if values.ndim < 2:
values = expand_dims(values, 0)
if index is not None and not asarray(index).shape[0] == values.shape[-1]:
raise ValueError('Index length %s not equal to record length %s'
% (asarray(index).shape[0], values.shape[-1]))
if index is None:
index = arange(values.shape[-1])
if spark and isinstance(engine, spark):
axis = tuple(range(values.ndim - 1))
values = bolt.array(values, context=engine, npartitions=npartitions, axis=axis)
values._ordered = True
return Series(values, index=index)
return Series(values, index=index, labels=labels)
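# Example (editor's sketch): a (2, 3, 4) array is treated as 2 x 3 records of
# length 4, so the resulting Series has shape (2, 3, 4) and index (0, 1, 2, 3).
#
#     data = random.randn(2, 3, 4)
#     series = fromarray(data)                # local numpy mode
#     # series = fromarray(data, engine=sc)   # distributed, if ``sc`` is a SparkContext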
def fromlist(items, accessor=None, index=None, labels=None, dtype=None, npartitions=None, engine=None):
"""
Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
if spark and isinstance(engine, spark):
if dtype is None:
dtype = accessor(items[0]).dtype if accessor else items[0].dtype
nrecords = len(items)
keys = map(lambda k: (k, ), range(len(items)))
if not npartitions:
npartitions = engine.defaultParallelism
items = zip(keys, items)
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, index=index, labels=labels, dtype=dtype, ordered=True)
else:
if accessor:
items = [accessor(i) for i in items]
return fromarray(items, index=index, labels=labels)
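# Example (editor's sketch): load records from a list of .npy file paths,
# using the accessor to turn each path into a 1d array.
#
#     paths = ['a.npy', 'b.npy', 'c.npy']
#     series = fromlist(paths, accessor=lambda p: load(p))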
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
"""
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
from lightning.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
if spark and isinstance(engine, spark):
def parse(line, skip):
vec = [float(x) for x in line.split(' ')]
return array(vec[skip:], dtype=dtype)
lines = engine.textFile(path, npartitions)
data = lines.map(lambda x: parse(x, skip))
def switch(record):
ary, idx = record
return (idx,), ary
rdd = data.zipWithIndex().map(switch)
return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for kv in data:
for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
values.append(fromstring(line, sep=' '))
values = asarray(values)
if skip > 0:
values = values[:, skip:]
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
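# Example (editor's sketch): for text rows like '7 0.5 0.7 0.9' where the first
# item is an id to discard, skip=1 drops it and the remaining values form each
# record. ``sc`` is assumed to be a SparkContext; omit engine for local mode.
#
#     series = fromtext('data/series', ext='txt', skip=1, engine=sc)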
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, optional, default = None
Numerical type to use for the data; if not provided it is read from the conf file.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from lightning.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
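# Example (editor's sketch): with a conf.json such as
# {"shape": [100, 10], "dtype": "float64"} stored alongside the binary files,
# shape and dtype can be omitted here and are read from the conf file.
#
#     series = frombinary('data/series-bin', ext='bin')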
def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
"""
Collects parameters to use for binary series loading.
"""
import json
from lightning.readers import get_file_reader, FileNotFoundError
reader = get_file_reader(path)(credentials=credentials)
try:
buf = reader.read(path, filename=conf)
params = json.loads(str(buf.decode('utf-8')))
except FileNotFoundError:
params = {}
if dtype:
params['dtype'] = dtype
if shape:
params['shape'] = shape
if 'dtype' not in params.keys():
raise ValueError('dtype not specified either in conf.json or as argument')
if 'shape' not in params.keys():
raise ValueError('shape not specified either in conf.json or as argument')
return params['shape'], params['dtype']
def fromrandom(shape=(100, 10), npartitions=1, seed=42, engine=None):
"""
Generate random gaussian series data.
Parameters
----------
shape : tuple, optional, default = (100,10)
Dimensions of data.
npartitions : int, optional, default = 1
Number of partitions with which to distribute data.
seed : int, optional, default = 42
Randomization seed.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
seed = hash(seed)
def generate(v):
random.seed(seed + v)
return random.randn(shape[1])
return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine)
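# Example (editor's sketch): 100 records of 10 gaussian samples each; results
# are reproducible because the accessor reseeds numpy per record from the seed.
#
#     series = fromrandom(shape=(100, 10), seed=42)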
def fromexample(name=None, engine=None):
"""
Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
import os
import tempfile
import shutil
from boto.s3.connection import S3Connection
datasets = ['iris', 'mouse', 'fish']
if name is None:
print('Available example series datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
d = tempfile.mkdtemp()
try:
os.mkdir(os.path.join(d, 'series'))
os.mkdir(os.path.join(d, 'series', name))
conn = S3Connection(anon=True)
bucket = conn.get_bucket('lightning-sample-data')
for key in bucket.list(os.path.join('series', name) + '/'):
if not key.name.endswith('/'):
key.get_contents_to_filename(os.path.join(d, key.name))
data = frombinary(os.path.join(d, 'series', name), engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
finally:
shutil.rmtree(d)
return data
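# Usage sketch (editor's addition, requires an internet connection):
#
#     fromexample()              # prints the available dataset names
#     data = fromexample('iris')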
|
alexandonian/lightning
|
lightning/series/readers.py
|
Python
|
apache-2.0
| 14,558
|
[
"Gaussian"
] |
d5fd2e658de86859bad38d0c004d3fa9a11dfa5ecc8779207d0e2be94e6285b9
|
# -*- coding: utf-8 -*-
"""Utilities for the PyBEL database manager."""
from typing import Dict, Mapping, Optional, Tuple, Union
from ..utils import parse_datetime
def extract_shared_required(config, definition_header: str = "Namespace"):
"""Get the required annotations shared by BEL namespace and annotation resource documents.
:param dict config: The configuration dictionary representing a BEL resource
:param definition_header: ``Namespace`` or ``AnnotationDefinition``
:rtype: dict
"""
return {
"keyword": config[definition_header]["Keyword"],
"created": parse_datetime(config[definition_header]["CreatedDateTime"]),
}
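# Example (editor's sketch, assuming parse_datetime accepts this timestamp
# format):
#
#     config = {"Namespace": {"Keyword": "HGNC",
#                             "CreatedDateTime": "2017-01-01T00:00:00"}}
#     extract_shared_required(config)
#     # -> {"keyword": "HGNC", "created": datetime(2017, 1, 1, 0, 0)}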
def extract_shared_optional(bel_resource, definition_header: str = "Namespace"):
"""Get the optional annotations shared by BEL namespace and annotation resource documents.
:param dict bel_resource: A configuration dictionary representing a BEL resource
:param definition_header: ``Namespace`` or ``AnnotationDefinition``
:rtype: dict
"""
shared_mapping = {
"description": (definition_header, "DescriptionString"),
"version": (definition_header, "VersionString"),
"author": ("Author", "NameString"),
"license": ("Author", "CopyrightString"),
"contact": ("Author", "ContactInfoString"),
"citation": ("Citation", "NameString"),
"citation_description": ("Citation", "DescriptionString"),
"citation_version": ("Citation", "PublishedVersionString"),
"citation_url": ("Citation", "ReferenceURL"),
}
result = {}
update_insert_values(bel_resource, shared_mapping, result)
if "PublishedDate" in bel_resource.get("Citation", {}):
result["citation_published"] = parse_datetime(bel_resource["Citation"]["PublishedDate"])
return result
def update_insert_values(
bel_resource: Mapping,
mapping: Mapping[str, Tuple[str, str]],
values: Dict[str, str],
) -> None:
"""Update the value dictionary with a BEL resource dictionary."""
for database_column, (section, key) in mapping.items():
if section in bel_resource and key in bel_resource[section]:
values[database_column] = bel_resource[section][key]
def int_or_str(v: Optional[str]) -> Union[None, int, str]:
"""Safe converts an string represent an integer to an integer or passes through ``None``."""
if v is None:
return
try:
return int(v)
except ValueError:
return v
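# Examples (editor's sketch): int_or_str("5") -> 5, int_or_str("five") -> "five",
# int_or_str(None) -> None.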
|
pybel/pybel
|
src/pybel/manager/utils.py
|
Python
|
mit
| 2,492
|
[
"Pybel"
] |
7cd23955b03f4c3eeae3dd843562508b4f6b9f1f9198bf24853650deb3120dcd
|
#######################################################################
#
#
# Volume Text Renderer for Dreambox/Enigma-2
# Coded by Vali (c)2010
# Support: www.dreambox-tools.info
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.VariableText import VariableText
from enigma import eLabel, eDVBVolumecontrol, eTimer
from Components.Renderer.Renderer import Renderer
class VVolumeText(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
self.vol_timer = eTimer()
self.vol_timer.callback.append(self.pollme)
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
self.text = str(eDVBVolumecontrol.getInstance().getVolume())
def pollme(self):
self.changed(None)
def onShow(self):
self.suspended = False
self.vol_timer.start(200)
def onHide(self):
self.suspended = True
self.vol_timer.stop()
|
openatv/enigma2
|
lib/python/Components/Renderer/VVolumeText.py
|
Python
|
gpl-2.0
| 1,574
|
[
"VisIt"
] |
c8f8edf0f2c1b0baf69f0a7f3ff09811b3b4ea9e9a984340d108b35672655333
|
#pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world
import time
from urllib import quote_plus
from selenium.common.exceptions import WebDriverException, StaleElementReferenceException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from lettuce.django import django_url
@world.absorb
def wait(seconds):
time.sleep(float(seconds))
@world.absorb
def wait_for(func):
WebDriverWait(world.browser.driver, 5).until(func)
@world.absorb
def visit(url):
world.browser.visit(django_url(url))
@world.absorb
def url_equals(url):
return world.browser.url == django_url(url)
@world.absorb
def is_css_present(css_selector, wait_time=5):
return world.browser.is_element_present_by_css(css_selector, wait_time=wait_time)
@world.absorb
def is_css_not_present(css_selector, wait_time=5):
return world.browser.is_element_not_present_by_css(css_selector, wait_time=wait_time)
@world.absorb
def css_has_text(css_selector, text):
return world.css_text(css_selector) == text
@world.absorb
def css_find(css, wait_time=5):
def is_visible(driver):
return EC.visibility_of_element_located((By.CSS_SELECTOR, css,))
world.browser.is_element_present_by_css(css, wait_time=wait_time)
wait_for(is_visible)
return world.browser.find_by_css(css)
@world.absorb
def css_click(css_selector):
"""
Perform a click on a CSS selector, retrying if it initially fails
"""
assert is_css_present(css_selector)
try:
world.browser.find_by_css(css_selector).click()
except WebDriverException:
# Occasionally, MathJax or other JavaScript can cover up
# an element temporarily.
# If this happens, wait a second, then try again
world.wait(1)
world.browser.find_by_css(css_selector).click()
@world.absorb
def css_click_at(css, x=10, y=10):
'''
A method to click at x,y coordinates of the element
rather than in the center of the element
'''
e = css_find(css).first
e.action_chains.move_to_element_with_offset(e._element, x, y)
e.action_chains.click()
e.action_chains.perform()
@world.absorb
def id_click(elem_id):
"""
Perform a click on an element as specified by its id
"""
world.css_click('#%s' % elem_id)
@world.absorb
def css_fill(css_selector, text):
assert is_css_present(css_selector)
world.browser.find_by_css(css_selector).first.fill(text)
@world.absorb
def click_link(partial_text):
world.browser.find_link_by_partial_text(partial_text).first.click()
@world.absorb
def css_text(css_selector):
# Wait for the css selector to appear
if world.is_css_present(css_selector):
try:
return world.browser.find_by_css(css_selector).first.text
except StaleElementReferenceException:
# The DOM was still redrawing. Wait a second and try again.
world.wait(1)
return world.browser.find_by_css(css_selector).first.text
else:
return ""
@world.absorb
def css_visible(css_selector):
assert is_css_present(css_selector)
return world.browser.find_by_css(css_selector).visible
@world.absorb
def dialogs_closed():
def are_dialogs_closed(driver):
'''
Return True when no modal dialogs are visible
'''
return not css_visible('.modal')
wait_for(are_dialogs_closed)
return not css_visible('.modal')
@world.absorb
def save_the_html(path='/tmp'):
u = world.browser.url
html = world.browser.html.encode('ascii', 'ignore')
filename = '%s.html' % quote_plus(u)
f = open('%s/%s' % (path, filename), 'w')
f.write(html)
f.close()
@world.absorb
def click_course_settings():
course_settings_css = 'li.nav-course-settings'
if world.browser.is_element_present_by_css(course_settings_css):
world.css_click(course_settings_css)
@world.absorb
def click_tools():
tools_css = 'li.nav-course-tools'
if world.browser.is_element_present_by_css(tools_css):
world.css_click(tools_css)
|
elimence/edx-platform
|
common/djangoapps/terrain/ui_helpers.py
|
Python
|
agpl-3.0
| 4,151
|
[
"VisIt"
] |
df548a5912fc57f6ec9637c5b25fc2341b0dda4f260bc02067786ebeb510cc18
|
from trustbutverify import analyzers
import mdtraj as md
import pandas as pd
analyzer = analyzers.ChemicalShiftAnalyzer("1am7", "/home/kyleb/src/choderalab/ForcefieldData/nmr/bacteriophage_lysozyme/16664.str")
expt = analyzer.load_expt()
analyzer = analyzers.ScalarCouplingAnalyzer("1am7", "/home/kyleb/src/choderalab/ForcefieldData/nmr/bacteriophage_lysozyme/19127.str")
expt = analyzer.load_expt()
traj = md.load("/home/kyleb/dat/TrustButVerify/production/amber03_tip3pfb_ACE_AG_NH2.dcd", top="/home/kyleb/dat/TrustButVerify/equil/amber03_tip3pfb_ACE_AG_NH2.pdb")
analyzer.analyze(traj)
|
choderalab/TrustButVerify
|
scripts/test_analyzer.py
|
Python
|
gpl-2.0
| 593
|
[
"MDTraj"
] |
0a95b71108d2c639f713f1bdcdde1be748dd0ae18591040b369a40c422aace0c
|
########################################################################
# File : CPUNormalization.py
# Author : Ricardo Graciani
########################################################################
""" DIRAC Workload Management System Client module that encapsulates all the
methods necessary to handle CPU normalization
"""
import os
import urllib
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.SiteCEMapping import getQueueInfo
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import TimeLeft
from DIRAC.WorkloadManagementSystem.Client.DIRACbenchmark import singleDiracBenchmark
__RCSID__ = "$Id$"
# TODO: This should come from some place in the configuration
NORMALIZATIONCONSTANT = 60. / 250. # from minutes to seconds and from SI00 to HS06 (ie min * SI00 -> sec * HS06 )
UNITS = {'HS06': 1., 'SI00': 1. / 250.}
# TODO: This is still fetching directly from MJF rather than going through
# the MJF module and the values it saves in the local DIRAC configuration
def __getFeatures(envVariable, items):
""" Extract features """
features = {}
featuresDir = os.environ.get(envVariable)
if featuresDir is None:
return features
for item in items:
fname = os.path.join(featuresDir, item)
try:
# Only keep features that do exist
features[item] = urllib.urlopen(fname).read()
except IOError:
pass
return features
def getMachineFeatures():
""" This uses the _old_ MJF information """
return __getFeatures("MACHINEFEATURES", ('hs06', 'jobslots', 'log_cores', 'phys_cores'))
# TODO: log_cores and phys_cores are deprecated and from the old MJF specification and not collected
# by the MJF module!
def getJobFeatures():
""" This uses the _new_ MJF information """
return __getFeatures("JOBFEATURES", ('hs06_job', 'allocated_cpu'))
def getPowerFromMJF():
""" Extracts the machine power from either JOBFEATURES or MACHINEFEATURES """
try:
features = getJobFeatures()
hs06Job = features.get('hs06_job')
# If the information is there and non zero, return, otherwise go to machine features
if hs06Job:
return round(float(hs06Job), 2)
features = getMachineFeatures()
totalPower = float(features.get('hs06', 0))
logCores = float(features.get('log_cores', 0))
physCores = float(features.get('phys_cores', 0))
jobSlots = float(features.get('jobslots', 0))
denom = min(max(logCores, physCores), jobSlots) if (logCores or physCores) and jobSlots else None
if totalPower and denom:
return round(totalPower / denom, 2)
return None
except ValueError as e:
gLogger.exception("Exception getting MJF information", lException=e)
return None
def queueNormalizedCPU(ceUniqueID):
""" Report Normalized CPU length of queue
"""
result = getQueueInfo(ceUniqueID)
if not result['OK']:
return result
ceInfoDict = result['Value']
siteCSSEction = ceInfoDict['SiteCSSEction']
queueCSSection = ceInfoDict['QueueCSSection']
benchmarkSI00 = __getQueueNormalization(queueCSSection, siteCSSEction)
maxCPUTime = __getMaxCPUTime(queueCSSection)
if maxCPUTime and benchmarkSI00:
normCPUTime = NORMALIZATIONCONSTANT * maxCPUTime * benchmarkSI00
else:
if not benchmarkSI00:
subClusterUniqueID = ceInfoDict['SubClusterUniqueID']
return S_ERROR('benchmarkSI00 info not available for %s' % subClusterUniqueID)
if not maxCPUTime:
return S_ERROR('maxCPUTime info not available')
return S_OK(normCPUTime)
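# Worked example (editor's sketch): a queue with maxCPUTime = 2880 (wall-clock
# minutes) and SI00 = 2500 gives 0.24 * 2880 * 2500 = 1,728,000 normalized
# (HS06) seconds.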
def getQueueNormalization(ceUniqueID):
""" Report Normalization Factor applied by Site to the given Queue
"""
result = getQueueInfo(ceUniqueID)
if not result['OK']:
return result
ceInfoDict = result['Value']
siteCSSEction = ceInfoDict['SiteCSSEction']
queueCSSection = ceInfoDict['QueueCSSection']
subClusterUniqueID = ceInfoDict['SubClusterUniqueID']
benchmarkSI00 = __getQueueNormalization(queueCSSection, siteCSSEction)
if benchmarkSI00:
return S_OK(benchmarkSI00)
return S_ERROR('benchmarkSI00 info not available for %s' % subClusterUniqueID)
# errorList.append( ( subClusterUniqueID , 'benchmarkSI00 info not available' ) )
# exitCode = 3
def __getQueueNormalization(queueCSSection, siteCSSEction):
""" Query the CS and return the Normalization
"""
benchmarkSI00Option = '%s/%s' % (queueCSSection, 'SI00')
benchmarkSI00 = gConfig.getValue(benchmarkSI00Option, 0.0)
if not benchmarkSI00:
benchmarkSI00Option = '%s/%s' % (siteCSSEction, 'SI00')
benchmarkSI00 = gConfig.getValue(benchmarkSI00Option, 0.0)
return benchmarkSI00
def __getMaxCPUTime(queueCSSection):
""" Query the CS and return the maxCPUTime
"""
maxCPUTimeOption = '%s/%s' % (queueCSSection, 'maxCPUTime')
maxCPUTime = gConfig.getValue(maxCPUTimeOption, 0.0)
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
return maxCPUTime
def getCPUNormalization(reference='HS06', iterations=1):
""" Get Normalized Power of the current CPU in [reference] units
"""
if reference not in UNITS:
return S_ERROR('Unknown Normalization unit %s' % str(reference))
try:
max(min(int(iterations), 10), 1)
except (TypeError, ValueError) as x:
return S_ERROR(x)
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
corr = Operations().getValue('JobScheduling/CPUNormalizationCorrection', 1.)
result = singleDiracBenchmark(iterations)
if result is None:
return S_ERROR('Cannot get benchmark measurements')
return S_OK({'CPU': result['CPU'],
'WALL': result['WALL'],
'NORM': result['NORM'] / corr,
'UNIT': reference})
def getCPUTime(cpuNormalizationFactor):
""" Trying to get CPUTime left for execution (in seconds).
It will first look to get the work left looking for batch system information useing the TimeLeft utility.
If it succeeds, it will convert it in real second, and return it.
If it fails, it tries to get it from the static info found in CS.
If it fails, it returns the default, which is a large 9999999, that we may consider as "Infinite".
This is a generic method, independent from the middleware of the resource if TimeLeft doesn't return a value
args:
cpuNormalizationFactor (float): the CPU power of the current Worker Node.
If not passed in, it's get from the local configuration
returns:
cpuTimeLeft (int): the CPU time left, in seconds
"""
cpuTimeLeft = 0.
cpuWorkLeft = gConfig.getValue('/LocalSite/CPUTimeLeft', 0)
if not cpuWorkLeft:
# Try and get the information from the CPU left utility
result = TimeLeft().getTimeLeft()
if result['OK']:
cpuWorkLeft = result['Value']
if cpuWorkLeft > 0:
# This is in HS06sseconds
# We need to convert in real seconds
if not cpuNormalizationFactor: # if cpuNormalizationFactor passed in is 0, try get it from the local cfg
cpuNormalizationFactor = gConfig.getValue('/LocalSite/CPUNormalizationFactor', 0.0)
if cpuNormalizationFactor:
cpuTimeLeft = cpuWorkLeft / cpuNormalizationFactor # this is a float
if not cpuTimeLeft:
# now we know that we have to find the CPUTimeLeft by looking in the CS
# this is not guaranteed to be correct as the CS units may not be real seconds
gridCE = gConfig.getValue('/LocalSite/GridCE')
ceQueue = gConfig.getValue('/LocalSite/CEQueue')
if not ceQueue:
# we have to look for a ceQueue in the CS
# A bit hacky. We should better profit from something generic
gLogger.warn("No CEQueue in local configuration, looking to find one in CS")
siteName = gConfig.getValue('/LocalSite/Site')
queueSection = '/Resources/Sites/%s/%s/CEs/%s/Queues' % (siteName.split('.')[0], siteName, gridCE)
res = gConfig.getSections(queueSection)
if not res['OK']:
raise RuntimeError(res['Message'])
queues = res['Value']
cpuTimes = [gConfig.getValue(queueSection + '/' + queue + '/maxCPUTime', 9999999.) for queue in queues]
# These are (real, wall clock) minutes - damn BDII!
cpuTimeLeft = min(cpuTimes) * 60
else:
queueInfo = getQueueInfo('%s/%s' % (gridCE, ceQueue))
cpuTimeLeft = 9999999.
if not queueInfo['OK'] or not queueInfo['Value']:
gLogger.warn("Can't find a CE/queue, defaulting CPUTime to %d" % cpuTimeLeft)
else:
queueCSSection = queueInfo['Value']['QueueCSSection']
# These are (real, wall clock) minutes - damn BDII!
cpuTimeInMinutes = gConfig.getValue('%s/maxCPUTime' % queueCSSection, 0.)
if cpuTimeInMinutes:
cpuTimeLeft = cpuTimeInMinutes * 60.
gLogger.info("CPUTime for %s: %f" % (queueCSSection, cpuTimeLeft))
else:
gLogger.warn("Can't find maxCPUTime for %s, defaulting CPUTime to %f" % (queueCSSection, cpuTimeLeft))
return int(cpuTimeLeft)
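# Usage sketch (editor's addition): the normalization factor is typically read
# from the local configuration before calling getCPUTime.
#
#     factor = gConfig.getValue('/LocalSite/CPUNormalizationFactor', 0.0)
#     secondsLeft = getCPUTime(factor)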
|
andresailer/DIRAC
|
WorkloadManagementSystem/Client/CPUNormalization.py
|
Python
|
gpl-3.0
| 8,966
|
[
"DIRAC"
] |
4db82fba2b3158d74a748c29281ef15c2b3958f568612251d9fcbb48eadb01c8
|
"""**Class projection**
"""
from osgeo import osr
# The projection string depends on the gdal version
DEFAULT_PROJECTION = '+proj=longlat +datum=WGS84 +no_defs'
class Projection:
"""Represents projections associated with layers
"""
def __init__(self, p):
"""Constructor for Projection.
Args:
* p: Projection information.
Any of the GDAL formats are OK including WKT, proj4, ESRI, XML
It can also be an instance of Projection.
"""
if p is None:
#msg = 'Requested projection is None'
#raise TypeError(msg)
p = DEFAULT_PROJECTION
# Clean input string. This will also work when p is of class
# Projection by virtue of its __repr__()
p = str(p).strip()
# Create OSR spatial reference object
srs = self.spatial_reference = osr.SpatialReference()
# Try importing
input_OK = False
for import_func in [srs.ImportFromProj4,
srs.ImportFromWkt,
srs.ImportFromEPSG,
srs.ImportFromESRI,
# FIXME (Ole): This one emits the warning:
# Warning 5: Failed parsing CoordSys:
# 'Indonesia TM-3 zone 48.2'
#srs.ImportFromMICoordSys,
srs.ImportFromPCI,
srs.ImportFromXML,
srs.ImportFromUSGS,
srs.ImportFromUrl]:
try:
res = import_func(p)
except TypeError:
# FIXME: NetCDF raster layer gives SRS error
# Occasionally we get things like
# File "/usr/lib/python2.7/dist-packages/osgeo/osr.py",
# line 639, in ImportFromEPSG
# return _osr.SpatialReference_ImportFromEPSG(self, *args)
# TypeError: in method 'SpatialReference_ImportFromEPSG',
# argument 2 of type 'int'
# e.g. when using NetCDF multiband data. Why?
pass
if res == 0:
input_OK = True
break
if not input_OK:
msg = 'Spatial reference %s was not recognised' % p
raise TypeError(msg)
# Store some - FIXME this is only for backwards compat, remove.
self.wkt = self.get_projection(proj4=False)
self.proj4 = self.get_projection(proj4=True)
def __repr__(self):
return self.wkt
def get_projection(self, proj4=False):
"""Return projection
Args:
* proj4: If True, projection will be returned in proj4 format.
If False (default) projection will be returned in WKT
format
Note:
To compare projections, use the __eq__ method directly on the
projection objects: E.g.
self.projection == other.projection
"""
if proj4:
p = self.spatial_reference.ExportToProj4()
else:
p = self.spatial_reference.ExportToWkt()
return p.strip()
def __eq__(self, other):
"""Override '==' to allow comparison with other projection objecs
"""
try:
other = Projection(other)
except Exception, e:
msg = ('Argument to == must be a spatial reference or object'
' of class Projection. I got %s with error '
'message: %s' % (str(other), e))
raise TypeError(msg)
if self.spatial_reference.IsSame(other.spatial_reference):
# Native comparison checks out
return True
else:
# We have seen cases where the native comparison didn't work
# for projections that should be identical. See e.g.
# https://github.com/AIFDR/riab/issues/160
# Hence do a secondary check using the proj4 string
return (self.get_projection(proj4=True) ==
other.get_projection(proj4=True))
def __ne__(self, other):
"""Override '!=' to allow comparison with other projection objecs
"""
return not self == other
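# Usage sketch (editor's addition): the constructor accepts proj4 or WKT
# strings, and equality falls back to a proj4 comparison when the native OSR
# check disagrees.
#
#     p = Projection(DEFAULT_PROJECTION)
#     q = Projection('+proj=longlat +datum=WGS84 +no_defs')
#     assert p == q
#     wkt = p.get_projection(proj4=False)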
|
takmid/inasafe
|
safe/storage/projection.py
|
Python
|
gpl-3.0
| 4,330
|
[
"NetCDF"
] |
3f60267f54703cc7595c6cb20b20f604cde672fb8ed398e90321ad4d69b2b609
|
import ast
code = """
x = 1
y = 2
z = x + y
x, y, z
"""
module = ast.parse(code)
module.body
ast.dump(module, annotate_fields=False, include_attributes=False)
# "Module([Assign([Name('x', Store())], Num(1)), Assign([Name('y', Store())], Num(2)), Assign([Name('z', Store())], BinOp(Name('x', Load()), Add(), Name('y', Load()))), Expr(Tuple([Name('x', Load()), Name('y', Load()), Name('z', Load())], Load()))])"
ast.dump(module, annotate_fields=True, include_attributes=False)
# "Module(body=[Assign(targets=[Name(id='x', ctx=Store())], value=Num(n=1)), Assign(targets=[Name(id='y', ctx=Store())], value=Num(n=2)), Assign(targets=[Name(id='z', ctx=Store())], value=BinOp(left=Name(id='x', ctx=Load()), op=Add(), right=Name(id='y', ctx=Load()))), Expr(value=Tuple(elts=[Name(id='x', ctx=Load()), Name(id='y', ctx=Load()), Name(id='z', ctx=Load())], ctx=Load()))])"
class MyNodeVisitor(ast.NodeVisitor):
def visit(self, node):
print node
return super(MyNodeVisitor, self).visit(node)
# import pdb
# pdb.set_trace()
MyNodeVisitor().visit(module)
"""
<_ast.Module object at 0x045ADF70>
<_ast.Assign object at 0x04C842B0>
<_ast.Name object at 0x04C84510>
<_ast.Store object at 0x040C0290>
<_ast.Num object at 0x04C84430>
<_ast.Assign object at 0x04C844D0>
<_ast.Name object at 0x04C844F0>
<_ast.Store object at 0x040C0290>
<_ast.Num object at 0x04C843F0>
<_ast.Assign object at 0x04C84470>
<_ast.Name object at 0x04C84330>
<_ast.Store object at 0x040C0290>
<_ast.BinOp object at 0x04C84410>
<_ast.Name object at 0x04C84370>
<_ast.Load object at 0x040C01D0>
<_ast.Add object at 0x040C0C90>
<_ast.Name object at 0x04C844B0>
<_ast.Load object at 0x040C01D0>
<_ast.Expr object at 0x04C84530>
<_ast.Tuple object at 0x04C84550>
<_ast.Name object at 0x04C84570>
<_ast.Load object at 0x040C01D0>
<_ast.Name object at 0x04C84590>
<_ast.Load object at 0x040C01D0>
<_ast.Name object at 0x04C845B0>
<_ast.Load object at 0x040C01D0>
<_ast.Load object at 0x040C01D0>
"""
|
satishgoda/programmingusingpython
|
docs/library/language/ast/ast_NodeVisitor_subclass1.py
|
Python
|
gpl-2.0
| 1,984
|
[
"VisIt"
] |
65868e16f8ac5affca42fc91a1568b53b0611b53bd2f884bd51f43d9cb78be14
|
"""
An isogram is a word that has no repeating letters, consecutive or
non-consecutive. Implement a function that determines whether a string that
contains only letters is an isogram. Assume the empty string is an isogram.
Ignore letter case.
is_isogram("Dermatoglyphics" ) == true
is_isogram("aba" ) == false
is_isogram("moOse" ) == false # -- ignore letter case
"""
def is_isogram(string):
string = string.lower()
    return len(set(string)) == len(string)
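# Quick sanity checks mirroring the examples in the docstring above; these are
# illustrative additions, not part of the original kata solution.
if __name__ == "__main__":
    assert is_isogram("Dermatoglyphics") is True
    assert is_isogram("aba") is False
    assert is_isogram("moOse") is False  # letter case is ignored
    assert is_isogram("") is True        # the empty string counts as an isogram
    print("all isogram checks passed")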
|
aadithpm/code-a-day
|
py/Isograms.py
|
Python
|
unlicense
| 479
|
[
"MOOSE"
] |
1a372298db5795b0355d152227633ffe86d63be693747bc4b903a3d008ad4fad
|
# -*- coding: utf-8 -*-
#
# test_quantal_stp_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script compares the two variants of the Tsodyks/Markram synapse in NEST.
import nest
import numpy
import unittest
@nest.check_stack
class QuantalSTPSynapseTestCase(unittest.TestCase):
"""Compare quantal_stp_synapse with its deterministic equivalent."""
def test_QuantalSTPSynapse(self):
"""Compare quantal_stp_synapse with its deterministic equivalent"""
nest.ResetKernel()
nest.set_verbosity(100)
n_syn = 12 # number of synapses in a connection
n_trials = 50 # number of measurement trials
# parameter set for facilitation
fac_params = {"U": 0.03, "u": 0.03,
"tau_fac": 500., "tau_rec": 200., "weight": 1.}
dep_params = {"U": 0.5, "u": 0.5, "tau_fac": 15.,
"tau_rec": 670., "weight": 1.}
lin_params = {"U": 0.3, "u": 0.3, "tau_fac": 330.,
"tau_rec": 330., "weight": 1.}
# Here we assign the parameter set to the synapse models
t1_params = fac_params # for tsodyks2_synapse
        t2_params = t1_params.copy()  # for quantal_stp_synapse
t2_params['n'] = n_syn
t2_params['weight'] = 1. / n_syn
nest.SetDefaults("tsodyks2_synapse", t1_params)
nest.SetDefaults("quantal_stp_synapse", t2_params)
nest.SetDefaults("iaf_psc_exp", {"tau_syn_ex": 3., 'tau_m': 70.})
source = nest.Create('spike_generator')
nest.SetStatus(
source,
{
'spike_times': [
30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330., 360., 390., 900.]
}
)
parrot = nest.Create('parrot_neuron')
neuron = nest.Create("iaf_psc_exp", 2)
# We must send spikes via parrot because devices cannot
# connect through plastic synapses
# See #478.
nest.Connect(source, parrot)
nest.Connect(parrot, neuron[:1], syn_spec="tsodyks2_synapse")
nest.Connect(parrot, neuron[1:], syn_spec="quantal_stp_synapse")
voltmeter = nest.Create("voltmeter", 2)
nest.SetStatus(voltmeter, {"withgid": False, "withtime": True})
t_plot = 1000.
t_tot = 1500.
        # The following is a dry-run trial so that the synapse dynamics are
        # identical in all subsequent trials.
nest.Simulate(t_tot)
# Now we connect the voltmeters
nest.Connect([voltmeter[0]], [neuron[0]])
nest.Connect([voltmeter[1]], [neuron[1]])
for t in range(n_trials):
t_net = nest.GetKernelStatus('time')
nest.SetStatus(source, {'origin': t_net})
nest.Simulate(t_tot)
nest.Simulate(.1) # flush the last voltmeter events from the queue
vm = numpy.array(nest.GetStatus([voltmeter[1]], 'events')[0]['V_m'])
vm_reference = numpy.array(nest.GetStatus(
[voltmeter[0]], 'events')[0]['V_m'])
vm.shape = (n_trials, t_tot)
vm_reference.shape = (n_trials, t_tot)
vm_mean = numpy.array([numpy.mean(vm[:, i])
for i in range(int(t_tot))])
vm_ref_mean = numpy.array(
[numpy.mean(vm_reference[:, i]) for i in range(int(t_tot))])
error = numpy.sqrt((vm_ref_mean[:t_plot] - vm_mean[:t_plot])**2)
self.assertTrue(numpy.max(error) < 4.0e-4)
def suite():
suite = unittest.makeSuite(QuantalSTPSynapseTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
HBPNeurorobotics/nest-simulator
|
pynest/nest/tests/test_quantal_stp_synapse.py
|
Python
|
gpl-2.0
| 4,353
|
[
"NEURON"
] |
06a31eec37f9d1551d380ddb45ba97ea42461db7ab597cdbd914102a0bca4d64
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
from warnings import warn
import numpy as np
from scipy.sparse import hstack as sparse_hstack
from scipy.sparse import issparse
from ..tree._tree import DTYPE, DOUBLE
from .base import BaseEnsemble, _partition_estimators
from ..base import ClassifierMixin, RegressorMixin
from ..exceptions import DataConversionWarning, NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.fixes import bincount
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the sample goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if issparse(X):
            # Pre-sort indices so that each individual tree of the
            # ensemble does not have to sort them again.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # which [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and"
"will be removed in 0.19. It was replaced by "
"class_weight='balanced_subsample' using the balanced"
"strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples,))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
            # Pre-sort indices so that each individual tree of the
            # ensemble does not have to sort them again.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
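# The class above turns each sample into a sparse one-hot encoding of the
# leaves it falls into. A minimal usage sketch (not part of forest.py; it
# assumes scikit-learn is installed and uses only the public API):
def _demo_random_trees_embedding():
    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding
    X = np.random.RandomState(0).rand(20, 4)
    hasher = RandomTreesEmbedding(n_estimators=5, random_state=0)
    X_sparse = hasher.fit_transform(X)
    # One column per reachable leaf across the 5 trees; exactly 5 ones per row.
    return X_sparse.shape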
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/ensemble/forest.py
|
Python
|
mit
| 64,801
|
[
"Brian"
] |
0a74af56114846db39a5643eb37773fd199356e0a1e551ca7c521ff1e867deff
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import TestFailed, verify, check_syntax
import sys
print '1. Parser'
print '1.1 Tokens'
print '1.1.1 Backslashes'
# Backslash means line continuation:
x = 1 \
+ 1
if x != 2: raise TestFailed, 'backslash for line continuation'
# Backslash does not mean continuation in comments :\
x = 0
if x != 0: raise TestFailed, 'backslash ending comment'
print '1.1.2 Numeric literals'
print '1.1.2.1 Plain integers'
if 0xff != 255: raise TestFailed, 'hex int'
if 0377 != 255: raise TestFailed, 'octal int'
if 2147483647 != 017777777777: raise TestFailed, 'large positive int'
try:
from sys import maxint
except ImportError:
maxint = 2147483647
if maxint == 2147483647:
# The following test will start to fail in Python 2.4;
# change the 020000000000 to -020000000000
if -2147483647-1 != -020000000000: raise TestFailed, 'max negative int'
# XXX -2147483648
if 037777777777 < 0: raise TestFailed, 'large oct'
if 0xffffffff < 0: raise TestFailed, 'large hex'
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
print "OverflowError on huge integer literal " + repr(s)
elif eval('maxint == 9223372036854775807'):
if eval('-9223372036854775807-1 != -01000000000000000000000'):
raise TestFailed, 'max negative int'
if eval('01777777777777777777777') < 0: raise TestFailed, 'large oct'
if eval('0xffffffffffffffff') < 0: raise TestFailed, 'large hex'
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
print "OverflowError on huge integer literal " + repr(s)
else:
print 'Weird maxint value', maxint
print '1.1.2.2 Long integers'
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
print '1.1.2.3 Floating point'
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
print '1.1.3 String literals'
x = ''; y = ""; verify(len(x) == 0 and x == y)
x = '\''; y = "'"; verify(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; verify(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
verify(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
verify(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
verify(x == y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''; verify(x == y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"; verify(x == y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'; verify(x == y)
print '1.2 Grammar'
print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
print 'file_input' # (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
print 'expr_input' # testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
print 'eval_input' # testlist ENDMARKER
x = eval('1, 0 or 1')
print 'funcdef'
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
verify(f2.func_code.co_varnames == ('one_argument',))
verify(f3.func_code.co_varnames == ('two', 'arguments'))
if sys.platform.startswith('java'):
verify(f4.func_code.co_varnames ==
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
verify(f5.func_code.co_varnames ==
('(compound, first)', 'two', 'compound', 'first'))
else:
verify(f4.func_code.co_varnames == ('two', '.2', 'compound',
'argument', 'list'))
verify(f5.func_code.co_varnames == ('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
if sys.platform.startswith('java'):
verify(v3.func_code.co_varnames == ('a', '(b, c)', 'rest', 'b', 'c'))
else:
verify(v3.func_code.co_varnames == ('a', '.2', 'rest', 'b', 'c'))
verify(v3(1, (2, 3), 4) == (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
### lambdef: 'lambda' [varargslist] ':' test
print 'lambdef'
l1 = lambda : 0
verify(l1() == 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
verify(l3() == [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
verify(l4() == 1)
l5 = lambda x, y, z=2: x + y + z
verify(l5(1, 2) == 5)
verify(l5(1, 2, 3) == 6)
check_syntax("lambda x: x = 2")
### stmt: simple_stmt | compound_stmt
# Tested below
### simple_stmt: small_stmt (';' small_stmt)* [';']
print 'simple_stmt'
x = 1; pass; del x
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
print 'expr_stmt' # (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
# NB these variables are deleted below
check_syntax("x + 1 = 1")
check_syntax("a + 1 = b + 2")
print 'print_stmt' # 'print' (test ',')* [test]
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
print 'extended print_stmt' # 'print' '>>' test ','
import sys
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
# syntax errors
check_syntax('print ,')
check_syntax('print >> x,')
print 'del_stmt' # 'del' exprlist
del abc
del x, y, (z, xyz)
print 'pass_stmt' # 'pass'
pass
print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
print 'break_stmt' # 'break'
while 1: break
print 'continue_stmt' # 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "continue + try/except ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
print msg
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "continue + try/finally ok"
print msg
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
print "testing continue and break in try/except in loop"
def test_break_continue_loop(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
print "continue then break in try/except in loop broken!"
test_break_continue_loop()
print 'return_stmt' # 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
print 'raise_stmt' # 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
print 'import_name' # 'import' dotted_as_names
import sys
import time, sys
print 'import_from' # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
print 'global_stmt' # 'global' NAME (',' NAME)*
def f():
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
def f():
z = None
del z
exec 'z=1+1\n'
if z != 2: raise TestFailed, 'exec \'z=1+1\'\\n'
del z
exec 'z=1+1'
if z != 2: raise TestFailed, 'exec \'z=1+1\''
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: raise TestFailed, 'exec u\'z=1+1\'\\n'
del z
exec u'z=1+1'
if z != 2: raise TestFailed, 'exec u\'z=1+1\''
"""
f()
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g (%s), l (%s)' %(g,l)
print "assert_stmt" # assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285: raise TestFailed, 'for over growing sequence'
print 'try_stmt'
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [',' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError, msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
print 'test'
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
print 'comparison'
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
print 'binary mask ops'
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
print 'shift ops'
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
print 'additive ops'
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
print 'multiplicative ops'
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
print 'unary ops'
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
print 'selectors'
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
print
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
print 'atoms'
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
print 'classdef' # 'class' NAME ['(' testlist ')'] ':' suite
class B: pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
print [s.strip() for s in spcs]
print [3 * x for x in nums]
print [x for x in nums if x > 2]
print [(i, s) for i in nums for s in strs]
print [(i, s) for i in nums for s in [f for f in strs if "n" in f]]
print [(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)]
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
print test_in_func(nums)
def test_nested_front():
print [[y for y in [x, x + 1]] for x in [1,3,5]]
test_nested_front()
check_syntax("[i, s for i in nums for s in strs]")
check_syntax("[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
print [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
verify(g.next() == [x for x in range(10)])
try:
g.next()
raise TestFailed, 'should produce StopIteration exception'
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
raise TestFailed, 'should produce TypeError'
except TypeError:
pass
verify(list((x, y) for x in 'abcd' for y in 'abcd') == [(x, y) for x in 'abcd' for y in 'abcd'])
verify(list((x, y) for x in 'ab' for y in 'xy') == [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
verify(sum(b) == sum([x for x in range(10)]))
verify(sum(x**2 for x in range(10)) == sum([x**2 for x in range(10)]))
verify(sum(x*x for x in range(10) if x%2) == sum([x*x for x in range(10) if x%2]))
verify(sum(x for x in (y for y in range(10))) == sum([x for x in range(10)]))
verify(sum(x for x in (y for y in (z for z in range(10)))) == sum([x for x in range(10)]))
verify(sum(x for x in [y for y in (z for z in range(10))]) == sum([x for x in range(10)]))
verify(sum(x for x in (y for y in (z for z in range(10) if True)) if True) == sum([x for x in range(10)]))
verify(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True) == 0)
check_syntax("foo(x for x in range(10), 100)")
check_syntax("foo(100, x for x in range(10))")
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
verify(len(list(g)) == 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
verify([(i,j) for i in range(10) for j in range(5)] == list(g))
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/test/test_grammar.py
|
Python
|
gpl-2.0
| 19,154
|
[
"GULP"
] |
0e6c9826b95a3b53bf3addb2e4df1ee4e72da4adfbef0eb12819d5c92fabc3be
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for distributed ccresponse computations.
Defines functions for retrieving data computed at displaced geometries.
"""
from psi4.driver import p4util
def collect_displaced_matrix_data(db, signature, row_dim):
"""
Gathers a list of tensors, one at each displaced geometry.
db: (database) the database object for this property calculation
signature: (string) The string that notifies the matrix reader that the
targeted tensor data begins.
row_dim: the expected number of rows that this value should be printed
across in the file
Returns a 2d list result[i][j]:
i: indexes displacements
j: indexes elements of the flattened tensor at some displacement
Throws: none
"""
result = []
for job in db['job_status']:
with open('{}/output.dat'.format(job)) as outfile:
result.append(parse_geometry_matrix_data(outfile, signature, row_dim))
return result
# END collect_displaced_matrix_data()
def parse_geometry_matrix_data(outfile, matrix_name, row_tot):
"""
Parses data from a 3 by n matrix printed to a file
outfile: ( file ) handle open in read mode, where the data should be found
matrix_name: ( string ) that indicates the matrix data is found on the lines
below
row_tot: ( int ) indicates the number of lines that the matrix data should
be printed across in the file
Returns: matrix_data a list of matrix elements, len = 3*row_tot
Throws: ParsingError (Collecting matrix data failed) if
It can't find matrix_header in the file.
It found matrix_header, but no data.
It found matrix_header, and data but the number of elements is
incorrect.
"""
collect_matrix = False
n_rows = 0
n_tries = 0
matrix_data = []
for line in outfile:
if matrix_name in line:
collect_matrix = True
if collect_matrix and (n_rows < row_tot):
try:
n_tries += 1
if n_tries > (row_tot + 13):
raise p4util.ParsingError('{} Matrix was unreadable. Scanned {} '
'lines.'.format(matrix_name, n_tries))
else:
(index, x, y, z) = line.split()
matrix_data.append(float(x))
matrix_data.append(float(y))
matrix_data.append(float(z))
n_rows += 1
except:
pass
if (n_rows == row_tot) and (len(matrix_data) != 3 * row_tot):
raise p4util.ParsingError('Collecting {} data failed!'
'\nExpected {} elements but only captured {}'.format(
matrix_name, 3 * row_tot, len(matrix_data)))
if len(matrix_data) == 3 * row_tot:
return matrix_data
raise p4util.ParsingError('data for {} was not found in the output file, '
'but it was marked for collection. Check output files '
'in displacement sub-dirs!'.format(matrix_name))
# END parse_geometry_matrix_data()
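# A tiny illustrative run of the parser above, using fabricated output text
# (the signature line and the "index x y z" layout are assumptions for this
# sketch, not a real Psi4 output format):
def _demo_parse_geometry_matrix_data():
    import io
    fake_output = io.StringIO(
        "Some Tensor Signature\n"
        " 1   0.0   0.1   0.2\n"
        " 2   1.0   1.1   1.2\n")
    # Expect the six floats flattened row by row:
    # [0.0, 0.1, 0.2, 1.0, 1.1, 1.2]
    return parse_geometry_matrix_data(fake_output, "Some Tensor Signature", 2)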
|
susilehtola/psi4
|
psi4/driver/procrouting/findif_response_utils/data_collection_helper.py
|
Python
|
lgpl-3.0
| 4,102
|
[
"Psi4"
] |
a316e6674f68358160450cd74e32c2b433116963e44b902950f94d70eec49932
|
#!/usr/bin/env python3
# Tests check_format.py. This must be run in a context where the clang
# version and settings are compatible with the one in the Envoy
# docker. Normally this is run via check_format_test.sh, which
# executes it in under docker.
from __future__ import print_function
from run_command import run_command
import argparse
import logging
import os
import shutil
import sys
import tempfile
curr_dir = os.path.dirname(os.path.realpath(__file__))
tools = os.path.dirname(curr_dir)
src = os.path.join(tools, 'testdata', 'check_format')
check_format = sys.executable + " " + os.path.join(curr_dir, 'check_format.py')
errors = 0
# Runs the 'check_format' operation on the specified file, printing
# the command run and the status code as well as the stdout, and returning
# all of that to the caller.
def run_check_format(operation, filename):
command = check_format + " " + operation + " " + filename
status, stdout, stderr = run_command(command)
return (command, status, stdout + stderr)
def get_input_file(filename, extra_input_files=None):
files_to_copy = [filename]
if extra_input_files is not None:
files_to_copy.extend(extra_input_files)
for f in files_to_copy:
infile = os.path.join(src, f)
directory = os.path.dirname(f)
if not directory == '' and not os.path.isdir(directory):
os.makedirs(directory)
shutil.copyfile(infile, f)
return filename
# Attempts to fix a file, returning a 5-tuple: the command, input file name,
# output filename, captured stdout as an array of lines, and the error status
# code.
def fix_file_helper(filename, extra_input_files=None):
command, status, stdout = run_check_format(
"fix", get_input_file(filename, extra_input_files=extra_input_files))
infile = os.path.join(src, filename)
return command, infile, filename, status, stdout
# Attempts to fix a file, returning the status code and the generated output.
# If the fix was successful, the diff is returned as a string-array. If the file
# was not fixable, the error-messages are returned as a string-array.
def fix_file_expecting_success(file, extra_input_files=None):
command, infile, outfile, status, stdout = fix_file_helper(
file, extra_input_files=extra_input_files)
if status != 0:
print("FAILED: " + infile)
emit_stdout_as_error(stdout)
return 1
status, stdout, stderr = run_command('diff ' + outfile + ' ' + infile + '.gold')
if status != 0:
print("FAILED: " + infile)
emit_stdout_as_error(stdout + stderr)
return 1
return 0
def fix_file_expecting_no_change(file):
command, infile, outfile, status, stdout = fix_file_helper(file)
if status != 0:
return 1
status, stdout, stderr = run_command('diff ' + outfile + ' ' + infile)
if status != 0:
logging.error(file + ': expected file to remain unchanged')
return 1
return 0
def emit_stdout_as_error(stdout):
logging.error("\n".join(stdout))
def expect_error(filename, status, stdout, expected_substring):
if status == 0:
logging.error("%s: Expected failure `%s`, but succeeded" % (filename, expected_substring))
return 1
for line in stdout:
if expected_substring in line:
return 0
logging.error("%s: Could not find '%s' in:\n" % (filename, expected_substring))
emit_stdout_as_error(stdout)
return 1
def fix_file_expecting_failure(filename, expected_substring):
command, infile, outfile, status, stdout = fix_file_helper(filename)
return expect_error(filename, status, stdout, expected_substring)
def check_file_expecting_error(filename, expected_substring, extra_input_files=None):
command, status, stdout = run_check_format(
"check", get_input_file(filename, extra_input_files=extra_input_files))
return expect_error(filename, status, stdout, expected_substring)
def check_and_fix_error(filename, expected_substring, extra_input_files=None):
errors = check_file_expecting_error(
filename, expected_substring, extra_input_files=extra_input_files)
errors += fix_file_expecting_success(filename, extra_input_files=extra_input_files)
return errors
def check_tool_not_found_error():
# Temporarily change PATH to test the error about lack of external tools.
oldPath = os.environ["PATH"]
os.environ["PATH"] = "/sbin:/usr/sbin"
clang_format = os.getenv("CLANG_FORMAT", "clang-format-11")
# If CLANG_FORMAT points directly to the binary, skip this test.
if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK):
os.environ["PATH"] = oldPath
return 0
errors = check_file_expecting_error(
"no_namespace_envoy.cc", "Command %s not found." % clang_format)
os.environ["PATH"] = oldPath
return errors
def check_unfixable_error(filename, expected_substring):
errors = check_file_expecting_error(filename, expected_substring)
errors += fix_file_expecting_failure(filename, expected_substring)
return errors
def check_file_expecting_ok(filename):
command, status, stdout = run_check_format("check", get_input_file(filename))
if status != 0:
logging.error("Expected %s to have no errors; status=%d, output:\n" % (filename, status))
emit_stdout_as_error(stdout)
return status + fix_file_expecting_no_change(filename)
def run_checks():
errors = 0
# The following check exercises the error about unavailability of external tools.
errors += check_tool_not_found_error()
# The following errors can be detected but not fixed automatically.
errors += check_unfixable_error(
"no_namespace_envoy.cc", "Unable to find Envoy namespace or NOLINT(namespace-envoy)")
errors += check_unfixable_error("mutex.cc", "Don't use <mutex> or <condition_variable*>")
errors += check_unfixable_error(
"condition_variable.cc", "Don't use <mutex> or <condition_variable*>")
errors += check_unfixable_error(
"condition_variable_any.cc", "Don't use <mutex> or <condition_variable*>")
errors += check_unfixable_error("shared_mutex.cc", "shared_mutex")
errors += check_unfixable_error("shared_mutex.cc", "shared_mutex")
real_time_inject_error = (
"Don't reference real-world time sources; use TimeSystem::advanceTime(Wait|Async)")
errors += check_unfixable_error("real_time_source.cc", real_time_inject_error)
errors += check_unfixable_error("real_time_system.cc", real_time_inject_error)
errors += check_unfixable_error(
"duration_value.cc",
"Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)"
)
errors += check_unfixable_error("system_clock.cc", real_time_inject_error)
errors += check_unfixable_error("steady_clock.cc", real_time_inject_error)
errors += check_unfixable_error(
"unpack_to.cc", "Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
errors += check_unfixable_error(
"condvar_wait_for.cc", "Don't use CondVar::waitFor(); use TimeSystem::waitFor() instead.")
errors += check_unfixable_error("sleep.cc", real_time_inject_error)
errors += check_unfixable_error("std_atomic_free_functions.cc", "std::atomic_*")
errors += check_unfixable_error("std_get_time.cc", "std::get_time")
errors += check_unfixable_error(
"no_namespace_envoy.cc", "Unable to find Envoy namespace or NOLINT(namespace-envoy)")
errors += check_unfixable_error("bazel_tools.BUILD", "unexpected @bazel_tools reference")
errors += check_unfixable_error(
"proto.BUILD", "unexpected direct external dependency on protobuf")
errors += check_unfixable_error(
"proto_deps.cc", "unexpected direct dependency on google.protobuf")
errors += check_unfixable_error("attribute_packed.cc", "Don't use __attribute__((packed))")
errors += check_unfixable_error(
"designated_initializers.cc", "Don't use designated initializers")
errors += check_unfixable_error("elvis_operator.cc", "Don't use the '?:' operator")
errors += check_unfixable_error(
"testing_test.cc", "Don't use 'using testing::Test;, elaborate the type instead")
errors += check_unfixable_error(
"serialize_as_string.cc",
"Don't use MessageLite::SerializeAsString for generating deterministic serialization")
errors += check_unfixable_error(
"counter_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += check_unfixable_error(
"gauge_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += check_unfixable_error(
"histogram_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += check_unfixable_error(
"regex.cc", "Don't use std::regex in code that handles untrusted input. Use RegexMatcher")
errors += check_unfixable_error(
"grpc_init.cc",
"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. "
+ "See #8282")
errors += check_unfixable_error(
"grpc_shutdown.cc",
"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. "
+ "See #8282")
errors += check_unfixable_error(
"source/raw_try.cc",
"Don't use raw try, use TRY_ASSERT_MAIN_THREAD if on the main thread otherwise don't use exceptions."
)
errors += check_unfixable_error("clang_format_double_off.cc", "clang-format nested off")
errors += check_unfixable_error("clang_format_trailing_off.cc", "clang-format remains off")
errors += check_unfixable_error("clang_format_double_on.cc", "clang-format nested on")
errors += fix_file_expecting_failure(
"api/missing_package.proto",
"Unable to find package name for proto file: ./api/missing_package.proto")
errors += check_unfixable_error(
"proto_enum_mangling.cc", "Don't use mangled Protobuf names for enum constants")
errors += check_unfixable_error(
"test_naming.cc", "Test names should be CamelCase, starting with a capital letter")
errors += check_unfixable_error("mock_method_n.cc", "use MOCK_METHOD() instead")
errors += check_unfixable_error("for_each_n.cc", "use an alternative for loop instead")
errors += check_unfixable_error(
"test/register_factory.cc",
"Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use "
"Registry::InjectFactory instead.")
errors += check_unfixable_error(
"strerror.cc", "Don't use strerror; use Envoy::errorDetails instead")
errors += check_unfixable_error(
"std_unordered_map.cc", "Don't use std::unordered_map; use absl::flat_hash_map instead "
+ "or absl::node_hash_map if pointer stability of keys/values is required")
errors += check_unfixable_error(
"std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead "
+ "or absl::node_hash_set if pointer stability of keys/values is required")
errors += check_unfixable_error("std_any.cc", "Don't use std::any; use absl::any instead")
errors += check_unfixable_error(
"std_get_if.cc", "Don't use std::get_if; use absl::get_if instead")
errors += check_unfixable_error(
"std_holds_alternative.cc",
"Don't use std::holds_alternative; use absl::holds_alternative instead")
errors += check_unfixable_error(
"std_make_optional.cc", "Don't use std::make_optional; use absl::make_optional instead")
errors += check_unfixable_error(
"std_monostate.cc", "Don't use std::monostate; use absl::monostate instead")
errors += check_unfixable_error(
"std_optional.cc", "Don't use std::optional; use absl::optional instead")
errors += check_unfixable_error(
"std_string_view.cc",
"Don't use std::string_view or toStdStringView; use absl::string_view instead")
errors += check_unfixable_error(
"std_variant.cc", "Don't use std::variant; use absl::variant instead")
errors += check_unfixable_error("std_visit.cc", "Don't use std::visit; use absl::visit instead")
errors += check_unfixable_error(
"throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.")
errors += check_unfixable_error("pgv_string.proto", "min_bytes is DEPRECATED, Use min_len.")
errors += check_file_expecting_ok("commented_throw.cc")
errors += check_unfixable_error(
"repository_url.bzl", "Only repository_locations.bzl may contains URL references")
errors += check_unfixable_error(
"repository_urls.bzl", "Only repository_locations.bzl may contains URL references")
# The following files have errors that can be automatically fixed.
errors += check_and_fix_error(
"over_enthusiastic_spaces.cc", "./over_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
errors += check_and_fix_error(
"extra_enthusiastic_spaces.cc",
"./extra_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
errors += check_and_fix_error(
"angle_bracket_include.cc", "envoy includes should not have angle brackets")
errors += check_and_fix_error("proto_style.cc", "incorrect protobuf type reference")
errors += check_and_fix_error("long_line.cc", "clang-format check failed")
errors += check_and_fix_error("header_order.cc", "header_order.py check failed")
errors += check_and_fix_error(
"clang_format_on.cc", "./clang_format_on.cc:7: over-enthusiastic spaces")
# Validate that a missing license is added.
errors += check_and_fix_error("license.BUILD", "envoy_build_fixer check failed")
# Validate that an incorrect license is replaced and reordered.
errors += check_and_fix_error("update_license.BUILD", "envoy_build_fixer check failed")
# Validate that envoy_package() is added where there is an envoy_* rule occurring.
errors += check_and_fix_error("add_envoy_package.BUILD", "envoy_build_fixer check failed")
# Validate that we don't add envoy_package() when no envoy_* rule.
errors += check_file_expecting_ok("skip_envoy_package.BUILD")
# Validate that we clean up gratuitous blank lines.
errors += check_and_fix_error("canonical_spacing.BUILD", "envoy_build_fixer check failed")
# Validate that unused loads are removed.
errors += check_and_fix_error("remove_unused_loads.BUILD", "envoy_build_fixer check failed")
# Validate that API proto package deps are computed automagically.
errors += check_and_fix_error(
"canonical_api_deps.BUILD",
"envoy_build_fixer check failed",
extra_input_files=[
"canonical_api_deps.cc", "canonical_api_deps.h", "canonical_api_deps.other.cc"
])
errors += check_and_fix_error("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix")
errors += check_and_fix_error("proto_format.proto", "clang-format check failed")
errors += check_and_fix_error(
"cpp_std.cc",
"term absl::make_unique< should be replaced with standard library term std::make_unique<")
errors += check_and_fix_error(
"code_conventions.cc", "term .Times(1); should be replaced with preferred term ;")
errors += check_file_expecting_ok("real_time_source_override.cc")
errors += check_file_expecting_ok("duration_value_zero.cc")
errors += check_file_expecting_ok("time_system_wait_for.cc")
errors += check_file_expecting_ok("clang_format_off.cc")
return errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tester for check_format.py.')
parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO')
args = parser.parse_args()
logging.basicConfig(format='%(message)s', level=args.log)
# Now create a temp directory to copy the input files, so we can fix them
# without actually fixing our testdata. This requires chdir'ing to the temp
# directory, so it's annoying to commingle check-tests and fix-tests.
with tempfile.TemporaryDirectory() as tmp:
os.chdir(tmp)
errors = run_checks()
if errors != 0:
logging.error("%d FAILURES" % errors)
exit(1)
logging.warning("PASS")
|
envoyproxy/envoy
|
tools/code_format/check_format_test_helper.py
|
Python
|
apache-2.0
| 16,423
|
[
"VisIt"
] |
7f47e95965af0bc597f445a51008cf2569ecce438bd3ad991bf5145eb40ea166
|
"""
:Author: Pierre Barbier de Reuille <pierre.barbierdereuille@gmail.com>
Module implementing non-parametric regressions using kernel smoothing methods.
"""
#from __future__ import division, absolute_import, print_function
from scipy import stats
from scipy.linalg import sqrtm, solve
from scipy.special import erf
import scipy
import numpy as np
# Constants and aliases used by the pure-Python code paths below.
S2 = np.sqrt(2.0)
S2PI = np.sqrt(2.0 * np.pi)
irange = range
def scotts_bandwidth(xdata, ydata=None, model=None):
r"""
The Scotts bandwidth is defined as a variance bandwidth with factor:
.. math::
\tau = n^\frac{-1}{d+4}
"""
xdata = np.atleast_2d(xdata)
d, n = xdata.shape
return variance_bandwidth(np.power(n, -1. / (d + 4.)), xdata)
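# A small worked sketch of the rule above (not part of the original module):
# with n = 100 one-dimensional points the factor is 100 ** (-1/5) ~= 0.398,
# and the returned bandwidth is that factor squared times the sample variance.
def _demo_scotts_bandwidth():
    rng = np.random.RandomState(0)
    xdata = rng.rand(1, 100)              # shape (d, n) = (1, 100)
    sq_bw = scotts_bandwidth(xdata)       # 2D array, here of shape (1, 1)
    return sq_bw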
class normal_kernel(object):
"""
Returns a function-object for the PDF of a Normal kernel of variance
identity and average 0 in dimension ``dim``.
"""
def __new__(klass, dim):
"""
The __new__ method will automatically select :py:class:`normal_kernel1d` if dim is 1.
"""
if dim == 1:
return normal_kernel1d()
return object.__new__(klass)
def __init__(self, dim):
self.factor = 1 / np.sqrt(2 * np.pi) ** dim
def pdf(self, xs):
"""
Return the probability density of the function.
:param ndarray xs: Array of shape (D,N) where D is the dimension of the kernel
and N the number of points.
:returns: an array of shape (N,) with the density on each point of ``xs``
"""
xs = np.atleast_2d(xs)
return self.factor * np.exp(-0.5 * np.sum(xs * xs, axis=0))
__call__ = pdf
class normal_kernel1d(object):
"""
1D normal density kernel with extra integrals for 1D bounded kernel estimation.
"""
def pdf(self, z, out=None):
r"""
Return the probability density of the function. The formula used is:
.. math::
\phi(z) = \frac{1}{\sqrt{2\pi}}e^{-\frac{z^2}{2}}
:param ndarray xs: Array of any shape
:returns: an array of shape identical to ``xs``
"""
return kernels_imp.norm1d_pdf(z, out)
def _pdf(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.pdf`
"""
z = np.asarray(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= S2PI
return out
__call__ = pdf
def fft(self, z, out=None):
"""
Returns the FFT of the normal distribution
"""
out = np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
return out
def dct(self, z, out=None):
"""
Returns the DCT of the normal distribution
"""
out = np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
return out
def cdf(self, z, out=None):
r"""
Cumulative density of probability. The formula used is:
.. math::
\text{cdf}(z) \triangleq \int_{-\infty}^z \phi(z)
dz = \frac{1}{2}\text{erf}\left(\frac{z}{\sqrt{2}}\right) + \frac{1}{2}
"""
return kernels_imp.norm1d_cdf(z, out)
def _cdf(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.cdf`
"""
z = np.asarray(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.divide(z, S2, out)
erf(out, out)
out *= 0.5
out += 0.5
return out
def pm1(self, z, out=None):
r"""
Partial moment of order 1:
.. math::
\text{pm1}(z) \triangleq \int_{-\infty}^z z\phi(z) dz
= -\frac{1}{\sqrt{2\pi}}e^{-\frac{z^2}{2}}
"""
return kernels_imp.norm1d_pm1(z, out)
def _pm1(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm1`
"""
z = np.asarray(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= -S2PI
return out
def pm2(self, z, out=None):
r"""
Partial moment of order 2:
.. math::
\text{pm2}(z) \triangleq \int_{-\infty}^z z^2\phi(z) dz
= \frac{1}{2}\text{erf}\left(\frac{z}{\sqrt{2}}\right) - \frac{z}{\sqrt{2\pi}}
e^{-\frac{z^2}{2}} + \frac{1}{2}
"""
return kernels_imp.norm1d_pm2(z, out)
def _pm2(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm2`
"""
z = np.asarray(z, dtype=float)
if out is None:
out = np.empty(z.shape)
np.divide(z, S2, out)
erf(out, out)
out /= 2
if z.shape:
zz = np.isfinite(z)
sz = z[zz]
out[zz] -= sz * np.exp(-0.5 * sz * sz) / S2PI
elif np.isfinite(z):
out -= z * np.exp(-0.5 * z * z) / S2PI
out += 0.5
return out
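# A quick numeric check of the pure-Python fallbacks above (a sketch, assuming
# the S2PI/S2/erf helpers defined near the imports of this module):
def _demo_normal_kernel1d():
    k = normal_kernel1d()
    z = np.array([0.0, 1.0])
    pdf_at_zero = k._pdf(z)[0]   # 1/sqrt(2*pi) ~= 0.3989
    cdf_at_zero = k._cdf(z)[0]   # 0.5, by symmetry of the normal density
    return pdf_at_zero, cdf_at_zero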
def variance_bandwidth(factor, xdata):
"""
Returns the covariance matrix:
.. math::
\mathcal{C} = \tau^2 cov(X)
where :math:`\tau` is a correcting factor that depends on the method.
"""
data_covariance = np.atleast_2d(np.cov(xdata, rowvar=1, bias=False))
sq_bandwidth = data_covariance * factor * factor
return sq_bandwidth
class SpatialAverage(object):
r"""
Perform a Nadaraya-Watson regression on the data (i.e. also called
local-constant regression) using a gaussian kernel.
The Nadaraya-Watson estimate is given by:
.. math::
f_n(x) \triangleq \frac{\sum_i K\left(\frac{x-X_i}{h}\right) Y_i}
{\sum_i K\left(\frac{x-X_i}{h}\right)}
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: ndarray or callable
:param cov: If an ndarray, it should be a 2D array giving the matrix of
covariance of the gaussian kernel. Otherwise, it should be a function
``cov(xdata, ydata)`` returning the covariance matrix.
"""
def __init__(self, xdata, ydata, cov=scotts_bandwidth):
self.xdata = np.atleast_2d(xdata)
self.ydata = ydata
self._bw = None
self._covariance = None
self._inv_cov = None
self.covariance = cov
self.d, self.n = self.xdata.shape
self.correction = 1.
@property
def bandwidth(self):
"""
Bandwidth of the kernel. It cannot be set directly, but rather should
be set via the covariance attribute.
"""
if self._bw is None and self._covariance is not None:
self._bw = np.real(sqrtm(self._covariance))
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a 2D matrix for the covariance of the kernel.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = np.atleast_2d(cov(self.xdata, self.ydata))
else:
_cov = np.atleast_2d(cov)
self._bw = None
self._covariance = _cov
self._inv_cov = scipy.linalg.inv(_cov)
def evaluate(self, points, result=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
points = np.atleast_2d(points).astype(self.xdata.dtype)
#norm = self.kde(points)
d, m = points.shape
if result is None:
result = np.zeros((m,), points.dtype)
norm = np.zeros((m,), points.dtype)
# iterate on the internal points
for i, ci in np.broadcast(range(self.n),
range(self._correction.shape[0])):
diff = np.dot(self._correction[ci],
self.xdata[:, i, np.newaxis] - points)
tdiff = np.dot(self._inv_cov, diff)
energy = np.exp(-np.sum(diff * tdiff, axis=0) / 2.0)
result += self.ydata[i] * energy
norm += energy
result[norm > 1e-50] /= norm[norm > 1e-50]
return result
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`SpatialAverage.evaluate`
"""
return self.evaluate(*args, **kwords)
@property
def correction(self):
"""
The correction coefficient allows changing the width of the kernel
depending on the point considered. It can be either a constant (to
correct the kernel width globally), or a 1D array of the same size as
the input.
"""
return self._correction
@correction.setter # noqa
def correction(self, value):
self._correction = np.atleast_1d(value)
def set_density_correction(self):
"""
Add a correction coefficient depending on the density of the input
"""
kde = stats.gaussian_kde(self.xdata)
dens = kde(self.xdata)
dm = dens.max()
dens[dens < 1e-50] = dm
self._correction = dm / dens
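# A short usage sketch of the Nadaraya-Watson estimator above on synthetic
# 1-D data (not part of the original module):
def _demo_spatial_average():
    rng = np.random.RandomState(0)
    xdata = rng.rand(1, 50)                              # shape (d, n)
    ydata = np.sin(2 * np.pi * xdata[0]) + 0.1 * rng.randn(50)
    estimator = SpatialAverage(xdata, ydata)
    grid = np.linspace(0.0, 1.0, 5)[np.newaxis, :]       # points to smooth at
    return estimator(grid)                               # smoothed values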
class LocalLinearKernel1D(object):
r"""
Perform a local-linear regression using a gaussian kernel.
The local constant regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i)\right)^2
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: float or callable
:param cov: If a float, it should be the variance of the Gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning the
variance.
"""
def __init__(self, xdata, ydata, cov=scotts_bandwidth):
self.xdata = np.atleast_1d(xdata)
self.ydata = np.atleast_1d(ydata)
self.n = xdata.shape[0]
self._bw = None
self._covariance = None
self.covariance = cov
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = float(cov(self.xdata, self.ydata))
else:
_cov = float(cov)
self._covariance = _cov
self._bw = np.sqrt(_cov)
def evaluate(self, points, output=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
li2, output = local_linear.local_linear_1d(self._bw, self.xdata,
self.ydata, points, output)
self.li2 = li2
return output
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`LocalLinearKernel1D.evaluate`
"""
return self.evaluate(*args, **kwords)
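# The class above delegates to a compiled local_linear helper. As a point of
# reference, the estimate it is meant to compute can be written directly as a
# weighted least-squares fit of a line around each evaluation point; this is
# an independent NumPy sketch of the formula in the docstring, not the
# module's implementation:
def _local_linear_estimate(x0, xdata, ydata, bw):
    w = np.exp(-0.5 * ((xdata - x0) / bw) ** 2)          # Gaussian weights
    X = np.column_stack([np.ones_like(xdata), xdata - x0])
    XtW = X.T * w
    beta = solve(XtW.dot(X), XtW.dot(ydata))
    return beta[0]                                       # fitted value at x0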
class PolynomialDesignMatrix1D(object):
def __init__(self, dim):
self.dim = dim
powers = np.arange(0, dim + 1).reshape((1, dim + 1))
self.powers = powers
def __call__(self, dX, out=None):
return np.power(dX, self.powers, out) # / self.frac
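# For example (a sketch), a degree-3 design matrix evaluated at offsets 2 and
# 3 contains the powers 0..3 of each offset:
def _demo_design_matrix_1d():
    dm = PolynomialDesignMatrix1D(3)
    dX = np.array([[2.0], [3.0]])
    return dm(dX)   # [[1, 2, 4, 8], [1, 3, 9, 27]]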
class LocalPolynomialKernel1D(object):
r"""
Perform a local-polynomial regression using a user-provided kernel
(Gaussian by default).
The local constant regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i) - \ldots -
a_q \frac{(x-X_i)^q}{q!}\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial and :math:`h` is the bandwidth of
the method. It is also recommended to have :math:`\int_\mathbb{R} x^2K(x)dx
= 1`, (i.e. variance of the kernel is 1) or the effective bandwidth will be
scaled by the square-root of this integral (i.e. the standard deviation of
the kernel).
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:param int q: Order of the polynomial to fit. **Default:** 3
:type cov: float or callable
:param cov: If a float, it should be the variance of the Gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_bandwidth``
"""
def __init__(self, xdata, ydata, q=3, **kwords):
self.xdata = np.atleast_1d(xdata)
self.ydata = np.atleast_1d(ydata)
self.n = xdata.shape[0]
self.q = q
self._kernel = None
self._bw = None
self._covariance = None
self.designMatrix = None
for n in kwords:
setattr(self, n, kwords[n])
if self.kernel is None:
self.kernel = normal_kernel1d()
if self.covariance is None:
self.covariance = scotts_bandwidth
if self.designMatrix is None:
self.designMatrix = PolynomialDesignMatrix1D
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@bandwidth.setter # noqa
def bandwidth(self, bw):
if callable(bw):
_bw = float(bw(self.xdata, self.ydata))
else:
_bw = float(bw)
self._bw = _bw
self._covariance = _bw * _bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = float(cov(self.xdata, self.ydata))
else:
_cov = float(cov)
self._covariance = _cov
self._bw = np.sqrt(_cov)
@property
def cov(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self.covariance
@cov.setter # noqa
def cov(self, val):
self.covariance = val
@property
def kernel(self):
r"""
Kernel object. Should provide the following methods:
``kernel.pdf(xs)``
Density of the kernel, denoted :math:`K(x)`
By default, the kernel is an instance of
:py:class:`kernels.normal_kernel1d`
"""
return self._kernel
@kernel.setter # noqa
def kernel(self, val):
self._kernel = val
def evaluate(self, points, output=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put
in this array
"""
xdata = self.xdata[:, np.newaxis] # make it a column vector
ydata = self.ydata[:, np.newaxis] # make it a column vector
q = self.q
bw = self.bandwidth
kernel = self.kernel
designMatrix = self.designMatrix(q)
if output is None:
output = np.empty(points.shape, dtype=float)
for i, p in enumerate(points):
dX = (xdata - p)
Wx = kernel(dX / bw)
Xx = designMatrix(dX)
WxXx = Wx * Xx
XWX = np.dot(Xx.T, WxXx)
Lx = solve(XWX, WxXx.T)[0]
output[i] = np.dot(Lx, ydata)
return output
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`LocalLinearKernel1D.evaluate`
"""
return self.evaluate(*args, **kwords)
class PolynomialDesignMatrix(object):
"""
Class used to create a design matrix for polynomial regression
"""
def __init__(self, dim, deg):
self.dim = dim
self.deg = deg
self._designMatrixSize()
def _designMatrixSize(self):
"""
Compute the size of the design matrix for a n-D problem of order d.
Can also compute the Taylor factors (i.e. the factors that would be
applied for the Taylor decomposition)
:param int dim: Dimension of the problem
:param int deg: Degree of the fitting polynomial
:param bool factors: If true, the output includes the Taylor factors
:returns: The number of columns in the design matrix and, if required,
a ndarray with the taylor coefficients for each column of
the design matrix.
"""
dim = self.dim
deg = self.deg
init = 1
dims = [0] * (dim + 1)
cur = init
prev = 0
#if factors:
# fcts = [1]
fact = 1
for i in irange(deg):
diff = cur - prev
prev = cur
old_dims = list(dims)
fact *= (i + 1)
for j in irange(dim):
dp = diff - old_dims[j]
cur += dp
dims[j + 1] = dims[j] + dp
# if factors:
# fcts += [fact]*(cur-prev)
self.size = cur
#self.factors = np.array(fcts)
def __call__(self, x, out=None):
"""
Creates the design matrix for polynomial fitting using the points x.
:param ndarray x: Points to create the design matrix.
Shape must be (D,N) or (N,), where D is the dimension of
the problem, 1 if not there.
:param int deg: Degree of the fitting polynomial
:param ndarray factors: Scaling factor for the columns of the design
matrix. The shape should be (M,) or (M,1), where M is the number
of columns of the output. This value can be obtained using
the :py:func:`designMatrixSize` function.
:returns: The design matrix as a (M,N) matrix.
"""
dim, deg = self.dim, self.deg
#factors = self.factors
x = np.atleast_2d(x)
dim = x.shape[0]
if out is None:
out = np.empty((self.size, x.shape[1]), dtype=x.dtype)
dims = [0] * (dim + 1)
out[0, :] = 1
cur = 1
for i in irange(deg):
old_dims = list(dims)
prev = cur
for j in irange(x.shape[0]):
dims[j] = cur
for k in irange(old_dims[j], prev):
np.multiply(out[k], x[j], out[cur])
cur += 1
#if factors is not None:
# factors = np.asarray(factors)
# if len(factors.shape) == 1:
# factors = factors[:,np.newaxis]
# out /= factors
return out
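# A worked example of the size computation above (a sketch; it relies on the
# irange alias defined near the imports): a 2-D problem fitted with a
# degree-2 polynomial needs 6 columns, one per monomial 1, x, y, x^2, xy, y^2.
def _demo_design_matrix_size():
    dm = PolynomialDesignMatrix(dim=2, deg=2)
    return dm.size   # 6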
class LocalPolynomialKernel(object):
r"""
Perform a local-polynomial regression in N-D using a user-provided kernel
(Gaussian by default).
The local constant regression is the function that minimises,
for each position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - \mathcal{P}_q(X_i-x)\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial, :math:`\mathcal{P}_q(x)` is a
polynomial of order :math:`d` in :math:`x` and :math:`h` is the bandwidth
of the method.
The polynomial :math:`\mathcal{P}_q(x)` is of the form:
.. math::
\mathcal{F}_d(k) = \left\{ n \in \mathbb{N}^d \middle|
\sum_{i=1}^d n_i = k \right\}
\mathcal{P}_q(x_1,\ldots,x_d) = \sum_{k=1}^q
\sum_{n\in\mathcal{F}_d(k)} a_{k,n}
\prod_{i=1}^d x_i^{n_i}
For example we have:
.. math::
\mathcal{P}_2(x,y) = a_{110} x + a_{101} y + a_{220} x^2 +
a_{211} xy + a_{202} y^2
:param ndarray xdata: Explaining variables (at most 2D array).
The shape should be (N,D) with D the dimension of the problem
and N the number of points. For 1D array, the shape can be (N,),
in which case it will be converted to (N,1) array.
:param ndarray ydata: Explained variables (should be 1D array). The shape
must be (N,).
:param int q: Order of the polynomial to fit. **Default:** 3
:param callable kernel: Kernel to use for the weights. Call is
``kernel(points)`` and should return an array of values the same size
as ``points``. If ``None``, the kernel will be ``normal_kernel(D)``.
:type cov: float or callable
:param cov: If a float, it should be the variance of the Gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_bandwidth``
"""
def __init__(self, xdata, ydata, q=3, cov=scotts_bandwidth, kernel=None):
self.xdata = np.atleast_2d(xdata)
self.ydata = np.atleast_1d(ydata)
self.d, self.n = xdata.shape
self.q = q
if kernel is None:
kernel = normal_kernel(self.d)
self.kernel = kernel
self._bw = None
self._covariance = None
self.covariance = cov
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a DxD matrix.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = cov(self.xdata, self.ydata)
else:
_cov = np.atleast_2d(cov)
self._covariance = _cov
self._bw = np.real(sqrtm(_cov))
def evaluate(self, points, output=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray output: Pre-allocated array for the result
"""
xdata = self.xdata
ydata = self.ydata[:, np.newaxis] # make it a column vector
points = np.atleast_2d(points)
n = self.n
q = self.q
d = self.d
designMatrix = PolynomialDesignMatrix(d, q)
dm_size = designMatrix.size
Xx = np.empty((dm_size, n), dtype=xdata.dtype)
WxXx = np.empty(Xx.shape, dtype=xdata.dtype)
XWX = np.empty((dm_size, dm_size), dtype=xdata.dtype)
inv_bw = scipy.linalg.inv(self.bandwidth)
kernel = self.kernel
if output is None:
output = np.empty((points.shape[1],), dtype=float)
for i in irange(points.shape[1]):
dX = (xdata - points[:, i:i + 1])
Wx = kernel(np.dot(inv_bw, dX))
designMatrix(dX, out=Xx)
np.multiply(Wx, Xx, WxXx)
np.dot(Xx, WxXx.T, XWX)
Lx = solve(XWX, WxXx)[0]
output[i] = np.dot(Lx, ydata)
return output
def __call__(self, *args, **kwords):
"""
        This method is an alias for :py:meth:`LocalPolynomialKernel.evaluate`
"""
return self.evaluate(*args, **kwords)
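# Hedged usage sketch, not part of the original module: fit a second-order
# local polynomial regression to noisy 1-D data.  The (D, N) data layout and
# the ``scotts_bandwidth``/``normal_kernel`` defaults follow the class above;
# the sample size and the sine test function are illustrative only.
if __name__ == '__main__':
    xs = np.linspace(0.0, 1.0, 200)[np.newaxis, :]        # shape (1, N)
    ys = np.sin(2 * np.pi * xs[0]) + 0.1 * np.random.randn(200)
    est = LocalPolynomialKernel(xs, ys, q=2)
    grid = np.linspace(0.0, 1.0, 50)[np.newaxis, :]
    yhat = est(grid)                                       # alias for est.evaluate(grid)
    print(yhat[:5])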
| jtcb/regress-plan | astar/pyqt_fit/kernel_smoothing.py | Python | gpl-2.0 | 25,276 | ["Gaussian"] | b84bf6287a1ca106b882f67d6a4849d49e3f59b77158980be9a753fc3034aec5 |
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_networkprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of NetworkProfile Avi RESTful Object
description:
- This module is used to configure NetworkProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for the object.
name:
description:
- The name of the network profile.
required: true
profile:
description:
- Networkprofileunion settings for networkprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the network profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a network profile for a UDP application
avi_networkprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
name: System-UDP-Fast-Path
profile:
type: PROTOCOL_TYPE_UDP_FAST_PATH
udp_fast_path_profile:
per_pkt_loadbalance: false
session_idle_timeout: 10
snat: true
tenant_ref: admin
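# Additional hedged example (not from the original module documentation):
# update an existing profile in place using HTTP PATCH instead of PUT.
- name: Patch the session idle timeout of an existing network profile
  avi_networkprofile:
    controller: '{{ controller }}'
    username: '{{ username }}'
    password: '{{ password }}'
    avi_api_update_method: patch
    avi_api_patch_op: replace
    name: System-UDP-Fast-Path
    profile:
      type: PROTOCOL_TYPE_UDP_FAST_PATH
      udp_fast_path_profile:
        session_idle_timeout: 30
    tenant_ref: admin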
"""
RETURN = '''
obj:
description: NetworkProfile (api/networkprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
name=dict(type='str', required=True),
profile=dict(type='dict', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'networkprofile',
set([]))
if __name__ == '__main__':
main()
| noroutine/ansible | lib/ansible/modules/network/avi/avi_networkprofile.py | Python | gpl-3.0 | 3,894 | ["VisIt"] | baf20537e5001eac491476eceb88fbd875d53e4206ceafc1a9af4867efe71ac5 |
#!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
'''
Contains class for common routines for loading all CANON data
Mike McCann
MBARI 22 April 2012
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
# Insert Django App directory (parent of config) into python path
sys.path.insert(0, os.path.abspath(os.path.join(
os.path.dirname(__file__), "../../")))
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
# django >=1.7
try:
import django
django.setup()
except AttributeError:
pass
import DAPloaders
import requests
import urllib
from SampleLoaders import SeabirdLoader, SubSamplesLoader, ParentSamplesLoader
from lrauv_support import MissionLoader
from LRAUV.make_load_scripts import lrauvs
from bs4 import BeautifulSoup
from loaders import LoadScript, FileNotFound, SIGMAT, SPICE, SPICINESS, ALTITUDE
from stoqs.models import InstantPoint
from django.db.models import Max
from datetime import datetime, timedelta
from argparse import Namespace
from lxml import etree
from nettow import NetTow
from planktonpump import PlanktonPump
from thredds_crawler.crawl import Crawl
from urllib.request import urlopen, HTTPError
import logging
import matplotlib as mpl
mpl.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
import numpy as np
import re
import webob
def getStrideText(stride):
'''
Format stride into a string to be appended to the Activity name, if stride==1 return empty string
'''
if stride == 1:
return ''
else:
return ' (stride=%d)' % stride
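# For example (sketch, not in the original source):
#     getStrideText(1)   ->  ''
#     getStrideText(10)  ->  ' (stride=10)'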
class CANONLoader(LoadScript):
'''
Common routines for loading all CANON data
'''
brownish = {'dorado': '8c510a',
'tethys': 'bf812d',
'daphne': 'dfc27d',
'fulmar': 'f6e8c3',
'waveglider': 'c7eae5',
'nps_g29': '80cdc1',
'l_662': '35978f',
'l_662a': '38978f',
'm1': '35f78f',
'm2': '35f760',
'martin': '01665e',
'flyer': '11665e',
'espdrift': '21665e',
}
colors = {
'other': 'ffeda0',
'fulmar': 'fd8d3c',
'waveglider': 'fc4e2a',
'nps_g29': 'e31a1c',
'l_662': 'bd0026',
'l_662a': 'bd008f',
'nps29': '0b9131',
'nps34': '36d40f',
'nps34a': '36d40f',
'sg539': '5f9131',
'sg621': '507131',
'm1': 'bd2026',
'm2': 'bd4040',
'oa': '0f9cd4',
'oa2': '2d2426',
'hehape': 'bd2026',
'rusalka': 'bd4026',
'carmen': 'bd8026',
'martin': '800026',
'flyer': '801026',
'carson': '730a46',
'espdrift': '802026',
'espmack': '804026',
'espbruce': '808026',
'Stella201': '26f080',
'Stella202': 'F02696',
'Stella203': 'F08026',
'Stella204': 'AAAA26',
'stella203': 'F08026',
'stella204': 'AAAA26',
'Stella205': '2696f0',
'nemesis': 'FFF026',
'ucsc294': 'FFBA26',
'slocum_294': 'FFBA26',
'slocum_nemesis':'FFF026',
'ucsc260': 'FF8426',
'slocum_260': 'FF8426',
'wg_oa': '0f9cd4',
'wg_tex': '9626ff',
'wg_Tiny': '960000',
'wg_Sparky': 'FCDD00',
'wg_272': '98FF26',
'wg_Hansen': '9AD484',
'deimos': '33D4FF',
'saildrone': 'ff0c0c', # CSS button color on https://www.saildrone.com/
}
    # Distribute AUV colors along a yellow-to-brown palette; lrauvs is imported from LRAUV/make_load_scripts.py
YlOrBr = plt.cm.YlOrBr
# Have dummy1 take up the first blackish color
auv_names = ['dummy1', 'dorado'] + list(lrauvs)
for auv_name, c in zip(auv_names, YlOrBr(np.linspace(0, YlOrBr.N, len(auv_names), dtype=int))):
colors[auv_name] = rgb2hex(c)[1:]
# Colors for roms_* "platforms"
roms_platforms = ('roms_spray', 'roms_sg621')
num_roms = len(roms_platforms)
oranges = plt.cm.Oranges
for b, c in zip(roms_platforms, oranges(np.arange(0, oranges.N, oranges.N/num_roms))):
colors[b] = rgb2hex(c)[1:]
def loadDorado(self, startdate=None, enddate=None,
parameters=[ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume', 'rhodamine', 'par',
'bbp470', 'bbp676', 'fl676_uncorr',
'sepCountList', 'mepCountList', 'roll', 'pitch', 'yaw', ], stride=None,
file_patterns=('.*_decim.nc$'), build_attrs=False, plankton_proxies=False):
'''
        Support legacy use of loadDorado() and permit wider use by specifying startdate and enddate
'''
pname = 'dorado'
psl = ParentSamplesLoader('', '', dbAlias=self.dbAlias)
if build_attrs:
self.logger.info(f'Building load parameter attributes from crawling TDS')
self.build_dorado_attrs(pname, startdate, enddate, parameters, file_patterns)
else:
self.logger.info(f'Using load {pname} attributes set in load script')
parameters = getattr(self, f'{pname}_parms')
stride = stride or self.stride
if hasattr(self, 'dorado_base'):
urls = [os.path.join(self.dorado_base, f) for f in self.dorado_files]
else:
urls = self.dorado_urls
for url in urls:
dfile = url.split('/')[-1]
aname = dfile + getStrideText(stride)
try:
mps_loaded = DAPloaders.runDoradoLoader(url, self.campaignName, self.campaignDescription, aname,
pname, self.colors[pname], 'auv', 'AUV mission',
self.dorado_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain,
plotTimeSeriesDepth=0.0, plankton_proxies=plankton_proxies)
if mps_loaded:
psl.load_gulps(aname, dfile, self.dbAlias)
except DAPloaders.DuplicateData as e:
self.logger.warn(str(e))
self.logger.info(f"Skipping load of {url}")
self.addPlatformResources('https://stoqs.mbari.org/x3d/dorado/simpleDorado389.x3d', pname,
scalefactor=2)
def load_i2MAP(self, startdate=None, enddate=None,
parameters=[ 'seabird25p_temperature', 'seabird25p_salinity',
'navigation_roll', 'navigation_pitch', 'navigation_yaw' ],
stride=None,
file_patterns=('.*_1S.nc$'), build_attrs=False, plankton_proxies=False):
'''
With i2map_*_1S.nc files in the AUVCTD/surveys directories we can use the Dorado loading code
'''
pname = 'i2map'
psl = ParentSamplesLoader('', '', dbAlias=self.dbAlias)
if build_attrs:
self.logger.info(f'Building load parameter attributes for {pname} by crawling TDS')
self.build_dorado_attrs('dorado', startdate, enddate, parameters, file_patterns)
else:
self.logger.info(f'Using load {pname} attributes set in load script')
parameters = getattr(self, f'{pname}_parms')
stride = stride or self.stride
if hasattr(self, 'dorado_base'):
urls = [os.path.join(self.dorado_base, f) for f in self.dorado_files]
else:
urls = self.dorado_urls
for url in urls:
dfile = url.split('/')[-1]
aname = dfile + getStrideText(stride)
try:
mps_loaded = DAPloaders.runDoradoLoader(url, self.campaignName, self.campaignDescription, aname,
pname, self.colors['dorado'], 'auv', 'i2MAP mission',
self.dorado_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain,
plotTimeSeriesDepth=0.0, plankton_proxies=plankton_proxies)
if mps_loaded:
psl.load_gulps(aname, dfile, self.dbAlias)
except DAPloaders.DuplicateData as e:
self.logger.warn(str(e))
self.logger.info(f"Skipping load of {url}")
self.addPlatformResources('https://stoqs.mbari.org/x3d/dorado/simpleDorado389.x3d', pname,
scalefactor=2)
def _execute_load(self, pname, parameters, stride, critSimpleDepthTime):
psl = ParentSamplesLoader('', '', dbAlias=self.dbAlias)
lrauv_ml = MissionLoader('', '', dbAlias=self.dbAlias)
stride = stride or self.stride
files = getattr(self, f'{pname}_files')
base = getattr(self, f'{pname}_base')
for (aname, f) in zip([ a + getStrideText(stride) for a in files], files):
url = os.path.join(base, f)
# shorten the activity names
if 'slate.nc' in aname or 'shore' in aname:
aname = f"{pname}_{'_'.join(aname.split('/')[-2:])}"
else:
aname = f"{pname}_{aname.rsplit('/', 1)[-1]}"
if hasattr(self, f'{pname}_aux_coords'):
aux_coords = getattr(self, f'{pname}_aux_coords')
else:
                setattr(self, f'{pname}_aux_coords', None)
aux_coords = None
try:
# Early LRAUV data had time coord of 'Time', override with auxCoords setting from load script
DAPloaders.runLrauvLoader(url, self.campaignName, self.campaignDescription, aname,
pname, self.colors[pname], 'auv', 'LRAUV log',
parameters, self.dbAlias, stride,
grdTerrain=self.grdTerrain, command_line_args=self.args,
plotTimeSeriesDepth=0, auxCoords=aux_coords,
critSimpleDepthTime=critSimpleDepthTime)
psl.load_lrauv_samples(pname, aname, url, self.dbAlias)
lrauv_ml.load_missions(pname, aname, url, self.dbAlias)
except DAPloaders.NoValidData:
self.logger.info("No valid data in %s" % url)
except (webob.exc.HTTPError, UnboundLocalError) as e:
self.logger.warn(f"{e}")
except Exception as e:
if 'shore_i.nc' in url:
self.logger.warn(f"{e}")
self.logger.info(f"Being tolerant of shore_i.nc files and ignoring this warning")
else:
raise
self.addPlatformResources(f'https://stoqs.mbari.org/x3d/lrauv/lrauv_{pname}.x3d', pname,
scalefactor=2)
def loadLRAUV(self, pname, startdate=None, enddate=None,
parameters=['temperature', 'salinity', 'chlorophyll', 'nitrate', 'oxygen','bbp470', 'bbp650','PAR',
'yaw', 'pitch', 'roll', 'control_inputs_rudder_angle', 'control_inputs_mass_position',
'control_inputs_buoyancy_position', 'control_inputs_propeller_rotation_rate',
'health_platform_battery_charge', 'health_platform_average_voltage',
'health_platform_average_current','fix_latitude', 'fix_longitude',
'fix_residual_percent_distance_traveled_DeadReckonUsingSpeedCalculator',
'pose_longitude_DeadReckonUsingSpeedCalculator',
'pose_latitude_DeadReckonUsingSpeedCalculator',
'pose_depth_DeadReckonUsingSpeedCalculator',
'fix_residual_percent_distance_traveled_DeadReckonUsingMultipleVelocitySources',
'pose_longitude_DeadReckonUsingMultipleVelocitySources',
'pose_latitude_DeadReckonUsingMultipleVelocitySources',
'pose_depth_DeadReckonUsingMultipleVelocitySources',
'chromophoric_dissolved_organic_matter',
'BackscatteringCoeff700nm',
'VolumeScatCoeff117deg700nm',
'petroleum_hydrocarbons',
'mass_concentration_of_oxygen_in_sea_water', 'chl',
'bin_mean_sea_water_salinity', 'bin_median_sea_water_salinity',
'bin_mean_sea_water_temperature', 'bin_median_sea_water_temperature',
'bin_mean_mass_concentration_of_chlorophyll_in_sea_water', 'bin_median_mass_concentration_of_chlorophyll_in_sea_water',
'mass_concentration_of_chlorophyll_in_sea_water',
'bin_mean_mass_concentration_of_petroleum_hydrocarbons_in_sea_water',
'bin_median_mass_concentration_of_petroleum_hydrocarbons_in_sea_water',
'concentration_of_colored_dissolved_organic_matter_in_sea_water',
'bin_mean_concentration_of_colored_dissolved_organic_matter_in_sea_water',
'bin_median_concentration_of_colored_dissolved_organic_matter_in_sea_water',
],
stride=None, file_patterns=('.*2S_scieng.nc$'), build_attrs=True,
dlist_str=None, err_on_missing_file=False, critSimpleDepthTime=10,
sbd_logs=False, cell_logs=False):
if sbd_logs:
dir_string = 'sbdlogs'
file_patterns=('.*shore_i.nc$')
elif cell_logs:
dir_string = "TODO: Will be 'celllogs' when implemented"
else:
dir_string = 'missionlogs'
if build_attrs:
            self.logger.info(f'Building load parameter attributes by crawling LRAUV {dir_string} dirs for {pname}')
for mission_year in range(startdate.year, enddate.year + 1):
self.build_lrauv_attrs(mission_year, pname, startdate, enddate, parameters,
file_patterns, dlist_str, err_on_missing_file, sbd_logs, cell_logs)
self._execute_load(pname, parameters, stride, critSimpleDepthTime)
else:
self.logger.info(f'Using load {pname} attributes set in load script')
parameters = getattr(self, f'{pname}_parms')
self._execute_load(pname, parameters, stride, critSimpleDepthTime)
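    # Hedged usage sketch, not from the original load scripts: the ``cl``
    # instance and the campaign dates below are assumptions for illustration.
    #
    #     cl.loadLRAUV('daphne', startdate=datetime(2019, 5, 1),
    #                  enddate=datetime(2019, 6, 1), build_attrs=True)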
def loadMartin(self, stride=None):
'''
Martin specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.martin_files], self.martin_files):
url = self.martin_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
'Martin', self.colors['martin'], 'ship', 'cruise',
self.martin_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadJMuctd(self, stride=None):
'''
Martin specific underway load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.JMuctd_files], self.JMuctd_files):
url = self.JMuctd_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
'John_Martin_UCTD', self.colors['martin'], 'ship', 'cruise',
self.JMuctd_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadJMpctd(self, stride=None, platformName='John_Martin_PCTD', activitytypeName='John Martin Profile CTD Data'):
'''
Martin specific underway load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.JMpctd_files], self.JMpctd_files):
url = self.JMpctd_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['martin'], 'ship', activitytypeName,
self.JMpctd_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
# load all the bottles
sl = SeabirdLoader(aName[:5], platformName, dbAlias=self.dbAlias, campaignName=self.campaignName,
platformColor=self.colors['martin'], platformTypeName='ship', dodsBase=self.JMpctd_base)
if self.args.verbose:
sl.logger.setLevel(logging.DEBUG)
sl.tdsBase= self.tdsBase
sl.process_btl_files(self.JMpctd_files)
def loadFulmar(self, stride=None):
'''
Fulmar specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.fulmar_files], self.fulmar_files):
url = self.fulmar_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
'fulmar', self.colors['fulmar'], 'ship', 'cruise',
self.fulmar_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadNps_g29(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.nps_g29_files], self.nps_g29_files):
url = self.nps_g29_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'nps_g29', self.colors['nps_g29'], 'glider', 'Glider Mission',
self.nps_g29_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain,
command_line_args=self.args)
def loadL_662(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.l_662_files], self.l_662_files):
url = self.l_662_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'SPRAY_L66_Glider', self.colors['l_662'], 'glider', 'Glider Mission',
self.l_662_parms, self.dbAlias, stride, self.l_662_startDatetime,
self.l_662_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
def loadL_662a(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.l_662a_files], self.l_662a_files):
url = self.l_662a_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'SPRAY_L66a_Glider', self.colors['l_662a'], 'glider', 'Glider Mission',
self.l_662a_parms, self.dbAlias, stride, self.l_662a_startDatetime,
self.l_662a_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
def load_NPS29(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.nps29_files], self.nps29_files):
url = self.nps29_base + f
try:
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'NPS_Glider_29', self.colors['nps29'], 'glider', 'Glider Mission',
self.nps29_parms, self.dbAlias, stride, self.nps29_startDatetime,
self.nps29_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
except (DAPloaders.OpendapError, DAPloaders.NoValidData, webob.exc.HTTPError) as e:
self.logger.warn(str(e))
def load_SG539(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.sg539_files], self.sg539_files):
url = self.sg539_base + f
try:
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'SG_Glider_539', self.colors['sg539'], 'glider', 'Glider Mission',
self.sg539_parms, self.dbAlias, stride, self.sg539_startDatetime,
self.sg539_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
except (DAPloaders.OpendapError, DAPloaders.NoValidData) as e:
self.logger.warn(str(e))
def load_SG621(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.sg621_files], self.sg621_files):
url = self.sg621_base + f
try:
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'SG_Glider_621', self.colors['sg621'], 'glider', 'Glider Mission',
self.sg621_parms, self.dbAlias, stride, self.sg621_startDatetime,
self.sg621_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
except (DAPloaders.OpendapError, DAPloaders.NoValidData) as e:
self.logger.warn(str(e))
def load_NPS34(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.nps34_files], self.nps34_files):
url = self.nps34_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'NPS_Glider_34', self.colors['nps34'], 'glider', 'Glider Mission',
self.nps34_parms, self.dbAlias, stride, self.nps34_startDatetime,
self.nps34_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
def load_NPS34a(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.nps34a_files], self.nps34a_files):
url = self.nps34a_base + f
try:
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'NPS_Glider_34', self.colors['nps34a'], 'glider', 'Glider Mission',
self.nps34a_parms, self.dbAlias, stride, self.nps34a_startDatetime,
self.nps34a_endDatetime, grdTerrain=self.grdTerrain,
command_line_args=self.args)
except (webob.exc.HTTPError, DAPloaders.NoValidData) as e:
                self.logger.warn(str(e))
def load_glider_ctd(self, stride=None):
'''
        Glider load functions. Requires a priori knowledge of glider file names so we can extract platform and color name.
        To be used with gliders that follow the same naming convention, e.g. nemesis_ctd.nc, ucsc260_ctd.nc,
        and that load the exact same parameters, e.g. TEMP, PSAL or TEMP, PSAL, FLU2 or TEMP, FLU2, OPBS, etc.
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.glider_ctd_files], self.glider_ctd_files):
url = self.glider_ctd_base + f
gplatform=aName.split('_')[0].upper() + '_Glider'
gname=aName.split('_')[0].lower()
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
gplatform, self.colors[gname], 'glider', 'Glider Mission',
self.glider_ctd_parms, self.dbAlias, stride, self.glider_ctd_startDatetime,
self.glider_ctd_endDatetime, grdTerrain=self.grdTerrain)
def load_glider_met(self, stride=None):
'''
        Glider load functions. Requires a priori knowledge of glider file names so we can extract platform and color name.
        To be used with gliders that follow the same naming convention, e.g. nemesis_met.nc, ucsc260_met.nc,
        and that load the exact same parameters, e.g. meanu, meanv or windspeed, winddirection, etc.
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.glider_met_files], self.glider_met_files):
url = self.glider_met_base + f
gplatform=aName.split('_')[0].upper() + '_Glider'
gname=aName.split('_')[0].lower()
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
gplatform, self.colors[gname], 'glider', 'Glider Mission',
self.glider_met_parms, self.dbAlias, stride, self.glider_met_startDatetime,
self.glider_met_endDatetime, grdTerrain=self.grdTerrain)
def load_slocum_260(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.slocum_260_files], self.slocum_260_files):
url = self.slocum_260_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'Slocum_260', self.colors['slocum_260'], 'glider', 'Glider Mission',
self.slocum_260_parms, self.dbAlias, stride, self.slocum_260_startDatetime,
self.slocum_260_endDatetime, grdTerrain=self.grdTerrain)
def load_slocum_294(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.slocum_294_files], self.slocum_294_files):
url = self.slocum_294_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'Slocum_294', self.colors['slocum_294'], 'glider', 'Glider Mission',
self.slocum_294_parms, self.dbAlias, stride,
self.slocum_294_startDatetime, self.slocum_294_endDatetime,
grdTerrain=self.grdTerrain)
def load_slocum_nemesis(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.slocum_nemesis_files], self.slocum_nemesis_files):
url = self.slocum_nemesis_base + f
try:
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'Slocum_nemesis', self.colors['slocum_nemesis'], 'glider', 'Glider Mission',
self.slocum_nemesis_parms, self.dbAlias, stride,
self.slocum_nemesis_startDatetime, self.slocum_nemesis_endDatetime,
grdTerrain=self.grdTerrain, plotTimeSeriesDepth=0)
except DAPloaders.NoValidData as e:
self.logger.warn(f'No valid data in {url}')
except DAPloaders.DuplicateData as e:
self.logger.warn(f'Data from {url} already in database, skipping')
def load_wg_oa(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_oa_files], self.wg_oa_files):
url = self.wg_oa_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_OA_Glider', self.colors['wg_oa'], 'waveglider', 'Glider Mission',
self.wg_oa_parms, self.dbAlias, stride, self.wg_oa_startDatetime,
self.wg_oa_endDatetime, grdTerrain=self.grdTerrain)
def load_wg_oa_pco2(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_oa_pco2_files], self.wg_oa_pco2_files):
url = self.wg_oa_pco2_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_OA_Glider', self.colors['wg_oa'], 'waveglider', 'Glider Mission',
self.wg_oa_pco2_parms, self.dbAlias, stride,
self.wg_oa_pco2_startDatetime, self.wg_oa_pco2_endDatetime,
grdTerrain=self.grdTerrain)
def load_wg_oa_ctd(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_oa_ctd_files], self.wg_oa_ctd_files):
url = self.wg_oa_ctd_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_OA_Glider', self.colors['wg_oa'], 'waveglider', 'Glider Mission',
self.wg_oa_ctd_parms, self.dbAlias, stride, self.wg_oa_ctd_startDatetime,
self.wg_oa_ctd_endDatetime, grdTerrain=self.grdTerrain)
def load_wg_tex_ctd(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_tex_ctd_files], self.wg_tex_ctd_files):
url = self.wg_tex_ctd_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_Tex_Glider', self.colors['wg_tex'], 'waveglider', 'Glider Mission',
self.wg_tex_ctd_parms, self.dbAlias, stride, self.wg_tex_ctd_startDatetime,
self.wg_tex_ctd_endDatetime, grdTerrain=self.grdTerrain)
def load_wg_oa_met(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_oa_met_files], self.wg_oa_met_files):
url = self.wg_oa_met_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_OA_Glider', self.colors['wg_oa'], 'waveglider', 'Glider Mission',
self.wg_oa_met_parms, self.dbAlias, stride, self.wg_oa_met_startDatetime,
self.wg_oa_met_endDatetime, grdTerrain=self.grdTerrain)
def load_wg_tex_met(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_tex_met_files], self.wg_tex_met_files):
url = self.wg_tex_met_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_Tex_Glider', self.colors['wg_tex'], 'waveglider', 'Glider Mission',
self.wg_tex_met_parms, self.dbAlias, stride, self.wg_tex_met_startDatetime,
self.wg_tex_met_endDatetime, grdTerrain=self.grdTerrain)
def load_wg_tex(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_tex_files], self.wg_tex_files):
url = self.wg_tex_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_Tex_Glider', self.colors['wg_tex'], 'waveglider', 'Glider Mission',
self.wg_tex_parms, self.dbAlias, stride, self.wg_tex_startDatetime,
self.wg_tex_endDatetime, grdTerrain=self.grdTerrain)
def load_wg_Tiny(self, stride=None):
'''
Glider specific load functions, sets plotTimeSeriesDepth=0 to get Parameter tab in UI
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_Tiny_files], self.wg_Tiny_files):
url = self.wg_Tiny_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_Tiny_Glider', self.colors['wg_Tiny'], 'waveglider', 'Glider Mission',
self.wg_Tiny_parms, self.dbAlias, stride, self.wg_Tiny_startDatetime,
self.wg_Tiny_endDatetime, grdTerrain=self.grdTerrain, plotTimeSeriesDepth=0,
command_line_args=self.args)
def load_wg_Sparky(self, stride=None):
'''
Glider specific load functions, sets plotTimeSeriesDepth=0 to get Parameter tab in UI
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_Sparky_files], self.wg_Sparky_files):
url = self.wg_Sparky_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_Sparky_Glider', self.colors['wg_Sparky'], 'waveglider', 'Glider Mission',
self.wg_Sparky_parms, self.dbAlias, stride, self.wg_Sparky_startDatetime,
self.wg_Sparky_endDatetime, grdTerrain=self.grdTerrain, plotTimeSeriesDepth=0)
def load_wg_272(self, stride=None):
'''
Glider specific load functions, sets plotTimeSeriesDepth=0 to get Parameter tab in UI
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_272_files], self.wg_272_files):
url = self.wg_272_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_272_Glider', self.colors['wg_272'], 'waveglider', 'Glider Mission',
self.wg_272_parms, self.dbAlias, stride, self.wg_272_startDatetime,
self.wg_272_endDatetime, grdTerrain=self.grdTerrain, plotTimeSeriesDepth=0)
def load_wg_Hansen(self, stride=None):
'''
Glider specific load functions, sets plotTimeSeriesDepth=0 to get Parameter tab in UI
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wg_Hansen_files], self.wg_Hansen_files):
url = self.wg_Hansen_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'wg_Hansen_Glider', self.colors['wg_Hansen'], 'waveglider', 'Glider Mission',
self.wg_Hansen_parms, self.dbAlias, stride, self.wg_Hansen_startDatetime,
self.wg_Hansen_endDatetime, grdTerrain=self.grdTerrain, plotTimeSeriesDepth=0,
command_line_args=self.args)
def load_oa1(self, stride=None):
'''
Mooring OA1 specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.oa1_files], self.oa1_files):
url = os.path.join(self.oa1_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.oa1_parms, self.dbAlias, stride, self.oa1_startDatetime, self.oa1_endDatetime,
command_line_args=self.args)
def load_oa2(self, stride=None):
'''
Mooring OA2 specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.oa2_files], self.oa2_files):
url = os.path.join(self.oa2_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.oa2_parms, self.dbAlias, stride, self.oa2_startDatetime, self.oa2_endDatetime,
command_line_args=self.args)
def loadOA1pco2(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA1pco2_files], self.OA1pco2_files):
url = os.path.join(self.OA1pco2_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.OA1pco2_parms, self.dbAlias, stride, self.OA1pco2_startDatetime, self.OA1pco2_endDatetime)
def loadOA1fl(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA1fl_files], self.OA1fl_files):
url = os.path.join(self.OA1fl_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.OA1fl_parms, self.dbAlias, stride, self.OA1fl_startDatetime, self.OA1fl_endDatetime)
def loadOA1o2(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA1o2_files], self.OA1o2_files):
url = os.path.join(self.OA1o2_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.OA1o2_parms, self.dbAlias, stride, self.OA1o2_startDatetime, self.OA1o2_endDatetime)
def loadOA1ctd(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA1ctd_files], self.OA1ctd_files):
url = os.path.join(self.OA1ctd_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.OA1ctd_parms, self.dbAlias, stride, self.OA1ctd_startDatetime, self.OA1ctd_endDatetime)
def loadOA1pH(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA1pH_files], self.OA1pH_files):
url = os.path.join(self.OA1pH_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.OA1pH_parms, self.dbAlias, stride, self.OA1pH_startDatetime, self.OA1pH_endDatetime)
def loadOA1met(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA1met_files], self.OA1met_files):
url = os.path.join(self.OA1met_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA1_Mooring', self.colors['oa'], 'mooring', 'Mooring Deployment',
self.OA1met_parms, self.dbAlias, stride, self.OA1met_startDatetime, self.OA1met_endDatetime)
def loadOA2pco2(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA2pco2_files], self.OA2pco2_files):
url = os.path.join(self.OA2pco2_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.OA2pco2_parms, self.dbAlias, stride, self.OA2pco2_startDatetime, self.OA2pco2_endDatetime)
def loadOA2fl(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA2fl_files], self.OA2fl_files):
url = os.path.join(self.OA2fl_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.OA2fl_parms, self.dbAlias, stride, self.OA2fl_startDatetime, self.OA2fl_endDatetime)
def loadOA2o2(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA2o2_files], self.OA2o2_files):
url = os.path.join(self.OA2o2_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.OA2o2_parms, self.dbAlias, stride, self.OA2o2_startDatetime, self.OA2o2_endDatetime)
def loadOA2ctd(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA2ctd_files], self.OA2ctd_files):
url = os.path.join(self.OA2ctd_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.OA2ctd_parms, self.dbAlias, stride, self.OA2ctd_startDatetime, self.OA2ctd_endDatetime)
def loadOA2pH(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA2pH_files], self.OA2pH_files):
url = os.path.join(self.OA2pH_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.OA2pH_parms, self.dbAlias, stride, self.OA2pH_startDatetime, self.OA2pH_endDatetime)
def loadOA2met(self, stride=None):
'''
Mooring OA specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.OA2met_files], self.OA2met_files):
url = os.path.join(self.OA2met_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'OA2_Mooring', self.colors['oa2'], 'mooring', 'Mooring Deployment',
self.OA2met_parms, self.dbAlias, stride, self.OA2met_startDatetime, self.OA2met_endDatetime)
def loadBruceMoor(self, stride=None):
'''
Mooring Bruce specific load functions
'''
stride = stride or self.stride
pName = 'ESP_Bruce_Mooring'
for (aName, f) in zip([ a + getStrideText(stride) for a in self.bruce_moor_files], self.bruce_moor_files):
url = os.path.join(self.bruce_moor_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
pName, self.colors['espbruce'], 'mooring',
'Mooring Deployment', self.bruce_moor_parms, self.dbAlias, stride,
self.bruce_moor_startDatetime, self.bruce_moor_endDatetime)
# Let browser code use {{STATIC_URL}} to fill in the /stoqs/static path
self.addPlatformResources('x3d/ESPMooring/esp_base_scene.x3d', pName)
def loadMackMoor(self, stride=None):
'''
Mooring Mack specific load functions
'''
stride = stride or self.stride
pName = 'ESP_Mack_Mooring'
for (aName, f) in zip([ a + getStrideText(stride) for a in self.mack_moor_files], self.mack_moor_files):
url = os.path.join(self.mack_moor_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
pName, self.colors['espmack'], 'mooring', 'Mooring Deployment',
self.mack_moor_parms, self.dbAlias, stride,
self.mack_moor_startDatetime, self.mack_moor_endDatetime)
# Let browser code use {{STATIC_URL}} to fill in the /stoqs/static path
self.addPlatformResources('x3d/ESPMooring/esp_base_scene.x3d', pName)
def loadM1(self, stride=None):
'''
Mooring M1 specific load functions
'''
platformName = 'M1_Mooring'
stride = stride or self.stride
start_datetime = getattr(self, 'm1_startDatetime', None)
end_datetime = getattr(self, 'm1_endDatetime', None)
for (aName, f) in zip([ a + getStrideText(stride) for a in self.m1_files], self.m1_files):
url = os.path.join(self.m1_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['m1'], 'mooring', 'Mooring Deployment',
self.m1_parms, self.dbAlias, stride, start_datetime,
end_datetime, command_line_args=self.args)
        # For timeseriesProfile data we need to pass the nominaldepth of the platform
# so that the model is put at the correct depth in the Spatial -> 3D view.
try:
self.addPlatformResources('https://stoqs.mbari.org/x3d/m1_assembly/m1_assembly_scene.x3d',
platformName, nominaldepth=self.m1_nominaldepth)
except AttributeError:
self.addPlatformResources('https://stoqs.mbari.org/x3d/m1_assembly/m1_assembly_scene.x3d',
platformName)
def loadM2(self, stride=None):
'''
Mooring M2 specific load functions
'''
platformName = 'M2_Mooring'
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.m2_files], self.m2_files):
url = os.path.join(self.m2_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['m2'], 'mooring', 'Mooring Deployment',
self.m2_parms, self.dbAlias, stride, self.m2_startDatetime,
self.m2_endDatetime, command_line_args=self.args)
        # For timeseriesProfile data we need to pass the nominaldepth of the platform
# so that the model is put at the correct depth in the Spatial -> 3D view.
try:
self.addPlatformResources('https://stoqs.mbari.org/x3d/m1_assembly/m1_assembly_scene.x3d',
platformName, nominaldepth=self.m2_nominaldepth)
except AttributeError:
self.addPlatformResources('https://stoqs.mbari.org/x3d/m1_assembly/m1_assembly_scene.x3d',
platformName)
def loadM1ts(self, stride=None):
'''
Mooring M1ts specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.m1ts_files], self.m1ts_files):
url = self.m1ts_base + f
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'M1_Mooring', self.colors['m1'], 'mooring', 'Mooring Deployment',
self.m1ts_parms, self.dbAlias, stride,
self.m1ts_startDatetime, self.m1ts_endDatetime)
def loadM1met(self, stride=None):
'''
Mooring M1met specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.m1met_files], self.m1met_files):
url = self.m1met_base + f
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
'M1_Mooring', self.colors['m1'], 'mooring', 'Mooring Deployment',
self.m1met_parms, self.dbAlias, stride,
self.m1met_startDatetime, self.m1met_endDatetime)
def loadDEIMOS(self, startdate=None, enddate=None, stride=None):
'''
Mooring DEIMOS EK60 specific load functions
'''
platformName = 'DEIMOS'
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.deimos_files], self.deimos_files):
url = os.path.join(self.deimos_base, f)
DAPloaders.runMooringLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['deimos'], 'mooring', 'Mooring Deployment',
self.deimos_parms, self.dbAlias, stride, startdate,
enddate, command_line_args=self.args)
def loadHeHaPe(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.hehape_files], self.hehape_files):
url = self.hehape_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'hehape', self.colors['hehape'], 'glider', 'Glider Mission',
self.hehape_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadRusalka(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.rusalka_files], self.rusalka_files):
url = self.rusalka_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'rusalka', self.colors['rusalka'], 'glider', 'Glider Mission',
self.rusalka_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadCarmen(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.carmen_files], self.carmen_files):
url = self.carmen_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'carmen', self.colors['carmen'], 'glider', 'Glider Mission',
self.carmen_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadWaveglider(self, stride=None):
'''
Glider specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.waveglider_files], self.waveglider_files):
url = self.waveglider_base + f
DAPloaders.runGliderLoader(url, self.campaignName, self.campaignDescription, aName,
'waveglider', self.colors['waveglider'], 'glider', 'Glider Mission',
self.waveglider_parms, self.dbAlias, stride, self.waveglider_startDatetime,
self.waveglider_endDatetime, grdTerrain=self.grdTerrain)
def loadStella(self, stride=None):
'''
Stella drift specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.stella_files], self.stella_files):
url = self.stella_base + f
dname='Stella' + aName[6:9]
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
dname, self.colors[dname], 'drifter', 'Stella drifter Mission',
self.stella_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadESPdrift(self, stride=None):
'''
ESPdrift specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.espdrift_files], self.espdrift_files):
url = self.espdrift_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
'espdrift', self.colors['espdrift'], 'drifter', 'ESP drift Mission',
self.espdrift_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadESPmack(self, stride=None):
'''
ESPmack specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.espmack_files], self.espmack_files):
url = self.espmack_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
'ESP_Mack_Drifter', self.colors['espmack'], 'espmack', 'ESP mack Mission',
self.espmack_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadESPbruce(self, stride=None):
'''
ESPbruce specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.espbruce_files], self.espbruce_files):
url = self.espbruce_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
'espbruce', self.colors['espbruce'], 'espbruce', 'ESP bruce Mission',
self.espbruce_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadWFuctd(self, stride=None, platformName='WesternFlyer_UCTD', activitytypeName='Western Flyer Underway CTD Data'):
'''
WF uctd specific load functions. Override defaults for @platformName and activitytypeName if it's desired
to consider uctd and pctd coming from the same platform. You may want to do this to use the data
visualization capabilities in STOQS.
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.wfuctd_files], self.wfuctd_files):
url = self.wfuctd_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['flyer'], 'ship', activitytypeName,
self.wfuctd_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
self.addPlatformResources('https://stoqs.mbari.org/x3d/flyer/flyer.x3d', platformName)
def loadWFpctd(self, stride=None, platformName='WesternFlyer_PCTD', activitytypeName='Western Flyer Profile CTD Data'):
'''
WF pctd specific load functions. Override defaults for @platformName and activitytypeName if it's desired
to consider uctd and pctd coming from the same platform. You may want to do this to use the data
visualization capabilities in STOQS.
'''
stride = stride or self.stride
for (aName, f) in zip([ a.split('.')[0] + getStrideText(stride) for a in self.wfpctd_files], self.wfpctd_files):
url = self.wfpctd_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['flyer'], 'ship', activitytypeName,
self.wfpctd_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
# Now load all the bottles
sl = SeabirdLoader('activity name', platformName, dbAlias=self.dbAlias, campaignName=self.campaignName,
platformColor=self.colors['flyer'], dodsBase=self.wfpctd_base)
if self.args.verbose:
sl.logger.setLevel(logging.DEBUG)
sl.tdsBase= self.tdsBase
sl.process_btl_files(self.wfpctd_files)
def loadRCuctd(self, stride=None, platformName='RachelCarson_UCTD', activitytypeName='Rachel Carson Underway CTD Data'):
'''
RC uctd specific load functions
'''
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in self.rcuctd_files], self.rcuctd_files):
url = self.rcuctd_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['carson'], 'ship', activitytypeName,
self.rcuctd_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
def loadRCpctd(self, stride=None, platformName='RachelCarson_PCTD', activitytypeName='Rachel Carson Profile CTD Data'):
'''
RC pctd specific load functions
'''
stride = stride or self.stride
#platformName = 'rc_pctd'
for (aName, f) in zip([ a.split('.')[0] + getStrideText(stride) for a in self.rcpctd_files], self.rcpctd_files):
url = self.rcpctd_base + f
DAPloaders.runTrajectoryLoader(url, self.campaignName, self.campaignDescription, aName,
platformName, self.colors['carson'], 'ship', activitytypeName,
self.rcpctd_parms, self.dbAlias, stride, grdTerrain=self.grdTerrain)
# load all the bottles
sl = SeabirdLoader(aName[:5], platformName, dbAlias=self.dbAlias, campaignName=self.campaignName,
platformColor=self.colors['carson'], platformTypeName='ship', dodsBase=self.rcpctd_base)
if self.args.verbose:
sl.logger.setLevel(logging.DEBUG)
sl.tdsBase= self.tdsBase
sl.process_btl_files(self.rcpctd_files)
# Dynamic method creation for any number of 'roms' platforms
@staticmethod
def make_load_roms_method(name):
def _generic_load_roms(self, stride=None):
# Generalize attribute value lookup
plt_name = '_'.join(name.split('_')[1:])
base = getattr(self, plt_name + '_base')
files = getattr(self, plt_name + '_files')
parms = getattr(self, plt_name + '_parms')
start_datetime = getattr(self, plt_name + '_start_datetime')
end_datetime = getattr(self, plt_name + '_end_datetime')
stride = stride or self.stride
for (aName, f) in zip([ a + getStrideText(stride) for a in files], files):
url = os.path.join(base, f)
try:
loader = DAPloaders.Trajectory_Loader(url = url,
campaignName = self.campaignName,
campaignDescription = self.campaignDescription,
dbAlias = self.dbAlias,
activityName = aName,
activitytypeName = 'Simulated Glider/AUV Deployment',
platformName = plt_name,
platformColor = self.colors[plt_name],
platformTypeName = 'simulated_trajectory',
stride = stride,
startDatetime = start_datetime,
endDatetime = end_datetime,
dataStartDatetime = None)
except DAPloaders.OpendapError:
self.logger.info("Cannot open %s" % url)
else:
loader.include_names = parms
loader.auxCoords = {}
loader.process_data()
return _generic_load_roms
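    # Hedged sketch, not from the original file: the factory above is intended
    # to be bound onto the class for each ROMS "platform", roughly like:
    #
    #     for plt in ('roms_spray', 'roms_sg621'):
    #         setattr(CANONLoader, 'load_' + plt,
    #                 CANONLoader.make_load_roms_method('load_' + plt))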
def find_saildrone_urls(self, base, search_str, startdate, enddate):
'''Use Thredds Crawler to return a list of DAP urls. Initially written for LRAUV data, for
which we don't initially know the urls.
'''
urls = []
catalog_url = os.path.join(base, 'catalog.xml')
c = Crawl(catalog_url, select=[search_str])
d = [s.get("url") for d in c.datasets for s in d.services if s.get("service").lower() == "opendap"]
for url in d:
file_dt = datetime.strptime(url.split('-')[-4], '%Y%m%dT%H%M%S')
if startdate < file_dt and file_dt < enddate:
urls.append(url)
self.logger.debug(f'* {url}')
else:
self.logger.debug(f'{url}')
if not urls:
            raise FileNotFound(f'No urls matching "{search_str}" found in {catalog_url}')
return urls
def build_saildrone_attrs(self, platform_name, startdate, enddate, parameters, file_patterns):
'''Set loader attributes for saildrone data
'''
setattr(self, platform_name + '_parms' , parameters)
urls = []
for year in range(startdate.year, enddate.year+1):
base = f'http://odss.mbari.org/thredds/catalog/Other/routine/Platforms/Saildrone/1046/netcdf/'
dods_base = f'http://odss.mbari.org/thredds/dodsC/Other/routine/Platforms/Saildrone/1046/netcdf/'
try:
urls += self.find_saildrone_urls(base, file_patterns, startdate, enddate)
files = []
for url in sorted(urls):
files.append(url.split('/')[-1])
except FileNotFound as e:
self.logger.debug(f'{e}')
        # Send signal that urls span years by not setting saildrone_base, so that saildrone_urls is used instead
if startdate.year == enddate.year:
setattr(self, platform_name + '_base', dods_base)
else:
setattr(self, platform_name + '_urls', sorted(urls))
setattr(self, platform_name + '_files', files)
setattr(self, platform_name + '_startDatetime', startdate)
setattr(self, platform_name + '_endDatetime', enddate)
def loadSaildrone(self, startdate=None, enddate=None, parameters=['SOG_FILTERED_MEAN',
'COG_FILTERED_MEAN', 'HDG_FILTERED_MEAN', 'ROLL_FILTERED_MEAN',
'PITCH_FILTERED_MEAN', 'UWND_MEAN', 'VWND_MEAN', 'WWND_MEAN',
'TEMP_AIR_MEAN', 'RH_MEAN', 'BARO_PRES_MEAN', 'PAR_AIR_MEAN',
'WAVE_DOMINANT_PERIOD', 'WAVE_SIGNIFICANT_HEIGHT', 'TEMP_SBE37_MEAN',
'SAL_SBE37_MEAN', 'O2_CONC_SBE37_MEAN', 'O2_SAT_SBE37_MEAN',
'CHLOR_WETLABS_MEAN',],
stride=None, file_patterns=('.*montereybay_mbari_2019_001-sd1046.*nc$'), build_attrs=False):
'''First deployed for CANON May 2019 for DEIMOS campaigns
'''
platform_name = 'saildrone'
activity_type_name = 'Saildrone Deployment'
stride = stride or self.stride
# Save these here in case we want to add them
rbr_parms = ['TEMP_CTD_RBR_MEAN', 'SAL_RBR_MEAN', 'O2_CONC_RBR_MEAN', 'O2_SAT_RBR_MEAN',
'CHLOR_RBR_MEAN']
if build_attrs:
self.logger.info(f'Building load parameter attributes from crawling TDS')
self.build_saildrone_attrs(platform_name, startdate, enddate, parameters, file_patterns)
else:
            self.logger.info(f'Using load {platform_name} attributes set in load script')
parameters = getattr(self, f'{platform_name}_parms')
for (aName, f) in zip([ a.split('.')[0] + getStrideText(stride) for a in self.saildrone_files], self.saildrone_files):
url = self.saildrone_base + f
try:
loader = DAPloaders.Trajectory_Loader(url = url,
campaignName = self.campaignName,
campaignDescription = self.campaignDescription,
dbAlias = self.dbAlias,
activityName = aName,
activitytypeName = activity_type_name,
platformName = platform_name,
platformColor = self.colors[platform_name],
platformTypeName = 'glider',
stride = stride,
startDatetime = startdate,
endDatetime = enddate,
dataStartDatetime = None)
except webob.exc.HTTPError as e:
self.logger.warn(f"Skipping over {url}")
loader.include_names = parameters
loader.auxCoords = {}
for parm in parameters:
loader.auxCoords[parm] = {'latitude': 'latitude', 'longitude': 'longitude', 'time': 'time', 'depth': 0.0}
loader.plotTimeSeriesDepth = dict.fromkeys(parameters + [ALTITUDE, SIGMAT, SPICE], 0.0)
try:
loader.process_data()
except (DAPloaders.OpendapError, IndexError) as e:
self.logger.warn(f"Skipping over {url} due to Execption: {e}")
def loadSubSamples(self):
'''
        Load water sample analysis (Sampled) data values from spreadsheets (.csv files). Expects subsample_csv_base and
        subsample_csv_files to be set by the load script.
'''
ssl = SubSamplesLoader('', '', dbAlias=self.dbAlias)
if self.args.verbose:
ssl.logger.setLevel(logging.DEBUG)
for csvFile in [ os.path.join(self.subsample_csv_base, f) for f in self.subsample_csv_files ]:
ssl.logger.info("Processing subsamples from file %s", csvFile)
try:
ssl.process_subsample_file(csvFile, False)
except IOError as e:
ssl.logger.error(e)
def loadParentNetTowSamples(self):
'''
Load Parent NetTow Samples. This must be done after CTD cast data are loaded and before subsamples are loaded.
'''
nt = NetTow()
ns = Namespace()
# Produce parent samples file, e.g.:
# cd loaders/MolecularEcology/SIMZOct2013
# ../../nettow.py --database stoqs_simz_oct2013 --subsampleFile 2013_SIMZ_TowNets_STOQS.csv \
# --csvFile 2013_SIMZ_TowNet_ParentSamples.csv -v
ns.database = self.dbAlias
ns.loadFile = os.path.join(self.subsample_csv_base, self.parent_nettow_file)
ns.purpose = ''
ns.laboratory = ''
ns.researcher = ''
nt.args = ns
try:
nt.load_samples()
except IOError as e:
self.logger.error(e)
def loadParentPlanktonPumpSamples(self, duration=10):
'''
Load Parent PlanktonPump Samples. This must be done after CTD cast data are loaded and before subsamples are loaded.
duration is pumping time in minutes.
'''
pp = PlanktonPump()
ns = Namespace()
# Produce parent samples file, e.g.:
# cd loaders/MolecularEcology/SIMZOct2013
# ../../planktonpump.py --database stoqs_simz_oct2013 --subsampleFile SIMZ_2013_PPump_STOQS_tidy_v2.csv \
# --csvFile 2013_SIMZ_PlanktonPump_ParentSamples.csv -v
ns.database = self.dbAlias
ns.load_file = os.path.join(self.subsample_csv_base, self.parent_planktonpump_file)
ns.duration = duration
ns.purpose = ''
ns.laboratory = ''
ns.researcher = ''
pp.args = ns
try:
pp.load_samples()
except IOError as e:
self.logger.error(str(e))
def find_lrauv_urls(self, base, search_str, startdate, enddate, date_intersect=True):
'''Use Thredds Crawler to return a list of DAP urls. Initially written for LRAUV data, for
which we don't initially know the urls.
'''
INV_NS = "http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0"
url = os.path.join(base, 'catalog.xml')
self.logger.info(f"Crawling: {url}")
        skips = Crawl.SKIPS + [".*Courier*", ".*Express*", ".*Normal*", ".*Priority*", ".*.cfg$"]
u = urllib.parse.urlsplit(url)
name, ext = os.path.splitext(u.path)
if ext == ".html":
u = urllib.parse.urlsplit(url.replace(".html", ".xml"))
url = u.geturl()
urls = []
# Get an etree object
r = requests.get(url)
if r.status_code == 200:
tree = etree.XML(r.text.encode('utf-8'))
else:
self.logger.debug(f"status_code != 200, Skipping {url}")
return urls
# Crawl the catalogRefs:
for ref in tree.findall('.//{%s}catalogRef' % INV_NS):
# get the mission directory name and extract the start and ending dates
mission_dir_name = ref.attrib['{http://www.w3.org/1999/xlink}title']
if '_' in mission_dir_name:
dts = mission_dir_name.split('_')
dir_start = datetime.strptime(dts[0], '%Y%m%d')
dir_end = datetime.strptime(dts[1], '%Y%m%d')
if date_intersect:
# Grab the valid urls for all log files in a .dlist directory that intersect the Campaign dates
if ( (startdate <= dir_start and dir_start <= enddate) or (startdate <= dir_end and dir_end <= enddate) ):
self.logger.debug(f'{mission_dir_name}: Collecting all log files matching {search_str} in this directory')
catalog = ref.attrib['{http://www.w3.org/1999/xlink}href']
c = Crawl(os.path.join(base, catalog), select=[search_str], skip=skips)
d = [s.get("url") for d in c.datasets for s in d.services if s.get("service").lower() == "opendap"]
for url in d:
self.logger.debug(f'{url}')
urls.append(url)
else:
                    # Grab the valid urls for .dlist directories encompassing the startdate and enddate for the Campaign
self.logger.debug(f'{mission_dir_name}: Looking for {search_str} files between {startdate} and {enddate}')
if dir_start >= startdate and dir_end <= enddate:
catalog = ref.attrib['{http://www.w3.org/1999/xlink}href']
c = Crawl(os.path.join(base, catalog), select=[search_str], skip=skips)
d = [s.get("url") for d in c.datasets for s in d.services if s.get("service").lower() == "opendap"]
for url in d:
self.logger.debug(f'{url}')
urls.append(url)
else:
                # Likely a realtime log - add to urls only if the url date is between startdate and enddate
catalog = ref.attrib['{http://www.w3.org/1999/xlink}href']
c = Crawl(os.path.join(base, catalog), select=[search_str], skip=skips)
d = [s.get("url") for d in c.datasets for s in d.services if s.get("service").lower() == "opendap"]
for url in d:
try:
dir_start = datetime.strptime(url.split('/')[11], '%Y%m%dT%H%M%S')
except ValueError as e:
self.logger.warn(f"{e} from url = {url}")
self.logger.warn(f"Likely due to a log file found in the parent dir. Ignoring.")
if (startdate <= dir_start and dir_start <= enddate):
self.logger.debug(f'{url}')
urls.append(url)
if not urls:
raise FileNotFound('No urls matching "{}" found in {}'.format(search_str, os.path.join(base, 'catalog.html')))
return urls
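    def _example_mission_dir_dates(self, mission_dir_name='20190521_20190524'):
        '''Hedged illustration (not part of the original loader): .dlist mission
        directory names are assumed to encode their start and end dates as
        YYYYMMDD_YYYYMMDD, which is how find_lrauv_urls() above decides whether a
        directory intersects the Campaign dates. The sample name is hypothetical.
        '''
        dts = mission_dir_name.split('_')
        dir_start = datetime.strptime(dts[0], '%Y%m%d')
        dir_end = datetime.strptime(dts[1], '%Y%m%d')
        return dir_start, dir_end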
def _get_mission_url(self, nc_str, mission_dir, mission_dods):
soup = BeautifulSoup(urlopen(mission_dir).read(), 'lxml')
for link in soup.find_all('a'):
if nc_str in link.get('href'):
mission_url = os.path.join(mission_dods, link.get('href'))
self.logger.debug(f"Found mission {mission_url}")
return mission_url
def _scieng_file_state(self, log_url):
        '''Check the contents of the associated .log file to test whether there really should be a .nc file.
Return text indicating presence or reason why not. (Borrowed from lrauv-tools/handle-lrauv-logs/lrauv-data-file-audit.)
'''
not_creating_line = "ERROR .* Not creating"
no_start_and_end = "WARNING .* Can't get start and end date from .nc4"
with requests.get(log_url) as resp:
if resp.status_code != 200:
self.logger.error(f"Cannot read {log_url}, resp.status_code = {resp.status_code}")
return 'log_file_missing'
for line in (r.decode('utf-8') for r in resp.iter_lines()):
self.logger.debug(f"{line}")
if re.match(not_creating_line, line):
# Likely no variables available in .nc4 to produce the scieng.nc file
return 'missing_no_variables'
if re.match(no_start_and_end, line):
# Likely no time_time variable in the scieng.nc file
return 'missing_no_time_time'
return 'should_be_present'
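    def _example_scieng_log_lines(self):
        '''Hedged illustration (not part of the original loader): the two patterns in
        _scieng_file_state() classify lrauvNc4ToNetcdf.py log lines. These sample
        lines are assumptions about their general shape, not copied from a real log.
        '''
        assert re.match("ERROR .* Not creating", "ERROR 2019-05-21 Not creating scieng.nc file")
        assert re.match("WARNING .* Can't get start and end date from .nc4",
                        "WARNING 2019-05-21 Can't get start and end date from .nc4 file")
        return 'patterns_match'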
def find_lrauv_urls_by_dlist_string(self, dlist_str, platform, startdate, enddate, mission_year, nc_str='_2S_scieng.nc'):
'''Crawl web accessible directories and search for missions that have dlist_str.
Find all .dlist files and scan contents of the .dlist that has `dlist_str`.
Return a list of those urls. This is called by build_lrauv_attrs() which needs
to do its work one year at a time. Add urls that fall within startdate and
enddate, but do this only for one mission_year at a time, set by build_lrauv_attrs().
'''
urls = []
file_base = f'http://dods.mbari.org/data/lrauv/{platform}/missionlogs/{mission_year}'
dods_base = f'http://dods.mbari.org/opendap/data/lrauv/{platform}/missionlogs/{mission_year}'
self.logger.info(f"Looking in {file_base} for .dlist files containing string '{dlist_str}'")
soup = BeautifulSoup(urlopen(file_base).read(), 'lxml')
for link in soup.find_all('a'):
if '.dlist' in link.get('href'):
dlist_dir = link.get('href').split('/')[-1].split('.')[0]
dlist_url = os.path.join(file_base, f"{dlist_dir}.dlist")
self.logger.debug(f"Cheking if {platform}/missionlogs/{startdate.year}/{dlist_dir}.dlist contains '{dlist_str}'")
with requests.get(dlist_url) as resp:
if resp.status_code != 200:
self.logger.error(f"Cannot read {dlist_url}, resp.status_code = {resp.status_code}")
                        return urls
if dlist_str in resp.text:
self.logger.debug(f"Found a .dlist containing {dlist_str}: {dlist_dir}")
self.logger.debug(f"Searching uncommented directores in {dlist_url}")
for line in (r.decode('utf-8') for r in resp.iter_lines()):
self.logger.debug(f"{line}")
if not line.startswith('#'):
mission_dir = os.path.join(file_base, dlist_dir, line)
mission_dods = os.path.join(dods_base, dlist_dir, line)
url = self._get_mission_url(nc_str, mission_dir, mission_dods)
if url:
dts = dlist_dir.split('_')
dir_start = datetime.strptime(dts[0], '%Y%m%d')
dir_end = datetime.strptime(dts[1], '%Y%m%d')
                                    # Grab the valid urls for all log files in a .dlist directory that fall within startdate and enddate
if ( (startdate <= dir_start and dir_start <= enddate) or (startdate <= dir_end and dir_end <= enddate) ):
self.logger.info(f"Adding {url} to urls list")
urls.append(url)
else:
# Check .log file contents to confirm that we expect a url (.nc file)
log_url = self._get_mission_url(nc_str[:-2] + 'log', mission_dir, mission_dods)
if log_url:
log_reason = self._scieng_file_state(log_url)
self.logger.debug(f"The .log file indication for .nc file: {log_reason}")
if log_reason == 'should_be_present':
self.logger.warn(f"Could not find {nc_str} file in {mission_dods}, it {log_reason}")
else:
self.logger.warning(f"Log directory {mission_dods} has no .log file from lrauvNc4ToNetcdf.py processing")
return urls
def build_lrauv_attrs(self, mission_year, platform, startdate, enddate, parameters, file_patterns,
dlist_str=None, err_on_missing_file=False, sbd_logs=False, cell_logs=False):
'''Set loader attributes for each LRAUV platform. This is meant to be called for startdate
and enddate being within a single year. It will fail if startdate and enddate span multiple
years. We'd like to keep the files portion of the string short as it's the mouse-over text
in the UI
'''
base = f'http://dods.mbari.org/thredds/catalog/LRAUV/{platform}/missionlogs/{mission_year}/'
dods_base = f'http://dods.mbari.org/opendap/data/lrauv/{platform}/missionlogs/{mission_year}/'
if sbd_logs:
base = f'http://dods.mbari.org/thredds/catalog/LRAUV/{platform}/realtime/sbdlogs/{mission_year}/'
dods_base = f'http://dods.mbari.org/opendap/data/lrauv/{platform}/realtime/sbdlogs/{mission_year}/'
# TODO: Add case for cell_logs
setattr(self, platform + '_files', [])
setattr(self, platform + '_base', dods_base)
setattr(self, platform + '_parms' , parameters)
urls = []
try:
if dlist_str:
urls += self.find_lrauv_urls_by_dlist_string(dlist_str, platform,
startdate, enddate, mission_year)
else:
urls += self.find_lrauv_urls(base, file_patterns, startdate, enddate)
files = []
if len(urls) > 0:
for url in sorted(urls):
                    # Both realtime (shore_i) and missionlog urls keep the last three path components
                    file = '/'.join(url.split('/')[-3:])
                    files.append(file)
setattr(self, platform + '_files', files)
setattr(self, platform + '_startDatetime', startdate)
setattr(self, platform + '_endDatetime', enddate)
except urllib.error.HTTPError as e:
self.logger.warn(f'{e}')
except FileNotFound as e:
self.logger.warn(f'{e}')
if err_on_missing_file:
raise
def find_dorado_urls(self, base, search_str, startdate, enddate):
'''Use Thredds Crawler to return a list of DAP urls. Initially written for LRAUV data, for
which we don't initially know the urls.
'''
urls = []
catalog_url = os.path.join(base, 'catalog.xml')
c = Crawl(catalog_url, select=[search_str])
d = [s.get("url") for d in c.datasets for s in d.services if s.get("service").lower() == "opendap"]
for url in d:
try:
yyyy_yd = '_'.join(url.split('/')[-1].split('_')[1:3])
file_dt = datetime.strptime(yyyy_yd, '%Y_%j')
sd = startdate.replace(hour=0, minute=0, second=0, microsecond=0)
ed = enddate.replace(hour=0, minute=0, second=0, microsecond=0)
if sd <= file_dt and file_dt <= ed:
urls.append(url)
self.logger.debug(f'* {url}')
else:
self.logger.debug(f'{url}')
except ValueError:
urls.append(url)
if not urls:
            raise FileNotFound(f'No urls matching "{search_str}" found in {catalog_url}')
return urls
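    def _example_dorado_url_date(self):
        '''Hedged illustration (not part of the original loader): Dorado survey file
        names such as Dorado389_2010_300_00_300_00_decim.nc (see the test load in
        __main__ below) carry the year and year-day in the second and third
        underscore-separated fields, which find_dorado_urls() above parses with
        '%Y_%j' to filter urls by date.
        '''
        fname = 'Dorado389_2010_300_00_300_00_decim.nc'
        yyyy_yd = '_'.join(fname.split('_')[1:3])             # '2010_300'
        return datetime.strptime(yyyy_yd, '%Y_%j')            # datetime(2010, 10, 27)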
def build_dorado_attrs(self, platform, startdate, enddate, parameters, file_patterns):
'''Set loader attributes for each Dorado vehicle
'''
setattr(self, platform + '_parms' , parameters)
urls = []
files = []
for year in range(startdate.year, enddate.year+1):
base = f'http://dods.mbari.org/thredds/catalog/auv/{platform}/{year}/netcdf/'
dods_base = f'http://dods.mbari.org/opendap/data/auvctd/surveys/{year}/netcdf/'
try:
urls += self.find_dorado_urls(base, file_patterns, startdate, enddate)
for url in sorted(urls):
files.append(url.split('/')[-1])
except FileNotFound as e:
self.logger.debug(f'{e}')
if not files:
self.logger.warn(f"No files found for {platform} between {startdate} and {enddate} in {dods_base}")
# Send signal that urls span years by not setting dorado_base so that dorado_urls is used instead
if startdate.year == enddate.year:
setattr(self, platform + '_base', dods_base)
else:
setattr(self, platform + '_urls', sorted(urls))
setattr(self, platform + '_files', files)
setattr(self, platform + '_startDatetime', startdate)
setattr(self, platform + '_endDatetime', enddate)
if __name__ == '__main__':
'''
Test operation of this class
'''
# Instance variable settings
cl = CANONLoader('default', 'Test Load')
cl.stride = 1000
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2010/netcdf/'
cl.dorado_files = ['Dorado389_2010_300_00_300_00_decim.nc']
# Execute the load
cl.process_command_line()
cl.loadAll()
|
stoqs/stoqs
|
stoqs/loaders/CANON/__init__.py
|
Python
|
gpl-3.0
| 88,295
|
[
"NetCDF"
] |
26c59baaed1583abba6e8ff2c1bb89a7da6d9495564af7693caab1b1f32bb1c4
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_raises, assert_equal
from mne import (make_field_map, pick_channels_evoked, read_evokeds,
read_trans, read_dipole, SourceEstimate)
from mne.io import read_raw_ctf, read_raw_bti, read_raw_kit
from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
plot_trans)
from mne.utils import requires_mayavi, requires_pysurfer, run_tests_if_main
from mne.datasets import testing
from mne.source_space import read_source_spaces
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
trans_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
'sample-oct-6-src.fif')
dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
ctf_fname = op.join(data_dir, 'CTF', 'testdata_ctf.ds')
io_dir = op.join(op.abspath(op.dirname(__file__)), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
base_dir = op.join(io_dir, 'bti', 'tests', 'data')
pdf_fname = op.join(base_dir, 'test_pdf_linux')
config_fname = op.join(base_dir, 'test_config_linux')
hs_fname = op.join(base_dir, 'test_hs_linux')
sqd_fname = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
warnings.simplefilter('always') # enable b/c these tests throw warnings
@testing.requires_testing_data
@requires_pysurfer
@requires_mayavi
def test_plot_sparse_source_estimates():
"""Test plotting of (sparse) source estimates
"""
sample_src = read_source_spaces(src_fname)
# dense version
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
        np.random.RandomState(0).rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1)
colormap = 'mne_analyze'
plot_source_estimates(stc, 'sample', colormap=colormap,
config_opts={'background': (1, 1, 0)},
subjects_dir=subjects_dir, colorbar=True,
clim='auto')
assert_raises(TypeError, plot_source_estimates, stc, 'sample',
figure='foo', hemi='both', clim='auto')
# now do sparse version
vertices = sample_src[0]['vertno']
inds = [111, 333]
stc_data = np.zeros((len(inds), n_time))
stc_data[0, 1] = 1.
stc_data[1, 4] = 2.
vertices = [vertices[inds], np.empty(0, dtype=np.int)]
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=False)
@testing.requires_testing_data
@requires_mayavi
def test_plot_evoked_field():
"""Test plotting evoked field
"""
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
evoked = pick_channels_evoked(evoked, evoked.ch_names[::10]) # speed
for t in ['meg', None]:
with warnings.catch_warnings(record=True): # bad proj
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1,
ch_type=t)
evoked.plot_field(maps, time=0.1)
@testing.requires_testing_data
@requires_mayavi
def test_plot_trans():
"""Test plotting of -trans.fif files and MEG sensor layouts
"""
evoked = read_evokeds(evoked_fname)[0]
with warnings.catch_warnings(record=True): # 4D weight tables
bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
preload=False).info
infos = dict(
Neuromag=evoked.info,
CTF=read_raw_ctf(ctf_fname).info,
BTi=bti,
KIT=read_raw_kit(sqd_fname).info,
)
for system, info in infos.items():
ref_meg = False if system == 'KIT' else True
plot_trans(info, trans_fname, subject='sample', meg_sensors=True,
subjects_dir=subjects_dir, ref_meg=ref_meg)
# KIT ref sensor coil def not defined
assert_raises(RuntimeError, plot_trans, infos['KIT'], None,
meg_sensors=True, ref_meg=True)
info = infos['Neuromag']
assert_raises(ValueError, plot_trans, info, trans_fname,
subject='sample', subjects_dir=subjects_dir,
ch_type='bad-chtype')
assert_raises(TypeError, plot_trans, 'foo', trans_fname,
subject='sample', subjects_dir=subjects_dir)
# no-head version
plot_trans(info, None, meg_sensors=True, dig=True, coord_frame='head')
# EEG only with strange options
with warnings.catch_warnings(record=True) as w:
plot_trans(evoked.copy().pick_types(meg=False, eeg=True).info,
trans=trans_fname, meg_sensors=True)
    assert_true(any('Cannot plot MEG' in str(ww.message) for ww in w))
@testing.requires_testing_data
@requires_pysurfer
@requires_mayavi
def test_limits_to_control_points():
"""Test functionality for determing control points
"""
sample_src = read_source_spaces(src_fname)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.random.RandomState(0).rand((n_verts * n_time))
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
# Test for simple use cases
from mayavi import mlab
stc.plot(subjects_dir=subjects_dir)
stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
subjects_dir=subjects_dir)
stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
figs = [mlab.figure(), mlab.figure()]
assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
subjects_dir=subjects_dir)
# Test both types of incorrect limits key (lims/pos_lims)
assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
clim=dict(kind='value', lims=(5, 10, 15)),
subjects_dir=subjects_dir)
assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
clim=dict(kind='value', pos_lims=(5, 10, 15)),
subjects_dir=subjects_dir)
# Test for correct clim values
assert_raises(ValueError, stc.plot,
clim=dict(kind='value', pos_lims=[0, 1, 0]),
subjects_dir=subjects_dir)
assert_raises(ValueError, stc.plot, colormap='mne',
clim=dict(pos_lims=(5, 10, 15, 20)),
subjects_dir=subjects_dir)
assert_raises(ValueError, stc.plot,
clim=dict(pos_lims=(5, 10, 15), kind='foo'),
subjects_dir=subjects_dir)
assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
subjects_dir=subjects_dir)
assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
subjects_dir=subjects_dir)
assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
subjects_dir=subjects_dir)
# Test handling of degenerate data
stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
subjects_dir=subjects_dir) # ok
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# thresholded maps
stc._data.fill(1.)
plot_source_estimates(stc, subjects_dir=subjects_dir)
assert_equal(len(w), 0)
stc._data[0].fill(0.)
plot_source_estimates(stc, subjects_dir=subjects_dir)
assert_equal(len(w), 0)
stc._data.fill(0.)
plot_source_estimates(stc, subjects_dir=subjects_dir)
assert_equal(len(w), 1)
mlab.close()
@testing.requires_testing_data
@requires_mayavi
def test_plot_dipole_locations():
"""Test plotting dipole locations
"""
dipoles = read_dipole(dip_fname)
trans = read_trans(trans_fname)
dipoles.plot_locations(trans, 'sample', subjects_dir, fig_name='foo')
assert_raises(ValueError, dipoles.plot_locations, trans, 'sample',
subjects_dir, mode='foo')
run_tests_if_main()
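# Hedged reference sketch (not part of the original tests): the clim values exercised
# above are either 'auto' or a dict; 'lims' is used with sequential colormaps such as
# 'hot' and 'pos_lims' with diverging ones such as 'mne', which is what the KeyError
# checks in test_limits_to_control_points() assert.
CLIM_EXAMPLES = dict(
    sequential=dict(kind='value', lims=(10, 50, 90)),
    diverging=dict(kind='value', pos_lims=(5, 10, 15)),
)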
|
wronk/mne-python
|
mne/viz/tests/test_3d.py
|
Python
|
bsd-3-clause
| 9,172
|
[
"Mayavi"
] |
bd5a3346252f96dee800aa154f60eaea8c5706ae3b42b4700a4b9c59d72c1068
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date, timedelta
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import SUN, MON, TUE, WED, FRI, SAT
class Brazil(WesternCalendar, ChristianMixin):
"Brazil"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(4, 21, "Tiradentes' Day"),
(5, 1, "Labour Day"),
(9, 7, "Independence Day"),
(10, 12, "Our Lady of Aparecida"),
(11, 2, "All Souls' Day"),
(11, 15, "Republic Day"),
)
class BrazilSaoPauloState(Brazil):
"Brazil São Paulo State"
FIXED_HOLIDAYS = Brazil.FIXED_HOLIDAYS + (
(7, 9, "Constitutional Revolution of 1932"),
)
class BrazilSaoPauloCity(BrazilSaoPauloState):
"Brazil São Paulo City"
FIXED_HOLIDAYS = BrazilSaoPauloState.FIXED_HOLIDAYS + (
(1, 25, "Anniversary of the city of São Paulo"),
(11, 20, "Dia da Consciência Negra")
)
include_easter_sunday = True
include_corpus_christi = True
def get_carnaval(self, year):
return self.get_easter_sunday(year) - timedelta(days=47)
def get_variable_days(self, year):
days = super(BrazilSaoPauloCity, self).get_variable_days(year)
days.append((self.get_carnaval(year), "Carnaval"))
days.append((self.get_good_friday(year), "Sexta-feira da Paixão"))
return days
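# Hedged example (not an original part of this module): Carnaval is computed from
# Easter Sunday, so for 2019 (Easter on April 21st) get_carnaval() above yields
# date(2019, 3, 5).
def _carnaval_example(year=2019):
    return BrazilSaoPauloCity().get_carnaval(year)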
class Chile(WesternCalendar, ChristianMixin):
"Chile"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(5, 21, "Navy Day"),
(6, 29, "Saint Peter and Saint Paul"),
(7, 16, "Our Lady of Mount Carmel"),
(9, 18, "National holiday"),
(9, 19, "Army holiday"),
(10, 12, "Columbus Day"),
(12, 31, "Banking Holiday"),
)
include_good_friday = True
include_easter_saturday = True
include_assumption = True
include_all_saints = True
include_immaculate_conception = True
def get_variable_days(self, year):
days = super(Chile, self).get_variable_days(year)
september_17 = date(year, 9, 17)
if september_17.weekday() == MON:
days.append((september_17, '"Bridge" holiday'))
september_20 = date(year, 9, 20)
if september_20.weekday() == FRI:
days.append((september_20, '"Bridge" holiday'))
reformation_day = date(year, 10, 31)
if reformation_day.weekday() == WED:
reformation_day = date(year, 11, 2)
elif reformation_day.weekday() == TUE:
reformation_day = date(year, 10, 27)
days.append((reformation_day, "Reformation Day"))
return days
class Colombia(WesternCalendar, ChristianMixin):
"Colombia"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(7, 20, "Independence Day"),
(8, 7, "Boyacá Battle"),
)
include_palm_sunday = True
include_holy_thursday = True
include_good_friday = True
include_easter_sunday = True
include_corpus_christi = True
include_immaculate_conception = True
def get_epiphany(self, year):
base_day = date(year, 1, 6)
return Colombia.get_first_weekday_after(base_day, 0)
def get_saint_joseph(self, year):
base_day = date(year, 3, 19)
return Colombia.get_first_weekday_after(base_day, 0)
def get_ascension(self, year):
return self.get_easter_sunday(year) + timedelta(days=43)
def get_corpus_christi(self, year):
return self.get_easter_sunday(year) + timedelta(days=64)
def get_sacred_heart(self, year):
return self.get_easter_sunday(year) + timedelta(days=71)
def get_saint_peter_and_saint_paul(self, year):
base_day = date(year, 6, 29)
return Colombia.get_first_weekday_after(base_day, 0)
def get_assumption(self, year):
base_day = date(year, 8, 15)
return Colombia.get_first_weekday_after(base_day, 0)
def get_race_day(self, year):
base_day = date(year, 10, 12)
return Colombia.get_first_weekday_after(base_day, 0)
def get_all_saints(self, year):
base_day = date(year, 11, 1)
return Colombia.get_first_weekday_after(base_day, 0)
def get_cartagena_independence(self, year):
base_day = date(year, 11, 11)
return Colombia.get_first_weekday_after(base_day, 0)
def get_variable_days(self, year):
days = super(Colombia, self).get_variable_days(year)
days.extend([
(self.get_epiphany(year), "Epiphany"),
(self.get_saint_joseph(year), "Saint Joseph"),
(self.get_ascension(year), "Ascension"),
(self.get_sacred_heart(year), "Sacred Heart"),
(self.get_saint_peter_and_saint_paul(year),
"Saint Peter and Saint Paul"),
(self.get_assumption(year), "Assumption of Mary to Heaven"),
(self.get_race_day(year), "Race Day"),
(self.get_all_saints(year), "All Saints"),
(self.get_cartagena_independence(year),
"Cartagena's Independence"),
])
return days
class Mexico(WesternCalendar, ChristianMixin):
"Mexico"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(9, 16, "Independence Day"),
)
def get_variable_days(self, year):
days = super(Mexico, self).get_variable_days(year)
days.append(
(Mexico.get_nth_weekday_in_month(year, 2, MON),
"Constitution Day"))
days.append(
(Mexico.get_nth_weekday_in_month(year, 3, MON, 3),
"Benito Juárez's birthday"))
days.append(
(Mexico.get_nth_weekday_in_month(year, 11, MON, 3),
"Revolution Day"))
return days
def get_calendar_holidays(self, year):
days = super(Mexico, self).get_calendar_holidays(year)
# If any statutory day is on Sunday, the monday is off
# If it's on a Saturday, the Friday is off
for day, label in days:
if day.weekday() == SAT:
days.append((day - timedelta(days=1), "%s substitute" % label))
elif day.weekday() == SUN:
days.append((day + timedelta(days=1), "%s substitute" % label))
# Extra: if new year's day is a saturday, the friday before is off
next_new_year = date(year + 1, 1, 1)
        if next_new_year.weekday() == SAT:
days.append((date(year, 12, 31), "New Year Day substitute"))
return days
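# Hedged illustration (not an original part of this module): the substitution rule
# above moves a Saturday holiday to the preceding Friday and a Sunday holiday to the
# following Monday; e.g. Labour Day 2021-05-01 falls on a Saturday, so 2021-04-30
# becomes its substitute.
def _mexico_substitute_example():
    labour_day = date(2021, 5, 1)
    assert labour_day.weekday() == SAT
    return labour_day - timedelta(days=1)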
class Panama(WesternCalendar, ChristianMixin):
"Panama"
include_good_friday = True
include_easter_saturday = True
include_easter_sunday = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(1, 9, "Martyrs' Day"),
(5, 1, "Labour Day"),
(11, 3, "Independence Day"),
(11, 5, "Colon Day"),
(11, 10, "Shout in Villa de los Santos"),
(12, 2, "Independence from Spain"),
(12, 8, "Mothers' Day"),
)
def get_variable_days(self, year):
days = super(Panama, self).get_variable_days(year)
days.append(
(self.get_ash_wednesday(year) - timedelta(days=1), "Carnival")
)
return days
|
sirk390/workalendar
|
workalendar/america.py
|
Python
|
mit
| 7,348
|
[
"COLUMBUS"
] |
df3d0369cedafbb30d79424a353c68d20f48e89c51f68533ffc57021c1ff7438
|
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright © 2015 Glenn Fitzpatrick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, re, sys
# when using as an embedded script in Hazel, the matched file name is passed to this script as the first argument
# open the file as read-only
# kindle clipping text files are UTF-8 encoding with BOM, so we use the utf-8-sig encoding
clippings = open(sys.argv[1], 'r', encoding='utf-8-sig')
# read in all of the lines in the file
lines = iter(clippings.readlines())
# the lines in the file look something like this:
#
# Casino Royale (James Bond) (Ian Fleming)
# - Highlight on Page 43 | Loc. 596-97 | Added on Friday, October 11, 2013, 12:08 PM
#
# ‘My name’s Felix Leiter,’ said the American. ‘Glad to meet you.’ ‘Mine’s Bond – James Bond.’
# ==========
#
#
# that is, a file with multiple clippings would look like...
#
# Title (Author)
# - Highlight on Page 1 | Loc. 1-999 | Added on Day, Month, Date, Year, Hour:Minute Time
#
# Body of clipping.
# ==========
# Title (Author)
# - Highlight on Page 1 | Loc. 1-999 | Added on Day, Month, Date, Year, Hour:Minute Time
#
# Body of clipping.
# ==========
# Title (Author)
# - Highlight on Page 1 | Loc. 1-999 | Added on Day, Month, Date, Year, Hour:Minute Time
#
# Body of clipping.
# ==========
#
# we want to extract each of those individual clippings to their own separate files, organized by item title.
# we don't need to know the day the clipping was made. the goal is to have each of those individual clippings as a file
# that looks like:
#
# Title: title
# Author: author
# Page 1 | Loc. 1-999
#
# Body of clipping.
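# Hedged worked example (not part of the original script): applying the title/author
# regexes used further below to the sample first line shown above. The expected values
# follow from the comment example, not from any Kindle specification.
_example_line = "Casino Royale (James Bond) (Ian Fleming)"
assert re.search(r"(?P<title>.*)\s(?=\(.*\)$)", _example_line).group('title') == "Casino Royale (James Bond)"
assert re.search(r"\((?P<author>[^)]*)\)$(?!.*\()", _example_line).group('author') == "Ian Fleming"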
# start with the first line in the clipped section
# we use clippingline as a counter to determine what to do depending on where we are in the clipped section
clippingline = 0
# set the body of the first clipping to an empty string
clippingbody = ''
# for each line that was read in
for line in lines:
if "- Your Note on " in line:
next(lines)
next(lines)
next(lines)
clippingline = 0
clippingbody = ''
continue
# if it's the first line in the clipped section
if clippingline == 0:
# the first line in the clipped section has the title of the item the clipping came from
# and also has the author of that item. we need to do some regex on that first line to extract
# the title of the item and the item's author:
#
# Casino Royale (James Bond) (Ian Fleming)
# get the clipping title
# sometimes the title has a parenthetical section, like in this example
# we want everything up until the space between the title and the last parenthetical section
# on this line, which contains the author's name
regex_title = re.compile(r"(?P<title>.*)\s(?=\(.*\)$)")
result = regex_title.search(line)
clippingtitle = result.group('title')
# get the clipping author
# the item's author is in the last parenthetical section on the line, so we grab everything inside
# that last parenthetical section as the author's name
regex_author = re.compile(r"\((?P<author>[^)]*)\)$(?!.*\()")
result = regex_author.search(line)
clippingauthor = result.group('author')
# move to the next line in the clipping section
clippingline = clippingline + 1
elif clippingline == 1:
# the second line in the clipped section has the location of the clipping and the date it was clipped:
#
# - Highlight on Page 43 | Loc. 596-97 | Added on Friday, October 11, 2013, 12:08 PM
#
# the page number is optional, only items with real page numbers contain that field. if this item did not
# have real page numbers, it would look like this:
#
# - Highlight Loc. 596-97 | Added on Friday, October 11, 2013, 12:08 PM
#
# here we grab the page number (if it exists) and the location so we can reference it in our output. we'll
# also extract the actual location numbers so we can use that as part of the output file's filename.
# get the clipping location
regex_location = re.compile(r"\- Your (Highlight|Note) on (?P<location>(page.*)?.*Location\s(?P<loc>\S*))")
result = regex_location.search(line)
clippinglocation = result.group('location') # Page 43 | Loc. 596-97
clippinglocation = clippinglocation.replace('page', 'Page')
loc = result.group('loc') # 596-97
# move to the next line in the clipping section
clippingline = clippingline + 1
elif clippingline == 2:
# the third line in the clipped section is a blank line between the details of the clipping and the body of
# the clipped section. we just skip to the next line to start clipping the body of the clipping.
clippingline = clippingline + 1
elif clippingline == 3 and line != '==========\n':
# the fourth line starts the actual body of the clipping. most clippings i've found have saved the body as a
# single line, but one i've found (i'm looking at you, Feynman's Rainbow: A Search for Beauty in Physics and in Life)
# had carriage returns in the middle of the body. since the body of the clipping goes until there is a '==========\n'
# line, we just keep reading in lines and appending them to the lines we've already read so far until we reach
# that '==========\n' line. if there do happen to be multiple lines split by carriage returns in the middle of the
# body, we join those lines together with line feeds to make the output a little nicer to read.
#
# ‘My name’s Felix Leiter,’ said the American. ‘Glad to meet you.’ ‘Mine’s Bond – James Bond.’
# get the whole line as the clipping body and join it to any previous lines in the body
clippingbody = '\n'.join([clippingbody, line])
elif line == '==========\n':
# once we reach the '==========\n' line that's the end of the clipped section so now we can create the output file
# if the book's (clipping's) directory doesn't yet exist, create it
# if there's a ':' in the book's title, replace it with a '-' for the filesystem
if not os.path.exists(clippingtitle.replace(':', '-')):
os.makedirs(clippingtitle.replace(':', '-'))
# change to the book's directory
os.chdir(clippingtitle.replace(':', '-'))
# create the clipping file
# name the clipping file as the clipped item's title and the location of the clipping:
#
# Casino Royale (James Bond) 596-97.txt
filename = " ".join([clippingtitle.replace(':', '-'), loc])
filename = filename + ".txt"
output = open(filename, 'w', encoding='utf-8')
# write to the clipping file. the output will look like:
#
# Title: Casino Royale (James Bond)
# Author: Ian Fleming
# Page 43 | Loc. 596-97
#
# ‘My name’s Felix Leiter,’ said the American. ‘Glad to meet you.’ ‘Mine’s Bond – James Bond.’
output.write("".join(["Title: ", clippingtitle.strip(), '\n']))
output.write("".join(["Author: ", clippingauthor.strip(), '\n']))
output.write("".join([clippinglocation.strip(), '\n\n']))
output.write(clippingbody.strip())
# close the output file
output.close()
# go back to the top-level directory
os.chdir('..')
# start anew on the next block of clipping text
clippingline = 0
clippingbody = ''
# close the My Clippings.txt file
clippings.close()
|
gfitzp/kindle-clipper
|
kindle-clipper.py
|
Python
|
mit
| 8,941
|
[
"CASINO"
] |
c7acee64dc7ab8deb0e606ecbbf2576924b8e117c7a71d5266078648ff7323b8
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from ambhas.copula import Copula
from scipy import stats
input1 = pd.read_excel("Input_Data.xlsx", sheetname="Oil Call Option Prices")
input2 = pd.read_excel("Input_Data.xlsx", sheetname="FX Call Option Prices")
input3 = pd.read_excel("Input_Data.xlsx", sheetname="Joint_FX_Put")
input4 = pd.read_excel("Input_Data.xlsx", sheetname="Joint_Oil_Call")
x1=input1["Strike"].as_matrix()
y1=input1["Price"].as_matrix()
x2=input2["Strike"].as_matrix()
y2=input2["Price"].as_matrix()
fd1=np.gradient(y1)
fd2=np.gradient(y2)
sd1=np.gradient(fd1)
sd2=np.gradient(fd2)
# Figure 1
plt.plot(x1,sd1)
plt.xlabel('Price of Oil')
plt.ylabel('f($X_{Oil}$)')
plt.show()
#Figure 2
plt.plot(x2,sd2)
plt.xlabel('Price of FX')
plt.ylabel('f($X_{FX}$)')
plt.show()
# For Oil Digital Options
price = []
for K in range(30,71):
temp = 0
for i in np.nditer(x1):
if i > K:
index = np.where(x1==i)
temp = temp + sd1[index]
price.append(temp)
np.savetxt('Q1_1.csv',np.array(price))
temp = range(30,71)
# plt.plot(temp,price)
plt.show()
price = []
for K in range(20,106):
temp = 0
for i in np.nditer(x2):
if i > K:
index = np.where(x2==i)
temp = temp + sd2[index]
price.append(temp)
np.savetxt('Q1_2.csv',np.array(price))
temp = range(20,106)
plt.plot(temp, price)
plt.show()
# Oil Exotic Options
price = []
for K in range(30,71):
temp = 0
for i in np.nditer(x1):
if i > K:
index = np.where(x1==i)
temp = temp + ((i-K)**2)*sd1[index]
price.append(temp)
np.savetxt('Q2_1.csv',np.array(price))
temp = range(30,71)
plt.plot(temp, price)
plt.show()
# FX Exotic Options
price = []
for K in range(20,106):
temp = 0
for i in np.nditer(x2):
if i > K:
index = np.where(x2==i)
temp = temp + ((i-K)**2)*sd2[index]
price.append(temp)
np.savetxt('Q2_2.csv',np.array(price))
plt.plot(range(20,106),price)
plt.show()
xk1 = np.arange(len(list(sd1)))
pk1 = sd1
# Generating a random number distribution for Oil
custm1 = stats.rv_discrete(name='custm1', values=(xk1, pk1))
xk2 = np.arange(len(list(sd2)))
pk2 = sd2
# Generating a random number distribution for FX
custm2 = stats.rv_discrete(name='custm2', values=(xk2, pk2))
# Generating Random Numbers from the distributions
R1 = custm1.rvs(size=10000)
R2 = custm2.rvs(size=10000)
# function to generate copula from two sets of random numbers which follow the given marginal probability distribution
def genCopulas():
fig = plt.figure()
frank = Copula(R1,R2,family='frank')
xf,yf = frank.generate_xy(500000)
clayton = Copula(R1,R2,family='clayton')
xc,yc = clayton.generate_xy(500000)
# to return the random number pairs from frank copula
return xf, yf
# to return the random number pairs from clayton copula
# return xc, yc
# Create a grid to calculate the joint distribution from generated random number pairs
m1, m2 = genCopulas()
xmin = m1.min()
xmax = m1.max()
ymin = m2.min()
ymax = m2.max()
X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([m1, m2])
# Using Gaussian Kernel Density Estimator
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
# Verifying that the obtained joint distribution is adequate
# Comparing with Actual Marginal obtained from Question1
fd1=np.gradient(X.T[0])
fd2=np.gradient(Y[0])
x_list = []
y_list = []
for i in range(100):
temp_x = 25 + X[i][0]
temp_y =sum(Z[i])*fd2[0]
x_list.append(temp_x)
y_list.append(temp_y)
plt.plot(x_list,y_list, label = 'Estimated Marginal')
plt.plot(x1,sd1, label = 'Actual Marginal')
plt.ylabel("f($X_1$)")
plt.xlabel("Price of Oil ($X_1$)")
plt.legend()
plt.show()
fd1=np.gradient(X.T[0])
fd2=np.gradient(Y[0])
x_list = []
y_list = []
for i in range(100):
temp_x = 15 + Y[0][i]
temp_y =sum(Z.T[i])*fd1[0]
x_list.append(temp_x)
y_list.append(temp_y)
plt.plot(x_list,y_list, label = 'Estimated Marginal')
plt.plot(x2,sd2, label = 'Actual Marginal')
plt.ylabel("f($X_2$)")
plt.xlabel("Price of FX ($X_2$)")
plt.legend()
plt.show()
# for 'Q2'
B1 = [35, 41, 47, 53, 59, 65]
pred = []
for k in B1:
sum2 = 0
for j in range(100):
sum1 = 0
for i in range(100):
if (25+X[i][0]) > k:
sum1 = sum1 + (25+X[i][0]-k)*Z[i][j]
sum2 = sum2 + (15+Y[0][j])*sum1
sum3 = sum2*fd1[0]*fd2[0]
pred.append(sum3)
actual = [912.104648, 591.928507, 309.753731, 115.46706, 27.091061, 3.655863]
plt.plot(B1,actual, label = 'Actual Joint_Oil_Call')
plt.plot(B1,pred, label = 'Estimated Joint_Oil_Call')
plt.legend()
plt.show()
# for 'Q1'
B2 = [30, 40, 50, 60, 70, 80]
pred = []
for k in B2:
sum2 = 0
for j in range(100):
sum1 = 0
for i in range(100):
if (15+Y[0][i]) < k:
sum1 = sum1 + (k-(15+Y[0][i]))*Z[j][i]
sum2 = sum2 + (25+X[j][0])*sum1
sum3 = sum2*fd1[0]*fd2[0]
pred.append(sum3)
actual = [4.640858, 59.718679, 235.426702, 493.174062, 814.620805, 1214.109622]
plt.plot(B2,actual, label = 'Actual Joint_FX_Put')
plt.plot(B2,pred, label = 'Estimated Joint_FX_Put')
plt.legend()
plt.show()
# Final Estimation of OilCall_FXPut
B1 = [35, 39, 43, 47, 51, 55, 59, 63, 67]
B2 = 90
fname = 'temp_90.txt'
pred = []
for k in B1:
sum2 = 0
for j in range(100):
if (15+Y[0][j])<B2:
sum1 = 0
for i in range(100):
if (25+X[i][0]) > k:
sum1 = sum1 + (25+X[i][0]-k)*Z[i][j]
sum2 = sum2 + (B2 - (15+Y[0][j]))*sum1
sum3 = sum2*fd1[0]*fd2[0]
pred.append(sum3)
np.savetxt(fname,pred)
# plt.plot(pred)
# plt.show()
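# Hedged illustration (not part of the original analysis): an equivalent, vectorized
# form of the double Riemann sum used in the final estimation above. It assumes the
# grid variables X, Y, Z and the spacings fd1, fd2 computed earlier in this script.
def joint_oilcall_fxput_price(k, b2):
    oil = 25 + X[:, 0]                                   # oil price grid
    fx = 15 + Y[0, :]                                    # FX price grid
    payoff = np.maximum(oil[:, None] - k, 0.0) * np.maximum(b2 - fx[None, :], 0.0)
    return np.sum(payoff * Z) * fd1[0] * fd2[0]
# e.g. joint_oilcall_fxput_price(35, 90) should reproduce the first entry of pred above.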
|
GauthamGoli/quantify-2016
|
Modelling Joint Distributions/source_code.py
|
Python
|
mit
| 5,896
|
[
"Gaussian"
] |
d3edeef00b885c2343aa90887bf7cf5699860e05815ba0ef2abe2cb089174914
|
"""Implements tools for computing performance profiles.
This module implements tools for computing
the Dolan and More performance profiles for
a set of solvers on a given set of test
problems. For the definition of performance
profiles, see
E.D. Dolan and J.J. More, Benchmarking optimization
software with performance profiles, Mathematical
Programming 91 (2002), no. 2, 201-213
"""
from native import *
from matplotlib import rc
from matplotlib.pyplot import *
from numpy import *
from testproblems import *
def performance_profile(S, P, plot_results):
"""For a given set of solver S and a given set of
test problems P, compute the Dolan and More performance
profiles. A triplet (R,tau,rho) is returned. R is
a mxn matrix containing the computed performance
ratios, where m and n denotes the sizes of P and S,
respectively. The return values tau and rho values
represent the x and y values used for plotting the
data. If the boolean value plot_results is set to
True, the results are also plotted."""
R_MAX = 1e99
T = zeros((len(P), len(S)))
R = zeros((len(P), len(S)))
si = 0
for s in S:
print 'Algorithm:', s.get_name()
print "%-25s%-13s%-15s%-15s%-8s" % ('Test function',
'Time', 'Term.val.', '#i #f #g', 'Time/iter.')
pi = 0
for p in P:
results = s.solve(p.f, p.x0, p.stopcrit,
Solver.DefaultSetup(), NoConstraints(), True)
if results.converged == True:
print "%-25s%-13.2f%-15.4e%-5d%-5d%-5d%-8.2f" % (p.name,
results.time, results.term_val, results.num_iter,
results.num_func_eval, results.num_grad_eval,
1.0 * results.time / results.num_iter)
T[pi, si] = results.time
else:
print "%-25s%-13s%-15.4e%-5s%-5s%-5s%-8s" % (p.name,
'Failure', results.term_val, '-', '-', '-', '-')
T[pi, si] = nan
pi = pi + 1
si = si + 1
d = zeros(len(P))
for pi in range(len(P)):
d[pi] = nanmin(T[pi, :])
for si in range(len(S)):
for pi in range(len(P)):
if not isnan(T[pi, si]) and not isnan(d[pi]):
R[pi, si] = T[pi, si] / d[pi]
else:
R[pi, si] = R_MAX
tau = linspace(0, 1.1 * log2(R[R < R_MAX].max()), 20)
rho = zeros((len(tau), len(S)))
for si in range(len(S)):
for ti in range(len(tau)):
rho[ti, si] = sum(log2(R[:, si]) <= tau[ti])
rho = rho / len(P)
if plot_results == True:
markers = ['^', 's', 'x', 'o', '+']
rc('text', usetex=True)
figure()
plots = []
for si in range(len(S)):
plots += plot(tau, rho[:, si], '-' + markers[si],
linewidth=2, markersize=6)
legends = []
for s in S:
legends.append(s.get_name().replace('_', '\_'))
legend(plots, legends, loc='lower right', shadow=True)
xlabel(r'\tau')
ylabel(r'\rho_s(2^\tau)')
xlim(0, tau[len(tau) - 1])
ylim(0, 1.05)
grid(True)
show()
return (R, tau, rho)
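def _profile_example():
    """Hedged worked example (not part of the original module): for solve times
    T (problems x solvers), the performance ratio is r[p,s] = T[p,s] / min_s T[p,s],
    and rho_s(tau) is the fraction of problems for which log2(r[p,s]) <= tau."""
    T = array([[1.0, 2.0],
               [3.0, 3.0],
               [10.0, 5.0]])
    R = T / T.min(axis=1).reshape(-1, 1)
    # At tau = 0 only the problems where a solver is (tied) fastest count:
    # both solvers win two of the three problems, so rho(0) = [2/3, 2/3].
    return (log2(R) <= 0).sum(axis=0) / float(T.shape[0])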
def main():
S = [ LRWWSimplex(),
DSQA(),
#DFQAS(),
SteihaugSR1(),
#ConjGradMT(ConjGradType.FR),
#ConjGradMT(ConjGradType.PR),
LinminBFGS(LinminBFGS.LinminType.morethuente) ]
P1 = [ PowellBadlyScaled(),
BrownBadlyScaled(),
Beale(),
HelicalValley(),
Gaussian(),
Gulf(m=5),
Box(m=5),
Wood(),
BrownDennis(m=20),
BiggsEXP6(m=13),
Watson(n=6),
ExtendedRosenbrock(n=10),
ExtendedPowellSingular(n=12),
PenaltyFunctionI(n=10),
PenaltyFunctionII(n=10),
VariablyDimensioned(n=10),
Trigonometric(n=5),
ChebyQuad(n=8, m=8) ]
P2 = [ #PowellBadlyScaled(gEvalType=DerivEvalType.fdiff_central_2),
#BrownBadlyScaled(gEvalType=DerivEvalType.fdiff_central_2),
#Beale(gEvalType=DerivEvalType.fdiff_central_2),
#HelicalValley(gEvalType=DerivEvalType.fdiff_central_2),
#Gaussian(gEvalType=DerivEvalType.fdiff_central_2),
Gulf(m=5, gEvalType=DerivEvalType.fdiff_central_2),
Box(m=5, gEvalType=DerivEvalType.fdiff_central_2),
#Wood(gEvalType=DerivEvalType.fdiff_central_2),
BrownDennis(m=20, gEvalType=DerivEvalType.fdiff_central_2),
BiggsEXP6(m=13, gEvalType=DerivEvalType.fdiff_central_2),
Watson(n=6, gEvalType=DerivEvalType.fdiff_central_2),
ExtendedRosenbrock(n=32, gEvalType=DerivEvalType.fdiff_central_2),
ExtendedPowellSingular(n=16, gEvalType=DerivEvalType.fdiff_central_2),
PenaltyFunctionI(n=10, gEvalType=DerivEvalType.fdiff_central_2),
PenaltyFunctionII(n=10, gEvalType=DerivEvalType.fdiff_central_2),
VariablyDimensioned(n=20, gEvalType=DerivEvalType.fdiff_central_2),
Trigonometric(n=7, gEvalType=DerivEvalType.fdiff_central_2),
ChebyQuad(n=8, m=8, gEvalType=DerivEvalType.fdiff_central_2) ]
performance_profile(S, P1, True)
if __name__ == "__main__":
main()
|
tbs1980/otkpp
|
pyotk/pyotk/perfprof.py
|
Python
|
gpl-3.0
| 4,857
|
[
"Gaussian"
] |
de535ee3ba068f26504ea8bba0e19207c63482f1fa91b73f6a8ee631e6eb6acd
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets, QtCore
from peacock.utils import WidgetUtils
class TabbedPreferences(QtWidgets.QWidget):
"""
For each plugin, store a preference widget in its own tab.
"""
def __init__(self, plugins):
super(TabbedPreferences, self).__init__()
self._widgets = []
self.layout = QtWidgets.QVBoxLayout()
self.setLayout(self.layout)
self.tabs = QtWidgets.QTabWidget(parent=self)
self.layout.addWidget(self.tabs)
self.button_layout = QtWidgets.QHBoxLayout()
self.layout.addLayout(self.button_layout)
self.save_button = WidgetUtils.addButton(self.button_layout, self, "&Save", self.save)
self.cancel_button = WidgetUtils.addButton(self.button_layout, self, "&Cancel", self.cancel)
for plugin in plugins:
w = plugin.preferencesWidget()
if w.count() > 0:
self._widgets.append(w)
self.tabs.addTab(w, plugin.tabName())
def save(self):
"""
Save the preferences to disk
"""
settings = QtCore.QSettings()
for w in self._widgets:
w.save(settings)
settings.sync()
self.close()
def load(self):
"""
Load preferences from disk.
"""
settings = QtCore.QSettings()
for w in self._widgets:
w.load(settings)
def widget(self, tab_name):
"""
Gets the PreferenceWidget based on tab name.
This is primarily intended for use while testing.
"""
for i in range(self.tabs.count()):
if self.tabs.tabText(i) == tab_name:
return self.tabs.widget(i)
return None
def cancel(self):
"""
Cancel the changes and close the window
"""
self.load() # we want to leave the widgets in a good state
self.close()
|
harterj/moose
|
python/peacock/base/TabbedPreferences.py
|
Python
|
lgpl-2.1
| 2,204
|
[
"MOOSE"
] |
949a7d01726b7a7c754e7b777a00c1b70e5ed53c977e44dfdef353e99c0c7651
|
"""
Utilities for cleaning HTML code.
"""
def clean_html(*args, **kwargs):
raise ImportError("clean_html requires html5lib or pytidylib")
def sanitize_html(*args, **kwargs):
raise ImportError("sanitize_html requires html5lib")
def clean_html5lib(input):
"""
Takes an HTML fragment and processes it using html5lib to ensure that the HTML is well-formed.
>>> clean_html5lib("<p>Foo<b>bar</b></p>")
u'<p>Foo<b>bar</b></p>'
>>> clean_html5lib("<p>Foo<b>bar</b><i>Ooops!</p>")
u'<p>Foo<b>bar</b><i>Ooops!</i></p>'
>>> clean_html5lib('<p>Foo<b>bar</b>& oops<a href="#foo&bar">This is a <>link</a></p>')
u'<p>Foo<b>bar</b>& oops<a href=#foo&bar>This is a <>link</a></p>'
"""
from html5lib import treebuilders, treewalkers, serializer, sanitizer
p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
dom_tree = p.parseFragment(input)
walker = treewalkers.getTreeWalker("dom")
stream = walker(dom_tree)
s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False)
return "".join(s.serialize(stream))
def sanitize_html5lib(input):
"""
Removes any unwanted HTML tags and attributes, using html5lib.
>>> sanitize_html5lib("foobar<p>adf<i></p>abc</i>")
u'foobar<p>adf<i></i></p><i>abc</i>'
>>> sanitize_html5lib('foobar<p style="color:red; remove:me; background-image: url(http://example.com/test.php?query_string=bad);">adf<script>alert("Uhoh!")</script><i></p>abc</i>')
u'foobar<p style="color: red;">adf<script>alert("Uhoh!")</script><i></i></p><i>abc</i>'
"""
from html5lib import treebuilders, treewalkers, serializer, sanitizer
p = html5lib.HTMLParser(tokenizer=sanitizer.HTMLSanitizer, tree=treebuilders.getTreeBuilder("dom"))
dom_tree = p.parseFragment(input)
walker = treewalkers.getTreeWalker("dom")
stream = walker(dom_tree)
s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False)
return "".join(s.serialize(stream))
def clean_pytidylib(input):
(cleaned_html, warnings) = tidylib.tidy_document(input)
return cleaned_html
try:
import html5lib
clean_html, sanitize_html = clean_html5lib, sanitize_html5lib
except ImportError:
try:
import tidylib
clean_html = clean_pytidylib
except ImportError:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
|
luiscarlosgph/nas
|
env/lib/python2.7/site-packages/django_wysiwyg/utils.py
|
Python
|
mit
| 2,416
|
[
"ADF"
] |
03c9b44138179e29144728ccc8bc6a40f36def1b25b4b1e3b0c6a6ac5973affe
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Generate Climate Based Sky
This component generates a climate based sky for any hour of the year
-
Provided by Honeybee 0.0.57
Args:
north_: Input a vector to be used as a true North direction for the sun path or a number between 0 and 360 that represents the degrees off from the y-axis to make North. The default North direction is set to the Y-axis (0 degrees).
_weatherFile: epw weather file address on your system
_month: Month of the study [1-12]
_day: Day of the study [1-31]
_hour: Hour of the study [1-24]
Returns:
radiationValues: Direct and diffuse radiation of the sky
skyFilePath: Sky file location on the local drive
"""
ghenv.Component.Name = "Honeybee_Generate Climate Based Sky"
ghenv.Component.NickName = 'genClimateBasedSky'
ghenv.Component.Message = 'VER 0.0.57\nJUL_06_2015'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "02 | Daylight | Sky"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import os
import scriptcontext as sc
import Grasshopper.Kernel as gh
import math
def date2Hour(month, day, hour):
# fix the end day
numOfDays = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
# dd = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
JD = numOfDays[int(month)-1] + int(day)
return (JD - 1) * 24 + hour
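# Hedged worked example (not part of the original component): for October 11th at hour
# 12, date2Hour gives (273 + 11 - 1) * 24 + 12 = 6804, and getRadiationValues() below
# then reads EPW line 6804 + 8 - 1 to pick up the direct/diffuse radiation fields.
assert date2Hour(10, 11, 12) == 6804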
def getRadiationValues(epw_file, HOY):
epwfile = open(epw_file,"r")
for lineCount, line in enumerate(epwfile):
if lineCount == int(HOY + 8 - 1):
dirRad = (float(line.split(',')[14]))
difRad = (float(line.split(',')[15]))
return dirRad, difRad
def RADDaylightingSky(epwFileAddress, locName, lat, long, timeZone, hour, day, month, north = 0):
dirNrmRad, difHorRad = getRadiationValues(epwFileAddress, date2Hour(month, day, hour))
print "Direct: " + `dirNrmRad` + "| Diffuse: " + `difHorRad`
return "# start of sky definition for daylighting studies\n" + \
"# location name: " + locName + " LAT: " + lat + "\n" + \
"!gendaylit " + `month` + ' ' + `day` + ' ' + `hour` + \
" -a " + lat + " -o " + `-float(long)` + " -m " + `-float(timeZone) * 15` + \
" -W " + `dirNrmRad` + " " + `difHorRad` + " -O " + `outputType` + \
" | xform -rz " + str(north) + "\n" + \
"skyfunc glow sky_mat\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"1 1 1 0\n" + \
"sky_mat source sky\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"0 0 1 180\n" + \
"skyfunc glow ground_glow\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"1 .8 .5 0\n" + \
"ground_glow source ground\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"0 0 -1 180\n"
def main(outputType, weatherFile, month, day, hour, north = 0):
# import the classes
if sc.sticky.has_key('honeybee_release') and sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
hb_folders = sc.sticky["honeybee_folders"]
hb_RADPath = hb_folders["RADPath"]
hb_RADLibPath = hb_folders["RADLibPath"]
else:
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let Ladybug and Honeybee to fly...")
return -1
    # check that gendaylit.exe exists
if not os.path.isfile(hb_RADPath + "\\gendaylit.exe"):
msg = "Cannot find gendaylit.exe at " + hb_RADPath + \
"Make sure that gendaylit is installed on your system."
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return -1
if weatherFile != None and weatherFile[-3:] == 'epw':
if not os.path.isfile(weatherFile):
print "Can't find the weather file at: " + weatherFile
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Can't find the weather file at: " + weatherFile)
return -1
# import data from epw file data
locName, lat, lngt, timeZone, elev, locationStr = lb_preparation.epwLocation(weatherFile)
newLocName = lb_preparation.removeBlank(locName)
else:
print "epwWeatherFile address is not a valid .epw file"
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "epwWeatherFile address is not a valid .epw file")
return -1
# make new folder for each city
subWorkingDir = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "skylib\\climateBasedSkies\\", newLocName)
subWorkingDir = lb_preparation.makeWorkingDir(subWorkingDir)
# print 'Current working directory is set to: ', subWorkingDir
outputFile = subWorkingDir + "\\climateBasedSky@_" + `month` + "_" + `day` + "@" + ('%.2f'%hour).replace(".", "") + ".sky"
northAngle, northVector = lb_preparation.angle2north(north)
skyStr = RADDaylightingSky(weatherFile, newLocName, lat, lngt, timeZone, hour, day, month, math.degrees(northAngle))
skyFile = open(outputFile, 'w')
skyFile.write(skyStr)
skyFile.close()
return outputFile , `day` + "_" + `month` + "@" + ('%.2f'%hour).replace(".", "")
if _weatherFile!=None and _month!=None and _day!=None and _hour!=None:
outputType = 0
result = main(outputType, _weatherFile, _month, _day, _hour, north_)
if result!=-1:
skyFilePath, skyDescription = result
|
samuto/Honeybee
|
src/Honeybee_Generate Climate Based Sky.py
|
Python
|
gpl-3.0
| 7,211
|
[
"EPW"
] |
75bcfb6f415691f576c086a7ca64c284d16e086b19aed454b2d18185bdaa71ce
|
"""
Given an n*n adjacency array,
it will give you the maximum flow.
This version use DFS to search path.
Assume the first is the source and the last is the sink.
Time complexity - O(Ef)
example
graph = [[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0]]
answer should be
23
"""
import copy
import math
def maximum_flow_dfs(adjacency_matrix):
"""
Get the maximum flow through a graph using a depth first search
"""
#initial setting
new_array = copy.deepcopy(adjacency_matrix)
total = 0
while True:
#setting min to max_value
min = math.inf
#save visited nodes
visited = [0]*len(new_array)
#save parent nodes
path = [0]*len(new_array)
#initialize stack for DFS
stack = []
#initial setting
visited[0] = 1
stack.append(0)
#DFS to find path
while len(stack) > 0:
            #pop from stack
src = stack.pop()
for k in range(len(new_array)):
#checking capacity and visit
if new_array[src][k] > 0 and visited[k] == 0:
                    #if not, push onto stack, mark as visited and save path
visited[k] = 1
stack.append(k)
path[k] = src
#if there is no path from src to sink
if visited[len(new_array) - 1] == 0:
break
#initial setting
tmp = len(new_array) - 1
#Get minimum flow
while tmp != 0:
#find minimum flow
if min > new_array[path[tmp]][tmp]:
min = new_array[path[tmp]][tmp]
tmp = path[tmp]
#initial setting
tmp = len(new_array) - 1
        #update residual capacities along the augmenting path
        while tmp != 0:
            new_array[path[tmp]][tmp] = new_array[path[tmp]][tmp] - min
            #add reverse (residual) capacity so a later path can re-route this flow
            new_array[tmp][path[tmp]] = new_array[tmp][path[tmp]] + min
            tmp = path[tmp]
total = total + min
return total
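# Usage sketch (not part of the original module): calling the function on the
# adjacency matrix from the docstring above should return the documented
# maximum flow of 23.
if __name__ == "__main__":
    example_graph = [[0, 16, 13, 0, 0, 0],
                     [0, 0, 10, 12, 0, 0],
                     [0, 4, 0, 0, 14, 0],
                     [0, 0, 9, 0, 0, 20],
                     [0, 0, 0, 7, 0, 4],
                     [0, 0, 0, 0, 0, 0]]
    print(maximum_flow_dfs(example_graph))  # expected output: 23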
|
keon/algorithms
|
algorithms/graph/maximum_flow_dfs.py
|
Python
|
mit
| 2,038
|
[
"VisIt"
] |
38c72597e00740ad0ab5946760e2644b962c4c3ef3b13f26277c8fec5f8fc1c1
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""The Visualization ToolKit (VTK) is an open source, freely available
software system for 3D computer graphics, image processing, and
visualization used by thousands of researchers and developers around
the world. http://www.vtk.org"""
from __future__ import division
from identifiers import *
import vistrails.core
def package_dependencies():
import vistrails.core.packagemanager
manager = vistrails.core.packagemanager.get_package_manager()
if manager.has_package('org.vistrails.vistrails.spreadsheet'):
return ['org.vistrails.vistrails.spreadsheet']
else:
return []
def package_requirements():
from vistrails.core.requirements import require_python_module, \
python_module_exists
require_python_module('vtk', {
'linux-debian': 'python-vtk',
'linux-ubuntu': 'python-vtk',
'linux-fedora': 'vtk-python'})
if not python_module_exists('PyQt4'):
from vistrails.core import debug
debug.warning('PyQt4 is not available. There will be no interaction '
'between VTK and the spreadsheet.')
|
hjanime/VisTrails
|
vistrails/packages/vtk/__init__.py
|
Python
|
bsd-3-clause
| 3,026
|
[
"VTK"
] |
d72123f2d89f8cca8879d3f1a9f534ab1ec38829b16b618fbc362d6e7c90fc4d
|
#!/usr/bin/env python
## /*=========================================================================
## Program: Visualization Toolkit
## Module: HeaderTesting.py
## Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
## All rights reserved.
## See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
## =========================================================================*/
## .NAME HeaderTesting - a VTK style and validity checking utility
## .SECTION Description
## HeaderTesting is a script which checks the list of header files for
## validity based on the VTK coding standard. It checks for proper super
## classes, number and style of include files, type macro, private
## copy constructor and assignment operator, broken constructors, and
## existence of the PrintSelf method. This script should be run as a part
## of the dashboard checking of the Visualization Toolkit and related
## projects.
## .SECTION See Also
## http://www.vtk.org http://public.kitware.com/Dart/HTML/Index.shtml
## http://www.vtk.org/contribute.php#coding-standards
import sys
import re
import os
import stat
import string
# Get the path to the directory containing this script.
if __name__ == '__main__':
selfpath = os.path.abspath(sys.path[0] or os.curdir)
else:
selfpath = os.path.abspath(os.path.dirname(__file__))
# Load the list of names mangled by windows.h.
execfile(os.path.join(selfpath, 'WindowsMangleList.py'))
## If tested from dart, make sure to fix all the output strings
test_from_dart = 0
if os.environ.has_key("DART_TEST_FROM_DART"):
test_from_dart = 1
## For backward compatibility
def StringEndsWith(str1, str2):
l1 = len(str1)
l2 = len(str2)
if l1 < l2:
return 0
return (str1[(l1-l2):] == str2)
##
class TestVTKFiles:
def __init__(self):
self.FileName = ""
self.ErrorValue = 0;
self.Errors = {}
self.WarningValue = 0;
self.Warnings = {}
self.FileLines = []
self.Export = ""
self.UnnecessaryIncludes = [
"stdio.h",
"stdlib.h",
"string.h",
"iostream",
"iostream.h",
"strstream",
"strstream.h",
"fstream",
"fstream.h",
"windows.h"
]
pass
def SetExport(self, export):
self.Export = export
def Print(self, text=""):
rtext = text
if test_from_dart:
rtext = string.replace(rtext, "<", "<")
rtext = string.replace(rtext, ">", ">")
print rtext
def Error(self, error):
self.ErrorValue = 1
self.Errors[error] = 1
pass
def Warning(self, warning):
self.WarningValue = 1
self.Warnings[warning] = 1
pass
def PrintErrors(self):
if self.ErrorValue:
self.Print( )
self.Print( "There were errors:" )
for a in self.Errors.keys():
self.Print( "* %s" % a )
def PrintWarnings(self):
if self.WarningValue:
self.Print( )
self.Print( "There were warnings:" )
for a in self.Warnings.keys():
self.Print( "* %s" % a )
def TestFile(self, filename):
self.FileName = filename
self.FileLines = []
self.ClassName = ""
self.ParentName = ""
try:
file = open(filename)
self.FileLines = file.readlines()
file.close()
except:
self.Print( "Problem reading file: %s" % filename )
sys.exit(1)
pass
def CheckIncludes(self):
count = 0
lines = []
nplines = []
unlines = []
includere = "^\s*#\s*include\s*[\"<]([^>\"]+)"
ignincludere = ".*\/\/.*"
regx = re.compile(includere)
regx1 = re.compile(ignincludere)
cc = 0
includeparent = 0
for a in self.FileLines:
line = string.strip(a)
rm = regx.match(line)
if rm and not regx1.match(line):
lines.append(" %4d: %s" % (cc, line))
file = rm.group(1)
if file == (self.ParentName + ".h"):
includeparent = 1
if not StringEndsWith(file, ".h"):
nplines.append(" %4d: %s" % (cc, line))
if file in self.UnnecessaryIncludes:
unlines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 1:
self.Print()
self.Print( "File: %s has %d includes: " %
( self.FileName, len(lines)) )
for a in lines:
self.Print( a )
self.Error("Multiple includes")
if len(nplines) > 0:
self.Print( )
self.Print( "File: %s has non-portable include(s): " % self.FileName )
for a in nplines:
self.Print( a )
self.Error("Non-portable includes")
if len(unlines) > 0:
self.Print( )
self.Print( "File: %s has unnecessary include(s): " % self.FileName )
for a in unlines:
self.Print( a )
self.Error("Unnecessary includes")
if not includeparent and self.ParentName:
self.Print()
self.Print( "File: %s does not include parent \"%s.h\"" %
( self.FileName, self.ParentName ) )
self.Error("Does not include parent")
pass
def CheckParent(self):
classre = "^class\s*(.*_EXPORT|\s*) (vtk[A-Z0-9_][^ :\n]*)\s*:\s*public\s*(vtk[^ \n\{]*)"
cname = ""
pname = ""
classlines = []
regx = re.compile(classre)
cc = 0
lastline = ""
for a in self.FileLines:
line = string.strip(a)
rm = regx.match(line)
if not rm and not cname:
rm = regx.match(lastline + line)
if rm:
export = rm.group(1)
export = string.strip(export)
cname = rm.group(2)
pname = rm.group(3)
classlines.append(" %4d: %s" % (cc, line))
if not export:
self.Print("File: %s defines 1 class with no export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Error("No export macro")
elif self.Export and self.Export != export:
self.Print("File: %s defines 1 class with wrong export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Print(" The export macro should be: %s" % (self.Export))
self.Error("Wrong export macro")
cc = cc + 1
lastline = a
if len(classlines) > 1:
self.Print()
self.Print( "File: %s defines %d classes: " %
(self.FileName, len(classlines)) )
for a in classlines:
self.Print( a )
self.Error("Multiple classes defined")
if len(classlines) < 1:
self.Print()
self.Print( "File: %s does not define any classes" % self.FileName )
self.Error("No class defined")
return
#self.Print( "Classname: %s ParentName: %s" % (cname, pname)
self.ClassName = cname
self.ParentName = pname
pass
def CheckTypeMacro(self):
count = 0
lines = []
oldlines = []
typere = "^\s*vtkType(Revision)*Macro\s*\(\s*(vtk[^ ,]+)\s*,\s*(vtk[^ \)]+)\s*\)\s*;"
typesplitre = "^\s*vtkType(Revision)*Macro\s*\("
regx = re.compile(typere)
regxs = re.compile(typesplitre)
cc = 0
found = 0
for a in range(len(self.FileLines)):
line = string.strip(self.FileLines[a])
rm = regx.match(line)
if rm:
found = 1
if rm.group(1) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(2)
pname = rm.group(3)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
else:
# Maybe it is in two lines
rm = regxs.match(line)
if rm:
nline = line + " " + string.strip(self.FileLines[a+1])
line = string.strip(nline)
rm = regx.match(line)
if rm:
found = 1
if rm.group(1) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(2)
pname = rm.group(3)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has broken type macro(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName) )
self.Error("Broken type macro")
if len(oldlines) > 0:
self.Print( "File: %s has legacy type-revision macro(s):" % self.FileName )
for a in oldlines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s);" %
(self.ClassName, self.ParentName))
self.Error("Legacy style type-revision macro")
if not found:
self.Print( "File: %s does not have type macro" % self.FileName )
self.Print( "Should be:\n vtkTypeMacro(%s, %s);" %
(self.ClassName, self.ParentName))
self.Error("No type macro")
pass
def CheckForCopyAndAssignment(self):
if not self.ClassName:
return
count = 0
lines = []
oldlines = []
copyoperator = "^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % ( self.ClassName, self.ClassName)
asgnoperator = "^\s*void\s*operator\s*=\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % self.ClassName
#self.Print( copyoperator
regx1 = re.compile(copyoperator)
regx2 = re.compile(asgnoperator)
foundcopy = 0
foundasgn = 0
for a in self.FileLines:
line = string.strip(a)
if regx1.match(line):
foundcopy = foundcopy + 1
if regx2.match(line):
foundasgn = foundasgn + 1
lastline = ""
if foundcopy < 1:
for a in self.FileLines:
line = string.strip(a)
if regx1.match(lastline + line):
foundcopy = foundcopy + 1
lastline = a
lastline = ""
if foundasgn < 1:
for a in self.FileLines:
line = string.strip(a)
if regx2.match(lastline + line):
foundasgn = foundasgn + 1
lastline = a
if foundcopy < 1:
self.Print( "File: %s does not define copy constructor" %
self.FileName )
self.Print( "Should be:\n%s(const %s&); // Not implemented" %
(self.ClassName, self.ClassName) )
self.Error("No private copy constructor")
if foundcopy > 1:
self.Print( "File: %s defines multiple copy constructors" %
self.FileName )
self.Error("Multiple copy constructor")
if foundasgn < 1:
self.Print( "File: %s does not define assignment operator" %
self.FileName )
self.Print( "Should be:\nvoid operator=(const %s&); // Not implemented"
% self.ClassName )
self.Error("No private assignment operator")
        if foundasgn > 1:
self.Print( "File: %s defines multiple assignment operators" %
self.FileName )
self.Error("Multiple assignment operators")
pass
def CheckWeirdConstructors(self):
count = 0
lines = []
oldlines = []
constructor = "^\s*%s\s*\(([^ )]*)\)" % self.ClassName
copyoperator = "^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*implemented(\.)*" % ( self.ClassName, self.ClassName)
regx1 = re.compile(constructor)
regx2 = re.compile(copyoperator)
cc = 0
for a in self.FileLines:
line = string.strip(a)
rm = regx1.match(line)
if rm:
arg = string.strip(rm.group(1))
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has weird constructor(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "There should be only:\n %s();" % self.ClassName )
self.Error("Weird constructor")
pass
def CheckPrintSelf(self):
if not self.ClassName:
return
typere = "^\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
newtypere = "^\s*virtual\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
regx1 = re.compile(typere)
regx2 = re.compile(newtypere)
found = 0
oldstyle = 0
for a in self.FileLines:
line = string.strip(a)
rm1 = regx1.match(line)
rm2 = regx2.match(line)
if rm1 or rm2:
found = 1
if rm1:
oldstyle = 1
if not found:
self.Print( "File: %s does not define PrintSelf method:" %
self.FileName )
self.Warning("No PrintSelf method")
pass
def CheckWindowsMangling(self):
lines = []
regx1 = WindowsMangleRegEx
regx2 = re.compile("^.*VTK_LEGACY.*$")
# This version will leave out comment lines but we probably do
# not want to refer to mangled (hopefully deprecated) methods
# in comments.
# regx2 = re.compile("^(\s*//|\s*\*|.*VTK_LEGACY).*$")
cc = 1
for a in self.FileLines:
line = string.strip(a)
rm = regx1.match(line)
if rm:
arg = string.strip(rm.group(1))
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has windows.h mangling violations:" % self.FileName )
for a in lines:
self.Print(a)
self.Error("Windows Mangling Violation - choose another name that does not conflict.")
pass
##
test = TestVTKFiles()
## Check command line arguments
if len(sys.argv) < 2:
print "Testing directory not specified..."
print "Usage: %s <directory> [ exception(s) ]" % sys.argv[0]
sys.exit(1)
dirname = sys.argv[1]
exceptions = sys.argv[2:]
if len(sys.argv) > 2:
export = sys.argv[2]
if export[:3] == "VTK" and export[len(export)-len("EXPORT"):] == "EXPORT":
print "Use export macro: %s" % export
exceptions = sys.argv[3:]
test.SetExport(export)
## Traverse through the list of files
for a in os.listdir(dirname):
## Skip non-header files
if not StringEndsWith(a, ".h"):
continue
## Skip exceptions
if a in exceptions:
continue
pathname = '%s/%s' % (dirname, a)
if pathname in exceptions:
continue
mode = os.stat(pathname)[stat.ST_MODE]
## Skip directories
if stat.S_ISDIR(mode):
continue
elif stat.S_ISREG(mode):
## Do all the tests
test.TestFile(pathname)
test.CheckParent()
test.CheckIncludes()
test.CheckTypeMacro()
test.CheckForCopyAndAssignment()
test.CheckWeirdConstructors()
test.CheckPrintSelf()
test.CheckWindowsMangling()
## Summarize errors
test.PrintWarnings()
test.PrintErrors()
sys.exit(test.ErrorValue)
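## Example invocation (illustrative only; the directory, export macro and
## exception file below are assumptions, not part of this script):
##   python HeaderTesting.py ./Common VTK_COMMON_EXPORT vtkWin32Header.h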
|
naucoin/VTKSlicerWidgets
|
Common/Testing/HeaderTesting.py
|
Python
|
bsd-3-clause
| 16,639
|
[
"VTK"
] |
154e3a86d47af6a9f5989fbcc82727e40f90add1103ca56317afa997b43371a5
|
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Module to support the loading of a NetCDF file into an Iris cube.
See also: `netCDF4 python <http://code.google.com/p/netcdf4-python/>`_.
Also refer to document 'NetCDF Climate and Forecast (CF) Metadata Conventions',
Version 1.4, 27 February 2009.
"""
import collections
import itertools
import os
import os.path
import string
import warnings
import iris.proxy
iris.proxy.apply_proxy('netCDF4', globals())
import numpy as np
import numpy.ma as ma
from pyke import knowledge_engine
import iris.analysis
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris.cube
import iris.exceptions
import iris.fileformats.cf
import iris.fileformats.manager
import iris.fileformats._pyke_rules
import iris.io
import iris.unit
import iris.util
# Show Pyke inference engine statistics.
DEBUG = False
# Pyke CF related file names.
_PYKE_RULE_BASE = 'fc_rules_cf'
_PYKE_FACT_BASE = 'facts_cf'
# Standard CML spatio-temporal axis names.
SPATIO_TEMPORAL_AXES = ['t', 'z', 'y', 'x']
# Pass through CF attributes:
# - comment
# - Conventions
# - history
# - institution
# - reference
# - source
# - title
# - positive
#
_CF_ATTRS = ['add_offset', 'ancillary_variables', 'axis', 'bounds', 'calendar',
'cell_measures', 'cell_methods', 'climatology', 'compress',
'coordinates', '_FillValue', 'flag_masks', 'flag_meanings',
'flag_values', 'formula_terms', 'grid_mapping', 'leap_month',
'leap_year', 'long_name', 'missing_value', 'month_lengths',
'scale_factor', 'standard_error_multiplier',
'standard_name', 'units', 'valid_max', 'valid_min', 'valid_range']
# CF attributes that should not be global.
_CF_DATA_ATTRS = ['flag_masks', 'flag_meanings', 'flag_values',
'instance_dimension', 'sample_dimension',
'standard_error_multiplier']
# CF attributes that should only be global.
_CF_GLOBAL_ATTRS = ['conventions', 'featureType', 'history', 'title']
# UKMO specific attributes that should not be global.
_UKMO_DATA_ATTRS = ['STASH', 'ukmo__um_stash_source', 'ukmo__process_flags']
_CF_CONVENTIONS_VERSION = 'CF-1.5'
_FactoryDefn = collections.namedtuple('_FactoryDefn', ('primary', 'std_name',
'formula_terms_format'))
_FACTORY_DEFNS = {
iris.aux_factory.HybridHeightFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_height_coordinate',
formula_terms_format='a: {delta} b: {sigma} orog: {orography}'), }
class CFNameCoordMap(object):
"""Provide a simple CF name to CF coordinate mapping."""
_Map = collections.namedtuple('_Map', ['name', 'coord'])
def __init__(self):
self._map = []
def append(self, name, coord):
"""
Append the given name and coordinate pair to the mapping.
Args:
* name:
CF name of the associated coordinate.
* coord:
The coordinate of the associated CF name.
Returns:
None.
"""
self._map.append(CFNameCoordMap._Map(name, coord))
@property
def names(self):
"""Return all the CF names."""
return [pair.name for pair in self._map]
@property
def coords(self):
"""Return all the coordinates."""
return [pair.coord for pair in self._map]
def name(self, coord):
"""
Return the CF name, given a coordinate
Args:
* coord:
The coordinate of the associated CF name.
Returns:
            The CF name.
"""
result = None
for pair in self._map:
if coord == pair.coord:
result = pair.name
break
if result is None:
msg = 'Coordinate is not mapped, {!r}'.format(coord)
raise KeyError(msg)
return result
def coord(self, name):
"""
Return the coordinate, given a CF name.
Args:
* name:
CF name of the associated coordinate.
Returns:
            The coordinate.
"""
result = None
for pair in self._map:
if name == pair.name:
result = pair.coord
break
if result is None:
msg = 'Name is not mapped, {!r}'.format(name)
raise KeyError(msg)
return result
def _pyke_kb_engine():
"""Return the PyKE knowledge engine for CF->cube conversion."""
pyke_dir = os.path.join(os.path.dirname(__file__), '_pyke_rules')
compile_dir = os.path.join(pyke_dir, 'compiled_krb')
engine = None
if os.path.exists(compile_dir):
tmpvar = [os.path.getmtime(os.path.join(compile_dir, fname)) for
fname in os.listdir(compile_dir) if not
fname.startswith('_')]
if tmpvar:
oldest_pyke_compile_file = min(tmpvar)
rule_age = os.path.getmtime(
os.path.join(pyke_dir, _PYKE_RULE_BASE + '.krb'))
if oldest_pyke_compile_file >= rule_age:
# Initialise the pyke inference engine.
engine = knowledge_engine.engine(
(None, 'iris.fileformats._pyke_rules.compiled_krb'))
if engine is None:
engine = knowledge_engine.engine(iris.fileformats._pyke_rules)
return engine
class NetCDFDataProxy(object):
"""A reference to the data payload of a single NetCDF file variable."""
__slots__ = ('path', 'variable_name')
def __init__(self, path, variable_name):
self.path = path
self.variable_name = variable_name
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.path,
self.variable_name)
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in state.iteritems():
setattr(self, key, value)
def load(self, data_shape, data_type, mdi, deferred_slice):
"""
Load the corresponding proxy data item and perform any deferred
slicing.
Args:
* data_shape (tuple of int):
The data shape of the proxy data item.
* data_type (:class:`numpy.dtype`):
The data type of the proxy data item.
* mdi (float):
The missing data indicator value.
* deferred_slice (tuple):
The deferred slice to be applied to the proxy data item.
Returns:
:class:`numpy.ndarray`
"""
dataset = netCDF4.Dataset(self.path)
variable = dataset.variables[self.variable_name]
# Get the NetCDF variable data and slice.
payload = variable[deferred_slice]
dataset.close()
return payload
def _assert_case_specific_facts(engine, cf, cf_group):
# Initialise pyke engine "provides" hooks.
engine.provides['coordinates'] = []
# Assert facts for CF coordinates.
for cf_name in cf_group.coordinates.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'coordinate',
(cf_name,))
# Assert facts for CF auxiliary coordinates.
for cf_name in cf_group.auxiliary_coordinates.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'auxiliary_coordinate',
(cf_name,))
# Assert facts for CF grid_mappings.
for cf_name in cf_group.grid_mappings.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'grid_mapping',
(cf_name,))
# Assert facts for CF labels.
for cf_name in cf_group.labels.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'label',
(cf_name,))
# Assert facts for CF formula terms associated with the cf_group
# of the CF data variable.
formula_root = set()
for cf_var in cf.cf_group.formula_terms.itervalues():
for cf_root, cf_term in cf_var.cf_terms_by_root.iteritems():
# Only assert this fact if the formula root variable is
# defined in the CF group of the CF data variable.
if cf_root in cf_group:
formula_root.add(cf_root)
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_term',
(cf_var.cf_name, cf_root,
cf_term))
for cf_root in formula_root:
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_root',
(cf_root,))
def _pyke_stats(engine, cf_name):
if DEBUG:
print '-' * 80
print 'CF Data Variable: %r' % cf_name
engine.print_stats()
print 'Rules Triggered:'
for rule in sorted(list(engine.rule_triggered)):
print '\t%s' % rule
print 'Case Specific Facts:'
kb_facts = engine.get_kb(_PYKE_FACT_BASE)
for key in kb_facts.entity_lists.iterkeys():
for arg in kb_facts.entity_lists[key].case_specific_facts:
print '\t%s%s' % (key, arg)
def _set_attributes(attributes, key, value):
"""Set attributes dictionary, converting unicode strings appropriately."""
if isinstance(value, unicode):
try:
attributes[str(key)] = str(value)
except UnicodeEncodeError:
attributes[str(key)] = value
else:
attributes[str(key)] = value
def _load_cube(engine, cf, cf_var, filename):
"""Create the cube associated with the CF-netCDF data variable."""
# Figure out what the eventual data type will be after any scale/offset
# transforms.
dummy_data = np.zeros(1, dtype=cf_var.dtype)
if hasattr(cf_var, 'scale_factor'):
dummy_data = cf_var.scale_factor * dummy_data
if hasattr(cf_var, 'add_offset'):
dummy_data = cf_var.add_offset + dummy_data
# Create cube with data (not yet deferred), but no metadata
data_proxies = np.array(NetCDFDataProxy(filename, cf_var.cf_name))
data_manager = iris.fileformats.manager.DataManager(cf_var.shape,
dummy_data.dtype,
None)
cube = iris.cube.Cube(data_proxies, data_manager=data_manager)
# Reset the pyke inference engine.
engine.reset()
# Initialise pyke engine rule processing hooks.
engine.cf_var = cf_var
engine.cube = cube
engine.provides = {}
engine.requires = {}
engine.rule_triggered = set()
engine.filename = filename
# Assert any case-specific facts.
_assert_case_specific_facts(engine, cf, cf_var.cf_group)
# Run pyke inference engine with forward chaining rules.
engine.activate(_PYKE_RULE_BASE)
# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get('coordinates', [])
attribute_predicate = lambda item: item[0] not in _CF_ATTRS
for coord, cf_var_name in coordinates:
tmpvar = itertools.ifilter(attribute_predicate,
cf.cf_group[cf_var_name].cf_attrs_unused())
for attr_name, attr_value in tmpvar:
_set_attributes(coord.attributes, attr_name, attr_value)
tmpvar = itertools.ifilter(attribute_predicate, cf_var.cf_attrs_unused())
# Attach untouched attributes of the associated CF-netCDF data variable to
# the cube.
for attr_name, attr_value in tmpvar:
_set_attributes(cube.attributes, attr_name, attr_value)
# Show pyke session statistics.
_pyke_stats(engine, cf_var.cf_name)
return cube
def _load_aux_factory(engine, cf, filename, cube):
"""
Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory.
"""
formula_type = engine.requires.get('formula_type')
if formula_type == 'atmosphere_hybrid_height_coordinate':
def coord_from_var_name(name):
            mapping = engine.provides['coordinates']
            for coord, cf_var_name in mapping:
if cf_var_name == name:
return coord
raise ValueError('Unable to find coordinate for variable '
'{!r}'.format(name))
# Convert term names to coordinates (via netCDF variable names).
terms_to_var_names = engine.requires['formula_terms']
delta = coord_from_var_name(terms_to_var_names['a'])
sigma = coord_from_var_name(terms_to_var_names['b'])
orography = coord_from_var_name(terms_to_var_names['orog'])
factory = iris.aux_factory.HybridHeightFactory(delta, sigma, orography)
cube.add_aux_factory(factory)
def load_cubes(filenames, callback=None):
"""
Loads cubes from a list of NetCDF filenames/URLs.
Args:
* filenames (string/list):
One or more NetCDF filenames/DAP URLs to load from.
Kwargs:
* callback (callable function):
Function which can be passed on to :func:`iris.io.run_callback`.
Returns:
Generator of loaded NetCDF :class:`iris.cubes.Cube`.
"""
# Initialise the pyke inference engine.
engine = _pyke_kb_engine()
if isinstance(filenames, basestring):
filenames = [filenames]
for filename in filenames:
# Ingest the netCDF file.
cf = iris.fileformats.cf.CFReader(filename)
# Process each CF data variable.
for cf_var in cf.cf_group.data_variables.itervalues():
# Only process CF data variables that do not participate in a
# formula term.
if not cf_var.has_formula_terms():
cube = _load_cube(engine, cf, cf_var, filename)
# Process any associated formula terms and attach
# the corresponding AuxCoordFactory.
_load_aux_factory(engine, cf, filename, cube)
# Perform any user registered callback function.
cube = iris.io.run_callback(callback, cube, engine.cf_var,
filename)
# Callback mechanism may return None, which must not be yielded
if cube is None:
continue
yield cube
class Saver(object):
"""A manager for saving netcdf files."""
def __init__(self, filename, netcdf_format):
"""
A manager for saving netcdf files.
Args:
* filename (string):
Name of the netCDF file to save the cube.
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
Returns:
None.
For example::
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube)
"""
if netcdf_format not in ['NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
raise ValueError('Unknown netCDF file format, got %r' %
netcdf_format)
# All persistent variables
#: CF name mapping with iris coordinates
self._name_coord_map = CFNameCoordMap()
#: List of dimension coordinates added to the file
self._dim_coords = []
#: List of grid mappings added to the file
self._coord_systems = []
#: A dictionary, listing dimension names and corresponding length
self._existing_dim = {}
#: NetCDF dataset
try:
self._dataset = netCDF4.Dataset(filename, mode='w',
format=netcdf_format)
except RuntimeError:
dir_name = os.path.dirname(filename)
if not os.path.isdir(dir_name):
msg = 'No such file or directory: {}'.format(dir_name)
raise IOError(msg)
if not os.access(dir_name, os.R_OK | os.W_OK):
msg = 'Permission denied: {}'.format(filename)
raise IOError(msg)
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Flush any buffered data to the CF-netCDF file before closing."""
self._dataset.sync()
self._dataset.close()
def write(self, cube, local_keys=None):
"""
Wrapper for saving cubes to a NetCDF file.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
Returns:
None.
"""
if len(cube.aux_factories) > 1:
raise ValueError('Multiple auxiliary factories are not supported.')
cf_profile_available = (
'cf_profile' in iris.site_configuration and
iris.site_configuration['cf_profile'] not in [None, False])
if cf_profile_available:
# Perform a CF profile of the cube. This may result in an exception
# being raised if mandatory requirements are not satisfied.
profile = iris.site_configuration['cf_profile'](cube)
# Get suitable dimension names.
dimension_names = self._get_dim_names(cube)
# Create the CF-netCDF data dimensions.
self._create_cf_dimensions(dimension_names)
# Create the associated cube CF-netCDF data variable.
cf_var_cube = self._create_cf_data_variable(cube, dimension_names,
local_keys)
# Add coordinate variables and return factory definitions
factory_defn = self._add_dim_coords(cube, dimension_names)
# Add the auxiliary coordinate variable names and associate the data
# variable to them
cf_var_cube = self._add_aux_coords(cube, cf_var_cube, dimension_names,
factory_defn)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add global attributes taking into account local_keys.
global_attributes = {k: v for k, v in cube.attributes.iteritems() if k
not in local_keys and k.lower() != 'conventions'}
self.update_global_attributes(global_attributes)
if cf_profile_available:
# Perform a CF patch of the dataset.
iris.site_configuration['cf_patch'](profile, self._dataset,
cf_var_cube)
def update_global_attributes(self, attributes=None, **kwargs):
"""
Update the CF global attributes based on the provided
iterable/dictionary and/or keyword arguments.
Args:
* attributes (dict or iterable of key, value pairs):
CF global attributes to be updated.
"""
if attributes is not None:
# Handle sequence e.g. [('fruit', 'apple'), ...].
if not hasattr(attributes, 'keys'):
attributes = dict(attributes)
for attr_name in sorted(attributes):
setattr(self._dataset, attr_name, attributes[attr_name])
for attr_name in sorted(kwargs):
setattr(self._dataset, attr_name, kwargs[attr_name])
def _create_cf_dimensions(self, dimension_names):
"""
Create the CF-netCDF data dimensions.
Create the CF-netCDF data dimensions, making the outermost dimension
an unlimited dimension.
Args:
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
None.
"""
if dimension_names:
if dimension_names[0] not in self._dataset.dimensions:
self._dataset.createDimension(dimension_names[0], None)
for dim_name in dimension_names[1:]:
if dim_name not in self._dataset.dimensions:
self._dataset.createDimension(dim_name,
self._existing_dim[dim_name])
def _add_aux_coords(self, cube, cf_var_cube, dimension_names,
factory_defn):
"""
Add aux. coordinate to the dataset and associate with the data variable
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
* factory_defn (:class:`_FactoryDefn`):
An optional description of the AuxCoordFactory relevant to this
cube.
Returns:
Updated cf_var_cube with coordinates added.
"""
auxiliary_coordinate_names = []
# Add CF-netCDF variables for the associated auxiliary coordinates.
for coord in sorted(cube.aux_coords, key=lambda coord: coord.name()):
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord, factory_defn)
self._name_coord_map.append(cf_name, coord)
else:
cf_name = self._name_coord_map.name(coord)
if cf_name is not None:
auxiliary_coordinate_names.append(cf_name)
# Add CF-netCDF auxiliary coordinate variable references to the
# CF-netCDF data variable.
if auxiliary_coordinate_names:
cf_var_cube.coordinates = ' '.join(
sorted(auxiliary_coordinate_names))
return cf_var_cube
def _add_dim_coords(self, cube, dimension_names):
"""
Add coordinate variables to NetCDF dataset.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
Factory definitions, a description of the AuxCoordFactory relevant
to this cube.
"""
factory_defn = None
if cube.aux_factories:
factory = cube.aux_factories[0]
factory_defn = _FACTORY_DEFNS.get(type(factory), None)
# Ensure we create the netCDF coordinate variables first.
for coord in cube.dim_coords:
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord, factory_defn)
self._name_coord_map.append(cf_name, coord)
return factory_defn
def _get_dim_names(self, cube):
"""
Determine suitable CF-netCDF data dimension names.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
Returns:
List of dimension names with length equal the number of dimensions
in the cube.
"""
dimension_names = []
for dim in xrange(cube.ndim):
coords = cube.coords(dimensions=dim, dim_coords=True)
if coords:
coord = coords[0]
dim_name = self._get_coord_variable_name(cube, coord)
# Add only dimensions that have not already been added.
if coord not in self._dim_coords:
# Determine unique dimension name
while (dim_name in self._existing_dim or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update names added, current cube dim names used and
# unique coordinates added.
self._existing_dim[dim_name] = coord.shape[0]
dimension_names.append(dim_name)
self._dim_coords.append(coord)
else:
# Return the dim_name associated with the existing
# coordinate.
dim_name = self._name_coord_map.name(coord)
dimension_names.append(dim_name)
else:
# No CF-netCDF coordinates describe this data dimension.
dim_name = 'dim%d' % dim
if dim_name in self._existing_dim:
# Increment name if conflicted with one already existing.
if self._existing_dim[dim_name] != cube.shape[dim]:
while (dim_name in self._existing_dim and
self._existing_dim[dim_name] !=
cube.shape[dim] or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
else:
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
dimension_names.append(dim_name)
return dimension_names
def _cf_coord_identity(self, coord):
"""
        Determine suitable units from a given coordinate.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
Returns:
The (standard_name, long_name, unit) of the given
:class:`iris.coords.Coord` instance.
"""
units = str(coord.units)
# TODO: Use #61 to get the units.
if isinstance(coord.coord_system, iris.coord_systems.GeogCS):
if "latitude" in coord.standard_name:
units = 'degrees_north'
elif "longitude" in coord.standard_name:
units = 'degrees_east'
elif isinstance(coord.coord_system, iris.coord_systems.RotatedGeogCS):
units = 'degrees'
elif isinstance(coord.coord_system,
iris.coord_systems.TransverseMercator):
units = 'm'
return coord.standard_name, coord.long_name, units
def _ensure_valid_dtype(self, values, src_name, src_object):
# NetCDF3 does not support int64 or unsigned ints, so we check
# if we can store them as int32 instead.
if ((np.issubdtype(values.dtype, np.int64) or
np.issubdtype(values.dtype, np.unsignedinteger)) and
self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Cast to an integer type supported by netCDF3.
if not np.can_cast(values.max(), np.int32) or \
not np.can_cast(values.min(), np.int32):
msg = 'The data type of {} {!r} is not supported by {} and' \
' its values cannot be safely cast to a supported' \
' integer type.'
msg = msg.format(src_name, src_object,
self._dataset.file_format)
raise ValueError(msg)
values = values.astype(np.int32)
return values
def _create_cf_bounds(self, coord, cf_var, cf_name):
"""
Create the associated CF-netCDF bounds variable.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
* cf_var:
CF-netCDF variable
* cf_name (string):
name of the CF-NetCDF variable.
Returns:
None
"""
if coord.has_bounds():
# Get the values in a form which is valid for the file format.
bounds = self._ensure_valid_dtype(coord.bounds,
'the bounds of coordinate',
coord)
n_bounds = bounds.shape[-1]
if n_bounds == 2:
bounds_dimension_name = 'bnds'
else:
bounds_dimension_name = 'bnds_%s' % n_bounds
if bounds_dimension_name not in self._dataset.dimensions:
# Create the bounds dimension with the appropriate extent.
self._dataset.createDimension(bounds_dimension_name, n_bounds)
cf_var.bounds = cf_name + '_bnds'
cf_var_bounds = self._dataset.createVariable(
cf_var.bounds, bounds.dtype.newbyteorder('='),
cf_var.dimensions + (bounds_dimension_name,))
cf_var_bounds[:] = bounds
def _get_cube_variable_name(self, cube):
"""
Returns a CF-netCDF variable name for the given cube.
Args:
* cube (class:`iris.cube.Cube`):
An instance of a cube for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if cube.var_name is not None:
cf_name = cube.var_name
else:
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(cube.name().lower().split())
return cf_name
def _get_coord_variable_name(self, cube, coord):
"""
Returns a CF-netCDF variable name for the given coordinate.
Args:
* cube (:class:`iris.cube.Cube`):
The cube that contains the given coordinate.
* coord (:class:`iris.coords.Coord`):
An instance of a coordinate for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if coord.var_name is not None:
cf_name = coord.var_name
else:
name = coord.standard_name or coord.long_name
if not name or set(name).intersection(string.whitespace):
# Auto-generate name based on associated dimensions.
name = ''
for dim in cube.coord_dims(coord):
name += 'dim{}'.format(dim)
# Handle scalar coordinate (dims == ()).
if not name:
name = 'unknown_scalar'
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(name.lower().split())
return cf_name
def _create_cf_variable(self, cube, dimension_names, coord, factory_defn):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given coordinate. If required, also create the CF-netCDF bounds
variable and associated dimension.
Args:
* dataset (:class:`netCDF4.Dataset`):
The CF-netCDF data file being created.
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* coord (:class:`iris.coords.Coord`):
The coordinate to be saved to CF-netCDF file.
* factory_defn (:class:`_FactoryDefn`):
An optional description of the AuxCoordFactory relevant to this
cube.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, coord)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [dimension_names[dim] for dim in
cube.coord_dims(coord)]
if np.issubdtype(coord.points.dtype, np.str):
string_dimension_depth = coord.points.dtype.itemsize
string_dimension_name = 'string%d' % string_dimension_depth
# Determine whether to create the string length dimension.
if string_dimension_name not in self._dataset.dimensions:
self._dataset.createDimension(string_dimension_name,
string_dimension_depth)
# Add the string length dimension to dimension names.
cf_dimensions.append(string_dimension_name)
# Create the label coordinate variable.
cf_var = self._dataset.createVariable(cf_name, '|S1',
cf_dimensions)
# Add the payload to the label coordinate variable.
if len(cf_dimensions) == 1:
cf_var[:] = list('%- *s' % (string_dimension_depth,
coord.points[0]))
else:
for index in np.ndindex(coord.points.shape):
index_slice = tuple(list(index) + [slice(None, None)])
cf_var[index_slice] = list('%- *s' %
(string_dimension_depth,
coord.points[index]))
else:
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
if coord in cf_coordinates:
# By definition of a CF-netCDF coordinate variable this
# coordinate must be 1-D and the name of the CF-netCDF variable
# must be the same as its dimension name.
cf_name = cf_dimensions[0]
# Get the values in a form which is valid for the file format.
points = self._ensure_valid_dtype(coord.points, 'coordinate',
coord)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, points.dtype.newbyteorder('='), cf_dimensions)
# Add the axis attribute for spatio-temporal CF-netCDF coordinates.
if coord in cf_coordinates:
axis = iris.util.guess_coord_axis(coord)
if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
cf_var.axis = axis.upper()
# Add the data to the CF-netCDF variable.
cf_var[:] = points
# Create the associated CF-netCDF bounds variable.
self._create_cf_bounds(coord, cf_var, cf_name)
# Deal with CF-netCDF units and standard name.
standard_name, long_name, units = self._cf_coord_identity(coord)
# If this coordinate should describe a dimensionless vertical
# coordinate, then override `standard_name`, `long_name`, and `axis`,
# and also set the `formula_terms` attribute.
if factory_defn:
dependencies = cube.aux_factories[0].dependencies
if coord is dependencies[factory_defn.primary]:
standard_name = factory_defn.std_name
cf_var.axis = 'Z'
fmt = factory_defn.formula_terms_format
names = {key: coord.name() for key, coord in
dependencies.iteritems()}
formula_terms = fmt.format(**names)
cf_var.formula_terms = formula_terms
if units != 'unknown':
cf_var.units = units
if standard_name is not None:
cf_var.standard_name = standard_name
if long_name is not None:
cf_var.long_name = long_name
# Add the CF-netCDF calendar attribute.
if coord.units.calendar:
cf_var.calendar = coord.units.calendar
# Add any other custom coordinate attributes.
for name in sorted(coord.attributes):
value = coord.attributes[name]
if name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
name = 'ukmo__um_stash_source'
value = str(value)
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
setattr(cf_var, name, value)
return cf_name
def _create_cf_cell_methods(self, cube, dimension_names):
"""
Create CF-netCDF string representation of a cube cell methods.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
CF-netCDF string representation of a cube cell methods.
"""
cell_methods = []
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
for cm in cube.cell_methods:
names = ''
for name in cm.coord_names:
coord = cube.coords(name)
if coord:
coord = coord[0]
if coord in cf_coordinates:
name = dimension_names[cube.coord_dims(coord)[0]]
names += '%s: ' % name
interval = ' '.join(['interval: %s' % interval for interval in
cm.intervals or []])
comment = ' '.join(['comment: %s' % comment for comment in
cm.comments or []])
extra = ' '.join([interval, comment]).strip()
if extra:
extra = ' (%s)' % extra
cell_methods.append(names + cm.method + extra)
return ' '.join(cell_methods)
def _create_cf_grid_mapping(self, cube, cf_var_cube):
"""
Create CF-netCDF grid mapping variable and associated CF-netCDF
data variable grid mapping attribute.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
Returns:
None
"""
cs = cube.coord_system('CoordSystem')
if cs is not None:
# Grid var not yet created?
if cs not in self._coord_systems:
while cs.grid_mapping_name in self._dataset.variables:
cs.grid_mapping_name = (
self._increment_name(cs.grid_mapping_name))
cf_var_grid = self._dataset.createVariable(
cs.grid_mapping_name, np.int32)
cf_var_grid.grid_mapping_name = cs.grid_mapping_name
def add_ellipsoid():
if cs.ellipsoid:
cf_var_grid.longitude_of_prime_meridian = (
cs.ellipsoid.longitude_of_prime_meridian)
cf_var_grid.semi_major_axis = (
cs.ellipsoid.semi_major_axis)
cf_var_grid.semi_minor_axis = (
cs.ellipsoid.semi_minor_axis)
# latlon
if isinstance(cs, iris.coord_systems.GeogCS):
cf_var_grid.longitude_of_prime_meridian = (
cs.longitude_of_prime_meridian)
cf_var_grid.semi_major_axis = cs.semi_major_axis
cf_var_grid.semi_minor_axis = cs.semi_minor_axis
# rotated latlon
elif isinstance(cs, iris.coord_systems.RotatedGeogCS):
add_ellipsoid()
cf_var_grid.grid_north_pole_latitude = (
cs.grid_north_pole_latitude)
cf_var_grid.grid_north_pole_longitude = (
cs.grid_north_pole_longitude)
cf_var_grid.north_pole_grid_longitude = (
cs.north_pole_grid_longitude)
# tmerc
elif isinstance(cs, iris.coord_systems.TransverseMercator):
add_ellipsoid()
cf_var_grid.longitude_of_central_meridian = (
cs.longitude_of_central_meridian)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.scale_factor_at_central_meridian = (
cs.scale_factor_at_central_meridian)
# osgb (a specific tmerc)
elif isinstance(cs, iris.coord_systems.OSGB):
warnings.warn('OSGB coordinate system not yet handled')
# other
else:
warnings.warn('Unable to represent the horizontal '
'coordinate system. The coordinate system '
'type %r is not yet implemented.' % type(cs))
self._coord_systems.append(cs)
# Refer to grid var
cf_var_cube.grid_mapping = cs.grid_mapping_name
def _create_cf_data_variable(self, cube, dimension_names, local_keys=None):
"""
Create CF-netCDF data variable for the cube and any associated grid
mapping.
Args:
* dataset (:class:`netCDF4.Dataset`):
The CF-netCDF data file being created.
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
String names for each dimension of the cube.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes
with matching keys will become attributes on the data variable.
Returns:
The newly created CF-netCDF data variable.
"""
cf_name = self._get_cube_variable_name(cube)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Determine whether there is a cube MDI value.
fill_value = None
if isinstance(cube.data, ma.core.MaskedArray):
fill_value = cube.data.fill_value
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(cube.data, 'cube', cube)
# Create the cube CF-netCDF data variable with data payload.
cf_var = self._dataset.createVariable(cf_name,
data.dtype.newbyteorder('='),
dimension_names,
fill_value=fill_value)
cf_var[:] = data
if cube.standard_name:
cf_var.standard_name = cube.standard_name
if cube.long_name:
cf_var.long_name = cube.long_name
if cube.units != 'unknown':
cf_var.units = str(cube.units)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add any cube attributes whose keys are in local_keys as
# CF-netCDF data variable attributes.
attr_names = set(cube.attributes).intersection(local_keys)
for attr_name in sorted(attr_names):
# Do not output 'conventions' attribute.
if attr_name.lower() == 'conventions':
continue
value = cube.attributes[attr_name]
if attr_name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
attr_name = 'ukmo__um_stash_source'
value = str(value)
if attr_name == "ukmo__process_flags":
value = " ".join([x.replace(" ", "_") for x in value])
if attr_name in _CF_GLOBAL_ATTRS:
msg = '{attr_name!r} is being added as CF data variable ' \
'attribute, but {attr_name!r} should only be a CF ' \
'global attribute.'.format(attr_name=attr_name)
warnings.warn(msg)
setattr(cf_var, attr_name, value)
# Create the CF-netCDF data variable cell method attribute.
cell_methods = self._create_cf_cell_methods(cube, dimension_names)
if cell_methods:
cf_var.cell_methods = cell_methods
# Create the CF-netCDF grid mapping.
self._create_cf_grid_mapping(cube, cf_var)
return cf_var
def _increment_name(self, varname):
"""
        Increment the numeric suffix of a name, or start one at zero.
        Used to avoid conflicts between variable names: the name is
        incremented to distinguish it from existing ones.
Args:
* varname (string):
Variable name to increment.
Returns:
Incremented varname.
"""
num = 0
try:
name, endnum = varname.rsplit('_', 1)
if endnum.isdigit():
num = int(endnum) + 1
varname = name
except ValueError:
pass
return '{}_{}'.format(varname, num)
def save(cube, filename, netcdf_format='NETCDF4', local_keys=None):
"""
Save cube(s) to a netCDF file, given the cube and the filename.
* Iris will write CF 1.5 compliant NetCDF files.
* The attributes dictionaries on each cube in the saved cube list
will be compared and common attributes saved as NetCDF global
attributes where appropriate.
Args:
* cube (:class:`iris.cube.Cube` or :class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or other
iterable of cubes to be saved to a netCDF file.
* filename (string):
Name of the netCDF file to save the cube(s).
Kwargs:
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
* local_keys (iterable of strings):
        An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
Returns:
None.
.. seealso::
NetCDF Context manager (:class:`~Saver`).
"""
if isinstance(cube, iris.cube.Cube):
cubes = iris.cube.CubeList()
cubes.append(cube)
else:
cubes = cube
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
# Determine the attribute keys that are common across all cubes and
# thereby extend the collection of local_keys for attributes
# that should be attributes on data variables.
attributes = cubes[0].attributes
common_keys = set(attributes)
for cube in cubes[1:]:
keys = set(cube.attributes)
local_keys.update(keys.symmetric_difference(common_keys))
common_keys.intersection_update(keys)
different_value_keys = []
for key in common_keys:
if attributes[key] != cube.attributes[key]:
different_value_keys.append(key)
common_keys.difference_update(different_value_keys)
local_keys.update(different_value_keys)
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube, local_keys)
# Add conventions attribute.
sman.update_global_attributes(Conventions=_CF_CONVENTIONS_VERSION)
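# Minimal usage sketch (not part of the original module): round-trip a NetCDF
# file through the two public entry points defined above, `load_cubes` for
# reading and `save` for writing. The file paths are taken from the command
# line and are purely illustrative.
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 3:
        loaded = iris.cube.CubeList(load_cubes(sys.argv[1]))
        save(loaded, sys.argv[2])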
|
kwilliams-mo/iris
|
lib/iris/fileformats/netcdf.py
|
Python
|
gpl-3.0
| 49,965
|
[
"NetCDF"
] |
1dec12d4d2204304a5aabf7047c218270074cce7b0a190a633f7d070cf6f1725
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
from IPython.core.display import Image, SVG
from skbio import OrdinationResults
class TestOrdinationResults(unittest.TestCase):
def setUp(self):
# Define in-memory CA results to serialize and deserialize.
eigvals = pd.Series([0.0961330159181, 0.0409418140138], ['CA1', 'CA2'])
features = np.array([[0.408869425742, 0.0695518116298],
[-0.1153860437, -0.299767683538],
[-0.309967102571, 0.187391917117]])
samples = np.array([[-0.848956053187, 0.882764759014],
[-0.220458650578, -1.34482000302],
[1.66697179591, 0.470324389808]])
features_ids = ['Species1', 'Species2', 'Species3']
sample_ids = ['Site1', 'Site2', 'Site3']
samples_df = pd.DataFrame(samples, index=sample_ids,
columns=['CA1', 'CA2'])
features_df = pd.DataFrame(features, index=features_ids,
columns=['CA1', 'CA2'])
self.ordination_results = OrdinationResults(
'CA', 'Correspondance Analysis', eigvals=eigvals,
samples=samples_df, features=features_df)
# DataFrame for testing plot method. Has a categorical column with a
# mix of numbers and strings. Has a numeric column with a mix of ints,
# floats, and strings that can be converted to floats. Has a numeric
# column with missing data (np.nan).
self.df = pd.DataFrame([['foo', '42', 10],
[22, 0, 8],
[22, -4.2, np.nan],
['foo', '42.19', 11]],
index=['A', 'B', 'C', 'D'],
columns=['categorical', 'numeric', 'nancolumn'])
# Minimal ordination results for easier testing of plotting method.
# Paired with df above.
eigvals = np.array([0.50, 0.25, 0.25])
samples = np.array([[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6]])
samples_df = pd.DataFrame(samples, ['A', 'B', 'C', 'D'],
['PC1', 'PC2', 'PC3'])
self.min_ord_results = OrdinationResults(
'PCoA', 'Principal Coordinate Analysis', eigvals, samples_df)
def test_str(self):
exp = ("Ordination results:\n"
"\tMethod: Correspondance Analysis (CA)\n"
"\tEigvals: 2\n"
"\tProportion explained: N/A\n"
"\tFeatures: 3x2\n"
"\tSamples: 3x2\n"
"\tBiplot Scores: N/A\n"
"\tSample constraints: N/A\n"
"\tFeature IDs: 'Species1', 'Species2', 'Species3'\n"
"\tSample IDs: 'Site1', 'Site2', 'Site3'")
obs = str(self.ordination_results)
self.assertEqual(obs, exp)
# all optional attributes missing
exp = ("Ordination results:\n"
"\tMethod: Principal Coordinate Analysis (PCoA)\n"
"\tEigvals: 1\n"
"\tProportion explained: N/A\n"
"\tFeatures: N/A\n"
"\tSamples: 2x1\n"
"\tBiplot Scores: N/A\n"
"\tSample constraints: N/A\n"
"\tFeature IDs: N/A\n"
"\tSample IDs: 0, 1")
samples_df = pd.DataFrame(np.array([[1], [2]]))
obs = str(OrdinationResults('PCoA', 'Principal Coordinate Analysis',
pd.Series(np.array([4.2])), samples_df))
self.assertEqual(obs.split('\n'), exp.split('\n'))
def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
exp_legend_exists, exp_xlabel, exp_ylabel,
exp_zlabel):
# check type
self.assertIsInstance(fig, mpl.figure.Figure)
# check number of subplots
axes = fig.get_axes()
npt.assert_equal(len(axes), exp_num_subplots)
# check title
ax = axes[0]
npt.assert_equal(ax.get_title(), exp_title)
# shouldn't have tick labels
for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
ax.get_zticklabels()):
npt.assert_equal(tick_label.get_text(), '')
# check if legend is present
legend = ax.get_legend()
if exp_legend_exists:
self.assertTrue(legend is not None)
else:
self.assertTrue(legend is None)
# check axis labels
npt.assert_equal(ax.get_xlabel(), exp_xlabel)
npt.assert_equal(ax.get_ylabel(), exp_ylabel)
npt.assert_equal(ax.get_zlabel(), exp_zlabel)
def test_plot_no_metadata(self):
fig = self.min_ord_results.plot()
self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')
def test_plot_with_numeric_metadata_and_plot_options(self):
fig = self.min_ord_results.plot(
self.df, 'numeric', axes=(1, 0, 2),
axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
self.check_basic_figure_sanity(
fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')
def test_plot_with_categorical_metadata_and_plot_options(self):
fig = self.min_ord_results.plot(
self.df, 'categorical', axes=[2, 0, 1], title='a title',
cmap='Accent')
self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
def test_plot_with_invalid_axis_labels(self):
with self.assertRaisesRegex(ValueError, 'axis_labels.*4'):
self.min_ord_results.plot(axes=[2, 0, 1],
axis_labels=('a', 'b', 'c', 'd'))
def test_validate_plot_axes_valid_input(self):
# shouldn't raise an error on valid input. nothing is returned, so
# nothing to check here
samples = self.min_ord_results.samples.values.T
self.min_ord_results._validate_plot_axes(samples, (1, 2, 0))
def test_validate_plot_axes_invalid_input(self):
# not enough dimensions
with self.assertRaisesRegex(ValueError, '2 dimension\(s\)'):
self.min_ord_results._validate_plot_axes(
np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
coord_matrix = self.min_ord_results.samples.values.T
# wrong number of axes
with self.assertRaisesRegex(ValueError, 'exactly three.*found 0'):
self.min_ord_results._validate_plot_axes(coord_matrix, [])
with self.assertRaisesRegex(ValueError, 'exactly three.*found 4'):
self.min_ord_results._validate_plot_axes(coord_matrix,
(0, 1, 2, 3))
# duplicate axes
with self.assertRaisesRegex(ValueError, 'must be unique'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
# out of range axes
with self.assertRaisesRegex(ValueError, 'axes\[1\].*3'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
with self.assertRaisesRegex(ValueError, 'axes\[2\].*3'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
def test_get_plot_point_colors_invalid_input(self):
# column provided without df
with npt.assert_raises(ValueError):
self.min_ord_results._get_plot_point_colors(None, 'numeric',
['B', 'C'], 'jet')
# df provided without column
with npt.assert_raises(ValueError):
self.min_ord_results._get_plot_point_colors(self.df, None,
['B', 'C'], 'jet')
# column not in df
with self.assertRaisesRegex(ValueError, 'missingcol'):
self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
['B', 'C'], 'jet')
# id not in df
with self.assertRaisesRegex(ValueError, 'numeric'):
self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
# missing data in df
with self.assertRaisesRegex(ValueError, 'nancolumn'):
self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
['B', 'C', 'A'], 'jet')
def test_get_plot_point_colors_no_df_or_column(self):
obs = self.min_ord_results._get_plot_point_colors(None, None,
['B', 'C'], 'jet')
npt.assert_equal(obs, (None, None))
def test_get_plot_point_colors_numeric_column(self):
# subset of the ids in df
exp = [0.0, -4.2, 42.0]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'C', 'A'], 'jet')
npt.assert_almost_equal(obs[0], exp)
self.assertTrue(obs[1] is None)
# all ids in df
exp = [0.0, 42.0, 42.19, -4.2]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
npt.assert_almost_equal(obs[0], exp)
self.assertTrue(obs[1] is None)
def test_get_plot_point_colors_categorical_column(self):
# subset of the ids in df
exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
exp_color_dict = {
'foo': [0.5, 0., 0., 1.],
22: [0., 0., 0.5, 1.]
}
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'categorical', ['B', 'C', 'A'], 'jet')
npt.assert_almost_equal(obs[0], exp_colors)
npt.assert_equal(obs[1], exp_color_dict)
# all ids in df
exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
[0., 0., 0.5, 1.]]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
npt.assert_almost_equal(obs[0], exp_colors)
# should get same color dict as before
npt.assert_equal(obs[1], exp_color_dict)
def test_plot_categorical_legend(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# we shouldn't have a legend yet
self.assertTrue(ax.get_legend() is None)
self.min_ord_results._plot_categorical_legend(
ax, {'foo': 'red', 'bar': 'green'})
# make sure we have a legend now
legend = ax.get_legend()
self.assertTrue(legend is not None)
# do some light sanity checking to make sure our input labels and
# colors are present. we're not using nose.tools.assert_items_equal
# because it isn't available in Python 3.
labels = [t.get_text() for t in legend.get_texts()]
npt.assert_equal(sorted(labels), ['bar', 'foo'])
colors = [l.get_color() for l in legend.get_lines()]
npt.assert_equal(sorted(colors), ['green', 'red'])
def test_repr_png(self):
obs = self.min_ord_results._repr_png_()
self.assertIsInstance(obs, bytes)
self.assertTrue(len(obs) > 0)
def test_repr_svg(self):
obs = self.min_ord_results._repr_svg_()
self.assertIsInstance(obs, str)
self.assertTrue(len(obs) > 0)
def test_png(self):
self.assertIsInstance(self.min_ord_results.png, Image)
def test_svg(self):
self.assertIsInstance(self.min_ord_results.svg, SVG)
if __name__ == '__main__':
unittest.main()
|
kdmurray91/scikit-bio
|
skbio/stats/ordination/tests/test_ordination_results.py
|
Python
|
bsd-3-clause
| 12,214
|
[
"scikit-bio"
] |
a3b6b76903748290e8d989cc01e4c139167f30fdc4db321fd09ccdea70c98b66
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import os
import warnings
from string import Template
from six import string_types
from six.moves import zip
import numpy as np
from monty.io import zopen
from pymatgen.core.structure import Molecule, Structure
from monty.json import MSONable
from pymatgen.core.units import Energy
from pymatgen.core.units import FloatWithUnit
from pymatgen.analysis.excitation import ExcitationSpectrum
"""
This module implements input and output processing from Nwchem.
2015/09/21 - Xin Chen (chenxin13@mails.tsinghua.edu.cn):
NwOutput will read new kinds of data:
1. normal hessian matrix. ["hessian"]
2. projected hessian matrix. ["projected_hessian"]
3. normal frequencies. ["normal_frequencies"]
For backward compatibility, the key for accessing the projected frequencies
is still 'frequencies'.
2015/10/12 - Xin Chen
NwOutput will read new kinds of data:
1. forces. ["forces"]
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "6/5/13"
NWCHEM_BASIS_LIBRARY = None
if os.environ.get("NWCHEM_BASIS_LIBRARY"):
NWCHEM_BASIS_LIBRARY = set(os.listdir(os.environ["NWCHEM_BASIS_LIBRARY"]))
class NwTask(MSONable):
"""
Base task for Nwchem.
"""
theories = {"g3gn": "some description",
"scf": "Hartree-Fock",
"dft": "DFT",
"esp": "ESP",
"sodft": "Spin-Orbit DFT",
"mp2": "MP2 using a semi-direct algorithm",
"direct_mp2": "MP2 using a full-direct algorithm",
"rimp2": "MP2 using the RI approximation",
"ccsd": "Coupled-cluster single and double excitations",
"ccsd(t)": "Coupled-cluster linearized triples approximation",
"ccsd+t(ccsd)": "Fourth order triples contribution",
"mcscf": "Multiconfiguration SCF",
"selci": "Selected CI with perturbation correction",
"md": "Classical molecular dynamics simulation",
"pspw": "Pseudopotential plane-wave DFT for molecules and "
"insulating solids using NWPW",
"band": "Pseudopotential plane-wave DFT for solids using NWPW",
"tce": "Tensor Contraction Engine",
"tddft": "Time Dependent DFT"}
operations = {"energy": "Evaluate the single point energy.",
"gradient": "Evaluate the derivative of the energy with "
"respect to nuclear coordinates.",
"optimize": "Minimize the energy by varying the molecular "
"structure.",
"saddle": "Conduct a search for a transition state (or "
"saddle point).",
"hessian": "Compute second derivatives.",
"frequencies": "Compute second derivatives and print out an "
"analysis of molecular vibrations.",
"freq": "Same as frequencies.",
"vscf": "Compute anharmonic contributions to the "
"vibrational modes.",
"property": "Calculate the properties for the wave "
"function.",
"dynamics": "Perform classical molecular dynamics.",
"thermodynamics": "Perform multi-configuration "
"thermodynamic integration using "
"classical MD.",
"": "dummy"}
def __init__(self, charge, spin_multiplicity, basis_set,
basis_set_option="cartesian",
title=None, theory="dft", operation="optimize",
theory_directives=None, alternate_directives=None):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set used for the task as a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"}.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with a dielectric
constant of 78, you'd supply {'cosmo': {"dielectric": 78}}.
"""
# Basic checks.
if theory.lower() not in NwTask.theories.keys():
raise NwInputError("Invalid theory {}".format(theory))
if operation.lower() not in NwTask.operations.keys():
raise NwInputError("Invalid operation {}".format(operation))
self.charge = charge
self.spin_multiplicity = spin_multiplicity
self.title = title if title is not None else "{} {}".format(theory,
operation)
self.theory = theory
self.basis_set = basis_set or {}
if NWCHEM_BASIS_LIBRARY is not None:
for b in set(self.basis_set.values()):
if re.sub(r'\*', "s", b.lower()) not in NWCHEM_BASIS_LIBRARY:
warnings.warn(
"Basis set %s not in in NWCHEM_BASIS_LIBRARY" % b)
self.basis_set_option = basis_set_option
self.operation = operation
self.theory_directives = theory_directives or {}
self.alternate_directives = alternate_directives or {}
def __str__(self):
bset_spec = []
for el, bset in sorted(self.basis_set.items(), key=lambda x: x[0]):
bset_spec.append(" {} library \"{}\"".format(el, bset))
theory_spec = []
if self.theory_directives:
theory_spec.append("{}".format(self.theory))
for k in sorted(self.theory_directives.keys()):
theory_spec.append(" {} {}".format(k, self.theory_directives[
k]))
theory_spec.append("end")
for k in sorted(self.alternate_directives.keys()):
theory_spec.append(k)
for k2 in sorted(self.alternate_directives[k].keys()):
theory_spec.append(" {} {}".format(
k2, self.alternate_directives[k][k2]))
theory_spec.append("end")
t = Template("""title "$title"
charge $charge
basis $basis_set_option
$bset_spec
end
$theory_spec
""")
output = t.substitute(
title=self.title, charge=self.charge,
spinmult=self.spin_multiplicity,
basis_set_option=self.basis_set_option,
bset_spec="\n".join(bset_spec),
theory_spec="\n".join(theory_spec),
theory=self.theory)
if self.operation is not None:
output += "task %s %s" % (self.theory, self.operation)
return output
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"title": self.title, "theory": self.theory,
"operation": self.operation, "basis_set": self.basis_set,
"basis_set_option": self.basis_set_option,
"theory_directives": self.theory_directives,
"alternate_directives": self.alternate_directives}
@classmethod
def from_dict(cls, d):
return NwTask(charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
title=d["title"], theory=d["theory"],
operation=d["operation"], basis_set=d["basis_set"],
basis_set_option=d['basis_set_option'],
theory_directives=d["theory_directives"],
alternate_directives=d["alternate_directives"])
@classmethod
def from_molecule(cls, mol, theory, charge=None, spin_multiplicity=None,
basis_set="6-31g", basis_set_option="cartesian",
title=None, operation="optimize", theory_directives=None,
alternate_directives=None):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
mol: Input molecule
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set to be used as string or a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
same basis set is used for all elements.
basis_set_option: cartesian (default) | spherical,
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with DFT, you'd supply
{'cosmo': "cosmo"}.
"""
title = title if title is not None else "{} {} {}".format(
re.sub(r"\s", "", mol.formula), theory, operation)
charge = charge if charge is not None else mol.charge
nelectrons = - charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
charge, spin_multiplicity))
elif charge == mol.charge:
spin_multiplicity = mol.spin_multiplicity
else:
spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
elements = set(mol.composition.get_el_amt_dict().keys())
if isinstance(basis_set, string_types):
basis_set = {el: basis_set for el in elements}
basis_set_option = basis_set_option
return NwTask(charge, spin_multiplicity, basis_set,
basis_set_option=basis_set_option,
title=title, theory=theory, operation=operation,
theory_directives=theory_directives,
alternate_directives=alternate_directives)
@classmethod
def dft_task(cls, mol, xc="b3lyp", **kwargs):
"""
A class method for quickly creating DFT tasks with an optional
cosmo parameter.
Args:
mol: Input molecule
xc: Exchange correlation to use.
\\*\\*kwargs: Any of the other kwargs supported by NwTask. Note the
theory is always "dft" for a dft task.
"""
t = NwTask.from_molecule(mol, theory="dft", **kwargs)
t.theory_directives.update({"xc": xc,
"mult": t.spin_multiplicity})
return t
@classmethod
def esp_task(cls, mol, **kwargs):
"""
A class method for quickly creating ESP tasks with RESP
charge fitting.
Args:
mol: Input molecule
\\*\\*kwargs: Any of the other kwargs supported by NwTask. Note the
theory is always "esp" for an esp task.
"""
return NwTask.from_molecule(mol, theory="esp", **kwargs)
class NwInput(MSONable):
"""
An object representing a Nwchem input file, which is essentially a list
of tasks on a particular molecule.
Args:
mol: Input molecule. If molecule is a single string, it is used as a
direct input to the geometry section of the NWChem input
file.
tasks: List of NwTasks.
directives: List of root level directives as tuple. E.g.,
[("start", "water"), ("print", "high")]
geometry_options: Additional list of options to be supplied to the
geometry. E.g., ["units", "angstroms", "noautoz"]. Defaults to
("units", "angstroms").
symmetry_options: Additional list of options to be supplied to the
symmetry. E.g. ["c1"] to turn off symmetry.
memory_options: Memory controlling options. str.
E.g "total 1000 mb stack 400 mb"
"""
def __init__(self, mol, tasks, directives=None,
geometry_options=("units", "angstroms"),
symmetry_options=None,
memory_options=None):
self._mol = mol
self.directives = directives if directives is not None else []
self.tasks = tasks
self.geometry_options = geometry_options
self.symmetry_options = symmetry_options
self.memory_options = memory_options
@property
def molecule(self):
"""
Returns the molecule associated with this NwInput.
"""
return self._mol
def __str__(self):
o = []
if self.memory_options:
o.append('memory ' + self.memory_options)
for d in self.directives:
o.append("{} {}".format(d[0], d[1]))
o.append("geometry "
+ " ".join(self.geometry_options))
if self.symmetry_options:
o.append(" symmetry " + " ".join(self.symmetry_options))
for site in self._mol:
o.append(" {} {} {} {}".format(site.specie.symbol, site.x, site.y,
site.z))
o.append("end\n")
for t in self.tasks:
o.append(str(t))
o.append("")
return "\n".join(o)
def write_file(self, filename):
with zopen(filename, "w") as f:
f.write(self.__str__())
def as_dict(self):
return {
"mol": self._mol.as_dict(),
"tasks": [t.as_dict() for t in self.tasks],
"directives": [list(t) for t in self.directives],
"geometry_options": list(self.geometry_options),
"symmetry_options": self.symmetry_options,
"memory_options": self.memory_options
}
@classmethod
def from_dict(cls, d):
return NwInput(Molecule.from_dict(d["mol"]),
tasks=[NwTask.from_dict(dt) for dt in d["tasks"]],
directives=[tuple(li) for li in d["directives"]],
geometry_options=d["geometry_options"],
symmetry_options=d["symmetry_options"],
memory_options=d["memory_options"])
@classmethod
def from_string(cls, string_input):
"""
Read an NwInput from a string. Currently tested to work with
files generated from this class itself.
Args:
string_input: string_input to parse.
Returns:
NwInput object
"""
directives = []
tasks = []
charge = None
spin_multiplicity = None
title = None
basis_set = None
basis_set_option = None
theory_directives = {}
geom_options = None
symmetry_options = None
memory_options = None
lines = string_input.strip().split("\n")
while len(lines) > 0:
l = lines.pop(0).strip()
if l == "":
continue
toks = l.split()
if toks[0].lower() == "geometry":
geom_options = toks[1:]
l = lines.pop(0).strip()
toks = l.split()
if toks[0].lower() == "symmetry":
symmetry_options = toks[1:]
l = lines.pop(0).strip()
# Parse geometry
species = []
coords = []
while l.lower() != "end":
toks = l.split()
species.append(toks[0])
coords.append([float(i) for i in toks[1:]])
l = lines.pop(0).strip()
mol = Molecule(species, coords)
elif toks[0].lower() == "charge":
charge = int(toks[1])
elif toks[0].lower() == "title":
title = l[5:].strip().strip("\"")
elif toks[0].lower() == "basis":
# Parse basis sets
l = lines.pop(0).strip()
basis_set = {}
while l.lower() != "end":
toks = l.split()
basis_set[toks[0]] = toks[-1].strip("\"")
l = lines.pop(0).strip()
elif toks[0].lower() in NwTask.theories:
# read the basis_set_option
if len(toks) > 1:
basis_set_option = toks[1]
# Parse theory directives.
theory = toks[0].lower()
l = lines.pop(0).strip()
theory_directives[theory] = {}
while l.lower() != "end":
toks = l.split()
theory_directives[theory][toks[0]] = toks[-1]
if toks[0] == "mult":
spin_multiplicity = float(toks[1])
l = lines.pop(0).strip()
elif toks[0].lower() == "task":
tasks.append(
NwTask(charge=charge,
spin_multiplicity=spin_multiplicity,
title=title, theory=toks[1],
operation=toks[2], basis_set=basis_set,
basis_set_option=basis_set_option,
theory_directives=theory_directives.get(toks[1])))
elif toks[0].lower() == "memory":
memory_options = ' '.join(toks[1:])
else:
directives.append(l.strip().split())
return NwInput(mol, tasks=tasks, directives=directives,
geometry_options=geom_options,
symmetry_options=symmetry_options,
memory_options=memory_options)
@classmethod
def from_file(cls, filename):
"""
Read an NwInput from a file. Currently tested to work with
files generated from this class itself.
Args:
filename: Filename to parse.
Returns:
NwInput object
"""
with zopen(filename) as f:
return cls.from_string(f.read())
class NwInputError(Exception):
"""
Error class for NwInput.
"""
pass
class NwOutput(object):
"""
A Nwchem output file parser. Very basic for now - supports only dft and
only parses energies and geometries. Please note that Nwchem typically
outputs energies in either au or kJ/mol. All energies are converted to
eV in the parser.
Args:
filename: Filename to read.
"""
def __init__(self, filename):
self.filename = filename
with zopen(filename) as f:
data = f.read()
chunks = re.split(r"NWChem Input Module", data)
if re.search(r"CITATION", chunks[-1]):
chunks.pop()
preamble = chunks.pop(0)
self.raw = data
self.job_info = self._parse_preamble(preamble)
self.data = [self._parse_job(c) for c in chunks]
def parse_tddft(self):
"""
Parses TDDFT roots. Adapted from nw_spectrum.py script.
Returns:
{
"singlet": [
{
"energy": float,
"osc_strength: float
}
],
"triplet": [
{
"energy": float
}
]
}
"""
start_tag = "Convergence criterion met"
end_tag = "Excited state energy"
singlet_tag = "singlet excited"
triplet_tag = "triplet excited"
state = "singlet"
inside = False # true when we are inside output block
lines = self.raw.split("\n")
roots = {"singlet": [], "triplet": []}
while lines:
line = lines.pop(0).strip()
if start_tag in line:
inside = True
elif end_tag in line:
inside = False
elif singlet_tag in line:
state = "singlet"
elif triplet_tag in line:
state = "triplet"
elif inside and "Root" in line and "eV" in line:
toks = line.split()
roots[state].append({"energy": float(toks[-2])})
elif inside and "Dipole Oscillator Strength" in line:
osc = float(line.split()[-1])
roots[state][-1]["osc_strength"] = osc
return roots
def get_excitation_spectrum(self, width=0.1, npoints=2000):
"""
Generate an excitation spectra from the singlet roots of TDDFT
calculations.
Args:
width (float): Width for Gaussian smearing.
npoints (int): Number of energy points. More points => smoother
curve.
Returns:
(ExcitationSpectrum) which can be plotted using
pymatgen.vis.plotters.SpectrumPlotter.
"""
roots = self.parse_tddft()
data = roots["singlet"]
en = np.array([d["energy"] for d in data])
osc = np.array([d["osc_strength"] for d in data])
epad = 20.0 * width
emin = en[0] - epad
emax = en[-1] + epad
de = (emax - emin) / npoints
# Use width of at least two grid points
if width < 2 * de:
width = 2 * de
energies = [emin + ie * de for ie in range(npoints)]
cutoff = 20.0 * width
gamma = 0.5 * width
gamma_sqrd = gamma * gamma
de = (energies[-1] - energies[0]) / (len(energies) - 1)
prefac = gamma / np.pi * de
x = []
y = []
for energy in energies:
xx0 = energy - en
stot = osc / (xx0 * xx0 + gamma_sqrd)
t = np.sum(stot[np.abs(xx0) <= cutoff])
x.append(energy)
y.append(t * prefac)
return ExcitationSpectrum(x, y)
def _parse_preamble(self, preamble):
info = {}
for l in preamble.split("\n"):
toks = l.split("=")
if len(toks) > 1:
info[toks[0].strip()] = toks[-1].strip()
return info
def __iter__(self):
return self.data.__iter__()
def __getitem__(self, ind):
return self.data[ind]
def __len__(self):
return len(self.data)
def _parse_job(self, output):
energy_patt = re.compile(r'Total \w+ energy\s+=\s+([.\-\d]+)')
energy_gas_patt = re.compile(r'gas phase energy\s+=\s+([.\-\d]+)')
energy_sol_patt = re.compile(r'sol phase energy\s+=\s+([.\-\d]+)')
coord_patt = re.compile(r'\d+\s+(\w+)\s+[.\-\d]+\s+([.\-\d]+)\s+'
r'([.\-\d]+)\s+([.\-\d]+)')
lat_vector_patt = re.compile(r'a[123]=<\s+([.\-\d]+)\s+'
r'([.\-\d]+)\s+([.\-\d]+)\s+>')
corrections_patt = re.compile(r'([\w\-]+ correction to \w+)\s+='
r'\s+([.\-\d]+)')
preamble_patt = re.compile(r'(No. of atoms|No. of electrons'
r'|SCF calculation type|Charge|Spin '
r'multiplicity)\s*:\s*(\S+)')
force_patt = re.compile(r'\s+(\d+)\s+(\w+)' + 6 * r'\s+([0-9\.\-]+)')
time_patt = re.compile(
r'\s+ Task \s+ times \s+ cpu: \s+ ([.\d]+)s .+ ', re.VERBOSE)
error_defs = {
"calculations not reaching convergence": "Bad convergence",
"Calculation failed to converge": "Bad convergence",
"geom_binvr: #indep variables incorrect": "autoz error",
"dft optimize failed": "Geometry optimization failed"}
fort2py = lambda x: x.replace("D", "e")
isfloatstring = lambda s: s.find(".") == -1
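# Note (added comment): fort2py converts Fortran-style "D" exponents to "e";
# isfloatstring, despite its name, returns True when the token contains no
# decimal point, i.e. when it is NOT a float-like string.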
parse_hess = False
parse_proj_hess = False
hessian = None
projected_hessian = None
parse_force = False
all_forces = []
forces = []
data = {}
energies = []
frequencies = None
normal_frequencies = None
corrections = {}
molecules = []
structures = []
species = []
coords = []
lattice = []
errors = []
basis_set = {}
bset_header = []
parse_geom = False
parse_freq = False
parse_bset = False
parse_projected_freq = False
job_type = ""
parse_time = False
time = 0
for l in output.split("\n"):
for e, v in error_defs.items():
if l.find(e) != -1:
errors.append(v)
if parse_time:
m = time_patt.search(l)
if m:
time = m.group(1)
parse_time = False
if parse_geom:
if l.strip() == "Atomic Mass":
if lattice:
structures.append(Structure(lattice, species, coords,
coords_are_cartesian=True))
else:
molecules.append(Molecule(species, coords))
species = []
coords = []
lattice = []
parse_geom = False
else:
m = coord_patt.search(l)
if m:
species.append(m.group(1).capitalize())
coords.append([float(m.group(2)), float(m.group(3)),
float(m.group(4))])
m = lat_vector_patt.search(l)
if m:
lattice.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
if parse_force:
m = force_patt.search(l)
if m:
forces.extend(map(float, m.groups()[5:]))
elif len(forces) > 0:
all_forces.append(forces)
forces = []
parse_force = False
elif parse_freq:
if len(l.strip()) == 0:
if len(normal_frequencies[-1][1]) == 0:
continue
else:
parse_freq = False
else:
vibs = [float(vib) for vib in l.strip().split()[1:]]
num_vibs = len(vibs)
for mode, dis in zip(normal_frequencies[-num_vibs:], vibs):
mode[1].append(dis)
elif parse_projected_freq:
if len(l.strip()) == 0:
if len(frequencies[-1][1]) == 0:
continue
else:
parse_projected_freq = False
else:
vibs = [float(vib) for vib in l.strip().split()[1:]]
num_vibs = len(vibs)
for mode, dis in zip(
frequencies[-num_vibs:], vibs):
mode[1].append(dis)
elif parse_bset:
if l.strip() == "":
parse_bset = False
else:
toks = l.split()
if toks[0] != "Tag" and not re.match(r"-+", toks[0]):
basis_set[toks[0]] = dict(zip(bset_header[1:],
toks[1:]))
elif toks[0] == "Tag":
bset_header = toks
bset_header.pop(4)
bset_header = [h.lower() for h in bset_header]
elif parse_hess:
if l.strip() == "":
continue
if len(hessian) > 0 and l.find("----------") != -1:
parse_hess = False
continue
toks = l.strip().split()
if len(toks) > 1:
try:
row = int(toks[0])
except Exception:
continue
if isfloatstring(toks[1]):
continue
vals = [float(fort2py(x)) for x in toks[1:]]
if len(hessian) < row:
hessian.append(vals)
else:
hessian[row - 1].extend(vals)
elif parse_proj_hess:
if l.strip() == "":
continue
nat3 = len(hessian)
toks = l.strip().split()
if len(toks) > 1:
try:
row = int(toks[0])
except Exception:
continue
if isfloatstring(toks[1]):
continue
vals = [float(fort2py(x)) for x in toks[1:]]
if len(projected_hessian) < row:
projected_hessian.append(vals)
else:
projected_hessian[row - 1].extend(vals)
if len(projected_hessian[-1]) == nat3:
parse_proj_hess = False
else:
m = energy_patt.search(l)
if m:
energies.append(Energy(m.group(1), "Ha").to("eV"))
parse_time = True
continue
m = energy_gas_patt.search(l)
if m:
cosmo_scf_energy = energies[-1]
energies[-1] = dict()
energies[-1].update({"cosmo scf": cosmo_scf_energy})
energies[-1].update({"gas phase":
Energy(m.group(1), "Ha").to("eV")})
m = energy_sol_patt.search(l)
if m:
energies[-1].update(
{"sol phase": Energy(m.group(1), "Ha").to("eV")})
m = preamble_patt.search(l)
if m:
try:
val = int(m.group(2))
except ValueError:
val = m.group(2)
k = m.group(1).replace("No. of ", "n").replace(" ", "_")
data[k.lower()] = val
elif l.find("Geometry \"geometry\"") != -1:
parse_geom = True
elif l.find("Summary of \"ao basis\"") != -1:
parse_bset = True
elif l.find("P.Frequency") != -1:
parse_projected_freq = True
if frequencies is None:
frequencies = []
toks = l.strip().split()[1:]
frequencies.extend([(float(freq), []) for freq in toks])
elif l.find("Frequency") != -1:
toks = l.strip().split()
if len(toks) > 1 and toks[0] == "Frequency":
parse_freq = True
if normal_frequencies is None:
normal_frequencies = []
normal_frequencies.extend([(float(freq), []) for freq
in l.strip().split()[1:]])
elif l.find("MASS-WEIGHTED NUCLEAR HESSIAN") != -1:
parse_hess = True
if not hessian:
hessian = []
elif l.find("MASS-WEIGHTED PROJECTED HESSIAN") != -1:
parse_proj_hess = True
if not projected_hessian:
projected_hessian = []
elif l.find("atom coordinates gradient") != -1:
parse_force = True
elif job_type == "" and l.strip().startswith("NWChem"):
job_type = l.strip()
if job_type == "NWChem DFT Module" and \
"COSMO solvation results" in output:
job_type += " COSMO"
else:
m = corrections_patt.search(l)
if m:
corrections[m.group(1)] = FloatWithUnit(
m.group(2), "kJ mol^-1").to("eV atom^-1")
if frequencies:
for freq, mode in frequencies:
mode[:] = zip(*[iter(mode)]*3)
if normal_frequencies:
for freq, mode in normal_frequencies:
mode[:] = zip(*[iter(mode)]*3)
if hessian:
n = len(hessian)
for i in range(n):
for j in range(i + 1, n):
hessian[i].append(hessian[j][i])
if projected_hessian:
n = len(projected_hessian)
for i in range(n):
for j in range(i + 1, n):
projected_hessian[i].append(projected_hessian[j][i])
data.update({"job_type": job_type, "energies": energies,
"corrections": corrections,
"molecules": molecules,
"structures": structures,
"basis_set": basis_set,
"errors": errors,
"has_error": len(errors) > 0,
"frequencies": frequencies,
"normal_frequencies": normal_frequencies,
"hessian": hessian,
"projected_hessian": projected_hessian,
"forces": all_forces,
"task_time": time})
return data
|
nisse3000/pymatgen
|
pymatgen/io/nwchem.py
|
Python
|
mit
| 36,074
|
[
"Gaussian",
"NWChem",
"pymatgen"
] |
e2144b094fe47af2e4a400073114701636bd1723a0d19ea1fa9401c6686772ba
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbservice
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of GslbService Avi RESTful Object
description:
- This module is used to configure GslbService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
application_persistence_profile_ref:
description:
- The federated application persistence associated with gslbservice site persistence functionality.
- It is a reference to an object of type applicationpersistenceprofile.
- Field introduced in 17.2.1.
version_added: "2.5"
controller_health_status_enabled:
description:
- Gs member's overall health status is derived based on a combination of controller and datapath health-status inputs.
- Note that the datapath status is determined by the association of health monitor profiles.
- Only the controller provided status is determined through this configuration.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
created_by:
description:
- Creator name.
- Field introduced in 17.1.2.
description:
description:
- User defined description for the object.
domain_names:
description:
- Fully qualified domain name of the gslb service.
down_response:
description:
- Response to the client query when the gslb service is down.
enabled:
description:
- Enable or disable the gslb service.
- If the gslb service is enabled, then the vips are sent in the dns responses based on reachability and configured algorithm.
- If the gslb service is disabled, then the vips are no longer available in the dns response.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
groups:
description:
- Select list of pools belonging to this gslb service.
health_monitor_refs:
description:
- Verify vs health by applying one or more health monitors.
- Active monitors generate synthetic traffic from dns service engine and to mark a vs up or down based on the response.
- It is a reference to an object of type healthmonitor.
health_monitor_scope:
description:
- Health monitor probe can be executed for all the members or it can be executed only for third-party members.
- This operational mode is useful to reduce the number of health monitor probes in case of a hybrid scenario.
- In such a case, avi members can have controller-derived status while non-avi members can be probed via health monitor probes in the dataplane.
- Enum options - GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS, GSLB_SERVICE_HEALTH_MONITOR_ONLY_NON_AVI_MEMBERS.
- Default value when not specified in API or module is interpreted by Avi Controller as GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS.
is_federated:
description:
- This field indicates that this object is replicated across gslb federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
min_members:
description:
- The minimum number of members to distribute traffic to.
- Allowed values are 1-65535.
- Special values are 0 - 'disable'.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
version_added: "2.5"
name:
description:
- Name for the gslb service.
required: true
num_dns_ip:
description:
- Number of ip addresses of this gslb service to be returned by the dns service.
- Enter 0 to return all ip addresses.
- Allowed values are 1-20.
- Special values are 0 - 'return all ip addresses'.
pool_algorithm:
description:
- The load balancing algorithm will pick a gslb pool within the gslb service list of available pools.
- Enum options - GSLB_SERVICE_ALGORITHM_PRIORITY, GSLB_SERVICE_ALGORITHM_GEO.
- Field introduced in 17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as GSLB_SERVICE_ALGORITHM_PRIORITY.
version_added: "2.5"
site_persistence_enabled:
description:
- Enable site-persistence for the gslbservice.
- Field introduced in 17.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
ttl:
description:
- Ttl value (in seconds) for records served for this gslb service by the dns service.
- Allowed values are 1-86400.
- Units(SEC).
url:
description:
- Avi controller URL of the object.
use_edns_client_subnet:
description:
- Use the client ip subnet from the edns option as source ipaddress for client geo-location and consistent hash algorithm.
- Default is true.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
uuid:
description:
- Uuid of the gslb service.
wildcard_match:
description:
- Enable wild-card match of fqdn. If an exact match is not found in the dns table, the longest match is chosen by wild-carding the fqdn in the dns request.
- Default is false.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbService object
avi_gslbservice:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbservice
"""
RETURN = '''
obj:
description: GslbService (api/gslbservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
application_persistence_profile_ref=dict(type='str',),
controller_health_status_enabled=dict(type='bool',),
created_by=dict(type='str',),
description=dict(type='str',),
domain_names=dict(type='list',),
down_response=dict(type='dict',),
enabled=dict(type='bool',),
groups=dict(type='list',),
health_monitor_refs=dict(type='list',),
health_monitor_scope=dict(type='str',),
is_federated=dict(type='bool',),
min_members=dict(type='int',),
name=dict(type='str', required=True),
num_dns_ip=dict(type='int',),
pool_algorithm=dict(type='str',),
site_persistence_enabled=dict(type='bool',),
tenant_ref=dict(type='str',),
ttl=dict(type='int',),
url=dict(type='str',),
use_edns_client_subnet=dict(type='bool',),
uuid=dict(type='str',),
wildcard_match=dict(type='bool',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslbservice',
set([]))
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/network/avi/avi_gslbservice.py
|
Python
|
gpl-3.0
| 9,674
|
[
"VisIt"
] |
7569d7b24a4ab511e8e689426632f74d1b0478e93cbdafc63dae66e64a323549
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide a python evaluation window
"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import sys
if sys.version_info[0] < 3:
from cStringIO import StringIO
else:
from io import StringIO
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import traceback
#-------------------------------------------------------------------------
#
# Gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug import Gramplet
from gramps.gen.constfunc import cuni
#-------------------------------------------------------------------------
#
# PythonEvaluation
#
#-------------------------------------------------------------------------
class PythonEvaluation(Gramplet):
"""
Allows the user to evaluate python code.
"""
def init(self):
self.gui.WIDGET = self.build_gui()
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add_with_viewport(self.gui.WIDGET)
def build_gui(self):
"""
Build the GUI interface.
"""
self.top = Gtk.VBox()
self.top.set_border_width(6)
self.ebuf = self.__add_text_view(_("Evaluation"))
self.dbuf = self.__add_text_view(_("Output"))
self.error = self.__add_text_view(_("Error"))
bbox = Gtk.HButtonBox()
apply_button = Gtk.Button(_("Apply"))
apply_button.connect('clicked', self.apply_clicked)
bbox.pack_start(apply_button, False, False, 6)
clear_button = Gtk.Button(_("Clear"))
clear_button.connect('clicked', self.clear_clicked)
bbox.pack_start(clear_button, False, False, 6)
self.top.pack_start(bbox, False, False, 6)
self.top.show_all()
return self.top
def __add_text_view(self, name):
"""
Add a text view to the interface.
"""
label = Gtk.Label(name)
label.set_markup('<b>%s</b>' % name)
label.set_alignment(0, 0.5)
self.top.pack_start(label, False, False, 6)
swin = Gtk.ScrolledWindow()
swin.set_shadow_type(Gtk.ShadowType.IN)
tview = Gtk.TextView()
swin.add_with_viewport(tview)
self.top.pack_start(swin, True, True, 6)
return tview.get_buffer()
def apply_clicked(self, obj):
text = cuni(self.ebuf.get_text(self.ebuf.get_start_iter(),
self.ebuf.get_end_iter(),False))
outtext = StringIO()
errtext = StringIO()
sys.stdout = outtext
sys.stderr = errtext
try:
exec(text)
except:
traceback.print_exc()
self.dbuf.set_text(outtext.getvalue())
self.error.set_text(errtext.getvalue())
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
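# The capture pattern used above, shown in isolation (added, stand-alone
# sketch; sys, traceback and StringIO are already imported in this module):
#
#     buf_out, buf_err = StringIO(), StringIO()
#     sys.stdout, sys.stderr = buf_out, buf_err
#     try:
#         exec("print('hello')")
#     except Exception:
#         traceback.print_exc()
#     finally:
#         sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
#     # buf_out.getvalue() -> "hello\n", buf_err.getvalue() -> ""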
def clear_clicked(self, obj):
self.dbuf.set_text("")
self.ebuf.set_text("")
self.error.set_text("")
|
pmghalvorsen/gramps_branch
|
gramps/plugins/gramplet/eval.py
|
Python
|
gpl-2.0
| 4,212
|
[
"Brian"
] |
ad2cf1f1fadae6e15f89c658a1d59508d61d65f61e80e11dd6b688c51e6ccc97
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Matthew Harrigan
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import pytest
import os
@pytest.fixture(scope='session')
def get_fn():
test_dir = os.path.dirname(os.path.abspath(__file__))
def _get_fn(fn):
return '{}/data/{}'.format(test_dir, fn)
return _get_fn
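# Usage sketch (added; a hypothetical test module, with 'frame0.pdb' standing
# in for any file under tests/data/):
#
# def test_load(get_fn):
#     import mdtraj as md
#     traj = md.load(get_fn('frame0.pdb'))
#     assert traj.n_frames >= 1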
|
leeping/mdtraj
|
tests/conftest.py
|
Python
|
lgpl-2.1
| 1,234
|
[
"MDTraj"
] |
4dac104a52354dac4fe57c00cec8aa5cb5c451c9489eb09bc6174b336f0c63e1
|
#!/usr/bin/env python
import os
from ase.io import read, write
from ase.optimize import QuasiNewton
from ase.calculators.turbomole import Turbomole
# Turbomole input coordinates must be in the file 'coord'.
# The coordinates are updated to the file 'coord' during the minimization.
test = read('coord')
test.set_calculator(Turbomole())
dyn = QuasiNewton(test, trajectory='test.traj')
dyn.run(fmax=0.01)
write('coord.final.tmol', test)
|
alexei-matveev/ase-local
|
doc/ase/calculators/turbomole_ex1_relax.py
|
Python
|
gpl-2.0
| 393
|
[
"ASE",
"TURBOMOLE"
] |
c4196e37f89e439b239cefaa2a9e0626e29995404ca81a1459e0542d0a7aca64
|
#!/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,invalid-name
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: BSD
| Thanks to ga2arch for help with IS_IN_DB and IS_NOT_IN_DB on GAE
Validators
-----------
"""
import os
import re
import math
import datetime
import time
import cgi
import uuid
import hashlib
import hmac
import json
import struct
import decimal
import binascii
import unicodedata
import encodings.idna
from functools import reduce
from ._compat import (
StringIO,
integer_types,
basestring,
unicodeT,
urllib_unquote,
unichr,
to_bytes,
PY2,
to_unicode,
to_native,
string_types,
urlparse,
ipaddress,
)
from .objects import Field, FieldVirtual, FieldMethod, Table
JSONErrors = (NameError, TypeError, ValueError, AttributeError, KeyError)
__all__ = [
"ANY_OF",
"CLEANUP",
"CRYPT",
"IS_ALPHANUMERIC",
"IS_DATE_IN_RANGE",
"IS_DATE",
"IS_DATETIME_IN_RANGE",
"IS_DATETIME",
"IS_DECIMAL_IN_RANGE",
"IS_EMAIL",
"IS_LIST_OF_EMAILS",
"IS_EMPTY_OR",
"IS_EXPR",
"IS_FILE",
"IS_FLOAT_IN_RANGE",
"IS_IMAGE",
"IS_IN_DB",
"IS_IN_SET",
"IS_INT_IN_RANGE",
"IS_IPV4",
"IS_IPV6",
"IS_IPADDRESS",
"IS_LENGTH",
"IS_LIST_OF",
"IS_LOWER",
"IS_MATCH",
"IS_EQUAL_TO",
"IS_NOT_EMPTY",
"IS_NOT_IN_DB",
"IS_NULL_OR",
"IS_SLUG",
"IS_STRONG",
"IS_TIME",
"IS_UPLOAD_FILENAME",
"IS_UPPER",
"IS_URL",
"IS_JSON",
]
def options_sorter(x, y):
return (str(x[1]).upper() > str(y[1]).upper() and 1) or -1
def translate(text):
return Validator.translator(text)
class ValidationError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class Validator(object):
"""
Root for all validators, mainly for documentation purposes.
Validators are classes used to validate input fields (including forms
generated from database tables).
Here is an example of using a validator with a FORM::
INPUT(_name='a', requires=IS_INT_IN_RANGE(0, 10))
Here is an example of how to require a validator for a table field::
db.define_table('person', Field('name'))
db.person.name.requires=IS_NOT_EMPTY()
Validators are always assigned using the requires attribute of a field. A
field can have a single validator or multiple validators. Multiple
validators are made part of a list::
db.person.name.requires=[IS_NOT_EMPTY(), IS_NOT_IN_DB(db, 'person.id')]
Validators are called by the function accepts on a FORM or other HTML
helper object that contains a form. They are always called in the order in
which they are listed.
Built-in validators have constructors that take the optional argument error
message which allows you to change the default error message.
Here is an example of a validator on a database table::
db.person.name.requires=IS_NOT_EMPTY(error_message=T('Fill this'))
where we have used the translation operator T to allow for
internationalization.
Notice that default error messages are not translated.
"""
translator = staticmethod(lambda text: text)
def formatter(self, value):
"""
For some validators returns a formatted version (matching the validator)
of value. Otherwise just returns the value.
"""
return value
@staticmethod
def validate(value, record_id=None):
raise NotImplementedError
def __call__(self, value, record_id=None):
try:
return self.validate(value, record_id), None
except ValidationError as e:
return value, e.message
def validator_caller(func, value, record_id=None):
validate = getattr(func, "validate", None)
if validate and validate is not Validator.validate:
return validate(value, record_id)
value, error = func(value)
if error is not None:
raise ValidationError(error)
return value
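# A short sketch of how the helper above is used (added; IS_NOT_EMPTY is
# defined later in this module). It prefers the new-style validate() API and
# falls back to the legacy (value, error) tuple protocol:
#
#     try:
#         clean = validator_caller(IS_NOT_EMPTY(), "abc")   # -> "abc"
#     except ValidationError as err:
#         print(err.message)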
class IS_MATCH(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_MATCH('.+'))
The argument of IS_MATCH is a regular expression::
>>> IS_MATCH('.+')('hello')
('hello', None)
>>> IS_MATCH('hell')('hello')
('hello', None)
>>> IS_MATCH('hell.*', strict=False)('hello')
('hello', None)
>>> IS_MATCH('hello')('shello')
('shello', 'invalid expression')
>>> IS_MATCH('hello', search=True)('shello')
('shello', None)
>>> IS_MATCH('hello', search=True, strict=False)('shellox')
('shellox', None)
>>> IS_MATCH('.*hello.*', search=True, strict=False)('shellox')
('shellox', None)
>>> IS_MATCH('.+')('')
('', 'invalid expression')
"""
def __init__(
self,
expression,
error_message="Invalid expression",
strict=False,
search=False,
extract=False,
is_unicode=False,
):
if strict or not search:
if not expression.startswith("^"):
expression = "^(%s)" % expression
if strict:
if not expression.endswith("$"):
expression = "(%s)$" % expression
if is_unicode:
if not isinstance(expression, unicodeT):
expression = expression.decode("utf8")
self.regex = re.compile(expression, re.UNICODE)
else:
self.regex = re.compile(expression)
self.error_message = error_message
self.extract = extract
self.is_unicode = is_unicode or not PY2
def validate(self, value, record_id=None):
if not PY2: # PY3 convert bytes to unicode
value = to_unicode(value)
if self.is_unicode or not PY2:
if not isinstance(value, unicodeT):
match = self.regex.search(str(value).decode("utf8"))
else:
match = self.regex.search(value)
else:
if not isinstance(value, unicodeT):
match = self.regex.search(str(value))
else:
match = self.regex.search(value.encode("utf8"))
if match is not None:
return self.extract and match.group() or value
raise ValidationError(self.translator(self.error_message))
class IS_EQUAL_TO(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='password')
INPUT(_type='text', _name='password2',
requires=IS_EQUAL_TO(request.vars.password))
The argument of IS_EQUAL_TO is a string::
>>> IS_EQUAL_TO('aaa')('aaa')
('aaa', None)
>>> IS_EQUAL_TO('aaa')('aab')
('aab', 'no match')
"""
def __init__(self, expression, error_message="No match"):
self.expression = expression
self.error_message = error_message
def validate(self, value, record_id=None):
if value != self.expression:
raise ValidationError(self.translator(self.error_message))
return value
class IS_EXPR(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name',
requires=IS_EXPR('5 < int(value) < 10'))
The argument of IS_EXPR must be python condition::
>>> IS_EXPR('int(value) < 2')('1')
('1', None)
>>> IS_EXPR('int(value) < 2')('2')
('2', 'invalid expression')
"""
def __init__(
self, expression, error_message="Invalid expression", environment=None
):
self.expression = expression
self.error_message = error_message
self.environment = environment or {}
def validate(self, value, record_id=None):
if callable(self.expression):
message = self.expression(value)
if message:
raise ValidationError(message)
return value
# for backward compatibility
self.environment.update(value=value)
exec("__ret__=" + self.expression, self.environment)
if self.environment["__ret__"]:
return value
raise ValidationError(self.translator(self.error_message))
class IS_LENGTH(Validator):
"""
Checks if length of field's value fits between given boundaries. Works
for both text and file inputs.
Args:
maxsize: maximum allowed length / size
minsize: minimum allowed length / size
Examples:
Check if text string is shorter than 33 characters::
INPUT(_type='text', _name='name', requires=IS_LENGTH(32))
Check if password string is longer than 5 characters::
INPUT(_type='password', _name='name', requires=IS_LENGTH(minsize=6))
Check if uploaded file has size between 1KB and 1MB::
INPUT(_type='file', _name='name', requires=IS_LENGTH(1048576, 1024))
Other examples::
>>> IS_LENGTH()('')
('', None)
>>> IS_LENGTH()('1234567890')
('1234567890', None)
>>> IS_LENGTH(maxsize=5, minsize=0)('1234567890') # too long
('1234567890', 'enter from 0 to 5 characters')
>>> IS_LENGTH(maxsize=50, minsize=20)('1234567890') # too short
('1234567890', 'enter from 20 to 50 characters')
"""
def __init__(
self,
maxsize=255,
minsize=0,
error_message="Enter from %(min)g to %(max)g characters",
):
self.maxsize = maxsize
self.minsize = minsize
self.error_message = error_message
def validate(self, value, record_id=None):
if value is None:
length = 0
elif isinstance(value, str):
try:
length = len(to_unicode(value))
except:
length = len(value)
elif isinstance(value, unicodeT):
length = len(value)
value = value.encode("utf8")
elif isinstance(value, (bytes, bytearray, tuple, list)):
length = len(value)
elif isinstance(value, cgi.FieldStorage):
if value.file:
value.file.seek(0, os.SEEK_END)
length = value.file.tell()
value.file.seek(0, os.SEEK_SET)
elif hasattr(value, "value"):
val = value.value
if val:
length = len(val)
else:
length = 0
else:
value = str(value)
length = len(str(value))
if self.minsize <= length <= self.maxsize:
return value
raise ValidationError(
self.translator(self.error_message)
% dict(min=self.minsize, max=self.maxsize)
)
class IS_JSON(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name',
requires=IS_JSON(error_message="This is not a valid json input")
>>> IS_JSON()('{"a": 100}')
({u'a': 100}, None)
>>> IS_JSON()('spam1234')
('spam1234', 'invalid json')
"""
def __init__(self, error_message="Invalid json", native_json=False):
self.native_json = native_json
self.error_message = error_message
def validate(self, value, record_id=None):
if isinstance(value, (str, bytes)):
try:
if self.native_json:
json.loads(value) # raises error in case of malformed json
return value # the serialized value is not passed
else:
return json.loads(value)
except JSONErrors:
raise ValidationError(self.translator(self.error_message))
else:
return value
def formatter(self, value):
if value is None:
return None
if self.native_json:
return value
else:
return json.dumps(value)
class IS_IN_SET(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name',
requires=IS_IN_SET(['max', 'john'],zero=''))
The argument of IS_IN_SET must be a list or set::
>>> IS_IN_SET(['max', 'john'])('max')
('max', None)
>>> IS_IN_SET(['max', 'john'])('massimo')
('massimo', 'value not allowed')
>>> IS_IN_SET(['max', 'john'], multiple=True)(('max', 'john'))
(('max', 'john'), None)
>>> IS_IN_SET(['max', 'john'], multiple=True)(('bill', 'john'))
(('bill', 'john'), 'value not allowed')
>>> IS_IN_SET(('id1','id2'), ['first label','second label'])('id1') # Traditional way
('id1', None)
>>> IS_IN_SET({'id1':'first label', 'id2':'second label'})('id1')
('id1', None)
>>> import itertools
>>> IS_IN_SET(itertools.chain(['1','3','5'],['2','4','6']))('1')
('1', None)
>>> IS_IN_SET([('id1','first label'), ('id2','second label')])('id1') # Redundant way
('id1', None)
"""
def __init__(
self,
theset,
labels=None,
error_message="Value not allowed",
multiple=False,
zero="",
sort=False,
):
self.multiple = multiple
if isinstance(theset, dict):
self.theset = [str(item) for item in theset]
self.labels = list(theset.values())
elif (
theset
and isinstance(theset, (tuple, list))
and isinstance(theset[0], (tuple, list))
and len(theset[0]) == 2
):
self.theset = [str(item) for item, label in theset]
self.labels = [str(label) for item, label in theset]
else:
self.theset = [str(item) for item in theset]
self.labels = labels
self.error_message = error_message
self.zero = zero
self.sort = sort
def options(self, zero=True):
        if not self.labels:
            items = [(k, k) for k in self.theset]
        else:
            labels = list(self.labels)
            items = [(k, labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(key=lambda o: str(o[1]).upper())
if zero and self.zero is not None and not self.multiple:
items.insert(0, ("", self.zero))
return items
def validate(self, value, record_id=None):
if self.multiple:
            # previously: values = re.compile(r"[\w\-:]+").findall(str(value))
if not value:
values = []
elif isinstance(value, (tuple, list)):
values = value
else:
values = [value]
else:
values = [value]
thestrset = [str(x) for x in self.theset]
        failures = [x for x in values if str(x) not in thestrset]
if failures and self.theset:
raise ValidationError(self.translator(self.error_message))
if self.multiple:
if (
isinstance(self.multiple, (tuple, list))
and not self.multiple[0] <= len(values) < self.multiple[1]
):
raise ValidationError(self.translator(self.error_message))
return values
return value
class IS_IN_DB(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name',
requires=IS_IN_DB(db, db.mytable.myfield, zero=''))
    used for reference fields, rendered as a drop-down
"""
REGEX_TABLE_DOT_FIELD = r"^(\w+)\.(\w+)$"
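    # matches %(fieldname)s-style interpolation specifiers inside label strings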
REGEX_INTERP_CONV_SPECIFIER = r"%\((\w+)\)\d*(?:\.\d+)?[a-zA-Z]"
def __init__(
self,
dbset,
field,
label=None,
error_message="Value not in database",
orderby=None,
groupby=None,
distinct=None,
cache=None,
multiple=False,
zero="",
sort=False,
_and=None,
left=None,
delimiter=None,
auto_add=False,
):
if hasattr(dbset, "define_table"):
self.dbset = dbset()
else:
self.dbset = dbset
if isinstance(field, Table):
field = field._id
elif isinstance(field, str):
items = field.split(".")
if len(items) == 1:
field = items[0] + ".id"
(ktable, kfield) = str(field).split(".")
if not label:
label = "%%(%s)s" % kfield
if isinstance(label, str):
m = re.match(self.REGEX_TABLE_DOT_FIELD, label)
if m:
label = "%%(%s)s" % m.group(2)
fieldnames = re.findall(self.REGEX_INTERP_CONV_SPECIFIER, label)
if kfield not in fieldnames:
fieldnames.append(kfield) # kfield must be last
elif isinstance(label, Field):
fieldnames = [label.name, kfield] # kfield must be last
label = "%%(%s)s" % label.name
elif callable(label):
fieldnames = "*"
else:
raise NotImplementedError
        self.fieldnames = fieldnames  # fields required to build the formatting
self.label = label
self.ktable = ktable
self.kfield = kfield
self.error_message = error_message
self.theset = None
self.orderby = orderby
self.groupby = groupby
self.distinct = distinct
self.cache = cache
self.multiple = multiple
self.zero = zero
self.sort = sort
self._and = _and
self.left = left
self.delimiter = delimiter
self.auto_add = auto_add
def set_self_id(self, id):
if self._and:
self._and.record_id = id
def build_set(self):
table = self.dbset.db[self.ktable]
if self.fieldnames == "*":
fields = [f for f in table]
else:
fields = [table[k] for k in self.fieldnames]
ignore = (FieldVirtual, FieldMethod)
fields = [f for f in fields if not isinstance(f, ignore)]
if self.dbset.db._dbname != "gae":
orderby = self.orderby or reduce(lambda a, b: a | b, fields)
groupby = self.groupby
distinct = self.distinct
left = self.left
dd = dict(
orderby=orderby,
groupby=groupby,
distinct=distinct,
cache=self.cache,
cacheable=True,
left=left,
)
records = self.dbset(table).select(*fields, **dd)
else:
orderby = self.orderby or reduce(
lambda a, b: a | b, (f for f in fields if not f.name == "id")
)
dd = dict(orderby=orderby, cache=self.cache, cacheable=True)
records = self.dbset(table).select(table.ALL, **dd)
self.theset = [str(r[self.kfield]) for r in records]
if isinstance(self.label, str):
self.labels = [self.label % r for r in records]
else:
self.labels = [self.label(r) for r in records]
def options(self, zero=True):
self.build_set()
items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(key=lambda o: str(o[1]).upper())
if zero and self.zero is not None and not self.multiple:
items.insert(0, ("", self.zero))
return items
    def maybe_add(self, table, fieldname, value):
        """return the id of the record with fieldname == value, inserting it first if missing"""
d = {fieldname: value}
record = table(**d)
if record:
return record.id
else:
return table.insert(**d)
def validate(self, value, record_id=None):
table = self.dbset.db[self.ktable]
field = table[self.kfield]
if self.multiple:
if self._and:
raise NotImplementedError
if isinstance(value, list):
values = value
elif self.delimiter:
values = value.split(self.delimiter) # because of autocomplete
elif value:
values = [value]
else:
values = []
if field.type in ("id", "integer"):
new_values = []
for value in values:
if not (isinstance(value, integer_types) or value.isdigit()):
if self.auto_add:
value = str(
self.maybe_add(table, self.fieldnames[0], value)
)
else:
raise ValidationError(self.translator(self.error_message))
new_values.append(value)
values = new_values
if (
isinstance(self.multiple, (tuple, list))
and not self.multiple[0] <= len(values) < self.multiple[1]
):
raise ValidationError(self.translator(self.error_message))
if self.theset:
if not [v for v in values if v not in self.theset]:
return values
else:
def count(values, s=self.dbset, f=field):
return s(f.belongs(list(map(int, values)))).count()
if self.dbset.db._adapter.dbengine == "google:datastore":
range_ids = range(0, len(values), 30)
total = sum(count(values[i : i + 30]) for i in range_ids)
if total == len(values):
return values
elif count(values) == len(values):
return values
else:
if field.type in ("id", "integer"):
if isinstance(value, integer_types) or (
isinstance(value, string_types) and value.isdigit()
):
value = int(value)
elif self.auto_add:
value = self.maybe_add(table, self.fieldnames[0], value)
else:
raise ValidationError(self.translator(self.error_message))
try:
value = int(value)
except TypeError:
raise ValidationError(self.translator(self.error_message))
if self.theset:
if str(value) in self.theset:
if self._and:
return validator_caller(self._and, value, record_id)
return value
else:
if self.dbset(field == value).count():
if self._and:
return validator_caller(self._and, value, record_id)
return value
raise ValidationError(self.translator(self.error_message))
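# Illustrative usage sketch (assumes hypothetical tables db.person and db.dog):
#
#     db.dog.owner.requires = IS_IN_DB(db, db.person.id, '%(name)s', zero=None)
#
# The validator builds its option list from db.person and accepts only ids that
# exist there; with multiple=True it accepts a list of ids instead.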
class IS_NOT_IN_DB(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_NOT_IN_DB(db, db.table))
makes the field unique
"""
def __init__(
self,
dbset,
field,
error_message="Value already in database or empty",
allowed_override=[],
ignore_common_filters=False,
):
if isinstance(field, Table):
field = field._id
if hasattr(dbset, "define_table"):
self.dbset = dbset()
else:
self.dbset = dbset
self.field = field
self.error_message = error_message
self.record_id = 0
self.allowed_override = allowed_override
self.ignore_common_filters = ignore_common_filters
def set_self_id(self, id):
        # this is legacy - web2py uses it, but nobody else should:
        # it is not safe if the validator object is recycled
self.record_id = id
def validate(self, value, record_id=None):
value = to_native(str(value))
if not value.strip():
raise ValidationError(self.translator(self.error_message))
if value in self.allowed_override:
return value
(tablename, fieldname) = str(self.field).split(".")
table = self.dbset.db[tablename]
field = table[fieldname]
query = field == value
        # make sure to exclude the record_id
id = record_id or self.record_id
if isinstance(id, dict):
id = table(**id)
        if id is not None:
query &= table._id != id
subset = self.dbset(query, ignore_common_filters=self.ignore_common_filters)
if subset.select(limitby=(0, 1)):
raise ValidationError(self.translator(self.error_message))
return value
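# Illustrative usage sketch (assumes a hypothetical db.person.email field):
#
#     db.person.email.requires = IS_NOT_IN_DB(db, db.person.email)
#
# On update forms, pass the id of the record being edited as record_id to
# validate() so that the record does not collide with itself.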
def range_error_message(error_message, what_to_enter, minimum, maximum):
"""build the error message for the number range validators"""
if error_message is None:
error_message = "Enter " + what_to_enter
if minimum is not None and maximum is not None:
error_message += " between %(min)g and %(max)g"
elif minimum is not None:
error_message += " greater than or equal to %(min)g"
elif maximum is not None:
error_message += " less than or equal to %(max)g"
if type(maximum) in integer_types:
maximum -= 1
return str(translate(error_message)) % dict(min=minimum, max=maximum)
class IS_INT_IN_RANGE(Validator):
"""
Determines that the argument is (or can be represented as) an int,
and that it falls within the specified range. The range is interpreted
in the Pythonic way, so the test is: min <= value < max.
The minimum and maximum limits can be None, meaning no lower or upper limit,
respectively.
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_INT_IN_RANGE(0, 10))
>>> IS_INT_IN_RANGE(1,5)('4')
(4, None)
>>> IS_INT_IN_RANGE(1,5)(4)
(4, None)
>>> IS_INT_IN_RANGE(1,5)(1)
(1, None)
>>> IS_INT_IN_RANGE(1,5)(5)
(5, 'enter an integer between 1 and 4')
>>> IS_INT_IN_RANGE(1,5)(3.5)
(3.5, 'enter an integer between 1 and 4')
>>> IS_INT_IN_RANGE(None,5)('4')
(4, None)
>>> IS_INT_IN_RANGE(None,5)('6')
('6', 'enter an integer less than or equal to 4')
>>> IS_INT_IN_RANGE(1,None)('4')
(4, None)
>>> IS_INT_IN_RANGE(1,None)('0')
('0', 'enter an integer greater than or equal to 1')
>>> IS_INT_IN_RANGE()(6)
(6, None)
>>> IS_INT_IN_RANGE()('abc')
('abc', 'enter an integer')
"""
REGEX_INT = r"^[+-]?\d+$"
def __init__(self, minimum=None, maximum=None, error_message=None):
self.minimum = int(minimum) if minimum is not None else None
self.maximum = int(maximum) if maximum is not None else None
self.error_message = error_message
def validate(self, value, record_id=None):
if re.match(self.REGEX_INT, str(value)):
v = int(value)
if (self.minimum is None or v >= self.minimum) and (
self.maximum is None or v < self.maximum
):
return v
raise ValidationError(
range_error_message(
self.error_message, "an integer", self.minimum, self.maximum
)
)
def str2dec(number):
    """format a number as a string with at least two decimal digits"""
    s = str(number)
if "." not in s:
s += ".00"
else:
s += "0" * (2 - len(s.split(".")[1]))
return s
class IS_FLOAT_IN_RANGE(Validator):
"""
Determines that the argument is (or can be represented as) a float,
and that it falls within the specified inclusive range.
The comparison is made with native arithmetic.
The minimum and maximum limits can be None, meaning no lower or upper limit,
respectively.
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_FLOAT_IN_RANGE(0, 10))
>>> IS_FLOAT_IN_RANGE(1,5)('4')
(4.0, None)
>>> IS_FLOAT_IN_RANGE(1,5)(4)
(4.0, None)
>>> IS_FLOAT_IN_RANGE(1,5)(1)
(1.0, None)
>>> IS_FLOAT_IN_RANGE(1,5)(5.25)
(5.25, 'enter a number between 1 and 5')
>>> IS_FLOAT_IN_RANGE(1,5)(6.0)
(6.0, 'enter a number between 1 and 5')
>>> IS_FLOAT_IN_RANGE(1,5)(3.5)
(3.5, None)
>>> IS_FLOAT_IN_RANGE(1,None)(3.5)
(3.5, None)
>>> IS_FLOAT_IN_RANGE(None,5)(3.5)
(3.5, None)
>>> IS_FLOAT_IN_RANGE(1,None)(0.5)
(0.5, 'enter a number greater than or equal to 1')
>>> IS_FLOAT_IN_RANGE(None,5)(6.5)
(6.5, 'enter a number less than or equal to 5')
>>> IS_FLOAT_IN_RANGE()(6.5)
(6.5, None)
>>> IS_FLOAT_IN_RANGE()('abc')
('abc', 'enter a number')
"""
def __init__(self, minimum=None, maximum=None, error_message=None, dot="."):
self.minimum = float(minimum) if minimum is not None else None
self.maximum = float(maximum) if maximum is not None else None
self.dot = str(dot)
self.error_message = error_message
def validate(self, value, record_id=None):
try:
if self.dot == ".":
v = float(value)
else:
v = float(str(value).replace(self.dot, "."))
if (self.minimum is None or v >= self.minimum) and (
self.maximum is None or v <= self.maximum
):
return v
except (ValueError, TypeError):
pass
raise ValidationError(
range_error_message(
self.error_message, "a number", self.minimum, self.maximum
)
)
def formatter(self, value):
if value is None:
return None
return str2dec(value).replace(".", self.dot)
class IS_DECIMAL_IN_RANGE(Validator):
"""
Determines that the argument is (or can be represented as) a Python Decimal,
and that it falls within the specified inclusive range.
The comparison is made with Python Decimal arithmetic.
The minimum and maximum limits can be None, meaning no lower or upper limit,
respectively.
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_DECIMAL_IN_RANGE(0, 10))
>>> IS_DECIMAL_IN_RANGE(1,5)('4')
(Decimal('4'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(4)
(Decimal('4'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(1)
(Decimal('1'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(5.25)
(5.25, 'enter a number between 1 and 5')
>>> IS_DECIMAL_IN_RANGE(5.25,6)(5.25)
(Decimal('5.25'), None)
>>> IS_DECIMAL_IN_RANGE(5.25,6)('5.25')
(Decimal('5.25'), None)
>>> IS_DECIMAL_IN_RANGE(1,5)(6.0)
(6.0, 'enter a number between 1 and 5')
>>> IS_DECIMAL_IN_RANGE(1,5)(3.5)
(Decimal('3.5'), None)
>>> IS_DECIMAL_IN_RANGE(1.5,5.5)(3.5)
(Decimal('3.5'), None)
>>> IS_DECIMAL_IN_RANGE(1.5,5.5)(6.5)
(6.5, 'enter a number between 1.5 and 5.5')
>>> IS_DECIMAL_IN_RANGE(1.5,None)(6.5)
(Decimal('6.5'), None)
>>> IS_DECIMAL_IN_RANGE(1.5,None)(0.5)
(0.5, 'enter a number greater than or equal to 1.5')
>>> IS_DECIMAL_IN_RANGE(None,5.5)(4.5)
(Decimal('4.5'), None)
>>> IS_DECIMAL_IN_RANGE(None,5.5)(6.5)
(6.5, 'enter a number less than or equal to 5.5')
>>> IS_DECIMAL_IN_RANGE()(6.5)
(Decimal('6.5'), None)
>>> IS_DECIMAL_IN_RANGE(0,99)(123.123)
(123.123, 'enter a number between 0 and 99')
>>> IS_DECIMAL_IN_RANGE(0,99)('123.123')
('123.123', 'enter a number between 0 and 99')
>>> IS_DECIMAL_IN_RANGE(0,99)('12.34')
(Decimal('12.34'), None)
>>> IS_DECIMAL_IN_RANGE()('abc')
('abc', 'enter a number')
"""
def __init__(self, minimum=None, maximum=None, error_message=None, dot="."):
self.minimum = decimal.Decimal(str(minimum)) if minimum is not None else None
self.maximum = decimal.Decimal(str(maximum)) if maximum is not None else None
self.dot = str(dot)
self.error_message = error_message
def validate(self, value, record_id=None):
try:
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value).replace(self.dot, "."))
if (self.minimum is None or value >= self.minimum) and (
self.maximum is None or value <= self.maximum
):
return value
except (ValueError, TypeError, decimal.InvalidOperation):
pass
raise ValidationError(
range_error_message(
self.error_message, "a number", self.minimum, self.maximum
)
)
def formatter(self, value):
if value is None:
return None
return str2dec(value).replace(".", self.dot)
def is_empty(value, empty_regex=None):
    """test empty field"""
    _value = value
if isinstance(value, (str, unicodeT)):
value = value.strip()
if empty_regex is not None and empty_regex.match(value):
value = ""
if value is None or value == "" or value == b"" or value == []:
return (_value, True)
return (_value, False)
class IS_NOT_EMPTY(Validator):
"""
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_NOT_EMPTY())
>>> IS_NOT_EMPTY()(1)
(1, None)
>>> IS_NOT_EMPTY()(0)
(0, None)
>>> IS_NOT_EMPTY()('x')
('x', None)
>>> IS_NOT_EMPTY()(' x ')
('x', None)
>>> IS_NOT_EMPTY()(None)
(None, 'enter a value')
>>> IS_NOT_EMPTY()('')
('', 'enter a value')
>>> IS_NOT_EMPTY()(' ')
('', 'enter a value')
>>> IS_NOT_EMPTY()(' \\n\\t')
('', 'enter a value')
>>> IS_NOT_EMPTY()([])
([], 'enter a value')
>>> IS_NOT_EMPTY(empty_regex='def')('def')
('', 'enter a value')
>>> IS_NOT_EMPTY(empty_regex='de[fg]')('deg')
('', 'enter a value')
>>> IS_NOT_EMPTY(empty_regex='def')('abc')
('abc', None)
"""
def __init__(self, error_message="Enter a value", empty_regex=None):
self.error_message = error_message
if empty_regex is not None:
self.empty_regex = re.compile(empty_regex)
else:
self.empty_regex = None
def validate(self, value, record_id=None):
value, empty = is_empty(value, empty_regex=self.empty_regex)
if empty:
raise ValidationError(self.translator(self.error_message))
return value
class IS_ALPHANUMERIC(IS_MATCH):
"""
Example:
Used as::
INPUT(_type='text', _name='name', requires=IS_ALPHANUMERIC())
>>> IS_ALPHANUMERIC()('1')
('1', None)
>>> IS_ALPHANUMERIC()('')
('', None)
>>> IS_ALPHANUMERIC()('A_a')
('A_a', None)
>>> IS_ALPHANUMERIC()('!')
('!', 'enter only letters, numbers, and underscore')
"""
def __init__(self, error_message="Enter only letters, numbers, and underscore"):
IS_MATCH.__init__(self, r"^[\w]*$", error_message)
class IS_EMAIL(Validator):
"""
Checks if field's value is a valid email address. Can be set to disallow
or force addresses from certain domain(s).
Email regex adapted from
http://haacked.com/archive/2007/08/21/i-knew-how-to-validate-an-email-address-until-i.aspx,
generally following the RFCs, except that we disallow quoted strings
and permit underscores and leading numerics in subdomain labels
Args:
banned: regex text for disallowed address domains
forced: regex text for required address domains
Both arguments can also be custom objects with a match(value) method.
Example:
Check for valid email address::
INPUT(_type='text', _name='name',
requires=IS_EMAIL())
Check for valid email address that can't be from a .com domain::
INPUT(_type='text', _name='name',
requires=IS_EMAIL(banned='^.*\\.com(|\\..*)$'))
Check for valid email address that must be from a .edu domain::
INPUT(_type='text', _name='name',
requires=IS_EMAIL(forced='^.*\\.edu(|\\..*)$'))
>>> IS_EMAIL()('a@b.com')
('a@b.com', None)
>>> IS_EMAIL()('abc@def.com')
('abc@def.com', None)
>>> IS_EMAIL()('abc@3def.com')
('abc@3def.com', None)
>>> IS_EMAIL()('abc@def.us')
('abc@def.us', None)
>>> IS_EMAIL()('abc@d_-f.us')
('abc@d_-f.us', None)
>>> IS_EMAIL()('@def.com') # missing name
('@def.com', 'enter a valid email address')
>>> IS_EMAIL()('"abc@def".com') # quoted name
('"abc@def".com', 'enter a valid email address')
>>> IS_EMAIL()('abc+def.com') # no @
('abc+def.com', 'enter a valid email address')
>>> IS_EMAIL()('abc@def.x') # one-char TLD
('abc@def.x', 'enter a valid email address')
>>> IS_EMAIL()('abc@def.12') # numeric TLD
('abc@def.12', 'enter a valid email address')
>>> IS_EMAIL()('abc@def..com') # double-dot in domain
('abc@def..com', 'enter a valid email address')
>>> IS_EMAIL()('abc@.def.com') # dot starts domain
('abc@.def.com', 'enter a valid email address')
>>> IS_EMAIL()('abc@def.c_m') # underscore in TLD
('abc@def.c_m', 'enter a valid email address')
>>> IS_EMAIL()('NotAnEmail') # missing @
('NotAnEmail', 'enter a valid email address')
>>> IS_EMAIL()('abc@NotAnEmail') # missing TLD
('abc@NotAnEmail', 'enter a valid email address')
>>> IS_EMAIL()('customer/department@example.com')
('customer/department@example.com', None)
>>> IS_EMAIL()('$A12345@example.com')
('$A12345@example.com', None)
>>> IS_EMAIL()('!def!xyz%abc@example.com')
('!def!xyz%abc@example.com', None)
>>> IS_EMAIL()('_Yosemite.Sam@example.com')
('_Yosemite.Sam@example.com', None)
>>> IS_EMAIL()('~@example.com')
('~@example.com', None)
>>> IS_EMAIL()('.wooly@example.com') # dot starts name
('.wooly@example.com', 'enter a valid email address')
>>> IS_EMAIL()('wo..oly@example.com') # adjacent dots in name
('wo..oly@example.com', 'enter a valid email address')
>>> IS_EMAIL()('pootietang.@example.com') # dot ends name
('pootietang.@example.com', 'enter a valid email address')
>>> IS_EMAIL()('.@example.com') # name is bare dot
('.@example.com', 'enter a valid email address')
>>> IS_EMAIL()('Ima.Fool@example.com')
('Ima.Fool@example.com', None)
>>> IS_EMAIL()('Ima Fool@example.com') # space in name
('Ima Fool@example.com', 'enter a valid email address')
>>> IS_EMAIL()('localguy@localhost') # localhost as domain
('localguy@localhost', None)
"""
# NOTE: use these with flags = re.VERBOSE | re.IGNORECASE
REGEX_BODY = r"""
^(?!\.) # name may not begin with a dot
(
[-a-z0-9!\#$%&'*+/=?^_`{|}~] # all legal characters except dot
|
(?<!\.)\. # single dots only
)+
(?<!\.)$ # name may not end with a dot
"""
REGEX_DOMAIN = r"""
(
localhost
|
(
[a-z0-9] # [sub]domain begins with alphanumeric
(
                    [-\w]* # alphanumeric, underscore, hyphen
[a-z0-9] # ending alphanumeric
)?
\. # ending dot
)+
[a-z]{2,} # TLD alpha-only
)$
"""
# regex_proposed_but_failed = re.compile(r'^([\w\!\#$\%\&\'\*\+\-\/\=\?\^\`{\|\}\~]+\.)*[\w\!\#$\%\&\'\*\+\-\/\=\?\^\`{\|\}\~]+@((((([a-z0-9]{1}[a-z0-9\-]{0,62}[a-z0-9]{1})|[a-z])\.)+[a-z]{2,6})|(\d{1,3}\.){3}\d{1,3}(\:\d{1,5})?)$', re.VERBOSE | re.IGNORECASE)
def __init__(
self, banned=None, forced=None, error_message="Enter a valid email address"
):
if isinstance(banned, str):
banned = re.compile(banned)
if isinstance(forced, str):
forced = re.compile(forced)
self.banned = banned
self.forced = forced
self.error_message = error_message
def validate(self, value, record_id=None):
if (
not (isinstance(value, (basestring, unicodeT)))
or not value
or "@" not in value
):
raise ValidationError(self.translator(self.error_message))
body, domain = value.rsplit("@", 1)
try:
regex_flags = re.VERBOSE | re.IGNORECASE
match_body = re.match(self.REGEX_BODY, body, regex_flags)
match_domain = re.match(self.REGEX_DOMAIN, domain, regex_flags)
if not match_domain:
# check for Internationalized Domain Names
# see https://docs.python.org/2/library/codecs.html#module-encodings.idna
domain_encoded = to_unicode(domain).encode("idna").decode("ascii")
match_domain = re.match(self.REGEX_DOMAIN, domain_encoded, regex_flags)
match = (match_body is not None) and (match_domain is not None)
except (TypeError, UnicodeError):
# Value may not be a string where we can look for matches.
# Example: we're calling ANY_OF formatter and IS_EMAIL is asked to validate a date.
match = None
if match:
if (not self.banned or not self.banned.match(domain)) and (
not self.forced or self.forced.match(domain)
):
return value
raise ValidationError(self.translator(self.error_message))
class IS_LIST_OF_EMAILS(Validator):
"""
Example:
Used as::
Field('emails', 'list:string',
widget=SQLFORM.widgets.text.widget,
requires=IS_LIST_OF_EMAILS(),
represent=lambda v, r: \
XML(', '.join([A(x, _href='mailto:'+x).xml() for x in (v or [])]))
)
"""
REGEX_NOT_EMAIL_SPLITTER = r"[^,;\s]+"
def __init__(self, error_message="Invalid emails: %s"):
self.error_message = error_message
def validate(self, value, record_id=None):
bad_emails = []
f = IS_EMAIL()
for email in re.findall(self.REGEX_NOT_EMAIL_SPLITTER, value):
error = f(email)[1]
if error and email not in bad_emails:
bad_emails.append(email)
if bad_emails:
raise ValidationError(
self.translator(self.error_message) % ", ".join(bad_emails)
)
return value
def formatter(self, value, row=None):
return ", ".join(value or [])
# URL scheme source:
# <http://en.wikipedia.org/wiki/URI_scheme> obtained on 2008-Nov-10
official_url_schemes = [
"aaa",
"aaas",
"acap",
"cap",
"cid",
"crid",
"data",
"dav",
"dict",
"dns",
"fax",
"file",
"ftp",
"go",
"gopher",
"h323",
"http",
"https",
"icap",
"im",
"imap",
"info",
"ipp",
"iris",
"iris.beep",
"iris.xpc",
"iris.xpcs",
"iris.lws",
"ldap",
"mailto",
"mid",
"modem",
"msrp",
"msrps",
"mtqp",
"mupdate",
"news",
"nfs",
"nntp",
"opaquelocktoken",
"pop",
"pres",
"prospero",
"rtsp",
"service",
"shttp",
"sip",
"sips",
"snmp",
"soap.beep",
"soap.beeps",
"tag",
"tel",
"telnet",
"tftp",
"thismessage",
"tip",
"tv",
"urn",
"vemmi",
"wais",
"xmlrpc.beep",
"xmlrpc.beep",
"xmpp",
"z39.50r",
"z39.50s",
]
unofficial_url_schemes = [
"about",
"adiumxtra",
"aim",
"afp",
"aw",
"callto",
"chrome",
"cvs",
"ed2k",
"feed",
"fish",
"gg",
"gizmoproject",
"iax2",
"irc",
"ircs",
"itms",
"jar",
"javascript",
"keyparc",
"lastfm",
"ldaps",
"magnet",
"mms",
"msnim",
"mvn",
"notes",
"nsfw",
"psyc",
"paparazzi:http",
"rmi",
"rsync",
"secondlife",
"sgn",
"skype",
"ssh",
"sftp",
"smb",
"sms",
"soldat",
"steam",
"svn",
"teamspeak",
"unreal",
"ut2004",
"ventrilo",
"view-source",
"webcal",
"wyciwyg",
"xfire",
"xri",
"ymsgr",
]
all_url_schemes = [None] + official_url_schemes + unofficial_url_schemes
http_schemes = [None, "http", "https"]
# Defined in RFC 3490, Section 3.1, Requirement #1
# Use this regex to split the authority component of a unicode URL into
# its component labels
REGEX_AUTHORITY_SPLITTER = u"[\u002e\u3002\uff0e\uff61]"
def escape_unicode(string):
"""
Converts a unicode string into US-ASCII, using a simple conversion scheme.
Each unicode character that does not have a US-ASCII equivalent is
converted into a URL escaped form based on its hexadecimal value.
For example, the unicode character '\\u4e86' will become the string '%4e%86'
Args:
string: unicode string, the unicode string to convert into an
escaped US-ASCII form
Returns:
string: the US-ASCII escaped form of the inputted string
@author: Jonathan Benn
"""
returnValue = StringIO()
for character in string:
code = ord(character)
if code > 0x7F:
hexCode = hex(code)
returnValue.write("%" + hexCode[2:4] + "%" + hexCode[4:6])
else:
returnValue.write(character)
return returnValue.getvalue()
def unicode_to_ascii_authority(authority):
"""
Follows the steps in RFC 3490, Section 4 to convert a unicode authority
string into its ASCII equivalent.
For example, u'www.Alliancefran\\xe7aise.nu' will be converted into
'www.xn--alliancefranaise-npb.nu'
Args:
authority: unicode string, the URL authority component to convert,
e.g. u'www.Alliancefran\\xe7aise.nu'
Returns:
        string: the US-ASCII character equivalent to the inputted authority,
e.g. 'www.xn--alliancefranaise-npb.nu'
Raises:
        Exception: if the function is not able to convert the inputted
authority
@author: Jonathan Benn
"""
# RFC 3490, Section 4, Step 1
# The encodings.idna Python module assumes that AllowUnassigned == True
# RFC 3490, Section 4, Step 2
labels = re.split(REGEX_AUTHORITY_SPLITTER, authority)
# RFC 3490, Section 4, Step 3
# The encodings.idna Python module assumes that UseSTD3ASCIIRules == False
# RFC 3490, Section 4, Step 4
# We use the ToASCII operation because we are about to put the authority
# into an IDN-unaware slot
asciiLabels = []
for label in labels:
if label:
asciiLabels.append(to_native(encodings.idna.ToASCII(label)))
else:
# encodings.idna.ToASCII does not accept an empty string, but
# it is necessary for us to allow for empty labels so that we
# don't modify the URL
asciiLabels.append("")
# RFC 3490, Section 4, Step 5
return str(reduce(lambda x, y: x + unichr(0x002E) + y, asciiLabels))
def unicode_to_ascii_url(url, prepend_scheme):
"""
    Converts the inputted unicode URL into a US-ASCII equivalent. This function
goes a little beyond RFC 3490, which is limited in scope to the domain name
(authority) only. Here, the functionality is expanded to what was observed
on Wikipedia on 2009-Jan-22:
Component Can Use Unicode?
--------- ----------------
scheme No
authority Yes
path Yes
query Yes
fragment No
The authority component gets converted to punycode, but occurrences of
unicode in other components get converted into a pair of URI escapes (we
assume 4-byte unicode). E.g. the unicode character U+4E2D will be
    converted into '%4e%2d'. Testing with Firefox v3.0.5 has shown that it can
understand this kind of URI encoding.
Args:
url: unicode string, the URL to convert from unicode into US-ASCII
prepend_scheme: string, a protocol scheme to prepend to the URL if
we're having trouble parsing it.
e.g. "http". Input None to disable this functionality
Returns:
        string: a US-ASCII equivalent of the inputted URL
@author: Jonathan Benn
"""
# convert the authority component of the URL into an ASCII punycode string,
# but encode the rest using the regular URI character encoding
components = urlparse.urlparse(url)
prepended = False
# If no authority was found
if not components.netloc:
# Try appending a scheme to see if that fixes the problem
scheme_to_prepend = prepend_scheme or "http"
components = urlparse.urlparse(to_unicode(scheme_to_prepend) + u"://" + url)
prepended = True
# if we still can't find the authority
if not components.netloc:
# And it's not because the url is a relative url
if not url.startswith("/"):
raise Exception(
"No authority component found, "
+ "could not decode unicode to US-ASCII"
)
# We're here if we found an authority, let's rebuild the URL
scheme = components.scheme
authority = components.netloc
path = components.path
query = components.query
fragment = components.fragment
if prepended:
scheme = ""
unparsed = urlparse.urlunparse(
(
scheme,
unicode_to_ascii_authority(authority),
escape_unicode(path),
"",
escape_unicode(query),
str(fragment),
)
)
if unparsed.startswith("//"):
unparsed = unparsed[2:] # Remove the // urlunparse puts in the beginning
return unparsed
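# Illustrative sketch: combining the helpers above, an IDN URL such as
# u'http://www.Alliancefran\xe7aise.nu/' is expected to come back as
# 'http://www.xn--alliancefranaise-npb.nu/' (authority in punycode, the other
# components percent-escaped where needed).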
class IS_GENERIC_URL(Validator):
"""
Rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The URL scheme specified (if one is specified) is not valid
Based on RFC 2396: http://www.faqs.org/rfcs/rfc2396.html
This function only checks the URL's syntax. It does not check that the URL
points to a real document, for example, or that it otherwise makes sense
semantically. This function does automatically prepend 'http://' in front
of a URL if and only if that's necessary to successfully parse the URL.
Please note that a scheme will be prepended only for rare cases
(e.g. 'google.ca:80')
The list of allowed schemes is customizable with the allowed_schemes
parameter. If you exclude None from the list, then abbreviated URLs
(lacking a scheme such as 'http') will be rejected.
The default prepended scheme is customizable with the prepend_scheme
parameter. If you set prepend_scheme to None then prepending will be
disabled. URLs that require prepending to parse will still be accepted,
but the return value will not be modified.
@author: Jonathan Benn
>>> IS_GENERIC_URL()('http://user@abc.com')
('http://user@abc.com', None)
Args:
error_message: a string, the error message to give the end user
if the URL does not validate
allowed_schemes: a list containing strings or None. Each element
            is a scheme the inputted URL is allowed to use
prepend_scheme: a string, this scheme is prepended if it's
necessary to make the URL valid
"""
def __init__(
self,
error_message="Enter a valid URL",
allowed_schemes=None,
prepend_scheme=None,
):
self.error_message = error_message
if allowed_schemes is None:
self.allowed_schemes = all_url_schemes
else:
self.allowed_schemes = allowed_schemes
self.prepend_scheme = prepend_scheme
if self.prepend_scheme not in self.allowed_schemes:
raise SyntaxError(
"prepend_scheme='%s' is not in allowed_schemes=%s"
% (self.prepend_scheme, self.allowed_schemes)
)
REGEX_GENERIC_URL = r"%[^0-9A-Fa-f]{2}|%[^0-9A-Fa-f][0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]|%$|%[0-9A-Fa-f]$|%[^0-9A-Fa-f]$"
REGEX_GENERIC_URL_VALID = r"[A-Za-z0-9;/?:@&=+$,\-_\.!~*'\(\)%]+$"
REGEX_URL_FRAGMENT_VALID = r"[|A-Za-z0-9;/?:@&=+$,\-_\.!~*'\(\)%]+$"
def validate(self, value, record_id=None):
"""
Args:
value: a string, the URL to validate
Returns:
            a tuple, where tuple[0] is the inputted value (possibly
            prepended with prepend_scheme), and tuple[1] is either
None (success!) or the string error_message
"""
        # if we don't have anything or the URL misuses the '%' character
if not value or re.search(self.REGEX_GENERIC_URL, value):
raise ValidationError(self.translator(self.error_message))
if "#" in value:
url, fragment_part = value.split("#")
else:
url, fragment_part = value, ""
# if the URL is only composed of valid characters
if not re.match(self.REGEX_GENERIC_URL_VALID, url) or (
fragment_part and not re.match(self.REGEX_URL_FRAGMENT_VALID, fragment_part)
):
raise ValidationError(self.translator(self.error_message))
        # Then parse the URL into its components and check the scheme
try:
components = urlparse.urlparse(urllib_unquote(value))._asdict()
except ValueError:
raise ValidationError(self.translator(self.error_message))
# Clean up the scheme before we check it
scheme = components["scheme"]
if len(scheme) == 0:
scheme = None
else:
scheme = components["scheme"].lower()
        # If the scheme doesn't really exist
if (
scheme not in self.allowed_schemes
or not scheme
and ":" in components["path"]
):
# for the possible case of abbreviated URLs with
# ports, check to see if adding a valid scheme fixes
# the problem (but only do this if it doesn't have
# one already!)
if "://" not in value and None in self.allowed_schemes:
schemeToUse = self.prepend_scheme or "http"
new_value = self.validate(schemeToUse + "://" + value)
return new_value if self.prepend_scheme else value
raise ValidationError(self.translator(self.error_message))
return value
# Sources (obtained 2017-Nov-11):
# http://data.iana.org/TLD/tlds-alpha-by-domain.txt
# see scripts/parse_top_level_domains.py for an easy update
official_top_level_domains = [
# a
"aaa",
"aarp",
"abarth",
"abb",
"abbott",
"abbvie",
"abc",
"able",
"abogado",
"abudhabi",
"ac",
"academy",
"accenture",
"accountant",
"accountants",
"aco",
"active",
"actor",
"ad",
"adac",
"ads",
"adult",
"ae",
"aeg",
"aero",
"aetna",
"af",
"afamilycompany",
"afl",
"africa",
"ag",
"agakhan",
"agency",
"ai",
"aig",
"aigo",
"airbus",
"airforce",
"airtel",
"akdn",
"al",
"alfaromeo",
"alibaba",
"alipay",
"allfinanz",
"allstate",
"ally",
"alsace",
"alstom",
"am",
"americanexpress",
"americanfamily",
"amex",
"amfam",
"amica",
"amsterdam",
"analytics",
"android",
"anquan",
"anz",
"ao",
"aol",
"apartments",
"app",
"apple",
"aq",
"aquarelle",
"ar",
"arab",
"aramco",
"archi",
"army",
"arpa",
"art",
"arte",
"as",
"asda",
"asia",
"associates",
"at",
"athleta",
"attorney",
"au",
"auction",
"audi",
"audible",
"audio",
"auspost",
"author",
"auto",
"autos",
"avianca",
"aw",
"aws",
"ax",
"axa",
"az",
"azure",
# b
"ba",
"baby",
"baidu",
"banamex",
"bananarepublic",
"band",
"bank",
"bar",
"barcelona",
"barclaycard",
"barclays",
"barefoot",
"bargains",
"baseball",
"basketball",
"bauhaus",
"bayern",
"bb",
"bbc",
"bbt",
"bbva",
"bcg",
"bcn",
"bd",
"be",
"beats",
"beauty",
"beer",
"bentley",
"berlin",
"best",
"bestbuy",
"bet",
"bf",
"bg",
"bh",
"bharti",
"bi",
"bible",
"bid",
"bike",
"bing",
"bingo",
"bio",
"biz",
"bj",
"black",
"blackfriday",
"blanco",
"blockbuster",
"blog",
"bloomberg",
"blue",
"bm",
"bms",
"bmw",
"bn",
"bnl",
"bnpparibas",
"bo",
"boats",
"boehringer",
"bofa",
"bom",
"bond",
"boo",
"book",
"booking",
"boots",
"bosch",
"bostik",
"boston",
"bot",
"boutique",
"box",
"br",
"bradesco",
"bridgestone",
"broadway",
"broker",
"brother",
"brussels",
"bs",
"bt",
"budapest",
"bugatti",
"build",
"builders",
"business",
"buy",
"buzz",
"bv",
"bw",
"by",
"bz",
"bzh",
# c
"ca",
"cab",
"cafe",
"cal",
"call",
"calvinklein",
"cam",
"camera",
"camp",
"cancerresearch",
"canon",
"capetown",
"capital",
"capitalone",
"car",
"caravan",
"cards",
"care",
"career",
"careers",
"cars",
"cartier",
"casa",
"case",
"caseih",
"cash",
"casino",
"cat",
"catering",
"catholic",
"cba",
"cbn",
"cbre",
"cbs",
"cc",
"cd",
"ceb",
"center",
"ceo",
"cern",
"cf",
"cfa",
"cfd",
"cg",
"ch",
"chanel",
"channel",
"chase",
"chat",
"cheap",
"chintai",
"christmas",
"chrome",
"chrysler",
"church",
"ci",
"cipriani",
"circle",
"cisco",
"citadel",
"citi",
"citic",
"city",
"cityeats",
"ck",
"cl",
"claims",
"cleaning",
"click",
"clinic",
"clinique",
"clothing",
"cloud",
"club",
"clubmed",
"cm",
"cn",
"co",
"coach",
"codes",
"coffee",
"college",
"cologne",
"com",
"comcast",
"commbank",
"community",
"company",
"compare",
"computer",
"comsec",
"condos",
"construction",
"consulting",
"contact",
"contractors",
"cooking",
"cookingchannel",
"cool",
"coop",
"corsica",
"country",
"coupon",
"coupons",
"courses",
"cr",
"credit",
"creditcard",
"creditunion",
"cricket",
"crown",
"crs",
"cruise",
"cruises",
"csc",
"cu",
"cuisinella",
"cv",
"cw",
"cx",
"cy",
"cymru",
"cyou",
"cz",
# d
"dabur",
"dad",
"dance",
"data",
"date",
"dating",
"datsun",
"day",
"dclk",
"dds",
"de",
"deal",
"dealer",
"deals",
"degree",
"delivery",
"dell",
"deloitte",
"delta",
"democrat",
"dental",
"dentist",
"desi",
"design",
"dev",
"dhl",
"diamonds",
"diet",
"digital",
"direct",
"directory",
"discount",
"discover",
"dish",
"diy",
"dj",
"dk",
"dm",
"dnp",
"do",
"docs",
"doctor",
"dodge",
"dog",
"doha",
"domains",
"dot",
"download",
"drive",
"dtv",
"dubai",
"duck",
"dunlop",
"duns",
"dupont",
"durban",
"dvag",
"dvr",
"dz",
# e
"earth",
"eat",
"ec",
"eco",
"edeka",
"edu",
"education",
"ee",
"eg",
"email",
"emerck",
"energy",
"engineer",
"engineering",
"enterprises",
"epost",
"epson",
"equipment",
"er",
"ericsson",
"erni",
"es",
"esq",
"estate",
"esurance",
"et",
"etisalat",
"eu",
"eurovision",
"eus",
"events",
"everbank",
"exchange",
"expert",
"exposed",
"express",
"extraspace",
# f
"fage",
"fail",
"fairwinds",
"faith",
"family",
"fan",
"fans",
"farm",
"farmers",
"fashion",
"fast",
"fedex",
"feedback",
"ferrari",
"ferrero",
"fi",
"fiat",
"fidelity",
"fido",
"film",
"final",
"finance",
"financial",
"fire",
"firestone",
"firmdale",
"fish",
"fishing",
"fit",
"fitness",
"fj",
"fk",
"flickr",
"flights",
"flir",
"florist",
"flowers",
"fly",
"fm",
"fo",
"foo",
"food",
"foodnetwork",
"football",
"ford",
"forex",
"forsale",
"forum",
"foundation",
"fox",
"fr",
"free",
"fresenius",
"frl",
"frogans",
"frontdoor",
"frontier",
"ftr",
"fujitsu",
"fujixerox",
"fun",
"fund",
"furniture",
"futbol",
"fyi",
# g
"ga",
"gal",
"gallery",
"gallo",
"gallup",
"game",
"games",
"gap",
"garden",
"gb",
"gbiz",
"gd",
"gdn",
"ge",
"gea",
"gent",
"genting",
"george",
"gf",
"gg",
"ggee",
"gh",
"gi",
"gift",
"gifts",
"gives",
"giving",
"gl",
"glade",
"glass",
"gle",
"global",
"globo",
"gm",
"gmail",
"gmbh",
"gmo",
"gmx",
"gn",
"godaddy",
"gold",
"goldpoint",
"golf",
"goo",
"goodhands",
"goodyear",
"goog",
"google",
"gop",
"got",
"gov",
"gp",
"gq",
"gr",
"grainger",
"graphics",
"gratis",
"green",
"gripe",
"grocery",
"group",
"gs",
"gt",
"gu",
"guardian",
"gucci",
"guge",
"guide",
"guitars",
"guru",
"gw",
"gy",
# h
"hair",
"hamburg",
"hangout",
"haus",
"hbo",
"hdfc",
"hdfcbank",
"health",
"healthcare",
"help",
"helsinki",
"here",
"hermes",
"hgtv",
"hiphop",
"hisamitsu",
"hitachi",
"hiv",
"hk",
"hkt",
"hm",
"hn",
"hockey",
"holdings",
"holiday",
"homedepot",
"homegoods",
"homes",
"homesense",
"honda",
"honeywell",
"horse",
"hospital",
"host",
"hosting",
"hot",
"hoteles",
"hotels",
"hotmail",
"house",
"how",
"hr",
"hsbc",
"ht",
"hu",
"hughes",
"hyatt",
"hyundai",
# i
"ibm",
"icbc",
"ice",
"icu",
"id",
"ie",
"ieee",
"ifm",
"ikano",
"il",
"im",
"imamat",
"imdb",
"immo",
"immobilien",
"in",
"industries",
"infiniti",
"info",
"ing",
"ink",
"institute",
"insurance",
"insure",
"int",
"intel",
"international",
"intuit",
"investments",
"io",
"ipiranga",
"iq",
"ir",
"irish",
"is",
"iselect",
"ismaili",
"ist",
"istanbul",
"it",
"itau",
"itv",
"iveco",
"iwc",
# j
"jaguar",
"java",
"jcb",
"jcp",
"je",
"jeep",
"jetzt",
"jewelry",
"jio",
"jlc",
"jll",
"jm",
"jmp",
"jnj",
"jo",
"jobs",
"joburg",
"jot",
"joy",
"jp",
"jpmorgan",
"jprs",
"juegos",
"juniper",
# k
"kaufen",
"kddi",
"ke",
"kerryhotels",
"kerrylogistics",
"kerryproperties",
"kfh",
"kg",
"kh",
"ki",
"kia",
"kim",
"kinder",
"kindle",
"kitchen",
"kiwi",
"km",
"kn",
"koeln",
"komatsu",
"kosher",
"kp",
"kpmg",
"kpn",
"kr",
"krd",
"kred",
"kuokgroup",
"kw",
"ky",
"kyoto",
"kz",
# l
"la",
"lacaixa",
"ladbrokes",
"lamborghini",
"lamer",
"lancaster",
"lancia",
"lancome",
"land",
"landrover",
"lanxess",
"lasalle",
"lat",
"latino",
"latrobe",
"law",
"lawyer",
"lb",
"lc",
"lds",
"lease",
"leclerc",
"lefrak",
"legal",
"lego",
"lexus",
"lgbt",
"li",
"liaison",
"lidl",
"life",
"lifeinsurance",
"lifestyle",
"lighting",
"like",
"lilly",
"limited",
"limo",
"lincoln",
"linde",
"link",
"lipsy",
"live",
"living",
"lixil",
"lk",
"loan",
"loans",
"localhost",
"locker",
"locus",
"loft",
"lol",
"london",
"lotte",
"lotto",
"love",
"lpl",
"lplfinancial",
"lr",
"ls",
"lt",
"ltd",
"ltda",
"lu",
"lundbeck",
"lupin",
"luxe",
"luxury",
"lv",
"ly",
# m
"ma",
"macys",
"madrid",
"maif",
"maison",
"makeup",
"man",
"management",
"mango",
"map",
"market",
"marketing",
"markets",
"marriott",
"marshalls",
"maserati",
"mattel",
"mba",
"mc",
"mckinsey",
"md",
"me",
"med",
"media",
"meet",
"melbourne",
"meme",
"memorial",
"men",
"menu",
"meo",
"merckmsd",
"metlife",
"mg",
"mh",
"miami",
"microsoft",
"mil",
"mini",
"mint",
"mit",
"mitsubishi",
"mk",
"ml",
"mlb",
"mls",
"mm",
"mma",
"mn",
"mo",
"mobi",
"mobile",
"mobily",
"moda",
"moe",
"moi",
"mom",
"monash",
"money",
"monster",
"mopar",
"mormon",
"mortgage",
"moscow",
"moto",
"motorcycles",
"mov",
"movie",
"movistar",
"mp",
"mq",
"mr",
"ms",
"msd",
"mt",
"mtn",
"mtr",
"mu",
"museum",
"mutual",
"mv",
"mw",
"mx",
"my",
"mz",
# n
"na",
"nab",
"nadex",
"nagoya",
"name",
"nationwide",
"natura",
"navy",
"nba",
"nc",
"ne",
"nec",
"net",
"netbank",
"netflix",
"network",
"neustar",
"new",
"newholland",
"news",
"next",
"nextdirect",
"nexus",
"nf",
"nfl",
"ng",
"ngo",
"nhk",
"ni",
"nico",
"nike",
"nikon",
"ninja",
"nissan",
"nissay",
"nl",
"no",
"nokia",
"northwesternmutual",
"norton",
"now",
"nowruz",
"nowtv",
"np",
"nr",
"nra",
"nrw",
"ntt",
"nu",
"nyc",
"nz",
# o
"obi",
"observer",
"off",
"office",
"okinawa",
"olayan",
"olayangroup",
"oldnavy",
"ollo",
"om",
"omega",
"one",
"ong",
"onl",
"online",
"onyourside",
"ooo",
"open",
"oracle",
"orange",
"org",
"organic",
"origins",
"osaka",
"otsuka",
"ott",
"ovh",
# p
"pa",
"page",
"panasonic",
"panerai",
"paris",
"pars",
"partners",
"parts",
"party",
"passagens",
"pay",
"pccw",
"pe",
"pet",
"pf",
"pfizer",
"pg",
"ph",
"pharmacy",
"phd",
"philips",
"phone",
"photo",
"photography",
"photos",
"physio",
"piaget",
"pics",
"pictet",
"pictures",
"pid",
"pin",
"ping",
"pink",
"pioneer",
"pizza",
"pk",
"pl",
"place",
"play",
"playstation",
"plumbing",
"plus",
"pm",
"pn",
"pnc",
"pohl",
"poker",
"politie",
"porn",
"post",
"pr",
"pramerica",
"praxi",
"press",
"prime",
"pro",
"prod",
"productions",
"prof",
"progressive",
"promo",
"properties",
"property",
"protection",
"pru",
"prudential",
"ps",
"pt",
"pub",
"pw",
"pwc",
"py",
# q
"qa",
"qpon",
"quebec",
"quest",
"qvc",
# r
"racing",
"radio",
"raid",
"re",
"read",
"realestate",
"realtor",
"realty",
"recipes",
"red",
"redstone",
"redumbrella",
"rehab",
"reise",
"reisen",
"reit",
"reliance",
"ren",
"rent",
"rentals",
"repair",
"report",
"republican",
"rest",
"restaurant",
"review",
"reviews",
"rexroth",
"rich",
"richardli",
"ricoh",
"rightathome",
"ril",
"rio",
"rip",
"rmit",
"ro",
"rocher",
"rocks",
"rodeo",
"rogers",
"room",
"rs",
"rsvp",
"ru",
"rugby",
"ruhr",
"run",
"rw",
"rwe",
"ryukyu",
# s
"sa",
"saarland",
"safe",
"safety",
"sakura",
"sale",
"salon",
"samsclub",
"samsung",
"sandvik",
"sandvikcoromant",
"sanofi",
"sap",
"sapo",
"sarl",
"sas",
"save",
"saxo",
"sb",
"sbi",
"sbs",
"sc",
"sca",
"scb",
"schaeffler",
"schmidt",
"scholarships",
"school",
"schule",
"schwarz",
"science",
"scjohnson",
"scor",
"scot",
"sd",
"se",
"search",
"seat",
"secure",
"security",
"seek",
"select",
"sener",
"services",
"ses",
"seven",
"sew",
"sex",
"sexy",
"sfr",
"sg",
"sh",
"shangrila",
"sharp",
"shaw",
"shell",
"shia",
"shiksha",
"shoes",
"shop",
"shopping",
"shouji",
"show",
"showtime",
"shriram",
"si",
"silk",
"sina",
"singles",
"site",
"sj",
"sk",
"ski",
"skin",
"sky",
"skype",
"sl",
"sling",
"sm",
"smart",
"smile",
"sn",
"sncf",
"so",
"soccer",
"social",
"softbank",
"software",
"sohu",
"solar",
"solutions",
"song",
"sony",
"soy",
"space",
"spiegel",
"spot",
"spreadbetting",
"sr",
"srl",
"srt",
"st",
"stada",
"staples",
"star",
"starhub",
"statebank",
"statefarm",
"statoil",
"stc",
"stcgroup",
"stockholm",
"storage",
"store",
"stream",
"studio",
"study",
"style",
"su",
"sucks",
"supplies",
"supply",
"support",
"surf",
"surgery",
"suzuki",
"sv",
"swatch",
"swiftcover",
"swiss",
"sx",
"sy",
"sydney",
"symantec",
"systems",
"sz",
# t
"tab",
"taipei",
"talk",
"taobao",
"target",
"tatamotors",
"tatar",
"tattoo",
"tax",
"taxi",
"tc",
"tci",
"td",
"tdk",
"team",
"tech",
"technology",
"tel",
"telecity",
"telefonica",
"temasek",
"tennis",
"teva",
"tf",
"tg",
"th",
"thd",
"theater",
"theatre",
"tiaa",
"tickets",
"tienda",
"tiffany",
"tips",
"tires",
"tirol",
"tj",
"tjmaxx",
"tjx",
"tk",
"tkmaxx",
"tl",
"tm",
"tmall",
"tn",
"to",
"today",
"tokyo",
"tools",
"top",
"toray",
"toshiba",
"total",
"tours",
"town",
"toyota",
"toys",
"tr",
"trade",
"trading",
"training",
"travel",
"travelchannel",
"travelers",
"travelersinsurance",
"trust",
"trv",
"tt",
"tube",
"tui",
"tunes",
"tushu",
"tv",
"tvs",
"tw",
"tz",
# u
"ua",
"ubank",
"ubs",
"uconnect",
"ug",
"uk",
"unicom",
"university",
"uno",
"uol",
"ups",
"us",
"uy",
"uz",
# v
"va",
"vacations",
"vana",
"vanguard",
"vc",
"ve",
"vegas",
"ventures",
"verisign",
"versicherung",
"vet",
"vg",
"vi",
"viajes",
"video",
"vig",
"viking",
"villas",
"vin",
"vip",
"virgin",
"visa",
"vision",
"vista",
"vistaprint",
"viva",
"vivo",
"vlaanderen",
"vn",
"vodka",
"volkswagen",
"volvo",
"vote",
"voting",
"voto",
"voyage",
"vu",
"vuelos",
# w
"wales",
"walmart",
"walter",
"wang",
"wanggou",
"warman",
"watch",
"watches",
"weather",
"weatherchannel",
"webcam",
"weber",
"website",
"wed",
"wedding",
"weibo",
"weir",
"wf",
"whoswho",
"wien",
"wiki",
"williamhill",
"win",
"windows",
"wine",
"winners",
"wme",
"wolterskluwer",
"woodside",
"work",
"works",
"world",
"wow",
"ws",
"wtc",
"wtf",
# x
"xbox",
"xerox",
"xfinity",
"xihuan",
"xin",
"xn--11b4c3d",
"xn--1ck2e1b",
"xn--1qqw23a",
"xn--2scrj9c",
"xn--30rr7y",
"xn--3bst00m",
"xn--3ds443g",
"xn--3e0b707e",
"xn--3hcrj9c",
"xn--3oq18vl8pn36a",
"xn--3pxu8k",
"xn--42c2d9a",
"xn--45br5cyl",
"xn--45brj9c",
"xn--45q11c",
"xn--4gbrim",
"xn--54b7fta0cc",
"xn--55qw42g",
"xn--55qx5d",
"xn--5su34j936bgsg",
"xn--5tzm5g",
"xn--6frz82g",
"xn--6qq986b3xl",
"xn--80adxhks",
"xn--80ao21a",
"xn--80aqecdr1a",
"xn--80asehdb",
"xn--80aswg",
"xn--8y0a063a",
"xn--90a3ac",
"xn--90ae",
"xn--90ais",
"xn--9dbq2a",
"xn--9et52u",
"xn--9krt00a",
"xn--b4w605ferd",
"xn--bck1b9a5dre4c",
"xn--c1avg",
"xn--c2br7g",
"xn--cck2b3b",
"xn--cg4bki",
"xn--clchc0ea0b2g2a9gcd",
"xn--czr694b",
"xn--czrs0t",
"xn--czru2d",
"xn--d1acj3b",
"xn--d1alf",
"xn--e1a4c",
"xn--eckvdtc9d",
"xn--efvy88h",
"xn--estv75g",
"xn--fct429k",
"xn--fhbei",
"xn--fiq228c5hs",
"xn--fiq64b",
"xn--fiqs8s",
"xn--fiqz9s",
"xn--fjq720a",
"xn--flw351e",
"xn--fpcrj9c3d",
"xn--fzc2c9e2c",
"xn--fzys8d69uvgm",
"xn--g2xx48c",
"xn--gckr3f0f",
"xn--gecrj9c",
"xn--gk3at1e",
"xn--h2breg3eve",
"xn--h2brj9c",
"xn--h2brj9c8c",
"xn--hxt814e",
"xn--i1b6b1a6a2e",
"xn--imr513n",
"xn--io0a7i",
"xn--j1aef",
"xn--j1amh",
"xn--j6w193g",
"xn--jlq61u9w7b",
"xn--jvr189m",
"xn--kcrx77d1x4a",
"xn--kprw13d",
"xn--kpry57d",
"xn--kpu716f",
"xn--kput3i",
"xn--l1acc",
"xn--lgbbat1ad8j",
"xn--mgb9awbf",
"xn--mgba3a3ejt",
"xn--mgba3a4f16a",
"xn--mgba7c0bbn0a",
"xn--mgbaakc7dvf",
"xn--mgbaam7a8h",
"xn--mgbab2bd",
"xn--mgbai9azgqp6j",
"xn--mgbayh7gpa",
"xn--mgbb9fbpob",
"xn--mgbbh1a",
"xn--mgbbh1a71e",
"xn--mgbc0a9azcg",
"xn--mgbca7dzdo",
"xn--mgberp4a5d4ar",
"xn--mgbgu82a",
"xn--mgbi4ecexp",
"xn--mgbpl2fh",
"xn--mgbt3dhd",
"xn--mgbtx2b",
"xn--mgbx4cd0ab",
"xn--mix891f",
"xn--mk1bu44c",
"xn--mxtq1m",
"xn--ngbc5azd",
"xn--ngbe9e0a",
"xn--ngbrx",
"xn--node",
"xn--nqv7f",
"xn--nqv7fs00ema",
"xn--nyqy26a",
"xn--o3cw4h",
"xn--ogbpf8fl",
"xn--p1acf",
"xn--p1ai",
"xn--pbt977c",
"xn--pgbs0dh",
"xn--pssy2u",
"xn--q9jyb4c",
"xn--qcka1pmc",
"xn--qxam",
"xn--rhqv96g",
"xn--rovu88b",
"xn--rvc1e0am3e",
"xn--s9brj9c",
"xn--ses554g",
"xn--t60b56a",
"xn--tckwe",
"xn--tiq49xqyj",
"xn--unup4y",
"xn--vermgensberater-ctb",
"xn--vermgensberatung-pwb",
"xn--vhquv",
"xn--vuq861b",
"xn--w4r85el8fhu5dnra",
"xn--w4rs40l",
"xn--wgbh1c",
"xn--wgbl6a",
"xn--xhq521b",
"xn--xkc2al3hye2a",
"xn--xkc2dl3a5ee0h",
"xn--y9a3aq",
"xn--yfro4i67o",
"xn--ygbi2ammx",
"xn--zfr164b",
"xperia",
"xxx",
"xyz",
# y
"yachts",
"yahoo",
"yamaxun",
"yandex",
"ye",
"yodobashi",
"yoga",
"yokohama",
"you",
"youtube",
"yt",
"yun",
# z
"za",
"zappos",
"zara",
"zero",
"zip",
"zippo",
"zm",
"zone",
"zuerich",
"zw",
]
class IS_HTTP_URL(Validator):
"""
Rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The string breaks any of the HTTP syntactic rules
* The URL scheme specified (if one is specified) is not 'http' or 'https'
* The top-level domain (if a host name is specified) does not exist
Based on RFC 2616: http://www.faqs.org/rfcs/rfc2616.html
This function only checks the URL's syntax. It does not check that the URL
points to a real document, for example, or that it otherwise makes sense
semantically. This function does automatically prepend 'http://' in front
of a URL in the case of an abbreviated URL (e.g. 'google.ca').
The list of allowed schemes is customizable with the allowed_schemes
parameter. If you exclude None from the list, then abbreviated URLs
(lacking a scheme such as 'http') will be rejected.
The default prepended scheme is customizable with the prepend_scheme
parameter. If you set prepend_scheme to None then prepending will be
disabled. URLs that require prepending to parse will still be accepted,
but the return value will not be modified.
@author: Jonathan Benn
>>> IS_HTTP_URL()('http://1.2.3.4')
('http://1.2.3.4', None)
>>> IS_HTTP_URL()('http://abc.com')
('http://abc.com', None)
>>> IS_HTTP_URL()('https://abc.com')
('https://abc.com', None)
>>> IS_HTTP_URL()('httpx://abc.com')
('httpx://abc.com', 'enter a valid URL')
>>> IS_HTTP_URL()('http://abc.com:80')
('http://abc.com:80', None)
>>> IS_HTTP_URL()('http://user@abc.com')
('http://user@abc.com', None)
>>> IS_HTTP_URL()('http://user@1.2.3.4')
('http://user@1.2.3.4', None)
Args:
error_message: a string, the error message to give the end user
if the URL does not validate
allowed_schemes: a list containing strings or None. Each element
            is a scheme the inputted URL is allowed to use
prepend_scheme: a string, this scheme is prepended if it's
necessary to make the URL valid
"""
REGEX_GENERIC_VALID_IP = r"([\w.!~*'|;:&=+$,-]+@)?\d+\.\d+\.\d+\.\d+(:\d*)*$"
REGEX_GENERIC_VALID_DOMAIN = r"([\w.!~*'|;:&=+$,-]+@)?(([A-Za-z0-9]+[A-Za-z0-9\-]*[A-Za-z0-9]+\.)*([A-Za-z0-9]+\.)*)*([A-Za-z]+[A-Za-z0-9\-]*[A-Za-z0-9]+)\.?(:\d*)*$"
def __init__(
self,
error_message="Enter a valid URL",
allowed_schemes=None,
prepend_scheme="http",
allowed_tlds=None,
):
self.error_message = error_message
if allowed_schemes is None:
self.allowed_schemes = http_schemes
else:
self.allowed_schemes = allowed_schemes
if allowed_tlds is None:
self.allowed_tlds = official_top_level_domains
else:
self.allowed_tlds = allowed_tlds
self.prepend_scheme = prepend_scheme
for i in self.allowed_schemes:
if i not in http_schemes:
raise SyntaxError(
"allowed_scheme value '%s' is not in %s" % (i, http_schemes)
)
if self.prepend_scheme not in self.allowed_schemes:
raise SyntaxError(
"prepend_scheme='%s' is not in allowed_schemes=%s"
% (self.prepend_scheme, self.allowed_schemes)
)
def validate(self, value, record_id=None):
"""
Args:
value: a string, the URL to validate
Returns:
            a tuple, where tuple[0] is the inputted value
            (possibly prepended with prepend_scheme), and tuple[1] is either
None (success!) or the string error_message
"""
try:
# if the URL passes generic validation
x = IS_GENERIC_URL(
error_message=self.error_message,
allowed_schemes=self.allowed_schemes,
prepend_scheme=self.prepend_scheme,
)
if x(value)[1] is None:
components = urlparse.urlparse(value)
authority = components.netloc
# if there is an authority component
if authority:
# if authority is a valid IP address
if re.match(self.REGEX_GENERIC_VALID_IP, authority):
# Then this HTTP URL is valid
return value
else:
# else if authority is a valid domain name
domainMatch = re.match(
self.REGEX_GENERIC_VALID_DOMAIN, authority
)
if domainMatch:
# if the top-level domain really exists
if domainMatch.group(5).lower() in self.allowed_tlds:
# Then this HTTP URL is valid
return value
else:
# else this is a relative/abbreviated URL, which will parse
# into the URL's path component
path = components.path
# relative case: if this is a valid path (if it starts with
# a slash)
if not path.startswith("/"):
# abbreviated case: if we haven't already, prepend a
# scheme and see if it fixes the problem
if "://" not in value and None in self.allowed_schemes:
schemeToUse = self.prepend_scheme or "http"
new_value = self.validate(schemeToUse + "://" + value)
return new_value if self.prepend_scheme else value
return value
        except Exception:
pass
raise ValidationError(self.translator(self.error_message))
class IS_URL(Validator):
"""
Rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The string breaks any of the HTTP syntactic rules
* The URL scheme specified (if one is specified) is not 'http' or 'https'
* The top-level domain (if a host name is specified) does not exist
(These rules are based on RFC 2616: http://www.faqs.org/rfcs/rfc2616.html)
This function only checks the URL's syntax. It does not check that the URL
points to a real document, for example, or that it otherwise makes sense
semantically. This function does automatically prepend 'http://' in front
of a URL in the case of an abbreviated URL (e.g. 'google.ca').
If the parameter mode='generic' is used, then this function's behavior
changes. It then rejects a URL string if any of the following is true:
* The string is empty or None
* The string uses characters that are not allowed in a URL
* The URL scheme specified (if one is specified) is not valid
(These rules are based on RFC 2396: http://www.faqs.org/rfcs/rfc2396.html)
The list of allowed schemes is customizable with the allowed_schemes
parameter. If you exclude None from the list, then abbreviated URLs
(lacking a scheme such as 'http') will be rejected.
The default prepended scheme is customizable with the prepend_scheme
parameter. If you set prepend_scheme to None then prepending will be
disabled. URLs that require prepending to parse will still be accepted,
but the return value will not be modified.
IS_URL is compatible with the Internationalized Domain Name (IDN) standard
specified in RFC 3490 (http://tools.ietf.org/html/rfc3490). As a result,
URLs can be regular strings or unicode strings.
If the URL's domain component (e.g. google.ca) contains non-US-ASCII
letters, then the domain will be converted into Punycode (defined in
RFC 3492, http://tools.ietf.org/html/rfc3492). IS_URL goes a bit beyond
the standards, and allows non-US-ASCII characters to be present in the path
and query components of the URL as well. These non-US-ASCII characters will
be escaped using the standard '%20' type syntax. e.g. the unicode
character with hex code 0x4e86 will become '%4e%86'
Args:
error_message: a string, the error message to give the end user
if the URL does not validate
allowed_schemes: a list containing strings or None. Each element
            is a scheme the inputted URL is allowed to use
prepend_scheme: a string, this scheme is prepended if it's
necessary to make the URL valid
Code Examples::
INPUT(_type='text', _name='name', requires=IS_URL())
>>> IS_URL()('abc.com')
('http://abc.com', None)
INPUT(_type='text', _name='name', requires=IS_URL(mode='generic'))
>>> IS_URL(mode='generic')('abc.com')
('abc.com', None)
INPUT(_type='text', _name='name',
requires=IS_URL(allowed_schemes=['https'], prepend_scheme='https'))
>>> IS_URL(allowed_schemes=['https'], prepend_scheme='https')('https://abc.com')
('https://abc.com', None)
INPUT(_type='text', _name='name',
requires=IS_URL(prepend_scheme='https'))
>>> IS_URL(prepend_scheme='https')('abc.com')
('https://abc.com', None)
INPUT(_type='text', _name='name',
requires=IS_URL(mode='generic', allowed_schemes=['ftps', 'https'],
prepend_scheme='https'))
>>> IS_URL(mode='generic', allowed_schemes=['ftps', 'https'], prepend_scheme='https')('https://abc.com')
('https://abc.com', None)
>>> IS_URL(mode='generic', allowed_schemes=['ftps', 'https', None], prepend_scheme='https')('abc.com')
('abc.com', None)
@author: Jonathan Benn
"""
def __init__(
self,
error_message="Enter a valid URL",
mode="http",
allowed_schemes=None,
prepend_scheme="http",
allowed_tlds=None,
):
self.error_message = error_message
self.mode = mode.lower()
if self.mode not in ["generic", "http"]:
raise SyntaxError("invalid mode '%s' in IS_URL" % self.mode)
self.allowed_schemes = allowed_schemes
if allowed_tlds is None:
self.allowed_tlds = official_top_level_domains
else:
self.allowed_tlds = allowed_tlds
if self.allowed_schemes:
if prepend_scheme not in self.allowed_schemes:
raise SyntaxError(
"prepend_scheme='%s' is not in allowed_schemes=%s"
% (prepend_scheme, self.allowed_schemes)
)
# if allowed_schemes is None, then we will defer testing
# prepend_scheme's validity to a sub-method
self.prepend_scheme = prepend_scheme
def validate(self, value, record_id=None):
"""
Args:
value: a unicode or regular string, the URL to validate
Returns:
a (string, string) tuple, where tuple[0] is the modified
input value and tuple[1] is either None (success!) or the
string error_message. The input value will never be modified in the
case of an error. However, if there is success then the input URL
may be modified to (1) prepend a scheme, and/or (2) convert a
non-compliant unicode URL into a compliant US-ASCII version.
"""
if self.mode == "generic":
subMethod = IS_GENERIC_URL(
error_message=self.error_message,
allowed_schemes=self.allowed_schemes,
prepend_scheme=self.prepend_scheme,
)
elif self.mode == "http":
subMethod = IS_HTTP_URL(
error_message=self.error_message,
allowed_schemes=self.allowed_schemes,
prepend_scheme=self.prepend_scheme,
allowed_tlds=self.allowed_tlds,
)
else:
raise SyntaxError("invalid mode '%s' in IS_URL" % self.mode)
if isinstance(value, unicodeT):
try:
value = unicode_to_ascii_url(value, self.prepend_scheme)
except Exception as e:
# If we are not able to convert the unicode url into a
# US-ASCII URL, then the URL is not valid
raise ValidationError(self.translator(self.error_message))
return subMethod.validate(value, record_id)
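# Illustrative usage sketch (not part of pydal): the docstring above mentions
# that non-ASCII domains are converted to Punycode per RFC 3490.  The helper
# below demonstrates that conversion with the standard library's "idna" codec
# and shows a plain validator call; IS_URL itself performs the equivalent
# conversion through unicode_to_ascii_url, so this is only a demonstration.
def _example_is_url_usage():
    # Punycode conversion of a non-ASCII domain name.
    ascii_domain = "bücher.example".encode("idna")  # b'xn--bcher-kva.example'
    # Calling a validator instance returns (value, error); error is None on success.
    value, error = IS_URL()("abc.com")  # -> ('http://abc.com', None), as in the docstring
    return ascii_domain, value, error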
class IS_TIME(Validator):
"""
Example:
Use as::
INPUT(_type='text', _name='name', requires=IS_TIME())
understands the following formats
hh:mm:ss [am/pm]
hh:mm [am/pm]
hh [am/pm]
[am/pm] is optional, ':' can be replaced by any other non-space non-digit::
>>> IS_TIME()('21:30')
(datetime.time(21, 30), None)
>>> IS_TIME()('21-30')
(datetime.time(21, 30), None)
>>> IS_TIME()('21.30')
(datetime.time(21, 30), None)
>>> IS_TIME()('21:30:59')
(datetime.time(21, 30, 59), None)
>>> IS_TIME()('5:30')
(datetime.time(5, 30), None)
>>> IS_TIME()('5:30 am')
(datetime.time(5, 30), None)
>>> IS_TIME()('5:30 pm')
(datetime.time(17, 30), None)
>>> IS_TIME()('5:30 whatever')
('5:30 whatever', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('5:30 20')
('5:30 20', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('24:30')
('24:30', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('21:60')
('21:60', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('21:30::')
('21:30::', 'enter time as hh:mm:ss (seconds, am, pm optional)')
>>> IS_TIME()('')
('', 'enter time as hh:mm:ss (seconds, am, pm optional)')
"""
REGEX_TIME = "((?P<h>[0-9]+))([^0-9 ]+(?P<m>[0-9 ]+))?([^0-9ap ]+(?P<s>[0-9]*))?((?P<d>[ap]m))?"
def __init__(
self, error_message="Enter time as hh:mm:ss (seconds, am, pm optional)"
):
self.error_message = error_message
def validate(self, value, record_id=None):
try:
ivalue = value
value = re.match(self.REGEX_TIME, value.lower())
(h, m, s) = (int(value.group("h")), 0, 0)
if not value.group("m") is None:
m = int(value.group("m"))
if not value.group("s") is None:
s = int(value.group("s"))
if value.group("d") == "pm" and 0 < h < 12:
h += 12
if value.group("d") == "am" and h == 12:
h = 0
if not (h in range(24) and m in range(60) and s in range(60)):
raise ValueError(
"Hours or minutes or seconds are outside of allowed range"
)
value = datetime.time(h, m, s)
return value
except Exception:
raise ValidationError(self.translator(self.error_message))
# A UTC class.
class UTC(datetime.tzinfo):
"""UTC"""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return UTC.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return UTC.ZERO
utc = UTC()
class IS_DATE(Validator):
"""
Examples:
Use as::
INPUT(_type='text', _name='name', requires=IS_DATE())
date has to be in the ISO 8601 format YYYY-MM-DD
"""
def __init__(self, format="%Y-%m-%d", error_message="Enter date as %(format)s"):
self.format = self.translator(format)
self.error_message = str(error_message)
self.extremes = {}
def validate(self, value, record_id=None):
if isinstance(value, datetime.date):
return value
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(value, str(self.format))
value = datetime.date(y, m, d)
return value
except:
self.extremes.update(IS_DATETIME.nice(self.format))
raise ValidationError(self.translator(self.error_message) % self.extremes)
def formatter(self, value):
if value is None or value == "":
return None
format = self.format
year = value.year
y = "%.4i" % year
format = format.replace("%y", y[-2:])
format = format.replace("%Y", y)
if year < 1900:
year = 2000
d = datetime.date(year, value.month, value.day)
return d.strftime(format)
class IS_DATETIME(Validator):
"""
Examples:
Use as::
INPUT(_type='text', _name='name', requires=IS_DATETIME())
datetime has to be in the ISO 8601 format YYYY-MM-DD hh:mm:ss
timezone must be None or a pytz.timezone("America/Chicago") object
"""
isodatetime = "%Y-%m-%d %H:%M:%S"
@staticmethod
def nice(format):
code = (
("%Y", "1963"),
("%y", "63"),
("%d", "28"),
("%m", "08"),
("%b", "Aug"),
("%B", "August"),
("%H", "14"),
("%I", "02"),
("%p", "PM"),
("%M", "30"),
("%S", "59"),
)
for (a, b) in code:
format = format.replace(a, b)
return dict(format=format)
def __init__(
self,
format="%Y-%m-%d %H:%M:%S",
error_message="Enter date and time as %(format)s",
timezone=None,
):
self.format = self.translator(format)
self.error_message = str(error_message)
self.extremes = {}
self.timezone = timezone
def validate(self, value, record_id=None):
if isinstance(value, datetime.datetime):
return value
try:
if self.format == self.isodatetime:
value = value.replace("T", " ")
if len(value) == 16:
value += ":00"
(y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(value, str(self.format))
value = datetime.datetime(y, m, d, hh, mm, ss)
if self.timezone is not None:
# TODO: https://github.com/web2py/web2py/issues/1094 (temporary solution)
value = (
self.timezone.localize(value).astimezone(utc).replace(tzinfo=None)
)
return value
except:
self.extremes.update(IS_DATETIME.nice(self.format))
raise ValidationError(self.translator(self.error_message) % self.extremes)
def formatter(self, value):
if value is None or value == "":
return None
format = self.format
year = value.year
y = "%.4i" % year
format = format.replace("%y", y[-2:])
format = format.replace("%Y", y)
if year < 1900:
year = 2000
d = datetime.datetime(
year, value.month, value.day, value.hour, value.minute, value.second
)
if self.timezone is not None:
d = d.replace(tzinfo=utc).astimezone(self.timezone)
return d.strftime(format)
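# Illustrative usage sketch (not part of pydal; assumes pytz is installed, as
# the IS_DATETIME docstring suggests): when a timezone is given, the parsed
# naive datetime is localized to that zone and converted to naive UTC, which
# is what the block marked TODO in IS_DATETIME.validate() does.
def _example_is_datetime_timezone():
    import pytz  # assumption: pytz is available in the environment
    chicago = pytz.timezone("America/Chicago")
    value, error = IS_DATETIME(timezone=chicago)("2019-01-15 12:00:00")
    # Chicago is UTC-6 in January, so value is datetime(2019, 1, 15, 18, 0), naive UTC.
    return value, error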
class IS_DATE_IN_RANGE(IS_DATE):
"""
Examples:
Use as::
>>> v = IS_DATE_IN_RANGE(minimum=datetime.date(2008,1,1), \
maximum=datetime.date(2009,12,31), \
format="%m/%d/%Y",error_message="Oops")
>>> v('03/03/2008')
(datetime.date(2008, 3, 3), None)
>>> v('03/03/2010')
('03/03/2010', 'oops')
>>> v(datetime.date(2008,3,3))
(datetime.date(2008, 3, 3), None)
>>> v(datetime.date(2010,3,3))
(datetime.date(2010, 3, 3), 'oops')
"""
def __init__(
self, minimum=None, maximum=None, format="%Y-%m-%d", error_message=None
):
self.minimum = minimum
self.maximum = maximum
if error_message is None:
if minimum is None:
error_message = "Enter date on or before %(max)s"
elif maximum is None:
error_message = "Enter date on or after %(min)s"
else:
error_message = "Enter date in range %(min)s %(max)s"
IS_DATE.__init__(self, format=format, error_message=error_message)
self.extremes = dict(min=self.formatter(minimum), max=self.formatter(maximum))
def validate(self, value, record_id=None):
value = IS_DATE.validate(self, value, record_id=None)
if self.minimum and self.minimum > value:
raise ValidationError(self.translator(self.error_message) % self.extremes)
if self.maximum and value > self.maximum:
raise ValidationError(self.translator(self.error_message) % self.extremes)
return value
class IS_DATETIME_IN_RANGE(IS_DATETIME):
"""
Examples:
Use as::
>>> v = IS_DATETIME_IN_RANGE(\
minimum=datetime.datetime(2008,1,1,12,20), \
maximum=datetime.datetime(2009,12,31,12,20), \
format="%m/%d/%Y %H:%M",error_message="Oops")
>>> v('03/03/2008 12:40')
(datetime.datetime(2008, 3, 3, 12, 40), None)
>>> v('03/03/2010 10:34')
('03/03/2010 10:34', 'oops')
>>> v(datetime.datetime(2008,3,3,0,0))
(datetime.datetime(2008, 3, 3, 0, 0), None)
>>> v(datetime.datetime(2010,3,3,0,0))
(datetime.datetime(2010, 3, 3, 0, 0), 'oops')
"""
def __init__(
self,
minimum=None,
maximum=None,
format="%Y-%m-%d %H:%M:%S",
error_message=None,
timezone=None,
):
self.minimum = minimum
self.maximum = maximum
if error_message is None:
if minimum is None:
error_message = "Enter date and time on or before %(max)s"
elif maximum is None:
error_message = "Enter date and time on or after %(min)s"
else:
error_message = "Enter date and time in range %(min)s %(max)s"
IS_DATETIME.__init__(
self, format=format, error_message=error_message, timezone=timezone
)
self.extremes = dict(min=self.formatter(minimum), max=self.formatter(maximum))
def validate(self, value, record_id=None):
value = IS_DATETIME.validate(self, value, record_id=None)
if self.minimum and self.minimum > value:
raise ValidationError(self.translator(self.error_message) % self.extremes)
if self.maximum and value > self.maximum:
raise ValidationError(self.translator(self.error_message) % self.extremes)
return value
class IS_LIST_OF(Validator):
def __init__(self, other=None, minimum=None, maximum=None, error_message=None):
self.other = other
self.minimum = minimum
self.maximum = maximum
self.error_message = error_message
def validate(self, value, record_id=None):
ivalue = value
if not isinstance(value, list):
ivalue = [ivalue]
ivalue = [i for i in ivalue if str(i).strip()]
if self.minimum is not None and len(ivalue) < self.minimum:
raise ValidationError(
self.translator(self.error_message or "Minimum length is %(min)s")
% dict(min=self.minimum, max=self.maximum)
)
if self.maximum is not None and len(ivalue) > self.maximum:
raise ValidationError(
self.translator(self.error_message or "Maximum length is %(max)s")
% dict(min=self.minimum, max=self.maximum)
)
new_value = []
other = self.other
if self.other:
if not isinstance(other, (list, tuple)):
other = [other]
for item in ivalue:
v = item
for validator in other:
v = validator_caller(validator, v, record_id)
new_value.append(v)
ivalue = new_value
return ivalue
class IS_LOWER(Validator):
"""
Converts to lowercase::
>>> IS_LOWER()('ABC')
('abc', None)
>>> IS_LOWER()('Ñ')
('\\xc3\\xb1', None)
"""
def validate(self, value, record_id=None):
cast_back = lambda x: x
if isinstance(value, str):
cast_back = to_native
elif isinstance(value, bytes):
cast_back = to_bytes
value = to_unicode(value).lower()
return cast_back(value)
class IS_UPPER(Validator):
"""
Converts to uppercase::
>>> IS_UPPER()('abc')
('ABC', None)
>>> IS_UPPER()('ñ')
('\\xc3\\x91', None)
"""
def validate(self, value, record_id=None):
cast_back = lambda x: x
if isinstance(value, str):
cast_back = to_native
elif isinstance(value, bytes):
cast_back = to_bytes
value = to_unicode(value).upper()
return cast_back(value)
def urlify(s, maxlen=80, keep_underscores=False):
"""
Converts incoming string to a simplified ASCII subset.
if (keep_underscores): underscores are retained in the string
else: underscores are translated to hyphens (default)
"""
s = to_unicode(s) # to unicode
s = s.lower() # to lowercase
s = unicodedata.normalize("NFKD", s) # replace special characters
s = to_native(s, charset="ascii", errors="ignore") # encode as ASCII
s = re.sub(r"&\w+?;", "", s) # strip html entities
if keep_underscores:
s = re.sub(r"\s+", "-", s) # whitespace to hyphens
s = re.sub(r"[^\w\-]", "", s)
# strip all but alphanumeric/underscore/hyphen
else:
s = re.sub(r"[\s_]+", "-", s) # whitespace & underscores to hyphens
s = re.sub(r"[^a-z0-9\-]", "", s) # strip all but alphanumeric/hyphen
s = re.sub(r"[-_][-_]+", "-", s) # collapse strings of hyphens
s = s.strip("-") # remove leading and trailing hyphens
return s[:maxlen] # enforce maximum length
class IS_SLUG(Validator):
"""
converts arbitrary text string to a slug::
>>> IS_SLUG()('abc123')
('abc123', None)
>>> IS_SLUG()('ABC123')
('abc123', None)
>>> IS_SLUG()('abc-123')
('abc-123', None)
>>> IS_SLUG()('abc--123')
('abc-123', None)
>>> IS_SLUG()('abc 123')
('abc-123', None)
>>> IS_SLUG()('abc\t_123')
('abc-123', None)
>>> IS_SLUG()('-abc-')
('abc', None)
>>> IS_SLUG()('--a--b--_ -c--')
('a-b-c', None)
>>> IS_SLUG()('abc&123')
('abc123', None)
>>> IS_SLUG()('abc&123&def')
('abc123def', None)
>>> IS_SLUG()('ñ')
('n', None)
>>> IS_SLUG(maxlen=4)('abc123')
('abc1', None)
>>> IS_SLUG()('abc_123')
('abc-123', None)
>>> IS_SLUG(keep_underscores=False)('abc_123')
('abc-123', None)
>>> IS_SLUG(keep_underscores=True)('abc_123')
('abc_123', None)
>>> IS_SLUG(check=False)('abc')
('abc', None)
>>> IS_SLUG(check=True)('abc')
('abc', None)
>>> IS_SLUG(check=False)('a bc')
('a-bc', None)
>>> IS_SLUG(check=True)('a bc')
('a bc', 'must be slug')
"""
@staticmethod
def urlify(value, maxlen=80, keep_underscores=False):
return urlify(value, maxlen, keep_underscores)
def __init__(
self,
maxlen=80,
check=False,
error_message="Must be slug",
keep_underscores=False,
):
self.maxlen = maxlen
self.check = check
self.error_message = error_message
self.keep_underscores = keep_underscores
def validate(self, value, record_id=None):
if self.check and value != urlify(value, self.maxlen, self.keep_underscores):
raise ValidationError(self.translator(self.error_message))
return urlify(value, self.maxlen, self.keep_underscores)
class ANY_OF(Validator):
"""
Tests if any of the validators in a list returns successfully::
>>> ANY_OF([IS_EMAIL(),IS_ALPHANUMERIC()])('a@b.co')
('a@b.co', None)
>>> ANY_OF([IS_EMAIL(),IS_ALPHANUMERIC()])('abco')
('abco', None)
>>> ANY_OF([IS_EMAIL(),IS_ALPHANUMERIC()])('@ab.co')
('@ab.co', 'enter only letters, numbers, and underscore')
>>> ANY_OF([IS_ALPHANUMERIC(),IS_EMAIL()])('@ab.co')
('@ab.co', 'enter a valid email address')
"""
def __init__(self, subs, error_message=None):
self.subs = subs
self.error_message = error_message
def validate(self, value, record_id=None):
for validator in self.subs:
v, e = validator(value)
if not e:
return v
raise ValidationError(e)
def formatter(self, value):
# Use the formatter of the first subvalidator
# that validates the value and has a formatter
for validator in self.subs:
if hasattr(validator, "formatter") and validator(value)[1] is None:
return validator.formatter(value)
class IS_EMPTY_OR(Validator):
"""
Dummy class for testing IS_EMPTY_OR::
>>> IS_EMPTY_OR(IS_EMAIL())('abc@def.com')
('abc@def.com', None)
>>> IS_EMPTY_OR(IS_EMAIL())(' ')
(None, None)
>>> IS_EMPTY_OR(IS_EMAIL(), null='abc')(' ')
('abc', None)
>>> IS_EMPTY_OR(IS_EMAIL(), null='abc', empty_regex='def')('def')
('abc', None)
>>> IS_EMPTY_OR(IS_EMAIL())('abc')
('abc', 'enter a valid email address')
>>> IS_EMPTY_OR(IS_EMAIL())(' abc ')
('abc', 'enter a valid email address')
"""
def __init__(self, other, null=None, empty_regex=None):
(self.other, self.null) = (other, null)
if empty_regex is not None:
self.empty_regex = re.compile(empty_regex)
else:
self.empty_regex = None
if hasattr(other, "multiple"):
self.multiple = other.multiple
if hasattr(other, "options"):
self.options = self._options
def _options(self, *args, **kwargs):
options = self.other.options(*args, **kwargs)
if (not options or options[0][0] != "") and not self.multiple:
options.insert(0, ("", ""))
return options
def set_self_id(self, id):
if isinstance(self.other, (list, tuple)):
for item in self.other:
if hasattr(item, "set_self_id"):
item.set_self_id(id)
else:
if hasattr(self.other, "set_self_id"):
self.other.set_self_id(id)
def validate(self, value, record_id=None):
value, empty = is_empty(value, empty_regex=self.empty_regex)
if empty:
return self.null
if isinstance(self.other, (list, tuple)):
for item in self.other:
value = validator_caller(item, value, record_id)
return value
return validator_caller(self.other, value, record_id)
def formatter(self, value):
if hasattr(self.other, "formatter"):
return self.other.formatter(value)
return value
IS_NULL_OR = IS_EMPTY_OR # for backward compatibility
class CLEANUP(Validator):
"""
Examples:
Use as::
INPUT(_type='text', _name='name', requires=CLEANUP())
removes special characters on validation
"""
REGEX_CLEANUP = "[^\x09\x0a\x0d\x20-\x7e]"
def __init__(self, regex=None):
self.regex = (
re.compile(self.REGEX_CLEANUP) if regex is None else re.compile(regex)
)
def validate(self, value, record_id=None):
v = self.regex.sub("", str(value).strip())
return v
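# Illustrative usage sketch (not part of pydal): CLEANUP's default regex keeps
# tab, newline, carriage return and printable ASCII (0x20-0x7e), strips every
# other character, and also strips leading and trailing whitespace.
def _example_cleanup():
    value, error = CLEANUP()("  ab\x00c\x07  ")
    return value, error  # -> ('abc', None)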
def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
hashfunc = hashfunc or hashlib.sha1
hmac = hashlib.pbkdf2_hmac(
hashfunc().name, to_bytes(data), to_bytes(salt), iterations, keylen
)
return binascii.hexlify(hmac)
def simple_hash(text, key="", salt="", digest_alg="md5"):
"""Generate hash with the given text using the specified digest algorithm."""
text = to_bytes(text)
key = to_bytes(key)
salt = to_bytes(salt)
if not digest_alg:
raise RuntimeError("simple_hash with digest_alg=None")
elif not isinstance(digest_alg, str): # manual approach
h = digest_alg(text + key + salt)
elif digest_alg.startswith("pbkdf2"): # latest and coolest!
iterations, keylen, alg = digest_alg[7:-1].split(",")
return to_native(
pbkdf2_hex(text, salt, int(iterations), int(keylen), get_digest(alg))
)
elif key: # use hmac
digest_alg = get_digest(digest_alg)
h = hmac.new(key + salt, text, digest_alg)
else: # compatible with third party systems
h = get_digest(digest_alg)()
h.update(text + salt)
return h.hexdigest()
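# Illustrative usage sketch (not part of pydal): simple_hash() above supports
# three modes -- a plain hashlib digest, an HMAC when a key is given, and a
# "pbkdf2(iterations,keylen,alg)" spec string handled by pbkdf2_hex().
def _example_simple_hash_modes():
    plain = simple_hash("secret", digest_alg="sha1")             # 40 hex chars
    keyed = simple_hash("secret", key="k", digest_alg="sha256")  # HMAC-SHA256
    derived = simple_hash("secret", salt="s", digest_alg="pbkdf2(1000,20,sha512)")
    return plain, keyed, derived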
def get_digest(value):
"""Return a hashlib digest algorithm from a string."""
if isinstance(value, str):
value = value.lower()
if value not in ("md5", "sha1", "sha224", "sha256", "sha384", "sha512"):
raise ValueError("Invalid digest algorithm: %s" % value)
value = getattr(hashlib, value)
return value
DIGEST_ALG_BY_SIZE = {
128 // 4: "md5",
160 // 4: "sha1",
224 // 4: "sha224",
256 // 4: "sha256",
384 // 4: "sha384",
512 // 4: "sha512",
}
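# Illustrative sketch (not part of pydal): DIGEST_ALG_BY_SIZE maps the length
# of an unsalted hex digest to the algorithm that produced it; LazyCrypt.__eq__
# below uses this to guess the algorithm of a legacy stored password.
def _example_guess_digest_alg():
    import hashlib
    stored = hashlib.sha256(b"secret").hexdigest()  # 64 hex characters
    return DIGEST_ALG_BY_SIZE.get(len(stored))      # -> 'sha256'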
class LazyCrypt(object):
"""
Stores a lazy password hash
"""
def __init__(self, crypt, password):
"""
crypt is an instance of the CRYPT validator,
password is the password as inserted by the user
"""
self.crypt = crypt
self.password = password
self.crypted = None
def __str__(self):
"""
Encrypts self.password and caches the result in self.crypted.
If self.crypt.salt is set, the output is in the format <algorithm>$<salt>$<hash>.
The digest_alg is taken from the key if one is embedded there (e.g. 'md5:uuid'),
otherwise the default digest_alg is assumed; if there is no key at all, key=''.
If a salt is specified it is used; if salt is True, a uuid-based salt is generated
(this should all be backward compatible).
Options:
key = 'uuid'
key = 'md5:uuid'
key = 'sha512:uuid'
...
key = 'pbkdf2(1000,64,sha512):uuid' 1000 iterations and 64 chars length
"""
if self.crypted:
return self.crypted
if self.crypt.key:
if ":" in self.crypt.key:
digest_alg, key = self.crypt.key.split(":", 1)
else:
digest_alg, key = self.crypt.digest_alg, self.crypt.key
else:
digest_alg, key = self.crypt.digest_alg, ""
if self.crypt.salt:
if self.crypt.salt is True:
salt = str(uuid.uuid4()).replace("-", "")[-16:]
else:
salt = self.crypt.salt
else:
salt = ""
hashed = simple_hash(self.password, key, salt, digest_alg)
self.crypted = "%s$%s$%s" % (digest_alg, salt, hashed)
return self.crypted
def __eq__(self, stored_password):
"""
compares the current lazy crypted password with a stored password
"""
# LazyCrypt objects comparison
if isinstance(stored_password, self.__class__):
return (self is stored_password) or (
(self.crypt.key == stored_password.crypt.key)
and (self.password == stored_password.password)
)
if self.crypt.key:
if ":" in self.crypt.key:
key = self.crypt.key.split(":")[1]
else:
key = self.crypt.key
else:
key = ""
if stored_password is None:
return False
elif stored_password.count("$") == 2:
(digest_alg, salt, hash) = stored_password.split("$")
h = simple_hash(self.password, key, salt, digest_alg)
temp_pass = "%s$%s$%s" % (digest_alg, salt, h)
else: # no salting
# guess digest_alg
digest_alg = DIGEST_ALG_BY_SIZE.get(len(stored_password), None)
if not digest_alg:
return False
else:
temp_pass = simple_hash(self.password, key, "", digest_alg)
return temp_pass == stored_password
def __ne__(self, other):
return not self.__eq__(other)
class CRYPT(Validator):
"""
Examples:
Use as::
INPUT(_type='text', _name='name', requires=CRYPT())
encodes the value on validation with a digest.
If no arguments are provided CRYPT uses the MD5 algorithm.
If the key argument is provided the HMAC+MD5 algorithm is used.
If the digest_alg is specified this is used to replace the
MD5 with, for example, SHA512. The digest_alg can be
the name of a hashlib algorithm as a string or the algorithm itself.
min_length is the minimal password length (default 0, i.e. no minimum); use IS_STRONG for serious security
error_message is the message shown if the password is too short
Notice that an empty password is accepted but is effectively invalid: it will not
allow logging back in, because junk is stored as the hashed password.
Specify an algorithm, otherwise the default pbkdf2(1000,20,sha512) is used.
Typical available algorithms:
md5, sha1, sha224, sha256, sha384, sha512
If salt, it hashes a password with a salt.
If salt is True, this method will automatically generate one.
Either case it returns an encrypted password string in the following format:
<algorithm>$<salt>$<hash>
Important: the hashed password is returned as a LazyCrypt object and computed only if needed.
The LazyCrypt object also knows how to compare itself with an existing salted password
Supports standard algorithms
>>> for alg in ('md5','sha1','sha256','sha384','sha512'):
... print(str(CRYPT(digest_alg=alg,salt=True)('test')[0]))
md5$...$...
sha1$...$...
sha256$...$...
sha384$...$...
sha512$...$...
The syntax is always alg$salt$hash
Supports for pbkdf2
>>> alg = 'pbkdf2(1000,20,sha512)'
>>> print(str(CRYPT(digest_alg=alg,salt=True)('test')[0]))
pbkdf2(1000,20,sha512)$...$...
An optional hmac_key can be specified and it is used as salt prefix
>>> a = str(CRYPT(digest_alg='md5',key='mykey',salt=True)('test')[0])
>>> print(a)
md5$...$...
Even if the algorithm changes the hash can still be validated
>>> CRYPT(digest_alg='sha1',key='mykey',salt=True)('test')[0] == a
True
If no salt is specified CRYPT can guess the algorithms from length:
>>> a = str(CRYPT(digest_alg='sha1',salt=False)('test')[0])
>>> a
'sha1$$a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
>>> CRYPT(digest_alg='sha1',salt=False)('test')[0] == a
True
>>> CRYPT(digest_alg='sha1',salt=False)('test')[0] == a[6:]
True
>>> CRYPT(digest_alg='md5',salt=False)('test')[0] == a
True
>>> CRYPT(digest_alg='md5',salt=False)('test')[0] == a[6:]
True
"""
STARS = "******"
def __init__(
self,
key=None,
digest_alg="pbkdf2(1000,20,sha512)",
min_length=0,
error_message="Too short",
salt=True,
max_length=1024,
):
"""
Important: digest_alg='md5' is not the default hashing algorithm for
web2py; it is only an example of how this argument can be used.
The actual hash algorithm is determined from the key, which is
generated by web2py in tools.py and defaults to hmac+sha512.
"""
self.key = key
self.digest_alg = digest_alg
self.min_length = min_length
self.max_length = max_length
self.error_message = error_message
self.salt = salt
def validate(self, value, record_id=None):
if value == self.STARS:
return None
v = value and str(value)[: self.max_length]
if not v or len(v) < self.min_length:
raise ValidationError(self.translator(self.error_message))
if isinstance(value, LazyCrypt):
return value
return LazyCrypt(self, value)
def formatter(self, value):
return self.STARS
# entropy calculator for IS_STRONG
#
lowerset = frozenset(b"abcdefghijklmnopqrstuvwxyz")
upperset = frozenset(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ")
numberset = frozenset(b"0123456789")
sym1set = frozenset(b"!@#$%^&*() ")
sym2set = frozenset(b"~`-_=+[]{}\\|;:'\",.<>?/")
otherset = frozenset(b"".join(chr(x) if PY2 else chr(x).encode() for x in range(256)))
def calc_entropy(string):
""" calculates a simple entropy for a given string """
alphabet = 0 # alphabet size
other = set()
seen = set()
lastset = None
string = to_bytes(string or "")
for c in string:
# classify this character
inset = None
for cset in (lowerset, upperset, numberset, sym1set, sym2set, otherset):
if c in cset:
inset = cset
break
assert inset is not None
# calculate effect of character on alphabet size
if inset not in seen:
seen.add(inset)
alphabet += len(inset) # credit for a new character set
elif c not in other:
alphabet += 1 # credit for unique characters
other.add(c)
if inset is not lastset:
alphabet += 1 # credit for set transitions
lastset = cset
entropy = len(string) * math.log(alphabet or 1) / 0.6931471805599453 # math.log(2)
return round(entropy, 2)
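# Illustrative sketch (not part of pydal): calc_entropy() rewards length,
# unique characters and transitions between character classes.  The figures in
# the comments are the ones quoted by the IS_STRONG doctests below; they are
# not asserted independently here.
def _example_calc_entropy():
    weak = calc_entropy("aaa")      # ~14.42 according to the IS_STRONG doctests
    mixed = calc_entropy("abc123")  # ~32.35 according to the IS_STRONG doctests
    return weak < mixed             # longer, mixed-class strings score higher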
class IS_STRONG(Validator):
"""
Examples:
Use as::
INPUT(_type='password', _name='passwd',
requires=IS_STRONG(min=10, special=2, upper=2))
enforces complexity requirements on a field
>>> IS_STRONG(es=True)('Abcd1234')
('Abcd1234',
'Must include at least 1 of the following: ~!@#$%^&*()_+-=?<>,.:;{}[]|')
>>> IS_STRONG(es=True)('Abcd1234!')
('Abcd1234!', None)
>>> IS_STRONG(es=True, entropy=1)('a')
('a', None)
>>> IS_STRONG(es=True, entropy=1, min=2)('a')
('a', 'Minimum length is 2')
>>> IS_STRONG(es=True, entropy=100)('abc123')
('abc123', 'Password too simple (32.35/100)')
>>> IS_STRONG(es=True, entropy=100)('and')
('and', 'Password too simple (14.57/100)')
>>> IS_STRONG(es=True, entropy=100)('aaa')
('aaa', 'Password too simple (14.42/100)')
>>> IS_STRONG(es=True, entropy=100)('a1d')
('a1d', 'Password too simple (15.97/100)')
>>> IS_STRONG(es=True, entropy=100)('añd')
('a\\xc3\\xb1d', 'Password too simple (31.26/100)')
"""
def __init__(
self,
min=None,
max=None,
upper=None,
lower=None,
number=None,
entropy=None,
special=None,
specials=r"~!@#$%^&*()_+-=?<>,.:;{}[]|",
invalid=' "',
error_message=None,
es=False,
):
self.entropy = entropy
if entropy is None:
# enforce default requirements
self.min = 8 if min is None else min
self.max = max # was 20, but that doesn't make sense
self.upper = 1 if upper is None else upper
self.lower = 1 if lower is None else lower
self.number = 1 if number is None else number
self.special = 1 if special is None else special
else:
# by default, an entropy spec is exclusive
self.min = min
self.max = max
self.upper = upper
self.lower = lower
self.number = number
self.special = special
self.specials = specials
self.invalid = invalid
self.error_message = error_message
self.estring = es # return error message as string (for doctest)
def validate(self, value, record_id=None):
failures = []
if value is None:
value = ""
if value and len(value) == value.count("*") > 4:
return value
if self.entropy is not None:
entropy = calc_entropy(value)
if entropy < self.entropy:
failures.append(
self.translator("Password too simple (%(have)s/%(need)s)")
% dict(have=entropy, need=self.entropy)
)
if isinstance(self.min, int) and self.min > 0:
if not len(value) >= self.min:
failures.append(self.translator("Minimum length is %s") % self.min)
if isinstance(self.max, int) and self.max > 0:
if not len(value) <= self.max:
failures.append(self.translator("Maximum length is %s") % self.max)
if isinstance(self.special, int):
all_special = [ch in value for ch in self.specials]
if self.special > 0:
if not all_special.count(True) >= self.special:
failures.append(
self.translator("Must include at least %s of the following: %s")
% (self.special, self.specials)
)
elif self.special == 0 and self.special is not False:
if len([item for item in all_special if item]) > 0:
failures.append(
self.translator("May not contain any of the following: %s")
% self.specials
)
if self.invalid:
all_invalid = [ch in value for ch in self.invalid]
if all_invalid.count(True) > 0:
failures.append(
self.translator("May not contain any of the following: %s")
% self.invalid
)
if isinstance(self.upper, int):
all_upper = re.findall("[A-Z]", value)
if self.upper > 0:
if not len(all_upper) >= self.upper:
failures.append(
self.translator("Must include at least %s uppercase")
% str(self.upper)
)
elif self.upper == 0 and self.upper is not False:
if len(all_upper) > 0:
failures.append(
self.translator("May not include any uppercase letters")
)
if isinstance(self.lower, int):
all_lower = re.findall("[a-z]", value)
if self.lower > 0:
if not len(all_lower) >= self.lower:
failures.append(
self.translator("Must include at least %s lowercase")
% str(self.lower)
)
elif self.lower == 0 and self.lower is not False:
if len(all_lower) > 0:
failures.append(
self.translator("May not include any lowercase letters")
)
if isinstance(self.number, int):
all_number = re.findall("[0-9]", value)
if self.number > 0:
numbers = "number"
if self.number > 1:
numbers = "numbers"
numbers = self.translator(numbers)
if not len(all_number) >= self.number:
failures.append(
self.translator("Must include at least %s %s")
% (str(self.number), numbers)
)
elif self.number == 0 and self.number is not False:
if len(all_number) > 0:
failures.append(self.translator("May not include any numbers"))
if len(failures) == 0:
return value
if not self.error_message:
if self.estring:
raise ValidationError("|".join(map(str, failures)))
raise ValidationError(", ".join(failures))
else:
raise ValidationError(self.translator(self.error_message))
class IS_IMAGE(Validator):
"""
Checks if file uploaded through file input was saved in one of selected
image formats and has dimensions (width and height) within given boundaries.
Does *not* check for maximum file size (use IS_LENGTH for that). Returns
validation failure if no data was uploaded.
Supported file formats: BMP, GIF, JPEG, PNG.
Code parts taken from
http://mail.python.org/pipermail/python-list/2007-June/617126.html
Args:
extensions: iterable containing allowed *lowercase* image file extensions
('jpg' extension of uploaded file counts as 'jpeg')
maxsize: iterable containing maximum width and height of the image
minsize: iterable containing minimum width and height of the image
aspectratio: iterable containing target aspect ratio
Use (-1, -1) as minsize to bypass the minimum-size check.
Use (-1, -1) as aspectratio to bypass the aspect ratio check.
Examples:
Check if uploaded file is in any of supported image formats:
INPUT(_type='file', _name='name', requires=IS_IMAGE())
Check if uploaded file is either JPEG or PNG:
INPUT(_type='file', _name='name',
requires=IS_IMAGE(extensions=('jpeg', 'png')))
Check if uploaded file is PNG with maximum size of 200x200 pixels:
INPUT(_type='file', _name='name',
requires=IS_IMAGE(extensions=('png'), maxsize=(200, 200)))
Check if uploaded file has a 16:9 aspect ratio:
INPUT(_type='file', _name='name',
requires=IS_IMAGE(aspectratio=(16, 9)))
"""
def __init__(
self,
extensions=("bmp", "gif", "jpeg", "png"),
maxsize=(10000, 10000),
minsize=(0, 0),
aspectratio=(-1, -1),
error_message="Invalid image",
):
self.extensions = extensions
self.maxsize = maxsize
self.minsize = minsize
self.aspectratio = aspectratio
self.error_message = error_message
def validate(self, value, record_id=None):
try:
extension = value.filename.rfind(".")
assert extension >= 0
extension = value.filename[extension + 1 :].lower()
if extension == "jpg":
extension = "jpeg"
assert extension in self.extensions
if extension == "bmp":
width, height = self.__bmp(value.file)
elif extension == "gif":
width, height = self.__gif(value.file)
elif extension == "jpeg":
width, height = self.__jpeg(value.file)
elif extension == "png":
width, height = self.__png(value.file)
else:
width = -1
height = -1
assert (
self.minsize[0] <= width <= self.maxsize[0]
and self.minsize[1] <= height <= self.maxsize[1]
)
if self.aspectratio > (-1, -1):
target_ratio = (1.0 * self.aspectratio[1]) / self.aspectratio[0]
actual_ratio = (1.0 * height) / width
assert actual_ratio == target_ratio
value.file.seek(0)
return value
except Exception as e:
raise ValidationError(self.translator(self.error_message))
def __bmp(self, stream):
if stream.read(2) == b"BM":
stream.read(16)
return struct.unpack("<LL", stream.read(8))
return (-1, -1)
def __gif(self, stream):
if stream.read(6) in (b"GIF87a", b"GIF89a"):
stream = stream.read(5)
if len(stream) == 5:
return tuple(struct.unpack("<HHB", stream)[:-1])
return (-1, -1)
def __jpeg(self, stream):
if stream.read(2) == b"\xFF\xD8":
while True:
(marker, code, length) = struct.unpack("!BBH", stream.read(4))
if marker != 0xFF:
break
elif code >= 0xC0 and code <= 0xC3:
return tuple(reversed(struct.unpack("!xHH", stream.read(5))))
else:
stream.read(length - 2)
return (-1, -1)
def __png(self, stream):
if stream.read(8) == b"\211PNG\r\n\032\n":
stream.read(4)
if stream.read(4) == b"IHDR":
return struct.unpack("!LL", stream.read(8))
return (-1, -1)
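# Illustrative sketch (not part of pydal): IS_IMAGE reads image dimensions
# straight from the file header.  For PNG that is the 8-byte signature, the
# IHDR chunk length, the b"IHDR" tag, then width and height as big-endian
# 32-bit integers -- the same layout the private __png() helper unpacks.
def _example_png_header_dimensions():
    import io
    import struct
    header = (b"\211PNG\r\n\032\n"              # PNG signature
              + b"\x00\x00\x00\x0d" + b"IHDR"   # IHDR chunk length and tag
              + struct.pack("!LL", 640, 480))   # width, height
    stream = io.BytesIO(header)
    stream.read(8)   # skip signature
    stream.read(4)   # skip chunk length
    assert stream.read(4) == b"IHDR"
    return struct.unpack("!LL", stream.read(8))  # -> (640, 480)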
class IS_FILE(Validator):
"""
Checks if name and extension of file uploaded through file input matches
given criteria.
Does *not* ensure the file type in any way. Returns validation failure
if no data was uploaded.
Args:
filename: string/compiled regex or a list of strings/regex of valid filenames
extension: string/compiled regex or a list of strings/regex of valid extensions
lastdot: which dot should be used as a filename / extension separator:
True means last dot, eg. file.jpg.png -> file.jpg / png
False means first dot, eg. file.tar.gz -> file / tar.gz
case: 0 - keep the case, 1 - transform the string into lowercase (default),
2 - transform the string into uppercase
If there is no dot present, extension checks will be done against empty
string and filename checks against whole value.
Examples:
Check if file has a pdf extension (case insensitive):
INPUT(_type='file', _name='name',
requires=IS_FILE(extension='pdf'))
Check if file is called 'thumbnail' and has a jpg or png extension
(case insensitive):
INPUT(_type='file', _name='name',
requires=IS_FILE(filename='thumbnail',
extension=['jpg', 'png']))
Check if file has a tar.gz extension and name starting with backup:
INPUT(_type='file', _name='name',
requires=IS_FILE(filename=re.compile('backup.*'),
extension='tar.gz', lastdot=False))
Check if file has no extension and name matching README
(case sensitive):
INPUT(_type='file', _name='name',
requires=IS_FILE(filename='README',
extension='', case=0))
"""
def __init__(
self,
filename=None,
extension=None,
lastdot=True,
case=1,
error_message="Enter valid filename",
):
self.filename = filename
self.extension = extension
self.lastdot = lastdot
self.case = case
self.error_message = error_message
def match(self, value1, value2):
if isinstance(value1, (list, tuple)):
for v in value1:
if self.match(v, value2):
return True
return False
elif callable(getattr(value1, "match", None)):
return value1.match(value2)
elif isinstance(value1, str):
return value1 == value2
def validate(self, value, record_id=None):
try:
string = value.filename
except:
raise ValidationError(self.translator(self.error_message))
if self.case == 1:
string = string.lower()
elif self.case == 2:
string = string.upper()
if self.lastdot:
dot = string.rfind(".")
else:
dot = string.find(".")
if dot == -1:
dot = len(string)
if self.filename and not self.match(self.filename, string[:dot]):
raise ValidationError(self.translator(self.error_message))
elif self.extension and not self.match(self.extension, string[dot + 1 :]):
raise ValidationError(self.translator(self.error_message))
else:
return value
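# Illustrative usage sketch (not part of pydal): IS_FILE only inspects the
# uploaded object's .filename attribute, so any object exposing that attribute
# can be validated -- here a SimpleNamespace stands in for the real upload.
def _example_is_file():
    from types import SimpleNamespace
    fake_upload = SimpleNamespace(filename="Report.PDF")
    value, error = IS_FILE(extension="pdf")(fake_upload)
    return error  # -> None; the extension check is case-insensitive by default (case=1)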
class IS_UPLOAD_FILENAME(Validator):
"""
For new applications, use IS_FILE().
Checks if name and extension of file uploaded through file input matches
given criteria.
Does *not* ensure the file type in any way. Returns validation failure
if no data was uploaded.
Args:
filename: filename (before dot) regex
extension: extension (after dot) regex
lastdot: which dot should be used as a filename / extension separator:
True means last dot, eg. file.png -> file / png
False means first dot, eg. file.tar.gz -> file / tar.gz
case: 0 - keep the case, 1 - transform the string into lowercase (default),
2 - transform the string into uppercase
If there is no dot present, extension checks will be done against empty
string and filename checks against whole value.
Examples:
Check if file has a pdf extension (case insensitive):
INPUT(_type='file', _name='name',
requires=IS_UPLOAD_FILENAME(extension='pdf'))
Check if file has a tar.gz extension and name starting with backup:
INPUT(_type='file', _name='name',
requires=IS_UPLOAD_FILENAME(filename='backup.*',
extension='tar.gz', lastdot=False))
Check if file has no extension and name matching README
(case sensitive):
INPUT(_type='file', _name='name',
requires=IS_UPLOAD_FILENAME(filename='^README$',
extension='^$', case=0))
"""
def __init__(
self,
filename=None,
extension=None,
lastdot=True,
case=1,
error_message="Enter valid filename",
):
if isinstance(filename, str):
filename = re.compile(filename)
if isinstance(extension, str):
extension = re.compile(extension)
self.filename = filename
self.extension = extension
self.lastdot = lastdot
self.case = case
self.error_message = error_message
def validate(self, value, record_id=None):
try:
string = value.filename
except:
raise ValidationError(self.translator(self.error_message))
if self.case == 1:
string = string.lower()
elif self.case == 2:
string = string.upper()
if self.lastdot:
dot = string.rfind(".")
else:
dot = string.find(".")
if dot == -1:
dot = len(string)
if self.filename and not self.filename.match(string[:dot]):
raise ValidationError(self.translator(self.error_message))
elif self.extension and not self.extension.match(string[dot + 1 :]):
raise ValidationError(self.translator(self.error_message))
else:
return value
class IS_IPV4(Validator):
"""
Checks if field's value is an IP version 4 address in decimal form. Can
be set to force addresses from certain range.
IPv4 regex taken from: http://regexlib.com/REDetails.aspx?regexp_id=1411
Args:
minip: lowest allowed address; accepts:
- str, eg. 192.168.0.1
- list or tuple of octets, eg. [192, 168, 0, 1]
maxip: highest allowed address; same as above
invert: True to allow addresses only from outside of given range; note
that range boundaries are not matched this way
is_localhost: localhost address treatment:
- None (default): indifferent
- True (enforce): query address must match localhost address (127.0.0.1)
- False (forbid): query address must not match localhost address
is_private: same as above, except that query address is checked against
two address ranges: 172.16.0.0 - 172.31.255.255 and
192.168.0.0 - 192.168.255.255
is_automatic: same as above, except that query address is checked against
one address range: 169.254.0.0 - 169.254.255.255
Minip and maxip may also be lists or tuples of addresses in all above
forms (str, int, list / tuple), allowing setup of multiple address ranges::
minip = (minip1, minip2, ... minipN)
| | |
| | |
maxip = (maxip1, maxip2, ... maxipN)
Longer iterable will be truncated to match length of shorter one.
Examples:
Check for valid IPv4 address:
INPUT(_type='text', _name='name', requires=IS_IPV4())
Check for valid IPv4 address belonging to specific range:
INPUT(_type='text', _name='name',
requires=IS_IPV4(minip='100.200.0.0', maxip='100.200.255.255'))
Check for valid IPv4 address belonging to either 100.110.0.0 -
100.110.255.255 or 200.50.0.0 - 200.50.0.255 address range:
INPUT(_type='text', _name='name',
requires=IS_IPV4(minip=('100.110.0.0', '200.50.0.0'),
maxip=('100.110.255.255', '200.50.0.255')))
Check for valid IPv4 address belonging to private address space:
INPUT(_type='text', _name='name', requires=IS_IPV4(is_private=True))
Check for valid IPv4 address that is not a localhost address:
INPUT(_type='text', _name='name', requires=IS_IPV4(is_localhost=False))
>>> IS_IPV4()('1.2.3.4')
('1.2.3.4', None)
>>> IS_IPV4()('255.255.255.255')
('255.255.255.255', None)
>>> IS_IPV4()('1.2.3.4 ')
('1.2.3.4 ', 'enter valid IPv4 address')
>>> IS_IPV4()('1.2.3.4.5')
('1.2.3.4.5', 'enter valid IPv4 address')
>>> IS_IPV4()('123.123')
('123.123', 'enter valid IPv4 address')
>>> IS_IPV4()('1111.2.3.4')
('1111.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4()('0111.2.3.4')
('0111.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4()('256.2.3.4')
('256.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4()('300.2.3.4')
('300.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4(minip='1.2.3.4', maxip='1.2.3.4')('1.2.3.4')
('1.2.3.4', None)
>>> IS_IPV4(minip='1.2.3.5', maxip='1.2.3.9', error_message='Bad ip')('1.2.3.4')
('1.2.3.4', 'bad ip')
>>> IS_IPV4(maxip='1.2.3.4', invert=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPV4(maxip='1.2.3.4', invert=True)('1.2.3.4')
('1.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4(is_localhost=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPV4(is_localhost=True)('1.2.3.4')
('1.2.3.4', 'enter valid IPv4 address')
>>> IS_IPV4(is_localhost=False)('127.0.0.1')
('127.0.0.1', 'enter valid IPv4 address')
>>> IS_IPV4(maxip='100.0.0.0', is_localhost=True)('127.0.0.1')
('127.0.0.1', 'enter valid IPv4 address')
"""
REGEX_IPV4 = re.compile(
r"^(([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.){3}([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$"
)
numbers = (16777216, 65536, 256, 1)
localhost = 2130706433
private = ((2886729728, 2886795263), (3232235520, 3232301055))
automatic = (2851995648, 2852061183)
def __init__(
self,
minip="0.0.0.0",
maxip="255.255.255.255",
invert=False,
is_localhost=None,
is_private=None,
is_automatic=None,
error_message="Enter valid IPv4 address",
):
for n, value in enumerate((minip, maxip)):
temp = []
if isinstance(value, str):
temp.append(value.split("."))
elif isinstance(value, (list, tuple)):
if (
len(value)
== len([item for item in value if isinstance(item, int)])
== 4
):
temp.append(value)
else:
for item in value:
if isinstance(item, str):
temp.append(item.split("."))
elif isinstance(item, (list, tuple)):
temp.append(item)
numbers = []
for item in temp:
number = 0
for i, j in zip(self.numbers, item):
number += i * int(j)
numbers.append(number)
if n == 0:
self.minip = numbers
else:
self.maxip = numbers
self.invert = invert
self.is_localhost = is_localhost
self.is_private = is_private
self.is_automatic = is_automatic
self.error_message = error_message
def validate(self, value, record_id=None):
if re.match(self.REGEX_IPV4, value):
number = 0
for i, j in zip(self.numbers, value.split(".")):
number += i * int(j)
ok = False
for bottom, top in zip(self.minip, self.maxip):
if self.invert != (bottom <= number <= top):
ok = True
if (
ok
and self.is_localhost is not None
and self.is_localhost != (number == self.localhost)
):
ok = False
private = any(
[
private_number[0] <= number <= private_number[1]
for private_number in self.private
]
)
if ok and self.is_private is not None and self.is_private != private:
ok = False
automatic = self.automatic[0] <= number <= self.automatic[1]
if ok and self.is_automatic is not None and self.is_automatic != automatic:
ok = False
if ok:
return value
raise ValidationError(self.translator(self.error_message))
class IS_IPV6(Validator):
"""
Checks if field's value is an IP version 6 address.
Uses the ipaddress from the Python 3 standard library
and its Python 2 backport (in contrib/ipaddress.py).
Args:
is_private: None (default): indifferent
True (enforce): address must be in fc00::/7 range
False (forbid): address must NOT be in fc00::/7 range
is_link_local: Same as above but uses fe80::/10 range
is_reserved: Same as above but uses IETF reserved range
is_multicast: Same as above but uses ff00::/8 range
is_routeable: Similar to above but enforces not private, link_local,
reserved or multicast
is_6to4: Same as above but uses 2002::/16 range
is_teredo: Same as above but uses 2001::/32 range
subnets: value must be a member of at least one from list of subnets
Examples:
Check for valid IPv6 address:
INPUT(_type='text', _name='name', requires=IS_IPV6())
Check for valid IPv6 address is a link_local address:
INPUT(_type='text', _name='name', requires=IS_IPV6(is_link_local=True))
Check for valid IPv6 address that is Internet routeable:
INPUT(_type='text', _name='name', requires=IS_IPV6(is_routeable=True))
Check for valid IPv6 address in specified subnet:
INPUT(_type='text', _name='name', requires=IS_IPV6(subnets=['2001::/32']))
>>> IS_IPV6()('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6()('192.168.1.1')
('192.168.1.1', 'enter valid IPv6 address')
>>> IS_IPV6(error_message='Bad ip')('192.168.1.1')
('192.168.1.1', 'bad ip')
>>> IS_IPV6(is_link_local=True)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6(is_link_local=False)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(is_link_local=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(is_multicast=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(is_multicast=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6(is_routeable=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', None)
>>> IS_IPV6(is_routeable=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(subnets='2001::/32')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPV6(subnets='fb00::/8')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'enter valid IPv6 address')
>>> IS_IPV6(subnets=['fc00::/8','2001::/32'])('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPV6(subnets='invalidsubnet')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'invalid subnet provided')
"""
def __init__(
self,
is_private=None,
is_link_local=None,
is_reserved=None,
is_multicast=None,
is_routeable=None,
is_6to4=None,
is_teredo=None,
subnets=None,
error_message="Enter valid IPv6 address",
):
self.is_private = is_private
self.is_link_local = is_link_local
self.is_reserved = is_reserved
self.is_multicast = is_multicast
self.is_routeable = is_routeable
self.is_6to4 = is_6to4
self.is_teredo = is_teredo
self.subnets = subnets
self.error_message = error_message
def validate(self, value, record_id=None):
try:
ip = ipaddress.IPv6Address(to_unicode(value))
ok = True
except ipaddress.AddressValueError:
raise ValidationError(self.translator(self.error_message))
if self.subnets:
# iterate through self.subnets to see if value is a member
ok = False
if isinstance(self.subnets, str):
self.subnets = [self.subnets]
for network in self.subnets:
try:
ipnet = ipaddress.IPv6Network(to_unicode(network))
except (ipaddress.NetmaskValueError, ipaddress.AddressValueError):
raise ValidationError(self.translator("invalid subnet provided"))
if ip in ipnet:
ok = True
if self.is_routeable:
self.is_private = False
self.is_reserved = False
self.is_multicast = False
if ok and self.is_private is not None and self.is_private != ip.is_private:
ok = False
if (
ok
and self.is_link_local is not None
and self.is_link_local != ip.is_link_local
):
ok = False
if ok and self.is_reserved is not None and self.is_reserved != ip.is_reserved:
ok = False
if (
ok
and self.is_multicast is not None
and self.is_multicast != ip.is_multicast
):
ok = False
if ok and self.is_6to4 is not None and self.is_6to4 != bool(ip.sixtofour):
ok = False
if ok and self.is_teredo is not None and self.is_teredo != bool(ip.teredo):
ok = False
if ok:
return value
raise ValidationError(self.translator(self.error_message))
class IS_IPADDRESS(Validator):
"""
Checks if field's value is an IP Address (v4 or v6). Can be set to force
addresses from within a specific range. Checks are done with the correct
IS_IPV4 and IS_IPV6 validators.
Uses the ipaddress from the Python 3 standard library
and its Python 2 backport (in contrib/ipaddress.py).
Args:
minip: lowest allowed address; accepts:
str, eg. 192.168.0.1
list or tuple of octets, eg. [192, 168, 0, 1]
maxip: highest allowed address; same as above
invert: True to allow addresses only from outside of given range; note
that range boundaries are not matched this way
IPv4 specific arguments:
- is_localhost: localhost address treatment:
- None (default): indifferent
- True (enforce): query address must match localhost address
(127.0.0.1)
- False (forbid): query address must not match localhost address
- is_private: same as above, except that query address is checked against
two address ranges: 172.16.0.0 - 172.31.255.255 and
192.168.0.0 - 192.168.255.255
- is_automatic: same as above, except that query address is checked against
one address range: 169.254.0.0 - 169.254.255.255
- is_ipv4: either:
- None (default): indifferent
- True (enforce): must be an IPv4 address
- False (forbid): must NOT be an IPv4 address
IPv6 specific arguments:
- is_link_local: Same as above but uses fe80::/10 range
- is_reserved: Same as above but uses IETF reserved range
- is_multicast: Same as above but uses ff00::/8 range
- is_routeable: Similar to above but enforces not private, link_local,
reserved or multicast
- is_6to4: Same as above but uses 2002::/16 range
- is_teredo: Same as above but uses 2001::/32 range
- subnets: value must be a member of at least one from list of subnets
- is_ipv6: either:
- None (default): indifferent
- True (enforce): must be an IPv6 address
- False (forbid): must NOT be an IPv6 address
Minip and maxip may also be lists or tuples of addresses in all above
forms (str, int, list / tuple), allowing setup of multiple address ranges::
minip = (minip1, minip2, ... minipN)
| | |
| | |
maxip = (maxip1, maxip2, ... maxipN)
Longer iterable will be truncated to match length of shorter one.
>>> IS_IPADDRESS()('192.168.1.5')
('192.168.1.5', None)
>>> IS_IPADDRESS(is_ipv6=False)('192.168.1.5')
('192.168.1.5', None)
>>> IS_IPADDRESS()('255.255.255.255')
('255.255.255.255', None)
>>> IS_IPADDRESS()('192.168.1.5 ')
('192.168.1.5 ', 'enter valid IP address')
>>> IS_IPADDRESS()('192.168.1.1.5')
('192.168.1.1.5', 'enter valid IP address')
>>> IS_IPADDRESS()('123.123')
('123.123', 'enter valid IP address')
>>> IS_IPADDRESS()('1111.2.3.4')
('1111.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS()('0111.2.3.4')
('0111.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS()('256.2.3.4')
('256.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS()('300.2.3.4')
('300.2.3.4', 'enter valid IP address')
>>> IS_IPADDRESS(minip='192.168.1.0', maxip='192.168.1.255')('192.168.1.100')
('192.168.1.100', None)
>>> IS_IPADDRESS(minip='1.2.3.5', maxip='1.2.3.9', error_message='Bad ip')('1.2.3.4')
('1.2.3.4', 'bad ip')
>>> IS_IPADDRESS(maxip='1.2.3.4', invert=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPADDRESS(maxip='192.168.1.4', invert=True)('192.168.1.4')
('192.168.1.4', 'enter valid IP address')
>>> IS_IPADDRESS(is_localhost=True)('127.0.0.1')
('127.0.0.1', None)
>>> IS_IPADDRESS(is_localhost=True)('192.168.1.10')
('192.168.1.10', 'enter valid IP address')
>>> IS_IPADDRESS(is_localhost=False)('127.0.0.1')
('127.0.0.1', 'enter valid IP address')
>>> IS_IPADDRESS(maxip='100.0.0.0', is_localhost=True)('127.0.0.1')
('127.0.0.1', 'enter valid IP address')
>>> IS_IPADDRESS()('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_ipv4=False)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS()('fe80::126c:8ffa:fe22:b3af ')
('fe80::126c:8ffa:fe22:b3af ', 'enter valid IP address')
>>> IS_IPADDRESS(is_ipv4=True)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_ipv6=True)('192.168.1.1')
('192.168.1.1', 'enter valid IP address')
>>> IS_IPADDRESS(is_ipv6=True, error_message='Bad ip')('192.168.1.1')
('192.168.1.1', 'bad ip')
>>> IS_IPADDRESS(is_link_local=True)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_link_local=False)('fe80::126c:8ffa:fe22:b3af')
('fe80::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_link_local=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_multicast=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(is_multicast=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_routeable=True)('2001::126c:8ffa:fe22:b3af')
('2001::126c:8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(is_routeable=True)('ff00::126c:8ffa:fe22:b3af')
('ff00::126c:8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(subnets='2001::/32')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(subnets='fb00::/8')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'enter valid IP address')
>>> IS_IPADDRESS(subnets=['fc00::/8','2001::/32'])('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', None)
>>> IS_IPADDRESS(subnets='invalidsubnet')('2001::8ffa:fe22:b3af')
('2001::8ffa:fe22:b3af', 'invalid subnet provided')
"""
def __init__(
self,
minip="0.0.0.0",
maxip="255.255.255.255",
invert=False,
is_localhost=None,
is_private=None,
is_automatic=None,
is_ipv4=None,
is_link_local=None,
is_reserved=None,
is_multicast=None,
is_routeable=None,
is_6to4=None,
is_teredo=None,
subnets=None,
is_ipv6=None,
error_message="Enter valid IP address",
):
self.minip = (minip,)
self.maxip = (maxip,)
self.invert = invert
self.is_localhost = is_localhost
self.is_private = is_private
self.is_automatic = is_automatic
self.is_ipv4 = is_ipv4 or is_ipv6 is False
self.is_private = is_private
self.is_link_local = is_link_local
self.is_reserved = is_reserved
self.is_multicast = is_multicast
self.is_routeable = is_routeable
self.is_6to4 = is_6to4
self.is_teredo = is_teredo
self.subnets = subnets
self.is_ipv6 = is_ipv6 or is_ipv4 is False
self.error_message = error_message
def validate(self, value, record_id=None):
IPAddress = ipaddress.ip_address
IPv6Address = ipaddress.IPv6Address
IPv4Address = ipaddress.IPv4Address
try:
ip = IPAddress(to_unicode(value))
except ValueError:
raise ValidationError(self.translator(self.error_message))
if self.is_ipv4 and isinstance(ip, IPv6Address):
raise ValidationError(self.translator(self.error_message))
elif self.is_ipv6 and isinstance(ip, IPv4Address):
raise ValidationError(self.translator(self.error_message))
elif self.is_ipv4 or isinstance(ip, IPv4Address):
return IS_IPV4(
minip=self.minip,
maxip=self.maxip,
invert=self.invert,
is_localhost=self.is_localhost,
is_private=self.is_private,
is_automatic=self.is_automatic,
error_message=self.error_message,
).validate(value, record_id)
elif self.is_ipv6 or isinstance(ip, IPv6Address):
return IS_IPV6(
is_private=self.is_private,
is_link_local=self.is_link_local,
is_reserved=self.is_reserved,
is_multicast=self.is_multicast,
is_routeable=self.is_routeable,
is_6to4=self.is_6to4,
is_teredo=self.is_teredo,
subnets=self.subnets,
error_message=self.error_message,
).validate(value, record_id)
else:
raise ValidationError(self.translator(self.error_message))
|
web2py/pydal
|
pydal/validators.py
|
Python
|
bsd-3-clause
| 158,798
|
[
"CASINO",
"Jaguar",
"MOE"
] |
19c9fe3710b49fa89a1e466cd96511a55b6f1e7deb8244abdd8986376ea123d1
|
'''
Reads, processes, and writes flat data types using the FeedReader, FeedWriter, and DataProcessor classes.
Visit their documentation for example configuration files.
Execution:
python feed_driver.py -c config.json
by Bereket Abraham
'''
from reader import *
from processor import *
from writer import *
import argparse, json
if __name__ == "__main__":
helpdesc = '''
Reads, processes, and writes flat data types using the FeedReader, FeedWriter, and DataProcessor classes.
Visit their documentation for example configuration files.
'''
parser = argparse.ArgumentParser(description=helpdesc)
# These will be captured from the command line when the script is run
parser._optionals.title = "For help"
required_group = parser.add_argument_group("REQUIRED")
required_group.add_argument('-c',dest='config', type=str, help='Configuration File, in JSON')
#optional_group = parser.add_argument_group("OPTIONAL")
#optional_group.add_argument('-l',dest='litem', type=int, nargs='?', default=None, help='Line item id')
# Parse the arguments and store the collection in 'args'
args = parser.parse_args()
config_file = args.config
# configurations to handle new aggregations, thresholds, and blacklists
with open(config_file) as f:
feeds = json.load(f)
feeds = feeds["feeds"]
for feed in feeds:
print()
print('Starting feed: ' + feed['name'] + ' ....')
# combine sources, must have same headers
df = None
for source in feed['sources']:
r = createReader(source)
df_part = r.read()
# merge dfs!!! by row or by column??
# only one source for now
if df is not None:
df = df.append(df_part, ignore_index=True)
else:
df = df_part
processor = DataProcessor(feed)
df = processor.operate(df)
df = processor.select(df)
for dest in feed['destinations']:
w = createWriter(dest)
result = w.write(df)
print(result)
##
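# Illustrative sketch (not part of this project): the driver expects a JSON
# file with a top-level "feeds" list; each feed carries at least a 'name', a
# list of 'sources' and a list of 'destinations', which are the keys this
# script reads directly.  DataProcessor(feed) may read additional keys
# (aggregations, thresholds, blacklists per the comment above), and the
# per-source / per-destination options are defined by createReader and
# createWriter, so they are left empty in this skeleton.
EXAMPLE_CONFIG_JSON = """
{
    "feeds": [
        {
            "name": "example_feed",
            "sources": [],
            "destinations": []
        }
    ]
}
"""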
|
babraham123/rpw
|
feed_driver.py
|
Python
|
mit
| 2,110
|
[
"VisIt"
] |
f95c7e60d337c80018d0791e33ba8f7a3fa4d8d3e71f845e59729071c7ca3a4f
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import tests_common
import numpy as np
import espressomd
import espressomd.interactions
import espressomd.magnetostatics
import espressomd.analyze
import espressomd.galilei
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["DIPOLES", "ROTATION", "LENNARD_JONES"])
class DDSGPUTest(ut.TestCase):
# Handle for espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
@ut.skipIf(system.cell_system.get_state()["n_nodes"] > 1,
"Skipping test: only runs for n_nodes == 1")
def test(self):
pf_dds_gpu = 2.34
pf_dawaanr = 3.524
ratio_dawaanr_dds_gpu = pf_dawaanr / pf_dds_gpu
self.system.box_l = 3 * [15]
self.system.periodicity = [0, 0, 0]
self.system.time_step = 1E-4
self.system.cell_system.skin = 0.1
for n in [128, 541]:
dipole_modulus = 1.3
part_dip = dipole_modulus * tests_common.random_dipoles(n)
part_pos = np.random.random((n, 3)) * self.system.box_l[0]
self.system.part.add(pos=part_pos, dip=part_dip)
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=10.0, sigma=0.5, cutoff=0.55, shift="auto")
self.system.thermostat.turn_off()
self.system.integrator.set_steepest_descent(
f_max=0.0, gamma=0.1, max_displacement=0.1)
self.system.integrator.run(500)
g = espressomd.galilei.GalileiTransform()
g.kill_particle_motion(rotation=True)
self.system.integrator.set_vv()
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0.0)
self.system.cell_system.skin = 0.0
self.system.time_step = 0.01
self.system.thermostat.turn_off()
# gamma should be zero in order to avoid the noise term in force
# and torque
self.system.thermostat.set_langevin(kT=1.297, gamma=0.0, seed=42)
dds_cpu = espressomd.magnetostatics.DipolarDirectSumCpu(
prefactor=pf_dawaanr)
self.system.actors.add(dds_cpu)
self.system.integrator.run(steps=0, recalc_forces=True)
dawaanr_f = np.copy(self.system.part.all().f)
dawaanr_t = np.copy(self.system.part.all().torque_lab)
dawaanr_e = self.system.analysis.energy()["total"]
del dds_cpu
for i in range(len(self.system.actors.active_actors)):
self.system.actors.remove(self.system.actors.active_actors[i])
self.system.integrator.run(steps=0, recalc_forces=True)
dds_gpu = espressomd.magnetostatics.DipolarDirectSumGpu(
prefactor=pf_dds_gpu)
self.system.actors.add(dds_gpu)
self.system.integrator.run(steps=0, recalc_forces=True)
ddsgpu_f = np.copy(self.system.part.all().f)
ddsgpu_t = np.copy(self.system.part.all().torque_lab)
ddsgpu_e = self.system.analysis.energy()["total"]
# compare
for i in range(n):
np.testing.assert_allclose(
np.array(dawaanr_t[i]),
ratio_dawaanr_dds_gpu * np.array(ddsgpu_t[i]),
err_msg=f'Torques do not match for particle {i}',
atol=3e-3)
np.testing.assert_allclose(
np.array(dawaanr_f[i]),
ratio_dawaanr_dds_gpu * np.array(ddsgpu_f[i]),
err_msg=f'Forces do not match for particle {i}',
atol=3e-3)
self.assertAlmostEqual(
dawaanr_e,
ddsgpu_e * ratio_dawaanr_dds_gpu,
places=2,
msg='Energies for dawaanr {0} and dds_gpu {1} do not match.'
.format(dawaanr_e, ratio_dawaanr_dds_gpu * ddsgpu_e))
self.system.integrator.run(steps=0, recalc_forces=True)
del dds_gpu
self.system.actors.clear()
self.system.part.clear()
if __name__ == '__main__':
ut.main()
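# Note (not part of the original test): dipolar forces, torques and energies
# scale linearly with the magnetostatics prefactor, which is why the CPU run
# (prefactor pf_dawaanr) can be compared against the GPU run (prefactor
# pf_dds_gpu) after rescaling by ratio_dawaanr_dds_gpu = pf_dawaanr / pf_dds_gpu,
# as done above.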
|
espressomd/espresso
|
testsuite/python/dawaanr-and-dds-gpu.py
|
Python
|
gpl-3.0
| 4,900
|
[
"ESPResSo"
] |
057d9d07edbce31776f817a757e43483a506d3af556d71b0368e773a9e236593
|
import os
import shutil
import logging
import unittest
import tempfile
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestClassifier
logger = logging.getLogger(__name__)
class TestDrop(unittest.TestCase):
"""
Test how loading of malformed compounds is handled.
Called TestDrop since these compounds were silently and erroneously dropped.
"""
def test_drop(self):
"""Test on dataset where RDKit fails on some strings."""
# Set some global variables up top
reload = True
len_full = 25
current_dir = os.path.dirname(os.path.realpath(__file__))
logger.info("About to load emols dataset.")
dataset_file = os.path.join(current_dir, "mini_emols.csv")
# Featurize emols dataset
logger.info("About to featurize datasets.")
featurizer = dc.feat.CircularFingerprint(size=1024)
emols_tasks = ['activity']
loader = dc.data.CSVLoader(
tasks=emols_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
assert len(X) == len(y) == len(w) == len(ids)
|
miaecle/deepchem
|
deepchem/data/tests/test_drop.py
|
Python
|
mit
| 1,153
|
[
"RDKit"
] |
f595eb889034dfd55a92579add6266915def68855e2aa7d0899b696255aa87b8
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg (aric.hagberg@gmail.com)
# Pieter Swart (swart@lanl.gov)
# Dan Schult (dschult@colgate.edu)
# Joel Miller (joel.c.miller.research@gmail.com)
# Nathan Lemons (nlemons@gmail.com)
# Brian Cloteaux (brian.cloteaux@nist.gov)
"""Generate graphs with a given degree sequence or expected degree sequence.
"""
import heapq
from itertools import chain
from itertools import combinations
# In Python 3, the function is `zip_longest`, in Python 2 `izip_longest`.
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import math
from operator import itemgetter
import networkx as nx
from networkx.utils import random_weighted_sample, py_random_state
__all__ = ['configuration_model',
'directed_configuration_model',
'expected_degree_graph',
'havel_hakimi_graph',
'directed_havel_hakimi_graph',
'degree_sequence_tree',
'random_degree_sequence_graph']
chaini = chain.from_iterable
def _to_stublist(degree_sequence):
"""Returns a list of degree-repeated node numbers.
``degree_sequence`` is a list of nonnegative integers representing
the degrees of nodes in a graph.
This function returns a list of node numbers with multiplicities
according to the given degree sequence. For example, if the first
element of ``degree_sequence`` is ``3``, then the first node number,
``0``, will appear at the head of the returned list three times. The
node numbers are assumed to be the numbers zero through
``len(degree_sequence) - 1``.
Examples
--------
>>> degree_sequence = [1, 2, 3]
>>> _to_stublist(degree_sequence)
[0, 1, 1, 2, 2, 2]
If a zero appears in the sequence, that means the node exists but
has degree zero, so that number will be skipped in the returned
list::
>>> degree_sequence = [2, 0, 1]
>>> _to_stublist(degree_sequence)
[0, 0, 2]
"""
return list(chaini([n] * d for n, d in enumerate(degree_sequence)))
def _configuration_model(deg_sequence, create_using, directed=False,
in_deg_sequence=None, seed=None):
"""Helper function for generating either undirected or directed
configuration model graphs.
``deg_sequence`` is a list of nonnegative integers representing the
degree of the node whose label is the index of the list element.
``create_using`` see :func:`~networkx.empty_graph`.
``directed`` and ``in_deg_sequence`` are required if you want the
returned graph to be generated using the directed configuration
model algorithm. If ``directed`` is ``False``, then ``deg_sequence``
is interpreted as the degree sequence of an undirected graph and
``in_deg_sequence`` is ignored. Otherwise, if ``directed`` is
``True``, then ``deg_sequence`` is interpreted as the out-degree
sequence and ``in_deg_sequence`` as the in-degree sequence of a
directed graph.
.. note::
``deg_sequence`` and ``in_deg_sequence`` need not be the same
length.
``seed`` is a random.Random or numpy.random.RandomState instance
This function returns a graph, directed if and only if ``directed``
is ``True``, generated according to the configuration model
algorithm. For more information on the algorithm, see the
:func:`configuration_model` or :func:`directed_configuration_model`
functions.
"""
n = len(deg_sequence)
G = nx.empty_graph(n, create_using)
# If empty, return the null graph immediately.
if n == 0:
return G
# Build a list of available degree-repeated nodes. For example,
# for degree sequence [3, 2, 1, 1, 1], the "stub list" is
# initially [0, 0, 0, 1, 1, 2, 3, 4], that is, node 0 has degree
# 3 and thus is repeated 3 times, etc.
#
# Also, shuffle the stub list in order to get a random sequence of
# node pairs.
if directed:
pairs = zip_longest(deg_sequence, in_deg_sequence, fillvalue=0)
# Unzip the list of pairs into a pair of lists.
out_deg, in_deg = zip(*pairs)
out_stublist = _to_stublist(out_deg)
in_stublist = _to_stublist(in_deg)
seed.shuffle(out_stublist)
seed.shuffle(in_stublist)
else:
stublist = _to_stublist(deg_sequence)
# Choose a random balanced bipartition of the stublist, which
# gives a random pairing of nodes. In this implementation, we
# shuffle the list and then split it in half.
n = len(stublist)
half = n // 2
seed.shuffle(stublist)
out_stublist, in_stublist = stublist[:half], stublist[half:]
G.add_edges_from(zip(out_stublist, in_stublist))
return G
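# Illustrative sketch (not part of the NetworkX source): for the undirected
# case with deg_sequence = [2, 1, 1] the stub list is [0, 0, 1, 2]; after
# shuffling it might become [0, 1, 2, 0], whose halves [0, 1] and [2, 0] are
# zipped into the edges (0, 2) and (1, 0).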
@py_random_state(2)
def configuration_model(deg_sequence, create_using=None, seed=None):
"""Returns a random graph with the given degree sequence.
The configuration model generates a random pseudograph (graph with
parallel edges and self loops) by randomly assigning edges to
match the given degree sequence.
Parameters
----------
deg_sequence : list of nonnegative integers
Each list entry corresponds to the degree of a node.
create_using : NetworkX graph constructor, optional (default MultiGraph)
Graph type to create. If graph instance, then cleared before populated.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : MultiGraph
A graph with the specified degree sequence.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence.
Raises
------
NetworkXError
If the degree sequence does not have an even sum.
See Also
--------
is_graphical
Notes
-----
As described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
sequence does not have an even sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified.
The density of self-loops and parallel edges tends to decrease as
the number of nodes increases. However, typically the number of
self-loops will approach a Poisson distribution with a nonzero mean,
and similarly for the number of parallel edges. Consider a node
with *k* stubs. The probability of being joined to another stub of
the same node is basically (*k* - *1*) / *N*, where *k* is the
degree and *N* is the number of nodes. So the probability of a
self-loop scales like *c* / *N* for some constant *c*. As *N* grows,
this means we expect *c* self-loops. Similarly for parallel edges.
References
----------
.. [1] M.E.J. Newman, "The structure and function of complex networks",
SIAM REVIEW 45-2, pp 167-256, 2003.
Examples
--------
You can create a degree sequence following a particular distribution
by using the one of the distribution functions in
:mod:`~networkx.utils.random_sequence` (or one of your own). For
example, to create an undirected multigraph on one hundred nodes
with degree sequence chosen from the power law distribution:
>>> sequence = nx.random_powerlaw_tree_sequence(100, tries=5000)
>>> G = nx.configuration_model(sequence)
>>> len(G)
100
>>> actual_degrees = [d for v, d in G.degree()]
>>> actual_degrees == sequence
True
The returned graph is a multigraph, which may have parallel
edges. To remove any parallel edges from the returned graph:
>>> G = nx.Graph(G)
Similarly, to remove self-loops:
>>> G.remove_edges_from(nx.selfloop_edges(G))
"""
if sum(deg_sequence) % 2 != 0:
msg = 'Invalid degree sequence: sum of degrees must be even, not odd'
raise nx.NetworkXError(msg)
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXNotImplemented('not implemented for directed graphs')
G = _configuration_model(deg_sequence, G, seed=seed)
return G
@py_random_state(3)
def directed_configuration_model(in_degree_sequence,
out_degree_sequence,
create_using=None, seed=None):
"""Returns a directed_random graph with the given degree sequences.
The configuration model generates a random directed pseudograph
(graph with parallel edges and self loops) by randomly assigning
edges to match the given degree sequences.
Parameters
----------
in_degree_sequence : list of nonnegative integers
Each list entry corresponds to the in-degree of a node.
out_degree_sequence : list of nonnegative integers
Each list entry corresponds to the out-degree of a node.
create_using : NetworkX graph constructor, optional (default MultiDiGraph)
Graph type to create. If graph instance, then cleared before populated.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : MultiDiGraph
A graph with the specified degree sequences.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence.
Raises
------
NetworkXError
If the degree sequences do not have the same sum.
See Also
--------
configuration_model
Notes
-----
Algorithm as described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
sequences does not have the same sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified. This
"finite-size effect" decreases as the size of the graph increases.
References
----------
.. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
Random graphs with arbitrary degree distributions and their applications
Phys. Rev. E, 64, 026118 (2001)
Examples
--------
One can modify the in- and out-degree sequences from an existing
directed graph in order to create a new directed graph. For example,
here we modify the directed path graph:
>>> D = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
>>> din = list(d for n, d in D.in_degree())
>>> dout = list(d for n, d in D.out_degree())
>>> din.append(1)
>>> dout[0] = 2
>>> # We now expect an edge from node 0 to a new node, node 3.
... D = nx.directed_configuration_model(din, dout)
The returned graph is a directed multigraph, which may have parallel
edges. To remove any parallel edges from the returned graph:
>>> D = nx.DiGraph(D)
Similarly, to remove self-loops:
>>> D.remove_edges_from(nx.selfloop_edges(D))
"""
if sum(in_degree_sequence) != sum(out_degree_sequence):
msg = 'Invalid degree sequences: sequences must have equal sums'
raise nx.NetworkXError(msg)
if create_using is None:
create_using = nx.MultiDiGraph
G = _configuration_model(out_degree_sequence, create_using, directed=True,
in_deg_sequence=in_degree_sequence, seed=seed)
name = "directed configuration_model {} nodes {} edges"
return G
@py_random_state(1)
def expected_degree_graph(w, seed=None, selfloops=True):
r"""Returns a random graph with given expected degrees.
Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$
of length $n$ this algorithm assigns an edge between node $u$ and
node $v$ with probability
.. math::
p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
Parameters
----------
w : list
The list of expected degrees.
selfloops: bool (default=True)
Set to False to remove the possibility of self-loop edges.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
Graph
Examples
--------
>>> z=[10 for i in range(100)]
>>> G=nx.expected_degree_graph(z)
Notes
-----
The nodes have integer labels corresponding to index of expected degrees
input sequence.
The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the
number of nodes and $m$ is the expected number of edges.
The model in [1]_ includes the possibility of self-loop edges.
Set selfloops=False to produce a graph without self loops.
For finite graphs this model doesn't produce exactly the given
expected degree sequence. Instead the expected degrees are as
follows.
For the case without self loops (selfloops=False),
.. math::
E[deg(u)] = \sum_{v \ne u} p_{uv}
= w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .
NetworkX uses the standard convention that a self-loop edge counts 2
in the degree of a node, so with self loops (selfloops=True),
.. math::
E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu}
= w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .
References
----------
.. [1] Fan Chung and L. Lu, Connected components in random graphs with
given expected degree sequences, Ann. Combinatorics, 6,
pp. 125-145, 2002.
.. [2] Joel Miller and Aric Hagberg,
Efficient generation of networks with given expected degrees,
in Algorithms and Models for the Web-Graph (WAW 2011),
Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
pp. 115-126, 2011.
"""
n = len(w)
G = nx.empty_graph(n)
    # If there are no nodes or no edges in the graph, return the empty graph.
if n == 0 or max(w) == 0:
return G
rho = 1 / sum(w)
# Sort the weights in decreasing order. The original order of the
# weights dictates the order of the (integer) node labels, so we
# need to remember the permutation applied in the sorting.
order = sorted(enumerate(w), key=itemgetter(1), reverse=True)
mapping = {c: u for c, (u, v) in enumerate(order)}
seq = [v for u, v in order]
last = n
if not selfloops:
last -= 1
for u in range(last):
v = u
if not selfloops:
v += 1
factor = seq[u] * rho
p = min(seq[v] * factor, 1)
while v < n and p > 0:
if p != 1:
r = seed.random()
v += int(math.floor(math.log(r, 1 - p)))
if v < n:
q = min(seq[v] * factor, 1)
if seed.random() < q / p:
G.add_edge(mapping[u], mapping[v])
v += 1
p = q
return G
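# Illustrative sketch (not part of the NetworkX source): the expected-degree
# formula documented above can be checked empirically by averaging over many
# samples; the numbers here are only an example.
#
#   >>> w = [10] * 50
#   >>> samples = [nx.expected_degree_graph(w, seed=s, selfloops=False)
#   ...            for s in range(200)]
#   >>> mean_deg0 = sum(G.degree(0) for G in samples) / 200
#   >>> w[0] * (1 - w[0] / sum(w))   # expected value
#   9.8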
def havel_hakimi_graph(deg_sequence, create_using=None):
"""Returns a simple graph with given degree sequence constructed
using the Havel-Hakimi algorithm.
Parameters
----------
deg_sequence: list of integers
Each integer corresponds to the degree of a node (need not be sorted).
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Directed graphs are not allowed.
Raises
------
NetworkXException
For a non-graphical degree sequence (i.e. one
not realizable by some simple graph).
Notes
-----
The Havel-Hakimi algorithm constructs a simple graph by
successively connecting the node of highest degree to other nodes
of highest degree, resorting remaining nodes by degree, and
repeating the process. The resulting graph has a high
    degree-assortativity. Nodes are labeled 0,..,len(deg_sequence)-1,
    corresponding to their position in deg_sequence.
The basic algorithm is from Hakimi [1]_ and was generalized by
Kleitman and Wang [2]_.
References
----------
.. [1] Hakimi S., On Realizability of a Set of Integers as
Degrees of the Vertices of a Linear Graph. I,
Journal of SIAM, 10(3), pp. 496-506 (1962)
.. [2] Kleitman D.J. and Wang D.L.
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
if not nx.is_graphical(deg_sequence):
raise nx.NetworkXError('Invalid degree sequence')
p = len(deg_sequence)
G = nx.empty_graph(p, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed graphs are not supported")
num_degs = [[] for i in range(p)]
dmax, dsum, n = 0, 0, 0
for d in deg_sequence:
# Process only the non-zero integers
if d > 0:
num_degs[d].append(n)
dmax, dsum, n = max(dmax, d), dsum + d, n + 1
# Return graph if no edges
if n == 0:
return G
modstubs = [(0, 0)] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while len(num_degs[dmax]) == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
raise nx.NetworkXError('Non-graphical integer sequence')
# Remove largest stub in list
source = num_degs[dmax].pop()
n -= 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while len(num_degs[k]) == 0:
k -= 1
target = num_degs[k].pop()
G.add_edge(source, target)
n -= 1
if k > 1:
modstubs[mslen] = (k - 1, target)
mslen += 1
# Add back to the list any nonzero stubs that were removed
for i in range(mslen):
(stubval, stubtarget) = modstubs[i]
num_degs[stubval].append(stubtarget)
n += 1
return G
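# Illustrative sketch (not part of the NetworkX source): the Havel-Hakimi
# construction realizes a graphical sequence exactly, so the sorted degrees of
# the result match the sorted input.
#
#   >>> seq = [3, 3, 2, 2, 1, 1]
#   >>> G = nx.havel_hakimi_graph(seq)
#   >>> sorted(d for _, d in G.degree()) == sorted(seq)
#   True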
def directed_havel_hakimi_graph(in_deg_sequence,
out_deg_sequence,
create_using=None):
"""Returns a directed graph with the given degree sequences.
Parameters
----------
in_deg_sequence : list of integers
Each list entry corresponds to the in-degree of a node.
out_deg_sequence : list of integers
Each list entry corresponds to the out-degree of a node.
create_using : NetworkX graph constructor, optional (default DiGraph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : DiGraph
A graph with the specified degree sequences.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence
Raises
------
NetworkXError
If the degree sequences are not digraphical.
See Also
--------
configuration_model
Notes
-----
Algorithm as described by Kleitman and Wang [1]_.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = nx.utils.make_list_of_ints(in_deg_sequence)
out_deg_sequence = nx.utils.make_list_of_ints(out_deg_sequence)
# Process the sequences and form two heaps to store degree pairs with
# either zero or nonzero out degrees
sumin, sumout = 0, 0
nin, nout = len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
G = nx.empty_graph(maxn, create_using, default=nx.DiGraph)
if maxn == 0:
return G
maxin = 0
stubheap, zeroheap = [], []
for n in range(maxn):
in_deg, out_deg = 0, 0
if n < nout:
out_deg = out_deg_sequence[n]
if n < nin:
in_deg = in_deg_sequence[n]
if in_deg < 0 or out_deg < 0:
raise nx.NetworkXError(
                'Invalid degree sequences. Sequence values must be non-negative.')
sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1 * out_deg, -1 * in_deg, n))
elif out_deg > 0:
zeroheap.append((-1 * out_deg, n))
if sumin != sumout:
raise nx.NetworkXError(
'Invalid degree sequences. Sequences must have equal sums.')
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0, 0, 0)] * (maxin + 1)
# Successively reduce degree sequence by removing the maximum
while stubheap:
# Remove first value in the sequence with a non-zero in degree
(freeout, freein, target) = heapq.heappop(stubheap)
freein *= -1
if freein > len(stubheap) + len(zeroheap):
raise nx.NetworkXError('Non-digraphical integer sequence')
# Attach arcs from the nodes with the most stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
(stubout, stubsource) = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin, stubsource) = heapq.heappop(stubheap)
if stubout == 0:
raise nx.NetworkXError('Non-digraphical integer sequence')
G.add_edge(stubsource, target)
# Check if source is now totally connected
if stubout + 1 < 0 or stubin < 0:
modstubs[mslen] = (stubout + 1, stubin, stubsource)
mslen += 1
# Add the nodes back to the heaps that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, (stub[0], stub[2]))
if freeout < 0:
heapq.heappush(zeroheap, (freeout, target))
return G
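# Illustrative sketch (not part of the NetworkX source): the in- and
# out-degree sequences must have equal sums and are both realized exactly.
#
#   >>> din, dout = [1, 1, 1], [2, 1, 0]
#   >>> D = nx.directed_havel_hakimi_graph(din, dout)
#   >>> sorted(d for _, d in D.in_degree()), sorted(d for _, d in D.out_degree())
#   ([1, 1, 1], [0, 1, 2])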
def degree_sequence_tree(deg_sequence, create_using=None):
"""Make a tree for the given degree sequence.
A tree has #nodes-#edges=1 so
the degree sequence must have
len(deg_sequence)-sum(deg_sequence)/2=1
"""
# The sum of the degree sequence must be even (for any undirected graph).
degree_sum = sum(deg_sequence)
if degree_sum % 2 != 0:
msg = 'Invalid degree sequence: sum of degrees must be even, not odd'
raise nx.NetworkXError(msg)
if len(deg_sequence) - degree_sum // 2 != 1:
        msg = ('Invalid degree sequence: tree must have number of nodes equal'
               ' to the number of edges plus one')
raise nx.NetworkXError(msg)
G = nx.empty_graph(0, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
# Sort all degrees greater than 1 in decreasing order.
#
# TODO Does this need to be sorted in reverse order?
deg = sorted((s for s in deg_sequence if s > 1), reverse=True)
# make path graph as backbone
n = len(deg) + 2
nx.add_path(G, range(n))
last = n
# add the leaves
for source in range(1, n - 1):
nedges = deg.pop() - 2
for target in range(last, last + nedges):
G.add_edge(source, target)
last += nedges
# in case we added one too many
if len(G) > len(deg_sequence):
G.remove_node(0)
return G
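# Illustrative sketch (not part of the NetworkX source): a tree on n nodes has
# n - 1 edges, so a valid sequence satisfies
# sum(deg_sequence) == 2 * (len(deg_sequence) - 1).
#
#   >>> seq = [1, 1, 1, 3]              # a star on 4 nodes: sum = 6 = 2 * 3
#   >>> T = nx.degree_sequence_tree(seq)
#   >>> T.number_of_nodes(), T.number_of_edges()
#   (4, 3)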
@py_random_state(1)
def random_degree_sequence_graph(sequence, seed=None, tries=10):
r"""Returns a simple random graph with the given degree sequence.
If the maximum degree $d_m$ in the sequence is $O(m^{1/4})$ then the
algorithm produces almost uniform random graphs in $O(m d_m)$ time
where $m$ is the number of edges.
Parameters
----------
sequence : list of integers
Sequence of degrees
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
tries : int, optional
Maximum number of tries to create a graph
Returns
-------
G : Graph
A graph with the specified degree sequence.
Nodes are labeled starting at 0 with an index
corresponding to the position in the sequence.
Raises
------
NetworkXUnfeasible
If the degree sequence is not graphical.
NetworkXError
If a graph is not produced in specified number of tries
See Also
--------
is_graphical, configuration_model
Notes
-----
The generator algorithm [1]_ is not guaranteed to produce a graph.
References
----------
    .. [1] Mohsen Bayati, Jeong Han Kim, and Amin Saberi,
A sequential algorithm for generating random graphs.
Algorithmica, Volume 58, Number 4, 860-910,
DOI: 10.1007/s00453-009-9340-1
Examples
--------
>>> sequence = [1, 2, 2, 3]
>>> G = nx.random_degree_sequence_graph(sequence, seed=42)
>>> sorted(d for n, d in G.degree())
[1, 2, 2, 3]
"""
DSRG = DegreeSequenceRandomGraph(sequence, seed)
for try_n in range(tries):
try:
return DSRG.generate()
except nx.NetworkXUnfeasible:
pass
raise nx.NetworkXError('failed to generate graph in %d tries' % tries)
class DegreeSequenceRandomGraph(object):
# class to generate random graphs with a given degree sequence
# use random_degree_sequence_graph()
def __init__(self, degree, rng):
if not nx.is_graphical(degree):
raise nx.NetworkXUnfeasible('degree sequence is not graphical')
self.rng = rng
self.degree = list(degree)
# node labels are integers 0,...,n-1
self.m = sum(self.degree) / 2.0 # number of edges
try:
self.dmax = max(self.degree) # maximum degree
except ValueError:
self.dmax = 0
def generate(self):
# remaining_degree is mapping from int->remaining degree
self.remaining_degree = dict(enumerate(self.degree))
# add all nodes to make sure we get isolated nodes
self.graph = nx.Graph()
self.graph.add_nodes_from(self.remaining_degree)
# remove zero degree nodes
for n, d in list(self.remaining_degree.items()):
if d == 0:
del self.remaining_degree[n]
if len(self.remaining_degree) > 0:
# build graph in three phases according to how many unmatched edges
self.phase1()
self.phase2()
self.phase3()
return self.graph
def update_remaining(self, u, v, aux_graph=None):
# decrement remaining nodes, modify auxiliary graph if in phase3
if aux_graph is not None:
# remove edges from auxiliary graph
aux_graph.remove_edge(u, v)
if self.remaining_degree[u] == 1:
del self.remaining_degree[u]
if aux_graph is not None:
aux_graph.remove_node(u)
else:
self.remaining_degree[u] -= 1
if self.remaining_degree[v] == 1:
del self.remaining_degree[v]
if aux_graph is not None:
aux_graph.remove_node(v)
else:
self.remaining_degree[v] -= 1
def p(self, u, v):
# degree probability
return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m)
def q(self, u, v):
# remaining degree probability
norm = float(max(self.remaining_degree.values()))**2
return self.remaining_degree[u] * self.remaining_degree[v] / norm
def suitable_edge(self):
"""Returns True if and only if an arbitrary remaining node can
potentially be joined with some other remaining node.
"""
nodes = iter(self.remaining_degree)
u = next(nodes)
return any(v not in self.graph[u] for v in nodes)
def phase1(self):
# choose node pairs from (degree) weighted distribution
rem_deg = self.remaining_degree
while sum(rem_deg.values()) >= 2 * self.dmax**2:
u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng))
if self.graph.has_edge(u, v):
continue
if self.rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v)
def phase2(self):
# choose remaining nodes uniformly at random and use rejection sampling
remaining_deg = self.remaining_degree
rng = self.rng
while len(remaining_deg) >= 2 * self.dmax:
while True:
u, v = sorted(rng.sample(remaining_deg.keys(), 2))
if self.graph.has_edge(u, v):
continue
if rng.random() < self.q(u, v):
break
if rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v)
def phase3(self):
# build potential remaining edges and choose with rejection sampling
potential_edges = combinations(self.remaining_degree, 2)
# build auxiliary graph of potential edges not already in graph
H = nx.Graph([(u, v) for (u, v) in potential_edges
if not self.graph.has_edge(u, v)])
rng = self.rng
while self.remaining_degree:
if not self.suitable_edge():
raise nx.NetworkXUnfeasible('no suitable edges left')
while True:
u, v = sorted(rng.choice(list(H.edges())))
if rng.random() < self.q(u, v):
break
if rng.random() < self.p(u, v): # accept edge
self.graph.add_edge(u, v)
self.update_remaining(u, v, aux_graph=H)
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/generators/degree_seq.py
|
Python
|
mit
| 30,646
|
[
"Brian"
] |
26a34dbc8f2d997216f41376ce50dd4f5ddf6f3ffc199ca52c3345b65df9125e
|
import urllib
import bleach
import ssl
import socket
import re
import datetime, time
from OpenSSL import SSL
from django.db import models
from django.forms import ModelForm
from django.template.defaultfilters import slugify
from django.conf import settings
from django.core.urlresolvers import reverse
# User Profile / Authentication
from django.contrib.auth.models import User
# File Upload
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
# Django automatically creates a table for each "class" here, named "[app name]_[class name]"
# so the table of "class Page" is "msw_page"
# each attribute of a class corresponds to a column in its table
####################################################
##### Demo Pages ###################################
class Page(models.Model):
slug = models.SlugField()
title = models.CharField(max_length=100)
category = models.CharField(max_length=100)
summary = models.CharField(max_length=1000) # short summary
description = models.TextField() # more detailed description
prevents = models.TextField() # what dangers are prevented
resources = models.TextField() # url resources
def save(self, *args, **kwargs):
self.slug = slugify(self.slug)
super(Page, self).save(*args, **kwargs)
def reverse_url(self):
#return reverse('detail', args=['richtext_and_safe_url'])
return reverse('detail', args=[self.slug])
def __unicode__(self):
return self.title
##### Demo Pages ###################################
####################################################
####################################################
##### Safe URL / RichText ##########################
class SafeUrl(models.Model):
the_url = models.URLField()
is_safe = models.BooleanField(default=False)
class SafeUrlSimple(models.Model):
urlname = models.CharField(max_length=200)
class RichText(models.Model):
name = models.CharField(max_length=200)
comment = models.TextField()
def __unicode__(self):
return self.name + ": " + self.comment
# dividing RichText into separate models
class RichTextInput(models.Model):
text = models.TextField()
##### Safe URL / RichText ##########################
####################################################
####################################################
##### Access Control Members Post ##################
class MembersPostUser(models.Model):
user = models.CharField(max_length=50)
def __unicode__(self):
return self.user
class MembersPostText(models.Model):
text = models.TextField()
def __unicode__(self):
return self.text
class MembersPostSay(models.Model):
mpuser = models.ForeignKey(MembersPostUser)
mptext = models.ForeignKey(MembersPostText)
def __unicode__(self):
        return str(self.mpuser) + ": " + str(self.mptext)
##### Access Control Members Post ##################
####################################################
####################################################
##### URL CHECK ####################################
# returns True if url is checked to be non-malicious, else False
# the url's format must be "http://..."
def urlCheck(url):
# following the google POST format
# for more info: http://code.google.com/apis/safebrowsing/lookup_guide.html#HTTPPOSTRequestResponseBody
urldata = "1\n" + url
if settings.GOOGLE_SAFEBROWSING_LOOKUP:
# Google SafeBrowsing Lookup: http://code.google.com/apis/safebrowsing/lookup_guide.html#AQuickExamplePOSTMethod
googleurl= settings.GSB_HOST + settings.GSB_PATH + "?client=api&apikey=" + settings.GSB_API_KEY + "&appver=1.5.2&pver=3.0"
# check that there is not a MITM of the host by validating the certificates
google_hosturl = getUrl(settings.GSB_HOST) # takes out "http://"
certValid = validateCert(google_hosturl)
# there is a MITM, don't bother connecting to the google url
if not certValid[0]:
return False
# Safe to visit the google lookup url
f = urllib.urlopen(googleurl, urldata)
response_code = f.code
# the url is safe
if response_code == 204:
return True
return False
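# Illustrative sketch (not part of the original app): the Lookup API body
# built above is a line count followed by one URL per line, e.g.
#
#   1
#   http://example.com/
#
# and, per the handling above, a 204 response is treated as "no match found",
# i.e. the URL is considered safe.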
# ValidateHTTPS Server certificate from http://wiki.python.org/moin/SSL
def validateCert(url):
#print url # should match sb-ssl.google.com
certValid = [False]
print "\n\n"
print "==========================================================================="
print "========= Validating Certificate for " + str(url) + " ========"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#The ca_certs file must be valid or all cert checks will fail
certFile = "apps/msw/files/ca-bundle.crt" # same idea as cacerts.txt, maybe same
ssl_sock = ssl.wrap_socket(s,
ca_certs=certFile,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=ssl.PROTOCOL_SSLv3) # hq added for certerror
ssl_sock.connect((url, 443))
peerCert=ssl_sock.getpeercert()
#print "PERR CERT:"
#print peerCert
print "\n------- Step 1: Validating certificate matches url hostname ----"
try :
# STEP 1. VERIFY THAT THERE IS A CERTIFICATE
# match_hostname url: https://bitbucket.org/brandon/backports.ssl_match_hostname/src/67f1340d302d/__init__.py
match_hostname(peerCert, url)
print "\r\n Certificate Check Step 1 Passed -- There is a certificate for URL"
except CertificateError, ce:
print "\r\n Certificate Check Step 1 FAILED -- No certificate for URL"
print "Certificate Error at Step 1: Validating that certificate matches url hostname: " + str(url)
print ce
print "\r\n Skipping rest of certification check"
return certValid # certValid[0] already set to False
print "\n------- Step 2: Verify that the certificate is not expired --------------"
for k, v in peerCert.iteritems():
# STEP 2. VERIFY CERTIFICATE IS NOT EXPIRED Certificate Expiration Checking
if k=='notAfter':
todayDate= datetime.date.today()
#http://docs.python.org/library/time.html
format='%b %d %H:%M:%S %Y %Z'
tempExpDate= time.strptime(v, format)
#print time.mktime(tempExpDate)
year = tempExpDate.__getattribute__('tm_year')
month = tempExpDate.__getattribute__('tm_mon')
day = tempExpDate.__getattribute__('tm_mday')
expDate=datetime.date(year, month, day)
tillExpiration= expDate-todayDate
if tillExpiration < datetime.timedelta (days = 30):
print "\r\n Certificate Check Step 2 FAILED -- Certificate HAS EXPIRED, expiring in %s" % tillExpiration
print "\r\n Skipping rest of certification check"
return certValid # certValid[0] already set to False
else:
print "\r\n Certificate Check Step 2 Passed -- Certificate not expired, expiring in %s" % tillExpiration
print "\n------- Step 3: Validating the chain of CAs --------------"
# crucially MODIFIED from: http://wiki.python.org/moin/SSL
#url = "sb-ssl.google.com" # what the url should be
#url = "www.google.com" # for testing
PORT = 443
host = url
print "For host = " + str(host)
# uses host
def verify_cb(conn, x509, errno, errdepth, retcode):
"""
callback for certificate validation
should return true if verification passes and false otherwise
"""
print " CA = " + str( x509.get_subject() )
if errno == 0:
if errdepth != 0:
# don't validate names of root certificates
print "\t---> GOOD (root certificate)"
certValid[0] = True
return True
else:
certComName = x509.get_subject().commonName
# the certComName might be like "*.google.com" ==> regex: .*\.google\.com
# Have to check that * does not contain any "." => regex: (.*)\.google\.com
# e.g. (.*) can be "x" but not "x.y" (that way host name is different)
# change "." --> "\." "*" --> "(.*)" order matters
starNum = certComName.count("*") # number of asterisks
certComName = certComName.replace(".", "\.")
certComName = certComName.replace("*", "(.*)")
result = re.match(certComName, host) # certComName == host with * interpretation
if result:
# If (.*) matches any dots, return False!
# reference: http://docs.python.org/library/re.html#re.MatchObject.group
                    for i in range(1, starNum+1):
                        if "." in result.group(i):
                            print "\t---> FAILED (cert commonName does not meet requirement)"
                            certValid[0] = False
                            return False
                    # if we got out of that for loop, the * regions don't have any dots :D
print "\t---> GOOD (cert commonName matched host name)"
certValid[0] = True
return True
else:
print "\tcertCommonName: \t" + str(certComName)
print "\thostName: \t\t" + str(host)
print "\t---> FAILED (cert commonName did not match host name)"
certValid[0] = False
return False
else:
print "\t---> FAILED"
certValid[0] = False
return False
context = SSL.Context(SSL.SSLv23_METHOD)
context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
context.load_verify_locations("apps/msw/files/cacerts.txt")
# create socket and connect to server
sock = socket.socket()
sock = SSL.Connection(context, sock)
sock.connect((host, PORT))
try:
sock.do_handshake()
except Exception as ec:
print ec
if certValid[0]:
print "\r\n Certificate Check Step 3 Passed -- Chain of CAs is valid"
print "\r\n HOST IS GOOD! :)"
else:
print "\r\n Certificate Check Step 3 FAILD -- Chain of CAs is NOT VALID"
print "\r\n HOST IS BAD! :("
print "\n=============== End Validating the Certificate of URL = " + str(url)
print "==========================================================================="
return certValid
# strips "http://" or "https://"
def getUrl(str):
if "http://" in str:
return str.replace("http://", "")
if "https://" in str:
return str.replace("https://", "")
return str
##### URL CHECK ####################################
####################################################
####################################################
##### User Profile / Authentication ################
# https://github.com/jbalogh/zamboni/blob/master/apps/users/models.py#L428
# google for the top 100 or 200 passwords and put them into
# your BlacklistedPassword table manually
class BlacklistedPassword(models.Model):
"""Blacklisted passwords"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
password = models.CharField(max_length=255, unique=True, blank=False)
def __unicode__(self):
return self.password
@classmethod
def blocked(cls, password):
return cls.objects.filter(password=password)
##### User Profile / Authentication ################
####################################################
####################################################
##### File Upload ##################################
# src: https://github.com/jsocol/kitsune/blob/master/apps/upload/models.py
class ImageAttachment(models.Model):
"""Save in dabatase image file path, creator, date created"""
# this models.ImageField does not use PIL
file = models.ImageField(upload_to=settings.IMAGE_UPLOAD_PATH,
max_length=settings.MAX_FILEPATH_LENGTH)
creator = models.ForeignKey(User, related_name='image_attachments')
created = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return self.file.name
def get_absolute_url(self):
return self.file.url
def get_delete_url(self):
"""Returns the URL to delete this object. Assumes the object has an
id."""
return reverse('upload.del_image_async', args=[self.id])
##### File Upload ##################################
####################################################
##########################################################################
##########################################################################
############# ssl_match_hostname #########################################
"""The match_hostname() function from Python 3.2, essential when using SSL."""
#import re
__version__ = '3.2a3'
class CertificateError(ValueError):
pass
def _dnsname_to_pat(dn):
pats = []
for frag in dn.split(r'.'):
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
are mostly followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if not san:
# The subject is only checked when subjectAltName is empty
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
haoqili/MozSecWorld
|
apps/msw/models.py
|
Python
|
bsd-3-clause
| 15,142
|
[
"VisIt"
] |
be12816a27019ffad993bcedc11f9f423a1c8a999173fdbecb8fb5223579ad1c
|
import chainerx
from chainerx import _docs
def set_docs():
_docs_creation()
_docs_indexing()
_docs_linalg()
_docs_logic()
_docs_manipulation()
_docs_math()
_docs_sorting()
_docs_statistics()
_docs_connection()
_docs_normalization()
_docs_pooling()
def _docs_creation():
_docs.set_doc(
chainerx.empty,
"""empty(shape, dtype, device=None)
Returns an array without initializing the elements.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type of the array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
:class:`~chainerx.ndarray`: New array with elements not initialized.
.. seealso:: :func:`numpy.empty`
""")
_docs.set_doc(
chainerx.empty_like,
"""empty_like(a, device=None)
Returns a new array with same shape and dtype of a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
:class:`~chainerx.ndarray`: New array with same shape and dtype as ``a`` \
with elements not initialized.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.empty_like`
""")
_docs.set_doc(
chainerx.eye,
"""eye(N, M=None, k=0, dtype=float64, device=None)
Returns a 2-D array with ones on the diagonals and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. M == N by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D array with given diagonals filled with ones and
zeros elsewhere.
.. seealso:: :func:`numpy.eye`
""")
_docs.set_doc(
chainerx.tri,
"""tri(N, M=None, k=0, dtype=float32, device=None)
Returns a 2-D array with ones at and below the given diagonal
and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. M == N by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D array with given diagonals filled ones at and
below the given diagonal and zeros elsewhere.
.. seealso:: :func:`numpy.tri`
""")
_docs.set_doc(
chainerx.tril,
"""tril(m, k=0)
Lower triangle of an array.
Returns a copy of an array with elements above the k-th diagonal zeroed.
Args:
m (~chainerx.ndarray): Input array.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
Returns:
~chainerx.ndarray: Lower triangle of ``m``.
.. seealso:: :func:`numpy.tril`
""")
_docs.set_doc(
chainerx.triu,
"""triu(m, k=0)
Upper triangle of an array.
Returns a copy of an array with elements below the k-th diagonal zeroed.
Args:
m (~chainerx.ndarray): Input array.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
Returns:
~chainerx.ndarray: Upper triangle of ``m``.
.. seealso:: :func:`numpy.triu`
""")
_docs.set_doc(
chainerx.identity,
"""identity(n, dtype=None, device=None)
Returns a 2-D identity array.
It is equivalent to ``eye(n, n, dtype)``.
Args:
n (int): Number of rows and columns.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D identity array.
.. seealso:: :func:`numpy.identity`
""")
_docs.set_doc(
chainerx.ones,
"""ones(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with ones.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.ones`
""")
_docs.set_doc(
chainerx.ones_like,
"""ones_like(a, device=None)
Returns an array of ones with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.ones_like`
""")
_docs.set_doc(
chainerx.zeros,
"""zeros(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with zeros.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.zeros`
""")
_docs.set_doc(
chainerx.zeros_like,
"""zeros_like(a, device=None)
Returns an array of zeros with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.zeros_like`
""")
_docs.set_doc(
chainerx.full,
"""full(shape, fill_value, dtype, device=None)
Returns a new array of given shape and dtype, filled with a given value.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.full`
""")
_docs.set_doc(
chainerx.full_like,
"""full_like(a, fill_value, dtype=None, device=None)
Returns a full array with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.full_like`
""")
_docs.set_doc(
chainerx.array,
"""array(object, dtype=None, copy=True, device=None)
Creates an array.
Args:
object: A :class:`~chainerx.ndarray` object or any other object that can be
passed to :func:`numpy.array`.
dtype: Data type. If omitted, it's inferred from the input.
copy (bool): If ``True``, the object is always copied. Otherwise, a copy
will only be made if it is needed to satisfy any of the other
requirements (dtype, device, etc.).
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.array`
""")
_docs.set_doc(
chainerx.asarray,
"""asarray(a, dtype=None, device=None)
Converts an object to an array.
Args:
a: The source object.
dtype: Data type. If omitted, it's inferred from the input.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: Array interpretation of ``a``. If ``a`` is already an \
ndarray on the given device with matching dtype, no copy is performed.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.asarray`
""")
_docs.set_doc(
chainerx.ascontiguousarray,
"""ascontiguousarray(a, dtype=None, device=None)
Returns a C-contiguous array.
Args:
a (~chainerx.ndarray): Source array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: C-contiguous array. A copy will be made only if needed.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.ascontiguousarray`
""")
_docs.set_doc(
chainerx.copy,
"""copy(a)
Creates a copy of a given array.
Args:
a (~chainerx.ndarray): Source array.
Returns:
~chainerx.ndarray: A copy array on the same device as ``a``.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.copy`
""")
_docs.set_doc(
chainerx.frombuffer,
"""frombuffer(buffer, dtype=float, count=-1, offset=0, device=None)
Returns a 1-D array interpretation of a buffer.
The given ``buffer`` memory must be usable on the given device, otherwise,
an error is raised.
Note:
The ``native`` backend requires a buffer of main memory, and
the ``cuda`` backend requires a buffer of CUDA memory.
No copy is performed.
Args:
buffer: An object that exposes the buffer interface.
dtype: Data type of the returned array.
count (int): Number of items to read. -1 means all data in the buffer.
offset (int): Start reading the buffer from this offset (in bytes).
device (~chainerx.Device): Device of the returned array.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: 1-D array interpretation of ``buffer``.
.. seealso:: :func:`numpy.frombuffer`
""")
_docs.set_doc(
chainerx.arange,
"""arange([start=0, ]stop, [step=1, ]dtype=None, device=None)
Returns an array with evenly spaced values within a given interval.
Values are generated within the half-open interval [``start``, ``stop``).
The first three arguments are mapped like the ``range`` built-in function,
i.e. ``start`` and ``step`` are optional.
Args:
start: Start of the interval.
stop: End of the interval.
step: Step width between each pair of consecutive values.
dtype: Data type specifier. It is inferred from other arguments by
default.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: The 1-D array of range values.
.. seealso:: :func:`numpy.arange`
""")
_docs.set_doc(
chainerx.linspace,
"""linspace(start, stop, num=50, endpoint=True, dtype=None, device=None)
Returns an array with evenly spaced numbers over a specified interval.
Instead of specifying the step width like :func:`chainerx.arange()`,
this function requires the total number of elements specified.
Args:
start: Start of the interval.
stop: End of the interval.
num: Number of elements.
endpoint (bool): If ``True``, the stop value is included as the last
element. Otherwise, the stop value is omitted.
dtype: Data type specifier. It is inferred from the start and stop
arguments by default.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: The 1-D array of ranged values.
.. seealso:: :func:`numpy.linspace`
""") # NOQA
_docs.set_doc(
chainerx.diag,
"""diag(v, k=0, device=None)
Returns a diagonal or a diagonal array.
Args:
v (~chainerx.ndarray): Array object.
k (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: If ``v`` is a 1-D array, then it returns a 2-D
array with the specified diagonal filled by ``v``. If ``v`` is a
2-D array, then it returns the specified diagonal of ``v``. In latter
case, if ``v`` is a :class:`chainerx.ndarray` object, then its view is
returned.
Note:
The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diag`
""")
_docs.set_doc(
chainerx.diagflat,
"""diagflat(v, k=0, device=None)
Creates a diagonal array from the flattened input.
Args:
v (~chainerx.ndarray): Array object.
k (int): Index of diagonals. See :func:`chainerx.diag`.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D diagonal array with the diagonal copied
from ``v``.
Note:
The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diagflat`
""")
def _docs_indexing():
_docs.set_doc(
chainerx.take,
"""take(a, indices, axis)
Takes elements from an array along an axis.
Args:
a (~chainerx.ndarray): Source array.
indices (~chainerx.ndarray):
The indices of the values to extract. When indices are out of bounds,
they are wrapped around.
axis (int): The axis over which to select values.
Returns:
:func:`~chainerx.ndarray`: Output array.
Note:
This function currently only supports indices of int64 array.
Note:
This function currently does not support ``axis=None``
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.take`
""")
_docs.set_doc(
chainerx.where,
"""where(condition, x, y)
Return elements chosen from ``x`` or ``y`` depending on condition.
Args:
condition (~chainerx.ndarray): Where True, yield ``x``, otherwise
yield ``y``.
x (~chainerx.ndarray): Values from which to choose.
y (~chainerx.ndarray): Values from which to choose.
Returns:
:func:`~chainerx.ndarray`: An array with elements
from ``x`` where condition is True, and elements from ``y`` elsewhere.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x`` and ``y``.
.. seealso:: :func:`numpy.where`
""")
def _docs_linalg():
_docs.set_doc(
chainerx.dot,
"""dot(a, b)
Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the last
axis of ``a`` and the second-to-last axis of ``b``. This is just a matrix
product if both arrays are 2-D. For 1-D arrays, it uses their unique axis
as an axis to take dot product over.
Args:
a (~chainerx.ndarray): The left argument.
b (~chainerx.ndarray): The right argument.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
This function currently does not support N > 2 dimensional arrays.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
""")
_docs.set_doc(
chainerx.linalg.solve,
"""solve(a, b)
Solves a linear matrix equation, or system of linear scalar equations.
It computes the exact solution of ``x`` in ``ax = b``,
where ``a`` is a square and full rank matrix,
``b`` can be a vector, or a rectangular matrix.
When ``b`` is a matrix, its columns are treated as separate vectors
representing multiple right-hand sides.
Args:
a (~chainerx.ndarray): Coefficient matrix.
b (~chainerx.ndarray): "dependent variable" values.
Returns:
:class:`~chainerx.ndarray`:
Solution to the system ``ax = b``.
Shape is identical to ``b``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.solve`
""")
_docs.set_doc(
chainerx.linalg.inv,
"""inv(a)
Computes the inverse of a matrix.
This function computes matrix ``a_inv`` from square matrix
``a`` such that ``dot(a, a_inv) = dot(a_inv, a) = eye(a.shape[0])``.
Args:
a (~chainerx.ndarray): The matrix to be inverted.
Returns:
:class:`~chainerx.ndarray`: The inverse of a matrix.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.inv`
""")
def _docs_logic():
_docs.set_doc(
chainerx.all,
"""all(x)
Test whether all array elements along a given axis evaluate to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which AND reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.all`
""")
_docs.set_doc(
chainerx.any,
"""any(x)
Test whether any array element along a given axis evaluate to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which OR reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.any`
""")
_docs.set_doc(
chainerx.logical_not,
"""logical_not(x)
Returns an array of NOT x element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_not`
""")
_docs.set_doc(
chainerx.logical_and,
"""logical_and(x1, x2)
Returns an array of x1 AND x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_and`
""")
_docs.set_doc(
chainerx.logical_or,
"""logical_or(x1, x2)
Returns an array of x1 OR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_or`
""")
_docs.set_doc(
chainerx.logical_xor,
"""logical_xor(x1, x2)
Returns an array of x1 XOR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_xor`
""")
_docs.set_doc(
chainerx.greater,
"""greater(x1, x2)
Returns an array of (x1 > x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater`
""")
_docs.set_doc(
chainerx.greater_equal,
"""greater_equal(x1, x2)
Returns an array of (x1 >= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater_equal`
""")
_docs.set_doc(
chainerx.less,
"""less(x1, x2)
Returns an array of (x1 < x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less`
""")
_docs.set_doc(
chainerx.less_equal,
"""less_equal(x1, x2)
Returns an array of (x1 <= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less_equal`
""")
_docs.set_doc(
chainerx.equal,
"""equal(x1, x2)
Returns an array of (x1 == x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.equal`
""")
_docs.set_doc(
chainerx.not_equal,
"""not_equal(x1, x2)
Returns an array of (x1 != x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.not_equal`
""")
def _docs_loss():
_docs.set_doc(
chainerx.absolute_error,
"""Element-wise absolute error function.
Computes the element-wise absolute error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = |x_1 - x_2|
Args:
x1 (~chainerx.ndarray): Input variable.
x2 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the absolute error of two inputs.
.. seealso:: :func:`chainer.functions.absolute_error`
""")
_docs.set_doc(
chainerx.squared_error,
"""Element-wise squared error function.
Computes the element-wise squared error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = (x_1 - x_2)^2
Can be used to compute the mean squared error by just calling ``mean()``
on the output array.
Args:
x0 (~chainerx.ndarray): Input variable.
x1 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the squared error of two inputs.
.. seealso:: :func:`chainer.functions.squared_error`
""")
_docs.set_doc(
chainerx.huber_loss,
"""Element-wise Huber loss.
The Huber loss is similar to the squared error but is less sensitive to
outliers in the data. It is defined as
.. math::
L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
\\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
\\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
\\end{array} \\right.
where :math:`a = x - t` is the difference between the input :math:`x`
and the target :math:`t`.
See: `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
Args:
x (~chainerx.ndarray): Input variable.
t (~chainerx.ndarray): Target variable for regression.
delta (float): Constant variable for Huber loss function as used in
definition.
Returns:
:class:`~chainerx.ndarray`:
A variable object holding an array representing the Huber loss
:math:`L_{\\delta}` of the two inputs.
.. seealso:: :func:`chainer.functions.huber_loss`
""")
_docs.set_doc(
chainerx.gaussian_kl_divergence,
"""Element-wise KL-divergence of Gaussian variables from the standard one.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function calculates
the element-wise KL-divergence between the given multi-dimensional
Gaussian :math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`
.. math::
D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),
where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
and :math:`I` is an identity matrix.
Args:
mean (~chainerx.ndarray):
A variable representing mean of given
gaussian distribution, :math:`\\mu`.
ln_var (~chainerx.ndarray):
A variable representing logarithm of
variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.
Returns:
:class:`~chainerx.ndarray`:
A variable representing KL-divergence between
given gaussian distribution and the standard gaussian.
.. seealso:: :func:`chainer.functions.gaussian_kl_divergence`
""")
def _docs_manipulation():
_docs.set_doc(
chainerx.reshape,
"""reshape(a, newshape)
Returns a reshaped array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
newshape (int or tuple of ints): The new shape of the array to return.
If it is an integer, then it is treated as a tuple of length one.
It should be compatible with ``a.size``. One of the elements can be
-1, which is automatically replaced with the appropriate value to
make the shape compatible with ``a.size``.
Returns:
:class:`~chainerx.ndarray`: A reshaped view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.reshape`
""")
_docs.set_doc(
chainerx.transpose,
"""transpose(a, axes=None)
Permutes the dimensions of an array.
Args:
a (~chainerx.ndarray): Array to permute the dimensions.
axes (tuple of ints): Permutation of the dimensions. This function reverses
the shape by default.
Returns:
~chainerx.ndarray: A view of ``a`` with the dimensions permuted.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.transpose`
""")
_docs.set_doc(
chainerx.broadcast_to,
"""broadcast_to(array, shape)
Broadcasts an array to a given shape.
Args:
array (~chainerx.ndarray): Array to broadcast.
shape (tuple of ints): The shape of the desired array.
Returns:
~chainerx.ndarray: Broadcasted view.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``array``.
.. seealso:: :func:`numpy.broadcast_to`
""")
_docs.set_doc(
chainerx.squeeze,
"""squeeze(a, axis=None)
Removes size-one axes from the shape of an array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
axis (int or tuple of ints): Axes to be removed. This function removes all
size-one axes by default. If one of the specified axes is not of size
one, an exception is raised.
Returns:
~chainerx.ndarray: An array without (specified) size-one axes.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.squeeze`
""")
_docs.set_doc(
chainerx.concatenate,
"""concatenate(arrays, axis=0)
Joins arrays along an axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be joined.
All of these should have the same shape, except along the specified
axis.
axis (int): The axis to join arrays along.
Returns:
~chainerx.ndarray: Joined array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.concatenate`
""")
_docs.set_doc(
chainerx.stack,
"""stack(arrays, axis=0)
Stacks arrays along a new axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.stack`
""")
_docs.set_doc(
chainerx.hstack,
"""hstack(arrays)
Stack arrays in sequence horizontally (column wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.hstack`
""")
_docs.set_doc(
chainerx.vstack,
"""vstack(arrays)
Stack arrays in sequence vertically (row wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.vstack`
""")
_docs.set_doc(
chainerx.dstack,
"""dstack(arrays)
Stack arrays in sequence depth wise (along third axis).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.dstack`
""")
_docs.set_doc(
chainerx.atleast_2d,
"""atleast_2d(a)
View inputs as arrays with at least two dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 2.
Copies are avoided where possible, and views with
two or more dimensions are returned.
Note:
* Arrays that already have two or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.atleast_2d`
""")
_docs.set_doc(
chainerx.atleast_3d,
"""atleast_3d(a)
View inputs as arrays with at least three dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 3.
Copies are avoided where possible, and views with
three or more dimensions are returned.
Note:
* Arrays that already have three or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.atleast_3d`
""")
_docs.set_doc(
chainerx.split,
"""split(ary, indices_or_sections, axis=0)
Splits an array into multiple sub arrays along a given axis.
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
axis (int): Axis along which the array is split.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.split`
""")
_docs.set_doc(
chainerx.dsplit,
"""dsplit(ary, indices_or_sections)
Split array into multiple sub-arrays along the 3rd axis (depth).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.dsplit`
""")
_docs.set_doc(
chainerx.swapaxes,
"""swapaxes(a, axis1, axis2)
Interchange two axes of an array.
Args:
a (~chainerx.ndarray): Array whose axes are to be swapped.
axis1 (int): First axis.
axis2 (int): Second axis.
Returns:
~chainerx.ndarray: Swapped array.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.swapaxes`
""")
_docs.set_doc(
chainerx.repeat,
"""repeat(a, repeats, axis=None)
Constructs an array by repeating a given array.
Args:
a (~chainerx.ndarray): Array to repeat.
repeats (int or tuple of ints): The number of times each
element of ``a`` is repeated.
axis (int): The axis along which to repeat values.
Returns:
~chainerx.ndarray: The repeated output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.repeat`
""")
_docs.set_doc(
chainerx.expand_dims,
"""expand_dims(a, axis)
Expand the shape of an array.
Args:
a (~chainerx.ndarray): Input Array.
axis (int): Position in the expanded axes where the new axis is placed.
Returns:
~chainerx.ndarray: Output array.
Note:
* Output array may or may not be a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.expand_dims`
""")
_docs.set_doc(
chainerx.flip,
"""flip(m, axis)
Reverse the order of elements in an array along the given axis.
Args:
m (~chainerx.ndarray): Input Array.
axis (int or tuple of ints): Axis or axes along which to flip over.
The default, axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the
axes specified in the tuple.
Returns:
~chainerx.ndarray: A view of m with the entries of axis reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flip`
""")
_docs.set_doc(
chainerx.fliplr,
"""fliplr(m)
Flip array in the left/right direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the columns reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.fliplr`
""")
_docs.set_doc(
chainerx.flipud,
"""flipud(m)
Flip array in the up/down direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the rows reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flipud`
""")
_docs.set_doc(
chainerx.moveaxis,
"""moveaxis(a, source, destination)
Move axes of an array to new positions.
Other axes remain in their original order.
Args:
a (~chainerx.ndarray): Input Array.
source (int or tuple of ints): Original positions of the axes to move.
These must be unique.
destination (int or tuple of ints): Destination positions for each of
the original axes. These must also be unique.
Returns:
~chainerx.ndarray: Array with moved axes. This array is a view of the
input array.
Note:
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.moveaxis`
""")
def _docs_math():
_docs.set_doc(
chainerx.negative,
"""negative(x)
Numerical negative, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = -x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.negative`
""")
_docs.set_doc(
chainerx.add,
"""add(x1, x2)
Add arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 + x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.add`
""")
_docs.set_doc(
chainerx.subtract,
"""subtract(x1, x2)
Subtract arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 - x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.subtract`
""")
_docs.set_doc(
chainerx.multiply,
"""multiply(x1, x2)
Multiply arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\times x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.multiply`
""")
_docs.set_doc(
chainerx.divide,
"""divide(x1, x2)
Divide arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\frac{x_1}{x_2}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.divide`
""")
_docs.set_doc(
chainerx.sum,
"""sum(a, axis=None, keepdims=False)
Sum of array elements over a given axis.
Args:
a (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The sum of input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.sum`
""")
_docs.set_doc(
chainerx.maximum,
"""maximum(x1, x2)
Maximum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = max(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
Note:
Computing the element-wise maximum of two :class:`~chainerx.ndarray`\\ s is
not supported yet.
.. seealso:: :data:`numpy.maximum`
""")
_docs.set_doc(
chainerx.exp,
"""exp(x)
Numerical exponential, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\exp x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.exp`
""")
_docs.set_doc(
chainerx.log,
"""log(x)
Natural logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\ln x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log`
""")
_docs.set_doc(
chainerx.log10,
"""log10(x)
Base 10 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{10} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log10`
""")
_docs.set_doc(
chainerx.log2,
"""log2(x)
Base 2 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{2} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log2`
""")
_docs.set_doc(
chainerx.log1p,
"""log1p(x)
Natural logarithm of one plus the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log(1 + x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log1p`
""")
_docs.set_doc(
chainerx.logsumexp,
"""logsumexp(x, axis=None, keepdims=False)
The log of the sum of exponentials of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The log of the sum of exponentials of
input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.log_softmax,
"""log_softmax(x, axis=None)
The log of the softmax of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
Returns:
:class:`~chainerx.ndarray`: The log of the softmax of input elements
over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.square,
"""square(x)
Returns the element-wise square of the input.
Args:
x (~chainerx.ndarray or scalar): Input data.
Returns:
~chainerx.ndarray: Returned array: :math:`y = x * x`.
A scalar is returned if ``x`` is a scalar.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``x``.
.. seealso:: :data:`numpy.square`
""")
_docs.set_doc(
chainerx.sqrt,
"""sqrt(x)
Non-negative square-root, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sqrt x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sqrt`
""")
_docs.set_doc(
chainerx.sinh,
"""sinh(x)
Hyperbolic sine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sinh`
""")
_docs.set_doc(
chainerx.cosh,
"""cosh(x)
Hyperbolic cosine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cosh`
""")
_docs.set_doc(
chainerx.tanh,
"""tanh(x)
Element-wise hyperbolic tangent function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tanh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tanh`
""")
_docs.set_doc(
chainerx.sigmoid,
"""sigmoid(x)
Element-wise sigmoid logistic function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array:
:math:`f(x) = (1 + \\exp(-x))^{-1}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :func:`chainer.functions.sigmoid`
""")
_docs.set_doc(
chainerx.sin,
"""sin(x)
Sine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sin`
""")
_docs.set_doc(
chainerx.cos,
"""cos(x)
Cosine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cos`
""")
_docs.set_doc(
chainerx.ceil,
"""ceil(x)
Return the ceiling of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The ceiling of each element in array.
.. seealso:: :data:`numpy.ceil`
""")
_docs.set_doc(
chainerx.tan,
"""tan(x)
Tangent, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tan`
""")
_docs.set_doc(
chainerx.relu,
"""Rectified Linear Unit function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\max (0, x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.arcsin,
"""arcsin(x)
Inverse sine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsin`
""")
_docs.set_doc(
chainerx.arccos,
"""arccos(x)
Trigonometric inverse cosine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccos`
""")
_docs.set_doc(
chainerx.arctan,
"""arctan(x)
Trigonometric inverse tangent, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arctan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arctan`
""")
_docs.set_doc(
chainerx.arctan2,
"""arctan2(x1, x2)
Element-wise arc tangent of :math:`\\frac{x_1}{x_2}` choosing the quadrant
correctly.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returns an array where each element
represents :math:`\\theta` in the range :math:`[-\\pi, \\pi]`, such
that :math:`x_1 = r \\sin(\\theta)` and :math:`x_2 = r \\cos(\\theta)`
for some :math:`r > 0`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x1`` and/or ``x2``.
.. seealso:: :data:`numpy.arctan2`
""")
_docs.set_doc(
chainerx.arcsinh,
"""arcsinh(x)
Inverse hyperbolic sine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sinh^{-1} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsinh`
""")
_docs.set_doc(
chainerx.arccosh,
"""arccosh(x)
Inverse hyperbolic cosine, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cosh^{-1} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccosh`
""")
_docs.set_doc(
chainerx.fabs,
"""fabs(x)
Compute the absolute values element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The absolute values of ``x``; the returned
values are always floats.
.. seealso:: :data:`numpy.fabs`
""")
_docs.set_doc(
chainerx.sign,
"""sign(x)
Returns an element-wise indication of the sign of a number.
The sign function returns :math:`-1` if :math:`x < 0`, :math:`0` if :math:`x = 0`, and :math:`1` if :math:`x > 0`.
``nan`` is returned for ``nan`` inputs.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The sign of x.
.. seealso:: :data:`numpy.sign`
""")
_docs.set_doc(
chainerx.floor,
"""floor(x)
Return the floor of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The floor of each element in array.
.. seealso:: :data:`numpy.floor`
""")
_docs.set_doc(
chainerx.isnan,
"""isnan(x)
Test element-wise for NaN and return result as a boolean array.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is NaN, false otherwise
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isnan`
""")
_docs.set_doc(
chainerx.isfinite,
"""isfinite(x)
Test element-wise for finiteness (not infinity or not Not a Number).
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where x is not positive infinity,
negative infinity, or NaN; false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isfinite`
""")
_docs.set_doc(
chainerx.isinf,
"""isinf(x)
Test element-wise for positive or negative infinity.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is positive or negative
infinity, false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isinf`
""")
_docs.set_doc(
chainerx.bitwise_and,
"""bitwise_and(x1, x2)
Compute the bit-wise AND of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\& x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_and`
""")
_docs.set_doc(
chainerx.bitwise_or,
"""bitwise_or(x1, x2)
Compute the bit-wise OR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 | x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_or`
""")
_docs.set_doc(
chainerx.bitwise_xor,
"""bitwise_xor(x1, x2)
Compute the bit-wise XOR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\oplus x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_xor`
""")
_docs.set_doc(
chainerx.left_shift,
"""left_shift(x1, x2)
Shift the bits of an integer to the left.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return ``x1`` with bits shifted ``x2`` times to the left.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.left_shift`
""") # NOQA
_docs.set_doc(
chainerx.right_shift,
"""right_shift(x1, x2)
Shift the bits of an integer to the right.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return ``x1`` with bits shifted ``x2`` times to the right.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.right_shift`
""") # NOQA
def _docs_sorting():
_docs.set_doc(
chainerx.argmax,
"""argmax(a, axis=None)
Returns the indices of the maximum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the maximum of.
axis (None or int): Along which axis to compute the maximum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the maximum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmax`
""")
_docs.set_doc(
chainerx.argmin,
"""argmin(a, axis=None)
Returns the indices of the minimum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the minimum of.
axis (None or int): Along which axis to compute the minimum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the minimum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmin`
""")
def _docs_statistics():
_docs.set_doc(
chainerx.amax,
"""amax(a, axis=None, keepdims=False)
Returns the maximum of an array or the maximum along an axis.
Note:
When at least one element is NaN, the corresponding max value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the maximum of.
axis (None or int or tuple of ints): Along which axis to take the maximum.
The flattened array is used by default.
If this is a tuple of ints, the maximum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the reduced axis is retained as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The maximum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amax`
""")
_docs.set_doc(
chainerx.amin,
"""amin(a, axis=None, keepdims=False)
Returns the minimum of an array or the minimum along an axis.
Note:
When at least one element is NaN, the corresponding min value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the minimum of.
axis (None or int or tuple of ints): Along which axis to take the minimum.
The flattened array is used by default.
If this is a tuple of ints, the minimum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the reduced axis is retained as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The minimum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amin`
""")
_docs.set_doc(
chainerx.mean,
"""mean(a, axis=None, keepdims=False)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the mean of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the mean. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The mean of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.mean`
""")
_docs.set_doc(
chainerx.var,
"""var(a, axis=None, keepdims=False)
Compute the variance along the specified axis.
Returns the variance of the array elements. The variance is taken over the
flattened array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the variance of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the variance. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The variance of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.var`
""")
def _docs_connection():
_docs.set_doc(
chainerx.conv,
"""conv(x, w, b=None, stride=1, pad=0, cover_all=False)
N-dimensional convolution.
This is an implementation of N-dimensional convolution which is generalized
two-dimensional convolution in ConvNets. It takes three arrays: the
input ``x``, the filter weight ``w`` and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``conv`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
If ``cover_all`` option is ``True``, the filter will cover the all
spatial locations. So, if the last stride of filter does not cover the
end of spatial locations, an additional stride will be applied to the end
part of spatial locations. In this case, the output size is determined by
the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
``cover_all`` needs to be ``False`` if you want to use the ``cuda`` backend.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
In the ``cuda`` backend, this function uses the cuDNN implementation for
its forward and backward computation.
Note:
In the ``cuda`` backend, this function currently has the following
limitations:
- The ``cover_all=True`` option is not supported yet.
- The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.convolution_nd`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> w = chainerx.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(1, 3, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
""")
_docs.set_doc(
chainerx.conv_transpose,
"""conv_transpose(x, w, b=None, stride=1, pad=0, outsize=None)
N-dimensional transposed convolution.
This is an implementation of N-dimensional transposed convolution, which was
previously known as **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
https://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three arrays: the input ``x``, the filter weight ``w``, and the
bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
- :math:`s_1, s_2, ..., s_N` are the stride of each axis of filter
application, respectively.
If ``outsize`` option is ``None``, the output size
:math:`(l_1, l_2, ..., l_N)` is determined by the following equations with
the items in the above list:
.. math::
l_n = s_n (d_n - 1) + k_n - 2 p_n \\ \\ (n = 1, ..., N)
If ``outsize`` option is given, the output size is determined by
``outsize``. In this case, the ``outsize`` :math:`(l_1, l_2, ..., l_N)`
must satisfy the following equations:
.. math::
d_n = \\lfloor (l_n + 2p_n - k_n) / s_n \\rfloor + 1 \\ \\ \
(n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_I, c_O, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation. It should be a
tuple of ints :math:`(l_1, l_2, ..., l_N)`. Default value is
``None`` and the outsize is estimated by input size, stride and
pad.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.deconvolution_nd`
.. admonition:: Example
**Example1**: the case when ``outsize`` is not given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 8, 36, 84)
>>> l1 = s1 * (d1 - 1) + k1 - 2 * p1
>>> l2 = s2 * (d2 - 1) + k2 - 2 * p2
>>> l3 = s3 * (d3 - 1) + k3 - 2 * p3
>>> y.shape == (n, c_o, l1, l2, l3)
True
**Example2**: the case when ``outsize`` is given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.array(np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32))
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.array(np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32))
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.array(np.random.uniform(0, 1, (c_o)).astype(np.float32))
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> l1, l2, l3 = 9, 38, 87
>>> d1 == int((l1 + 2 * p1 - k1) / s1) + 1
True
>>> d2 == int((l2 + 2 * p2 - k2) / s2) + 1
True
>>> d3 == int((l3 + 2 * p3 - k3) / s3) + 1
True
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3), outsize=(l1, l2, l3))
>>> y.shape
(10, 1, 9, 38, 87)
>>> y.shape == (n, c_o, l1, l2, l3)
True
""")
_docs.set_doc(
chainerx.linear,
"""linear(x, W, b=None, n_batch_axis=1)
Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
.. math:: Y = xW^\\top + b.
Args:
x (~chainerx.ndarray):
Input array, which is a :math:`(s_1, s_2, ..., s_n)`-shaped array.
W (~chainerx.ndarray):
Weight variable of shape :math:`(M, N)`,
where :math:`(N = s_{\\rm n\\_batch\\_axes} * ... * s_n)`.
b (~chainerx.ndarray):
Bias variable (optional) of shape :math:`(M,)`.
n_batch_axes (int):
The number of batch axes. The default is 1. The input variable is
reshaped into (:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional
tensor. This should be greater than 0.
Returns:
:class:`~chainerx.ndarray`:
Output array with shape of
:math:`(s_1, ..., s_{\\rm n\\_batch\\_axes}, M)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``W`` and ``b``.
""")
def _docs_normalization():
_docs.set_doc(
chainerx.batch_norm,
"""batch_norm(x, gamma, beta, running_mean, running_var, eps=2e-5, \
decay=0.9, axis=None)
Batch normalization function.
It takes the input array ``x`` and two parameter arrays ``gamma`` and
``beta``. The parameter arrays must both have the same size.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
running_mean (~chainerx.ndarray):
Running average of the mean. This is a running average of
the mean over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
running_var (~chainerx.ndarray):
Running average of the variance. This is a running average of
the variance over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
eps (float): Epsilon value for numerical stability.
decay (float): Decay rate of moving average. It is used during training.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x``, ``gamma`` and ``beta``.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
""")
_docs.set_doc(
chainerx.fixed_batch_norm,
"""fixed_batch_norm(x, gamma, beta, mean, var, eps=2e-5, axis=None)
Batch normalization function with fixed statistics.
This is a variant of :func:`~chainerx.batch_norm`, where the mean
and variance statistics are given by the caller as fixed variables.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
mean (~chainerx.ndarray): Shifting parameter of input.
var (~chainerx.ndarray): Square of scaling parameter of input.
eps (float): Epsilon value for numerical stability.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function does not propagate gradients.
""")
def _docs_pooling():
_docs.set_doc(
chainerx.max_pool,
"""max_pool(x, ksize, stride=None, pad=0, cover_all=False)
Spatial max pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the maximum
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses the same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``. This function is only
differentiable up to the second order.
.. note::
In the ``cuda`` backend, only inputs with 2 or 3 spatial dimensions are
supported as ``x``, because cuDNN pooling supports 2 and 3 spatial dimensions.
""")
_docs.set_doc(
chainerx.average_pool,
"""average_pool(x, ksize, stride=None, pad=0, pad_mode='ignore')
Spatial average pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the average
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses the same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
pad_mode ({'zero', 'ignore'}): Specifies how padded region is treated.
* 'zero' -- the values in the padded region are treated as 0
* 'ignore' -- padded region is ignored (default)
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. note::
In the ``cuda`` backend, only inputs with 2 or 3 spatial dimensions are
supported as ``x``, because cuDNN pooling supports 2 and 3 spatial dimensions.
""")
|
okuta/chainer
|
chainerx/_docs/routines.py
|
Python
|
mit
| 76,765
|
[
"Gaussian"
] |
2b455d13101af7232c3c5cf0fbdc4ba5a21b9d0182aebb9526d57cf372a5b2fb
|
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import socket
import collections
import datetime
import warnings
import zlib
import regex
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPError", "NNTPReplyError", "NNTPTemporaryError",
"NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
"""Takes an unicode string representing a munged header value
and decodes it as a (possibly non-ASCII) readable value."""
parts = []
for v, enc in _email_decode_header(header_str):
if isinstance(v, bytes):
parts.append(v.decode(enc or 'ascii'))
else:
parts.append(v)
return ''.join(parts)
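# Example (sketch, not in the original module): an RFC 2047 encoded header such as
#   decode_header('=?utf-8?q?Caf=C3=A9?=')
# yields the readable string 'Café'.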
def _parse_overview_fmt(lines):
"""Parse a list of string representing the response to LIST OVERVIEW.FMT
and return a list of header/metadata names.
Raises NNTPDataError if the response is not compliant
(cf. RFC 3977, section 8.4)."""
fmt = []
for line in lines:
if line[0] == ':':
# Metadata name (e.g. ":bytes")
name, _, suffix = line[1:].partition(':')
name = ':' + name
else:
# Header name (e.g. "Subject:" or "Xref:full")
name, _, suffix = line.partition(':')
name = name.lower()
name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
# Should we do something with the suffix?
fmt.append(name)
defaults = _DEFAULT_OVERVIEW_FMT
if len(fmt) < len(defaults):
raise NNTPDataError("LIST OVERVIEW.FMT response too short")
if fmt[:len(defaults)] != defaults:
raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
return fmt
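# Example (sketch): a compliant LIST OVERVIEW.FMT response such as
#   ['Subject:', 'From:', 'Date:', 'Message-ID:', 'References:',
#    ':bytes', ':lines', 'Xref:full']
# is parsed into
#   ['subject', 'from', 'date', 'message-id', 'references', ':bytes', ':lines', 'xref']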
def _parse_overview(lines, fmt, data_process_func=None):
"""Parse the response to a OVER or XOVER command according to the
overview format `fmt`."""
n_defaults = len(_DEFAULT_OVERVIEW_FMT)
overview = []
for line in lines:
fields = {}
article_number, *tokens = line.split('\t')
try:
article_number = int(article_number)
except ValueError as e:
continue
valid = True
for i, token in enumerate(tokens):
if i >= len(fmt):
# XXX should we raise an error? Some servers might not
# support LIST OVERVIEW.FMT and still return additional
# headers.
continue
field_name = fmt[i]
is_metadata = field_name.startswith(':')
if i >= n_defaults and not is_metadata:
# Non-default header names are included in full in the response
# (unless the field is totally empty)
h = field_name + ": "
if token and token[:len(h)].lower() != h:
# don't throw an exception here, because it blows away everything
# we want to keep any valid headers, so just skip the ones that die
valid = False
break
#raise NNTPDataError("OVER/XOVER response doesn't include "
# "names of additional headers")
token = token[len(h):] if token else None
fields[fmt[i]] = token
if not valid:
continue
overview.append((article_number, fields))
return overview
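# Example (sketch, values are illustrative): a single tab-separated XOVER line such as
#   '3000\tRe: hello\talice@example.com\t01 Jan 2017\t<id@host>\t\t1234\t17'
# parsed against the default overview format yields
#   (3000, {'subject': 'Re: hello', 'from': 'alice@example.com', ...,
#           ':bytes': '1234', ':lines': '17'})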
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
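# Example (sketch): the payload of a DATE response such as '111 20170315123456'
# is parsed with
#   _parse_datetime('20170315123456')  ->  datetime.datetime(2017, 3, 15, 12, 34, 56)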
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
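# Example (sketch):
#   _unparse_datetime(datetime.datetime(2017, 3, 15, 12, 34, 56))
#       -> ('20170315', '123456')
#   _unparse_datetime(datetime.date(2017, 3, 15), legacy=True)
#       -> ('170315', '000000')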
if _have_ssl:
def _encrypt_on(sock, context, hostname):
"""Wrap a socket in SSL/TLS. Arguments:
- sock: Socket to wrap
- context: SSL context to use for the encrypted connection
Returns:
- sock: New, encrypted socket.
"""
# Generate a default SSL context if none was passed.
if context is None:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
# v3 has since been killed too
context.options |= ssl.OP_NO_SSLv3
return context.wrap_socket(sock, server_hostname=hostname)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (and receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host,
readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
# Inquire about capabilities (RFC 3977).
self._caps = None
self.getcapabilities()
# 'MODE READER' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'MODE READER' and 'AUTHINFO' need to
# arrive differs between some NNTP servers. If _setreadermode() fails
# with an authorization failed error, it will set this to True;
# the login() routine will interpret that as a request to try again
# after performing its normal function.
# Enable only if we're not already in READER mode anyway.
self.readermode_afterauth = False
if readermode and 'READER' not in self._caps:
self._setreadermode()
if not self.readermode_afterauth:
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
# RFC 4642 2.2.2: Both the client and the server MUST know if there is
# a TLS session active. A client MUST NOT attempt to start a TLS
# session if a TLS session is already active.
self.tls_on = False
# Log in and encryption setup order is left to subclasses.
self.authenticated = False
def __enter__(self):
return self
def __exit__(self, *args):
is_connected = lambda: hasattr(self, "file")
if is_connected():
try:
self.quit()
except (OSError, EOFError):
pass
finally:
if is_connected():
self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it is 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1: print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be a unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline()
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns a unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is a unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
# XXX lines = None instead?
terminators = (b'.' + _CRLF, b'.\n')
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, lines
def _getcompresp(self, file=None):
"""Modified _getlongresp for reading gzip data from the
XOVER command.
Note: The file variable has not been tested.
"""
# Get the response.
resp = self._getresp()
# Check the response.
if resp[:3] != '224':
raise NNTPReplyError(resp)
lines = b''
terminator = False
while 1:
# Check if we found a possible terminator (.\r\n)
if terminator:
# The socket is non blocking, so it throws an
# exception if the server sends back nothing.
try:
# The server sent back something.
line = self._getline(False)
# So set back the socket to blocking.
self.sock.settimeout(120)
# And reset the terminator check.
terminator = False
# The socket buffer was empty.
except Exception as e:
# This was the final line, so remove the
# terminator and append it.
lines += termline[:-3]
# Set the socket back to blocking.
self.sock.settimeout(120)
# And break out of the loop.
break
# The buffer was not empty, so write the last line.
lines += termline
# And write the current line.
lines += line
else:
# We didn't find a terminator, so fetch the next line.
line = self._getline(False)
# We found a terminator.
if line[-3:] == b'.\r\n':
# So add the line to a temp line for later.
termline = line
# And set the socket to non blocking.
self.sock.settimeout(0)
# And mark that we found a terminator.
terminator = True
else:
# Add the current line to the final buffer.
lines += line
try:
# Try to decompress.
dc_obj = zlib.decompressobj()
decomp = dc_obj.decompress(lines)
# Remove the last crlf and split the line into a list @crlf's
if decomp[-2:] == b'\r\n':
decomp = decomp[:-2].split(b'\r\n')
else:
decomp = decomp.split(b'\r\n')
except Exception as e:
raise NNTPDataError('Data from NNTP could not be decompressed.')
# Check if the decompressed string is not empty.
if decomp[0] == b'':
decomp = []
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
# Write the lines to the file.
if file is not None:
for header in decomp:
file.write(header + b"\n")
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, decomp
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _compressedcmd(self, line, file=None):
"""Identical to _loncmdstring, but uses __getcompresp to
read gzip data from the XOVER command.
"""
self._putcmd(line)
resp, list = self._getcompresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
except NNTPPermanentError:
# Not supported by server?
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], 'LIST': ['ACTIVE', 'HEADERS']})
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = regex.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
# Nothing found
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = regex.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def compression(self):
"""Process an XFEATURE GZIP COMPRESS command.
Returns:
- bool: Did the server understand the command?
"""
try:
resp = self._shortcmd('XFEATURE COMPRESS GZIP')
if resp[:3] == '290':
return True
else:
return False
except Exception as e:
return False
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
if self.compressionstatus:
resp, lines = self._compressedcmd('XOVER {0}-{1}'.format(start, end), file)
else:
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end), file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
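# Usage sketch (assumption): when header compression was negotiated at connect time,
# the compressed XOVER payload is inflated in _getcompresp(); otherwise the plain
# multi-line path via _longcmdstring() is taken. Either way the call looks like:
#   resp, overviews = s.xover(first, last)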
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
if self.compressionstatus:
resp, lines = self._compressedcmd(cmd, file)
else:
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
DeprecationWarning, 2)
line_pat = regex.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
def _post(self, command, f):
resp = self._shortcmd(command)
# Raises a specific exception if posting is not allowed
if not resp.startswith('3'):
raise NNTPReplyError(resp)
if isinstance(f, (bytes, bytearray)):
f = f.splitlines()
# We don't use _putline() because:
# - we don't want additional CRLF if the file or iterable is already
# in the right format
# - we don't want a spurious flush() after each line is written
for line in f:
if not line.endswith(_CRLF):
line = line.rstrip(b"\r\n") + _CRLF
if line.startswith(b'.'):
line = b'.' + line
self.file.write(line)
self.file.write(b".\r\n")
self.file.flush()
return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
def login(self, user=None, password=None, usenetrc=True):
if self.authenticated:
raise ValueError("Already logged in.")
if not user and not usenetrc:
raise ValueError(
"At least one of `user` and `usenetrc` must be specified")
# If no login/password was specified but netrc was requested,
# try to get them from ~/.netrc
# Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(self.host)
if auth:
user = auth[0]
password = auth[2]
except OSError:
pass
# Perform NNTP authentication if needed.
if not user:
return
resp = self._shortcmd('authinfo user ' + user)
if resp.startswith('381'):
if not password:
raise NNTPReplyError(resp)
else:
resp = self._shortcmd('authinfo pass ' + password)
if not resp.startswith('281'):
raise NNTPPermanentError(resp)
# Capabilities might have changed after login
self._caps = None
self.getcapabilities()
# Attempt to send mode reader if it was requested after login.
# Only do so if we're not in reader mode already.
if self.readermode_afterauth and 'READER' not in self._caps:
self._setreadermode()
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
# Error 5xx, probably 'not implemented'
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
# Need authorization before 'mode reader'
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
# a TLS session already exists.
if self.tls_on:
raise ValueError("TLS is already enabled.")
if self.authenticated:
raise ValueError("TLS cannot be started after authentication.")
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context, self.host)
self.file = self.sock.makefile("rwb")
self.tls_on = True
# Capabilities may change after TLS starts up, so ask for them
# again.
self._caps = None
self.getcapabilities()
else:
raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT, compression=True):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
- usenetrc: allow loading username and password from ~/.netrc file
if not specified explicitly
- timeout: timeout (in seconds) used for socket connections
- compression: whether to attempt to enable header compression.
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port), timeout)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode, timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
if compression:
self.compressionstatus = self.compression()
else:
self.compressionstatus = False
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
if _have_ssl:
class NNTP_SSL(_NNTPBase):
def __init__(self, host, port=NNTP_SSL_PORT,
user=None, password=None, ssl_context=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT, compression=True):
"""This works identically to NNTP.__init__, except for the change
in default port and the `ssl_context` argument for SSL connections.
"""
self.sock = socket.create_connection((host, port), timeout)
self.sock = _encrypt_on(self.sock, ssl_context, host)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode=readermode, timeout=timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
if compression:
self.compressionstatus = self.compression()
else:
self.compressionstatus = False
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
__all__.append("NNTP_SSL")
# Test retrieval when run as a script.
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="""\
nntplib built-in demo - display the latest articles in a newsgroup""")
parser.add_argument('-g', '--group', default='gmane.comp.python.general',
help='group to fetch messages from (default: %(default)s)')
parser.add_argument('-s', '--server', default='news.gmane.org',
help='NNTP server hostname (default: %(default)s)')
parser.add_argument('-p', '--port', default=-1, type=int,
help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
parser.add_argument('-n', '--nb-articles', default=10, type=int,
help='number of articles to fetch (default: %(default)s)')
parser.add_argument('-S', '--ssl', action='store_true', default=False,
help='use NNTP over SSL')
args = parser.parse_args()
port = args.port
if not args.ssl:
if port == -1:
port = NNTP_PORT
s = NNTP(host=args.server, port=port)
else:
if port == -1:
port = NNTP_SSL_PORT
s = NNTP_SSL(host=args.server, port=port)
caps = s.getcapabilities()
if 'STARTTLS' in caps:
s.starttls()
resp, count, first, last, name = s.group(args.group)
print('Group', name, 'has', count, 'articles, range', first, 'to', last)
def cut(s, lim):
if len(s) > lim:
s = s[:lim - 4] + "..."
return s
first = str(int(last) - args.nb_articles + 1)
resp, overviews = s.xover(first, last)
for artnum, over in overviews:
author = decode_header(over['from']).split('<', 1)[0]
subject = decode_header(over['subject'])
lines = int(over[':lines'])
print("{:7} {:20} {:42} ({})".format(
artnum, cut(author, 20), cut(subject, 42), lines)
)
s.quit()
|
Herkemer/pynab
|
lib/nntplib.py
|
Python
|
gpl-2.0
| 47,585
|
[
"Brian"
] |
2d5d8eeb9998fec7730584aad10b08fa91f5651f6cd306f5a22bd001f1ff04f6
|
# (c) 2017, Florian P. Bayer <f.bayer@tum.de>
#
# sequences.py is part of the PyOmics project.
# It contains all kinds of Sequence objects that provide useful functionality.
# Import standard library modules
import re
import math
import warnings
from collections import Counter
import random
# Import from other non-standard libraries
import matplotlib.pyplot as pyplot
import numpy as np
# Import internal PyOmics modules
from .constants import *
# list of classes that can be imported from the sequences.py module
__all__ = ['DNASequence', 'RNASequence', 'ProteinSequence', 'Primer']
class Sequence(object):
"""
A Sequence is a data structure for biological strings and its associated metainformation.
The Sequence type is the core class in this module from which all other classes descend.
A Sequence is simultaneously an immutable string-like object and a mutable metainformation-storing dictionary that
holds additional and often important metainformation about the sequence and makes the mere string much more
meaningful, without losing track of what belongs together and what does not. Changing the underlying sequence
implies that a new sequence object has to be generated, since the underlying sequence is indivisibly connected to the
existence of a Sequence object. This, however, is only true for the sequence itself. Metainformation can be added,
changed, and deleted without changing the overall meaning of the object.
Attributes
----------
seq : str
sequence represents the biological sequence as string
meta : dict, optional
additional metainformation that describes the sequence in various ways
Methods
-------
count()
Analyze the composition of alphabet characters within the sequence
find(motif)
Find the occurrences of a string motif in the sequence
digest(means)
Digest the sequence into smaller sequence fragments by chosen means
read_*(path)
read the sequence from a file
"""
__slots__ = ['_seq', '_meta', '_counter', '_length']
def __init__(self, sequence, **metainformation):
"""
Initialization of a Sequence instance
Parameters
----------
sequence : str
sequence is the biological sequence as string that gets stored as `seq` attribute
metainformation : **dict, optional
metainformation contains all other data that gets passed as keyword arguments. There is no limitation as
to how many items can be passed.
"""
self._seq = sequence.upper()
self._meta = metainformation
self._counter = Counter(self._seq)
self._length = len(self._seq)
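# Example (sketch, names are illustrative): metainformation is attached as keyword
# arguments and read back dict-style:
#   s = Sequence('acgt', id=123, organism='E. coli')
#   str(s)    -> 'ACGT'   (the sequence is upper-cased on construction)
#   s['id']   -> 123
#   len(s)    -> 4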
def __repr__(self):
"""
A block representation for a sequence-like object into the terminal as plain text
Returns
-------
str
entire block representation of a sequence-like object as plain text
"""
def printer(dictionary, lst):
"""
print the dictionary information as formatted string into the list
"""
for key, value in dictionary.items():
# special formatting for dictionaries
if isinstance(value, dict):
fdict = ' '.join(["'{}':{}".format(k, v) for k, v in value.items()])
lst.append(' {0:<15}{1}'.format(key, fdict))
else:
lst.append(' {0:<15}{1}'.format(key, value))
def chunk(seq, start, end):
"""
iterate over chunks of 60 nt and inject a ' ' separator between every 10th to 11th position
"""
lst = []
for i in range(start, end, 60):
line = ' '.join([seq[j:j + 10] for j in range(i, i + 60, 10)])
lst.append('{0:>5} {1}'.format(i, line))
return lst
slist = []
separator = 71 * '-'
# Header block with class name
slist.append(self.__class__.__name__)
slist.append(separator)
# Information block containing Metadata
if self._meta:
slist.append('Metadata:')
printer(self._meta, slist)
# statistics block about the length, composition, gc
slist.append('Summary:')
printer(self._getstats(), slist)
# Sequence block
slist.append(separator)
slist.append('Sequence:')
# show all lines for 'small' sequences <= 360nt
if self._length <= 360:
slist.extend(chunk(self._seq, start=0, end=self._length))
# only show the first 3 lines and the last 3 lines for 'large' sequences
else:
# define starting and ending points
end_of_first, begin_of_last = 3 * 60, (self._length // 60 - 2) * 60
# first 3 lines
slist.extend(chunk(self._seq, start=0, end=end_of_first))
# ... separator
slist.append('{:>5}'.format('...'))
# last 3 lines
slist.extend(chunk(self._seq, start=begin_of_last, end=self._length))
# concat strings to block of lines
slist.append(separator)
return '\n'.join(slist)
# TODO: representation of the sequence-like objects
# other _repr_*_() methods: svg, png, jpeg, javascript, latex, pdf, ...
# None moves it back to __repr__ by default
def _repr_html_(self):
"""
A block representation for a sequence-like object into the terminal as html
Returns
-------
str
entire block representation of a sequence-like object as html
"""
# get the stats of the object
stats = self._getstats()
del stats['Composition']
# check if sequence can be double-stranded
if self._isdoublestranded():
complementary_seq = self.complement()
else:
complementary_seq = ""
html = """
<div id="SequenceObject{token}">
<script>
// global variables
var sequence{token} = "{seq}";
var complement{token} = "{compl_seq}";
var matcherSequence{token} = "";
var positions{token} = [];
var currentPosition{token} = 0;
var currentMotif{token} = "";
// dynamically generate elements at the beginning
function generate{token}() {{
// Statistics part 1: Composition
var composition = {comp};
var sortedCompKeys = Object.keys(composition).sort();
var compDiv = document.createElement('div');
compDiv.setAttribute("id", "showcomposition{token}");
compDiv.innerHTML = "Composition:";
document.getElementById('Statistics{token}').appendChild(compDiv);
for (let i in sortedCompKeys){{
let iDiv = document.createElement('div');
let key = sortedCompKeys[i];
let value = composition[key];
iDiv.innerHTML = '[' + String(key) + ' : ' + String(value) + ']';
iDiv.style.display = "inline-block";
iDiv.style.margin = "1px 5px";
document.getElementById('showcomposition{token}').appendChild(iDiv);
}};
// Statistics part 2: Rest
var statistics = {stats};
var sortedStatsKeys = Object.keys(statistics).sort();
for (let i in sortedStatsKeys){{
let iDiv = document.createElement('div');
let key = sortedStatsKeys[i];
let value = statistics[key];
iDiv.innerHTML = String(key) + ' : ' + String(value);
document.getElementById('Statistics{token}').appendChild(iDiv);
}};
document.getElementById('Statistics{token}').appendChild(document.createElement('br'));
// Metainformation part
var metainfo = {metainfo};
var sortedMetaKeys = Object.keys(metainfo).sort();
for (let i in sortedMetaKeys){{
let iDiv = document.createElement('div');
let key = sortedMetaKeys[i];
let value = metainfo[key];
iDiv.innerHTML = String(key) + ' : ' + String(value);
document.getElementById('Metainformation{token}').appendChild(iDiv);
}};
document.getElementById('Metainformation{token}').appendChild(document.createElement('br'));
// Positions at Sequence
document.getElementById('showposition{token}').innerHTML = chunkPos(0);
showSequence(sequence{token}, complement{token}, matcherSequence{token}, 0, '{token}');
}};
generate{token}();
// chunk the sequence into chunks of 10
function chunkSeq(seq, pos) {{
var a = [];
for (let i = 0; i < {block_length}; i++) {{
var start = pos + i*10;
var end = pos + (i+1)*10;
a.push(seq.slice(start, end));
}};
return a.join(' ')
}};
// chunk the position points into a nice string
function chunkPos(position) {{
var a = [];
for (let i = 0; i < {block_length}; i++) {{
var posString = (position + i*10).toString();
var spaces = ' '.repeat({block_length} - posString.length + 1);
a.push(posString);
a.push(spaces);
}};
return a.join(' ')
}};
// make a list of all occurrences of a motif in the sequence
function getIndicesOf(seq, motif) {{
motif = motif.toUpperCase();
if (motif.length == 0) {{return [];}};
var startIndex{token} = 0;
var index{token};
var indices{token} = [];
while ((index{token} = seq.indexOf(motif, startIndex{token})) > -1) {{
indices{token}.push(index{token});
startIndex{token} = index{token} + 1;
}};
return indices{token};
}};
// make a matching string that displays the positions of the motif
function matchMotif(listOfMotifs, motifLength, seqLength) {{
var blocks = [];
var start = 0;
for (var i = 0; i < listOfMotifs.length; i++) {{
var end = listOfMotifs[i];
var gap = end - start;
if (gap >= 0) {{
blocks.push(' '.repeat(gap));
blocks.push('#'.repeat(motifLength));
}} else {{
//repeats
blocks.push('#'.repeat(motifLength + gap));
}};
start = end + motifLength;
}};
blocks.push(' '.repeat(seqLength-start));
return blocks.join('')
}};
// displays the sequence and its positions
function showSequence(sequence, complement, matchseq, position, token) {{
document.getElementById('setposition' + token).value = position;
document.getElementById('showsequence1' + token).innerHTML = chunkSeq(sequence, position);
document.getElementById('showsequence2' + token).innerHTML = chunkSeq(complement, position);
document.getElementById('showposition' + token).innerHTML = chunkPos(position);
document.getElementById('matcher' + token).innerHTML = chunkSeq(matchseq, position);
}};
// display the next motif number
function showMotifNumber(token, current, max) {{
current += 1;
var object = document.getElementById('motifnumber' + token);
object.style.visibility = 'visible';
object.innerHTML = current.toString() + '/' + max.toString();
}};
// enable the view to the current navigation site
function enable(activate, token){{
document.getElementById('Sequence' + token).style.display = 'none';
document.getElementById('Statistics' + token).style.display = 'none';
document.getElementById('Metainformation' + token).style.display = 'none';
document.getElementById(activate + token).style.display = 'block';
}};
// On change sequence Range: Change the sequence view
document.getElementById("setposition{token}").oninput = function() {{
currentPosition{token} = parseInt(document.getElementById("setposition{token}").value);
showSequence(sequence{token}, complement{token}, matcherSequence{token},
currentPosition{token}, '{token}');
}};
// On Input Motif: Find all positions of motif and show the first occurence
document.getElementById("motif{token}").oninput = function() {{
currentMotif{token} = document.getElementById("motif{token}").value;
currentPosition{token} = 0;
positions{token} = getIndicesOf(sequence{token}, currentMotif{token});
if (positions{token}.length != 0) {{
document.getElementById("matcher{token}").style.display = 'block';
matcherSequence{token} = matchMotif(positions{token}, currentMotif{token}.length,
sequence{token}.length);
var goTo = positions{token}[currentPosition{token}];
// handle right sequence boundary (maximum range_max)
if (goTo > {range_max}) {{
goTo = {range_max};
}};
showSequence(sequence{token}, complement{token}, matcherSequence{token}, goTo, '{token}');
showMotifNumber('{token}', currentPosition{token}, positions{token}.length);
}} else {{
document.getElementById("showposition{token}").innerHTML = 'motif not found';
showMotifNumber('{token}', -1, 0);
document.getElementById("matcher{token}").style.display = 'none';
}};
if (currentMotif{token}.length == 0) {{
showSequence(sequence{token}, complement{token}, matcherSequence{token}, 0, '{token}');
document.getElementById("motifnumber{token}").style.visibility = 'hidden';
document.getElementById("matcher{token}").style.display = 'none';
}};
}};
// On Enter: go to the first match position
document.getElementById("motif{token}").onkeypress = function(event) {{
// move to start with <Enter>(13).
if (event.keyCode == 13) {{
currentPosition{token} = 0;
if (positions{token}.length != 0) {{
var goTo = positions{token}[currentPosition{token}];
// handle right sequence boundary (maximum range_max)
if (goTo > {range_max}) {{
goTo = {range_max};
}};
showSequence(sequence{token}, complement{token}, matcherSequence{token}, goTo, '{token}');
showMotifNumber('{token}', currentPosition{token}, positions{token}.length);
}} else {{
showSequence(sequence{token}, complement{token}, matcherSequence{token}, 0, '{token}');
}};
}};
}};
// On click right: set the match position one to the right
document.getElementById("nextmotif{token}").onclick = function() {{
if (positions{token}.length != 0) {{
currentPosition{token} = (currentPosition{token} + 1) % positions{token}.length;
var goTo = positions{token}[currentPosition{token}];
// handle right sequence boundary (maximum range_max)
if (goTo > {range_max}) {{
goTo = {range_max};
}};
showSequence(sequence{token}, complement{token}, matcherSequence{token}, goTo, '{token}');
showMotifNumber('{token}', currentPosition{token}, positions{token}.length);
}} else if (currentMotif{token}.length == 0) {{
currentPosition{token} = (currentPosition{token} + 1) % ({range_max} + 1);
showSequence(sequence{token}, complement{token}, matcherSequence{token},
currentPosition{token}, '{token}');
}} else {{
document.getElementById("showposition{token}").innerHTML = 'motif not found';
}};
}};
// On click left: set the match position one to the left
document.getElementById("prevmotif{token}").onclick = function() {{
if (positions{token}.length != 0) {{
currentPosition{token} = (currentPosition{token} - 1) % positions{token}.length;
// handle negative array indexing
if (currentPosition{token} < 0) {{
currentPosition{token} = positions{token}.length - 1;
}};
var goTo = positions{token}[currentPosition{token}];
// handle right sequence boundary (maximum range_max)
if (goTo > {range_max}) {{
goTo = {range_max};
}};
showSequence(sequence{token}, complement{token}, matcherSequence{token}, goTo, '{token}');
showMotifNumber('{token}', currentPosition{token}, positions{token}.length);
}} else if (currentMotif{token}.length == 0) {{
currentPosition{token} = (currentPosition{token} - 1) % {range_max};
if (currentPosition{token} < 0) {{
currentPosition{token} = {range_max};
}};
showSequence(sequence{token}, complement{token}, matcherSequence{token},
currentPosition{token}, '{token}');
}} else {{
document.getElementById("showposition{token}").innerHTML = 'Weird error';
}};
}};
</script>
<style>
div.navwrepper {{ padding-left: 20px;
display: none;
border-color: rgb(220, 220, 220);
border-style: none solid solid solid;
border-width: 1px;
}}
h2 {{ color: rgb(46, 123, 179);}}
pre.match {{
}}
</style>
<h2>{object}</h2>
<br>
<div name="Navigation Bar" >
<ul class="nav nav-tabs">
<li class="">
<a data-toggle="tab" onclick="enable('Sequence', '{token}')">Sequence</a>
</li>
<li class="">
<a data-toggle="tab" onclick="enable('Statistics', '{token}')">Statistics</a>
</li>
<li class="">
<a data-toggle="tab" onclick="enable('Metainformation', '{token}')">Metainformation</a>
</li>
</ul>
</div>
<div name="Sequence" id="Sequence{token}" class="navwrepper">
<br>
<div name="Motif Search">
Motif:
<input type="text" name="motif" id="motif{token}" autocomplete="off">
<button name="prevmotif" id="prevmotif{token}">
<i class="fa-arrow-left fa"></i>
</button>
<button name="Next Motif" id="nextmotif{token}">
<i class="fa-arrow-right fa"></i>
</button>
<button name="Motif Number" id="motifnumber{token}" style="visibility:hidden;" disabled>#/#</button>
</div>
<br>
<div name="Sequence Slider">
Sequence:
<input type="range" name="Set Position" id="setposition{token}" min="0" max="{range_max}" step="1"
value="0" style="width:66.6%;display:inline-block;">
</div>
<br>
<pre name="Show Position" id="showposition{token}"></pre>
<pre name="Show Sequence 1" id="showsequence1{token}"></pre>
<pre name="Show Match" id="matcher{token}" class="match" style="color:red;display:none;"></pre>
<pre name="Show Sequence 2" id="showsequence2{token}"></pre>
<br>
</div>
<div name="Statistics" id="Statistics{token}" class="navwrepper">
<br>
</div>
<div name="Metainformation" id="Metainformation{token}" class="navwrepper">
<br>
</div>
</div>
""".format(seq=self._seq,
compl_seq=complementary_seq,
block_length=8,
range_max=len(self._seq) - 10 * 8,
token=random.getrandbits(32),
comp=dict(self._counter),
stats=stats,
metainfo=dict(self._meta),
object=self.__class__.__name__,
)
return html
def __str__(self):
"""
A formatted string representation for a particular object when used in a print statement
Returns
-------
str
sequence string that represents the biological sequence of that object
Examples
--------
>>> print(Sequence('ABC'))
"""
return self._seq
def __setitem__(self, key, value):
"""
Dictionary-like setting of new additional metainformation to the object
Parameters
----------
key : str
The `key` to which the `value` is matched
value : any
The `value` can be anything one desires to store in connection with that instance
Examples
--------
>>> Sequence('ABC')['key'] = 'value'
"""
self._meta[key] = value
def __delitem__(self, key):
"""
Dictionary-like deleting of deprecated metainformation from the object
Parameters
----------
key : str
The `key` that shall be removed from the additional metainformation
Examples
--------
>>> del Sequence('ABC')['key']
"""
del self._meta[key]
def __missing__(self, key):
"""
Called when user tries to access a piece of metainformation that does not exist in the object
Parameters
----------
key : str
The `key` that was mistakenly used to pull metainformation
"""
msg = 'You try to access metainformation that does not exist! Check your key: {}'.format(key)
warnings.warn(msg, UserWarning)
return None
def __getitem__(self, key):
"""
Sequence slicing and dictionary-like access to sequence-associated metainformation
If `key` is a slice object or int, it will retrieve the specified sequence part according to the slice
operation. If `key` is a string object, it will return the sequence-associated metainformation stored under that key.
Parameters
----------
key : slice, int, str
The `key` which shall be used to retrieve the desired metainformation from the object
Returns
-------
any, str
Anything that was stored to the `key` gets returned or a string view on the Sequence
Raises
------
IndexError
If slicing or integer is out of range for that underlying sequence
TypeError
If `key` is of a type other than slice, integer, string
Examples
--------
>>> a_base = Sequence('ABCD')[2] # answer is 'C'
>>> a_slice = Sequence('ABCD')[1:3] # answer is 'BC'
>>> id_number = Sequence('ABCD', id=123456789)['id'] # answer is 123456789
"""
# Handle sequence slicing with python's slicing object. Return a string view on the sequence
if isinstance(key, slice) or isinstance(key, int) or isinstance(key, np.integer):
try:
return self._seq[key]
except IndexError:
raise IndexError('sequence index out of range')
# Handle dict-like metainformation access from self._meta
elif isinstance(key, str):
if key in self._meta:
return self._meta[key]
else:
return self.__missing__(key)
else:
raise TypeError("key must be of int, slice, or str")
def __eq__(self, other):
"""
Compare two objects of same kind with each other whether they are equal or not
Sequence equality is based upon equality of the underlying sequence and the objects have to be of similar type.
Parameters
----------
other : Sequence-like
An `other` instance of a Sequence-like class against which the underlying sequence is tested
Returns
-------
bool
True if equal or False if unequal
Examples
--------
>>> Sequence('ABC') == Sequence('ABC')
"""
if self.__class__ != other.__class__:
return False
return self._seq == str(other)
def __ne__(self, other):
"""
Compare two objects of same kind with each other whether they are unequal or not
Parameters
----------
other : Sequence-like
An `other` instance of a Sequence-like class against which the underlying sequence is tested
Returns
-------
bool
True if unequal or False if equal
Examples
--------
>>> Sequence('ABC') != Sequence('DEF')
"""
if self.__class__ != other.__class__:
return True
return self._seq != str(other)
def __add__(self, other):
"""
Concatenation of two similar Sequence-like objects
Parameters
----------
other : Sequence-like
An `other` instance of a similar Sequence-like class
Returns
-------
Sequence-like object
A new Sequence-like object with the concatenated sequence. No metainformation gets transferred whatsoever
Raises
------
TypeError
If `other` is not an instance of the same type as self
Examples
--------
>>> new = Sequence('ABC') + Sequence('DEF')
"""
if self.__class__ != other.__class__:
error_message = "Sequence concatenation is only possible if both sequences are of similar type. " \
"Found: {} + {}".format(self.__class__.__name__, other.__class__.__name__)
raise TypeError(error_message)
return self.__class__(''.join([self._seq, str(other)]))
def __radd__(self, other):
"""
Concatenation of two similar Sequence-like objects
Parameters
----------
other : Sequence-like
An `other` instance of a similar Sequence-like class
Returns
-------
Sequence-like object
A new Sequence-like object with the concatenated sequence. No metainformation gets transferred whatsoever
Raises
------
TypeError
If `other` is not an instance of the same type as self
Examples
--------
>>> new = Sequence('ABC') + Sequence('DEF')
"""
if self.__class__ != other.__class__:
error_message = "Sequence concatenation is only possible if both sequences are of similar type. " \
"Found: {} + {}".format(other.__class__.__name__, self.__class__.__name__, )
raise TypeError(error_message)
        return self.__class__(''.join([str(other), self._seq]))
def __iadd__(self, other):
"""
Concatenation of two similar Sequence-like objects
Parameters
----------
other : Sequence-like
            Another instance of a similar Sequence-like class
Returns
-------
Sequence-like object
A new Sequence-like object with the concatenated sequence. No metainformation gets transferred whatsoever
Raises
------
TypeError
If `other` is not an instance of the same type as self
Examples
--------
>>> var = Sequence('ABC')
>>> var += Sequence('DEF')
"""
if self.__class__ != other.__class__:
error_message = "Sequence concatenation is only possible if both sequences are of similar type. " \
"Found: {} =+ {}".format(self.__class__.__name__, other.__class__.__name__)
raise TypeError(error_message)
return self.__class__(''.join([self._seq, str(other)]))
def __mul__(self, n):
"""
Repeating a Sequence multiple times
Parameters
----------
n : int
            The number of times `n` the sequence shall be repeated
Returns
-------
Sequence-like object
A new Sequence-like object with the `n` times repeated sequence.
No metainformation gets transferred whatsoever.
Raises
------
TypeError
If `n` is not an integer
Examples
--------
>>> new = Sequence('ABC') * 4
"""
if not isinstance(n, int):
error_message = "Sequence repetition is only possible with an integer. " \
"Found: {} * {}".format(self.__class__.__name__, n.__class__.__name__)
raise TypeError(error_message)
return self.__class__(n * self._seq)
def __rmul__(self, n):
"""
Repeating a Sequence multiple times
Parameters
----------
n : int
            The number of times `n` the sequence shall be repeated
Returns
-------
Sequence-like object
A new Sequence-like object with the `n` times repeated sequence.
No metainformation gets transferred whatsoever.
Raises
------
TypeError
            If `n` is not an integer
Examples
--------
>>> new = 4 * Sequence('ABC')
"""
if not isinstance(n, int):
error_message = "Sequence repetition is only possible with an integer. " \
"Found: {} * {}".format(n.__class__.__name__, self.__class__.__name__)
raise TypeError(error_message)
return self.__class__(n * self._seq)
def __imul__(self, n):
"""
Repeating a Sequence multiple times
Parameters
----------
n : int
            The number of times `n` the sequence shall be repeated
Returns
-------
Sequence-like object
A new Sequence-like object with the `n` times repeated sequence.
No metainformation gets transferred whatsoever.
Raises
------
TypeError
            If `n` is not an integer
Examples
--------
>>> var = Sequence('ABC')
>>> var *= 4
"""
if not isinstance(n, int):
error_message = "Sequence repetition is only possible with an integer. " \
"Found: {} =* {}".format(self.__class__.__name__, n.__class__.__name__)
raise TypeError(error_message)
return self.__class__(n * self._seq)
def __len__(self):
"""
Length determination of the sequence
Returns
-------
int
number of characters in the sequence (length)
Examples
--------
>>> len(Sequence('ABC'))
"""
return self._length
def __contains__(self, other):
"""
        Pythonic way to check whether a Sequence-like object is a substring of another Sequence-like object
Parameters
----------
other : Sequence-like
            Another instance of a similar Sequence-like class
Returns
-------
bool
True if `other` is indeed in the sequence; else False
Raises
------
TypeError
If `other` is not an instance of the same type as self
Examples
--------
>>> Sequence('A') in Sequence('ABC')
"""
if self.__class__ != other.__class__:
error_message = "Subsequence search is only possible if both sequences are of similar type. " \
"Found: {} in {}".format(other.__class__.__name__, self.__class__.__name__)
raise TypeError(error_message)
return str(other) in self._seq
def __iter__(self):
"""
Efficient iteration over characters of the underlying sequence
Yields
------
str
A single character of the sequence
Examples
--------
>>> for char in Sequence('ABC'): print(char)
"""
for char in self._seq:
yield char
def _getstats(self):
"""
A class dependent collection of important data
Returns
-------
dict
a collection of relevant data
"""
return {'Length [char]': self._length,
'Composition': self._counter}
@staticmethod
def _isdoublestranded():
"""
Can the object be double-stranded
Returns
-------
None
A generic sequence does not know a double-stranded state
"""
return None
def count(self):
"""
Analysis of the composition of alphabet characters within the sequence
Returns
-------
Counter
A Counter object storing the composition
Examples
--------
>>> c = Sequence('ABC').count()
"""
return self._counter
def find(self, motif, overlap=True, start=0):
"""
        Find the occurrences of a string motif in the sequence
Parameters
----------
motif : str
A motif that is searched against the sequence
overlap : bool, optional
A flag parameter to specify whether to include overlaps or not (default is True)
start : int, optional
            A starting index other than the beginning of the sequence (default is 0)
Returns
-------
list
A list of integers referencing the starting indexes of each motif occurrence in the sequence
Examples
--------
>>> lst = Sequence('ABCD').find('CD')
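        >>> # non-overlapping search shifts past each full hit (illustrative)
        >>> hits = Sequence('AAAA').find('AA', overlap=False)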
"""
# use an iterator for each next str.find index (efficient!!)
def motif_iterator(inner_motif, dna, start_at=0, shift_to=1):
while True:
found_at = dna.find(inner_motif, start_at)
if found_at == -1:
break
else:
yield found_at
start_at = found_at + shift_to
        # handle overlap to set the shift_to cursor to the next correct position
# return a list of all found_at indexes of a motif's beginning in seq
if overlap:
return [pos for pos in motif_iterator(motif, self._seq, start, shift_to=1)]
else:
return [pos for pos in motif_iterator(motif, self._seq, start, shift_to=len(motif))]
def digest(self, means, sort=True, as_str=False):
"""
Sequence digestion into smaller sequence fragments
Parameters
----------
means : pattern
The 'means' by which the sequence gets fragmented is a regex pattern to define cutting sites in the sequence
sort : bool, optional
            A flag parameter to specify whether the returned list shall be sorted or not (default is True)
as_str : bool, optional
            A flag parameter to specify whether fragments are returned as plain strings (default is False -> Sequence-like)
Returns
-------
list
a list of sequence fragments either as string or sequence-like object
Examples
--------
>>> fragments = Sequence('ABCDE').digest(r'[C]')
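        >>> # return the fragments as plain strings instead of Sequence objects (illustrative)
        >>> raw_fragments = Sequence('ABCDE').digest(r'[C]', as_str=True)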
"""
# a sorted list of all unique fragment strings
fragments = list({*re.split(means, self._seq)})
# if sorted is True: sort the fragments by length in reversed order
if sort:
fragments = sorted(fragments, key=len, reverse=True)
# if as string is True: return the fragments simply as strings; else: as *Sequence objects
if as_str:
return fragments
return [self.__class__(f) for f in fragments]
@classmethod
def read_fasta(cls, filepath):
"""
Reads the specified sequences from fasta file
Parameters
----------
filepath : str
the filepath to the *.fasta file
Returns
-------
list
A list of sequences that were parsed in
Examples
--------
>>> list_of_sequences = Sequence.read_fasta('path/to/file.fasta')
"""
# regex for finding a sequence block consisting of head and sequence
block_pattern = re.compile(r"(?P<head> >.+) \n (?P<seq> [^>]*)", re.X)
# regex for extracting all information from head
head_pattern = re.compile(r"""# head token
>
# non-capturing and optional group for the following identifiers
(?:
# Gene index
(=?gi)
\| (?P<gi_number>[\d]+) \|
# GenBank
(?: gb
\| (?P<gb_accession>[\w\.]+)
\| (?P<gb_locus>[\w\.]+)? \s
# EMBL Data Library
| emb
\| (?P<emb_accession>[\w\.]+)
\| (?P<emb_locus>[\w\.]+)? \s
# DDBJ, DNA Database of Japan
| dbj
\| (?P<dbj_accession>[\w\.]+)
\| (?P<locus_accession>[\w\.]+)? \s
)
# NBRF PIR
|(=?pir)
\|\| (?P<pir_entry>.+?) \s
# Protein Research Foundation
|(=?prf)
\|\| (?P<prf_name>.+?) \s
# SWISS-PROT
|(=?sp)
\| (?P<sp_accession>.+)
\| (?P<sp_name>.+?) \s
# TrEMBL
|(=?tr)
\| (?P<tr_accession>.+)
\| (?P<tr_name>.+?) \s
# Protein Data Bank
|(=?pdb)
\| (?P<pdb_entry>.+)
\| (?P<pdb_chain>.+?) \s
# Patents
|(=?pat)
\| (?P<pat_country>.+)
\| (?P<pat_number>.+?) \s
# test reference
|(=?test)(?P<test_id>.+) \.
)?
# Isoform check
(?: (=?\bIsoform\b) (?P<isoform>.+) of)?
# name will consume everything to the end$ if known identifiers are not provided
                          # that are in particular the tokens (OS=, GN=, PE=, SV=)
(?:(?P<name>.+?(?=(?:OS|GN|PE|SV)=|$)))
# OrganismName is the scientific name of the organism of the UniProtKB entry
(?:OS=(?P<species>.+?(?=(?:GN|PE|SV)=|$)))?
# GeneName is the first gene name of the UniProtKB entry
(?:GN=(?P<gene_name>.+?(?=(?:PE|SV)=|$)))?
# ProteinExistence is describing the evidence for the existence of the protein
(?:PE=(?P<evidence>.+?(?=(?:SV=|$))))?
# SequenceVersion is the version number of the sequence
(?:SV=(?P<version>.+))?
""", re.X)
# stream the file located at filepath
with open(filepath) as file:
lst = []
# iterate over all matched sequence blocks
for block_match in re.finditer(block_pattern, file.read()):
# extract the head information
head_match = re.match(head_pattern, block_match.group('head'))
if head_match:
# filter all None items out of the head match
head = {k: v.strip() for k, v in head_match.groupdict().items() if v is not None}
# clean the sequence from whitespace and change alphabet symbols to upper case
seq = block_match.group('seq').replace('\n', '').replace(' ', '').upper()
lst.append(cls(seq, **head))
# if not a single sequence got found return None
if not lst:
return None
# else return list of sequences
return lst
def write_fasta(self, path, mode='w'):
"""
write the sequence to file in fasta format
Parameters
----------
path: str
relative path to the file
mode: str, optional
writing mode (default 'w')
            'w' -> Overwrites the file if the file exists. If the file does not exist, creates a new file for writing.
            'a' -> Appends the sequence to the file's end if the file exists. If the file does not exist, creates a new file.
Examples
--------
        >>> Sequence('ABC', name='demo').write_fasta('path/to/file.fasta')
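        >>> # append a second record to an existing file (illustrative path and metadata)
        >>> Sequence('DEF', name='demo-2').write_fasta('path/to/file.fasta', mode='a')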
"""
def write_token(lst, name, symbol):
if name in self._meta:
lst.append(' {}={}'.format(symbol, self._meta[name]))
# identifiers:
identifier = []
# Gene index:
if 'gi_number' in self._meta:
identifier = ['gi|{}'.format(self._meta['gi_number'])]
# from GenBank
if 'gb_accession' in self._meta:
identifier.append('|{}'.format(self._meta['gb_accession']))
if 'gb_locus' in self._meta:
identifier.append('|{}'.format(self._meta['gb_locus']))
# from EMBL Data Library
elif 'emb_accession' in self._meta:
identifier.append('|{}'.format(self._meta['emb_accession']))
if 'emb_locus' in self._meta:
identifier.append('|{}'.format(self._meta['emb_locus']))
# from DDBJ (DNA Database of Japan)
        elif 'dbj_accession' in self._meta:
            identifier.append('|{}'.format(self._meta['dbj_accession']))
            if 'locus_accession' in self._meta:
                identifier.append('|{}'.format(self._meta['locus_accession']))
# NBRF PIR
elif 'pir_entry' in self._meta:
identifier.append('pir||{}'.format(self._meta['pir_entry']))
# Protein Research Foundation
elif 'prf_name' in self._meta:
identifier.append('prf||{}'.format(self._meta['prf_name']))
# SWISS-PROT
elif 'sp_accession' in self._meta:
identifier.append('sp|{}|{}'.format(self._meta['sp_accession'], self._meta['sp_name']))
# TrEMBL
elif 'tr_accession' in self._meta:
identifier.append('tr|{}|{}'.format(self._meta['tr_accession'], self._meta['tr_name']))
# Protein Data Bank
elif 'pdb_entry' in self._meta:
identifier.append('pdb|{}|{}'.format(self._meta['pdb_entry'], self._meta['pdb_chain']))
# Patents
elif 'pat_country' in self._meta:
identifier.append('pat|{}|{}'.format(self._meta['pat_country'], self._meta['pat_number']))
# test reference
elif 'test_id' in self._meta:
            identifier.append('test{}.'.format(self._meta['test_id']))
# Isoform
isoform = []
if 'isoform' in self._meta:
isoform.append(' Isoform {} of'.format(self._meta['isoform']))
# special token
token = []
# OrganismName is the scientific name of the organism of the UniProtKB entry
write_token(token, 'species', 'OS')
# GeneName is the first gene name of the UniProtKB entry
write_token(token, 'gene_name', 'GN')
# ProteinExistence is describing the evidence for the existence of the protein
write_token(token, 'evidence', 'PE')
# SequenceVersion is the version number of the sequence
write_token(token, 'version', 'SV')
head = ['>']
head.extend(identifier)
head.extend(isoform)
        if 'name' in self._meta:
            head.append(' {}'.format(self._meta['name']))
head.extend(token)
with open(path, mode) as file:
file.write(''.join(head))
file.write('\n')
file.write(self._seq)
file.write('\n')
class NucleotideSequence(Sequence):
"""
A Nucleotide Sequence is a data structure for DNA and RNA sequences and its associated metainformation.
The Nucleotide Sequence is the parent class for DNA and RNA Sequences as well as is a descendant of Sequence.
This means that a Nucleotide Sequence summarizes methods common to both DNA and RNA. It also behaves as one would
expect from a regular Sequence type.
Attributes
----------
seq : str
sequence represents the biological sequence as string
meta : dict, optional
additional metainformation that describes the sequence in various ways
Methods
-------
    atgc()
        Analyze the AT/GC ratio of the nucleotide sequence
count()
Analyze the composition of alphabet characters within the nucleotide sequence
find(motif)
        Find the occurrences of a string motif in the nucleotide sequence
digest(means)
Digest the nucleotide sequence into smaller nucleotide sequence fragments by chosen means
gc()
Analyze the guanine and cytosine content (GC) of the nucleotide sequence
reverse()
Reverse the nucleotide sequence
complement(base_pairing)
Complement the nucleotide sequence based on base pairing mapper
reverse_complement(base_pairing)
Reverse and Complement the nucleotide sequence based on base pairing
translate(codon)
Translate nucleotide Sequence into a protein Sequence
read_*(path)
read the sequence from a file
"""
    alphabet = NUCLEOTIDE_ALPHABET
    def __init__(self, sequence, **metainformation):
"""
Initialization of an NucleotideSequence instance
Parameters
----------
sequence : str
sequence is the biological sequence as string that gets stored as `seq` attribute
metainformation : **dict, optional
metainformation contains all other data that gets passed as keyword arguments. There is no limitation as
to how many items can be passed.
"""
super(NucleotideSequence, self).__init__(sequence, **metainformation)
def _getstats(self):
"""
A class dependent collection of important data
Returns
-------
dict
a collection of relevant data
"""
return {'Length [bp]': self._length,
'Composition': self._counter,
'GC [%]': self.gc(ndigits=4) * 100,
'AT/GC': self.atgc()}
def gc(self, ndigits=2):
"""
Analysis of the guanine and cytosine content (GC) of the nucleotide sequence
Returns
-------
float
the ratio of guanine and cytosine in the nucleotide sequence
References
----------
https://en.wikipedia.org/wiki/GC-content
Examples
--------
>>> gc = NucleotideSequence('ACGT').gc()
"""
if not self._length:
msg = "Your sequence can't be used to calculated a at/gc ration. REASON: length is 0"
warnings.warn(msg, UserWarning)
return None
return round((self._counter['C'] + self._counter['G']) / self._length, ndigits)
def atgc(self, ndigits=2):
"""
        Analysis of the AT-base content relative to the GC-base content in the nucleotide sequence (AT/GC ratio)
Returns
-------
[float, None]
            the ratio of adenine and thymine (or uracil) relative to guanine and cytosine
            None, if it is not applicable to the sequence
References
----------
https://en.wikipedia.org/wiki/GC-content
Examples
--------
        >>> atgc_ratio = NucleotideSequence('ACGT').atgc()
"""
# TODO: Think about possibilities to include ambiguous bases. Maybe as flag? including them or not T/F?
numerator = (self._counter['A'] + self._counter['T'] + self._counter['U'])
denominator = (self._counter['C'] + self._counter['G'])
if not numerator or not denominator:
msg = "Your sequence can't be used to calculated a at/gc ration." \
"REASON: sequence does not contain any (AT) or (GC)"
warnings.warn(msg, UserWarning)
return None
return round(numerator / denominator, ndigits)
def reverse(self):
"""
Building of the reverse nucleotide sequence from the nucleotide sequence
Returns
-------
Sequence-like
The reversed nucleotide sequence as a new object, but metainformation is copied to the new object
Examples
--------
>>> rev = NucleotideSequence('ACGT').reverse()
"""
return self.__class__(self._seq[::-1], **self._meta)
def complement(self, base_pairing):
"""
Building of the complement nucleotide sequence from the nucleotide sequence
Parameters
----------
base_pairing : dict
A mapping that maps one base to its corresponding base
Returns
-------
Sequence-like
The complement nucleotide sequence as a new object, but metainformation is copied to the new object
Examples
--------
>>> dna = NucleotideSequence('ACGT').complement(DNA_BASE_PAIRING)
>>> rna = NucleotideSequence('ACGU').complement(RNA_BASE_PAIRING)
"""
table = str.maketrans(base_pairing)
return self.__class__(self._seq.translate(table), **self._meta)
def reverse_complement(self, base_pairing):
"""
Building of the reverse complement of the nucleotide sequence
Parameters
----------
base_pairing : dict
A mapping that maps one base to its corresponding base
Returns
-------
Sequence-like
The reverse-complement nucleotide sequence as a new object, but metainformation is copied to the new object
Examples
--------
>>> dna = NucleotideSequence('ACGT').reverse_complement(DNA_BASE_PAIRING)
>>> rna = NucleotideSequence('ACGU').reverse_complement(RNA_BASE_PAIRING)
"""
table = str.maketrans(base_pairing)
return self.__class__(self._seq.translate(table)[::-1], **self._meta)
def translate(self, codon, from_start=False):
"""
Translation of the nucleotide sequence into a protein Sequence
Parameters
----------
codon : dict
            A mapping that maps a nucleotide triplet to its corresponding amino acid
from_start : bool, optional
            A flag parameter to specify whether translation shall start at index 0 or at the start codon Methionine
(default is False which means that translation starts at the start codon)
Returns
-------
ProteinSequence
            A new protein sequence object; all metainformation is passed along to it
Examples
--------
        >>> prot1 = NucleotideSequence('ATGC').translate(DNA_CODONS, from_start=True)
        >>> prot2 = NucleotideSequence('AUGC').translate(RNA_CODONS, from_start=True)
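        >>> # the DNA/RNA subclasses pick the matching codon table themselves (illustrative)
        >>> prot3 = DNASequence('CCATGGCCTAA').translate()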
"""
start = -1
# start from the beginning
if from_start:
start = 0
# start with first occurring start codon for Met: AUG or ATG
else:
if isinstance(self, DNASequence):
start = self._seq.find('ATG')
if isinstance(self, RNASequence):
start = self._seq.find('AUG')
if start == -1:
return ProteinSequence('')
# only iterate to the last possible codon
end = self._length - (self._length - start) % 3
protein_seq = ''
for i in range(start, end, 3):
# map the triplet to amino acid sequence
amino_acid = codon[self._seq[i:i + 3]]
# if a stop codon has been reached: break
if amino_acid == '*':
break
protein_seq += amino_acid
return ProteinSequence(protein_seq, **self._meta)
class ProteinSequence(Sequence):
"""
A Protein Sequence is a data structure for protein sequences and its associated metainformation.
The Protein Sequence class is a descendant of Sequence.
This means that a Protein Sequence behaves as one would expect from a regular Sequence type.
Attributes
----------
seq : str
sequence represents the biological sequence as string
meta : dict, optional
additional metainformation that describes the sequence in various ways
Methods
-------
count()
Analyze the composition of alphabet characters within the sequence
find(motif)
        Find the occurrences of a string motif in the sequence
digest(means)
Digest the sequence into smaller sequence fragments by chosen means
mass()
Calculate the monoisotopic mass of the entire protein Sequence
read_*(path)
read the sequence from a file
"""
alphabet = PROTEIN_ALPHABET
def __init__(self, sequence, **metainformation):
"""
Initialization of an ProteinSequence instance
Parameters
----------
sequence : str
sequence is the biological sequence as string that gets stored as `seq` attribute
metainformation : **dict, optional
metainformation contains all other data that gets passed as keyword arguments. There is no limitation as
to how many items can be passed.
"""
super(ProteinSequence, self).__init__(sequence, **metainformation)
def _getstats(self):
"""
A class dependent collection of important data
Returns
-------
dict
a collection of relevant data
"""
return {'Length [aa]': self._length,
'Composition': self._counter,
'Mass [Da]': self.mass(AMINOACIDS_MONO_MASS),
'pI': self.pI(AMINOACIDS_PKAS['Wikipedia'])}
@staticmethod
def _isdoublestranded():
"""
Can the object be double stranded
Returns
-------
bool
"""
return False
def mass(self, weight, ndigits=5):
"""
Calculation of the mass of the entire protein Sequence
Parameters
----------
weight : dict
a dictionary containing all masses of all amino acids.
ndigits : int, optional
the number of digits to round the mass float (default is 5 digits)
Returns
-------
float
the total mass of the protein sequence
Examples
--------
>>> peptide_mass = ProteinSequence('PEPTIDE').mass(AMINOACIDS_MONO_MASS)
"""
# The weight of an empty sequence is 0 Dalton
if self._length == 0:
return 0
# Refuse to calculate the weight when B or Z is present in the sequence
if 'B' in self._counter or 'Z' in self._counter:
            msg = 'Your sequence <{}> contains "B" and/or "Z" characters. ' \
'It is not possible to calculate the mass from those.'.format(str(self))
warnings.warn(msg, UserWarning)
return None
# mass of terminal water
totalmass = weight['H2O']
# Calculate the residue masses
totalmass += sum([weight[aminoacid] * n for aminoacid, n in self._counter.items()])
return round(totalmass, ndigits)
def pI(self, pka_values, delta=10 ** -4, ndigits=2):
"""
Calculate the theoretical isoelectric point for the protein sequence
        The pI is the pH value at which the net charge of the sequence is 0. This pH value is found through a
bisect search algorithm. The net charge function calculates the protonation state according to the Henderson-
Hasselbalch equation [1] for a moiety that can protonate and deprotonate. The assumption here is that each
moiety acts independently from each other in an acid-base reaction, so that the molecule's net charge is the
mere sum of all acid-base moieties, which are the N-terminus:(RNH2), C-terminus:(RCOOH), C:(RSH), D:(RCOOH),
E:(RCOOH), H:(R2NH), K(RNH2), R(Guanidinyl), Y(ROH).
Parameters
----------
pka_values : dict
dict with pka values for the acid-base reaction
delta: float, optional
            the cutoff value `delta` specifies how close the net charge is allowed to differ from 0
ndigits: int
the number of digits that will be returned
Returns
-------
float
the pI value for the given protein sequence
References
----------
[1] https://en.wikipedia.org/wiki/Henderson–Hasselbalch_equation
Examples
--------
>>> pI = ProteinSequence('DEGK').pI(AMINOACIDS_PKAS['Wikipedia'])
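        >>> # an acidic peptide is expected to give a low pI (illustrative)
        >>> acidic_pI = ProteinSequence('DDEE').pI(AMINOACIDS_PKAS['Wikipedia'])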
"""
def charge_state(ph, pkas, n):
"""
the charge of the protein is equivalent to the sum of the fractional charges of the protein’s charged groups
"""
def alpha(pka_value, ph_value):
"""
determine the degree of dissociation (=alpha value) according to the Henderson-Hasselbalch equation
"""
return 1 / (1 + pow(10, (pka_value - ph_value)))
# For ionizable groups that are able to deprotonate to a charge of -1 (e.g., OH & COOH),
# multiply the calculated dissociation constant by -1.
# For ionizable groups that are able to deprotonate to a charge of 0 (e.g, NH3+), take the complement of the
# dissociation constant(1-alpha) and multiply the constant by +1.
# The net charge of the amino acid will be the sum of the charges of all of the ionizable groups.
return sum([+ (1 - alpha(pkas['NH2'], ph)),
- alpha(pkas['COOH'], ph),
- alpha(pkas['C'], ph) * n['C'],
- alpha(pkas['D'], ph) * n['D'],
- alpha(pkas['E'], ph) * n['E'],
+ (1 - alpha(pkas['H'], ph)) * n['H'],
+ (1 - alpha(pkas['K'], ph)) * n['K'],
+ (1 - alpha(pkas['R'], ph)) * n['R'],
- alpha(pkas['Y'], ph) * n['Y'],
])
# define pH boundaries and set initial charge state at pH = 7.0
low, mid, high = 0.0, 7.0, 14.0
z = charge_state(mid, pka_values, self._counter)
# perform the bisect search until: z in (0 ± delta)
while abs(z) >= delta:
# positive charge means that mid pH is too low
if z > 0:
low, mid = mid, (mid + high) / 2
# negative charge means that mid pH is too high
else:
high, mid = mid, (mid + low) / 2
# calculate the new charge based on the new middle pH
z = charge_state(mid, pka_values, self._counter)
return round(mid, ndigits)
def scale(self, score, window, degree=0, edge=0, ndigits=3, normalized=False, plot=True):
"""
Scale is a function that applies a scoring schema to the sequence
Parameters
----------
score : dict
a score for every amino acid
window : int
the length of the window. Has to be an odd number
degree : int, optional
the degree describes the shape of the model that will be applied to the windows,
(default is 0 which means that each residue contributes equally to the window)
edge : float [0-1], optional
the edge sets the border value of the model in the window,
(default is 0 which means that the outermost value contributes with factor 0 to the window)
ndigits : int, optional
the number of decimals in the return array, (default is 3 which means x.xxx numbers)
        normalized : bool, optional
            flag to specify whether the window scores should be normalized to the [0, 1] score range,
            (default is False which means that the window values are not normalized)
        plot : bool, optional
            flag to activate plot output,
            (default is True which means the window values get plotted)
Returns
-------
np.array
all computed window values that have been weighted with the model(degree, edge)
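        Examples
        --------
        >>> # hydropathy-style profile over a 5-residue window (illustrative Kyte-Doolittle scores)
        >>> w = ProteinSequence('PEPTIDE').scale({'P': -1.6, 'E': -3.5, 'T': -0.7, 'I': 4.5, 'D': -3.5}, window=5, plot=False)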
"""
# generate the simple model
model_dist = np.ones(window)
# if degree is given shape the model according to model function
if degree:
mid_value = window // 2
compression_factor = (1 - edge)
for i in range(0, window):
model_dist[i] = 1 - (np.abs(i - mid_value) / mid_value) ** degree * compression_factor
        # compute the total model weight used to average each window
model_weight = np.sum(model_dist)
# if 0 to 1 normalization is desired, normalize scores to 0 to 1 range:
if normalized:
max_value = max(score.values())
min_value = min(score.values())
z = dict()
for k, v in score.items():
z[k] = (v - min_value) / (max_value - min_value)
score = z
# map the string to scale array
s = np.zeros(self._length)
for i, c in enumerate(self._seq):
s[i] = score[c]
# apply the window over the scale array
w = np.zeros(self._length - window + 1)
for i in range(self._length - window + 1):
w[i] = round(np.sum(model_dist * s[i:i + window]) / model_weight, ndigits=ndigits)
# plot the scale over sequence
if plot:
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.set(xlabel='Sequence', ylabel='Scale')
ax.plot(w, label='name')
pyplot.show()
return w
class DNASequence(NucleotideSequence):
"""
A DNA Sequence is a data structure for DNA sequences and its associated metainformation.
The DNA Sequence class is a descendant of Sequence and more specific of Nucleotide Sequence.
This means that a DNA Sequence behaves as one would expect from a regular Nucleotide Sequence type.
Attributes
----------
seq : str
sequence represents the biological sequence as string
meta : dict, optional
additional metainformation that describes the sequence in various ways
Methods
-------
count()
Analyze the composition of alphabet characters within the DNA sequence
find(motif)
        Find the occurrences of a string motif in the DNA sequence
digest(means)
Digest the DNA sequence into smaller nucleotide sequence fragments by chosen means
gc()
Analyze the guanine and cytosine content (GC) of the DNA sequence
reverse()
Reverse the DNA sequence
complement()
Complement the DNA sequence based on base pairing mapper
reverse_complement()
Reverse and Complement the DNA sequence based on base pairing
translate()
Translate DNA Sequence into a protein Sequence
read_*(path)
read the sequence from a file
"""
def __init__(self, sequence, **metainformation):
"""
Initialization of an DNASequence instance
Parameters
----------
sequence : str
sequence is the biological sequence as string that gets stored as `seq` attribute
metainformation : **dict, optional
metainformation contains all other data that gets passed as keyword arguments. There is no limitation as
to how many items can be passed.
"""
super(DNASequence, self).__init__(sequence, **metainformation)
def _getstats(self):
"""
A class dependent collection of important data
Returns
-------
dict
a collection of relevant data
"""
return {'Length [bp]': self._length,
'Composition': self._counter,
'GC [%]': self.gc(ndigits=4) * 100,
'AT/GC': self.atgc()}
@staticmethod
def _isdoublestranded():
"""
Can the object be double stranded
Returns
-------
bool
"""
return True
def complement(self, **kwargs):
return super(DNASequence, self).complement(DNA_BASE_PAIRING)
def reverse_complement(self, **kwargs):
return super(DNASequence, self).reverse_complement(DNA_BASE_PAIRING)
def translate(self, **kwargs):
return super(DNASequence, self).translate(DNA_CODONS, **kwargs)
def transcribe(self):
return RNASequence(self._seq.replace("T", "U"), **self._meta)
class RNASequence(NucleotideSequence):
"""
A RNA Sequence is a data structure for RNA sequences and its associated metainformation.
The RNA Sequence class is a descendant of Sequence and more specific of Nucleotide Sequence.
This means that a RNA Sequence behaves as one would expect from a regular Nucleotide Sequence type.
Attributes
----------
seq : str
sequence represents the biological sequence as string
meta : dict, optional
additional metainformation that describes the sequence in various ways
Methods
-------
count()
Analyze the composition of alphabet characters within the RNA sequence
find(motif)
        Find the occurrences of a string motif in the RNA sequence
digest(means)
Digest the RNA sequence into smaller nucleotide sequence fragments by chosen means
gc()
Analyze the guanine and cytosine content (GC) of the RNA sequence
reverse()
Reverse the RNA sequence
complement()
Complement the RNA sequence based on base pairing mapper
reverse_complement()
Reverse and Complement the RNA sequence based on base pairing
translate()
Translate RNA Sequence into a protein Sequence
read_*(path)
read the sequence from a file
"""
def __init__(self, sequence, **metainformation):
"""
Initialization of an RNASequence instance
Parameters
----------
sequence : str
sequence is the biological sequence as string that gets stored as `seq` attribute
metainformation : **dict, optional
metainformation contains all other data that gets passed as keyword arguments. There is no limitation as
to how many items can be passed.
"""
super(RNASequence, self).__init__(sequence, **metainformation)
def _getstats(self):
"""
A class dependent collection of important data
Returns
-------
dict
a collection of relevant data
"""
return {'Length [bp]': self._length,
'Composition': self._counter,
'GC [%]': self.gc(ndigits=4) * 100,
'AT/GC': self.atgc()}
@staticmethod
def _isdoublestranded():
"""
Can the object be double stranded
Returns
-------
bool
"""
return False
def complement(self, **kwargs):
return super(RNASequence, self).complement(RNA_BASE_PAIRING)
def reverse_complement(self, **kwargs):
return super(RNASequence, self).reverse_complement(RNA_BASE_PAIRING)
def translate(self, **kwargs):
return super(RNASequence, self).translate(RNA_CODONS, **kwargs)
def reverse_transcribe(self):
return DNASequence(self._seq.replace("U", "T"), **self._meta)
class Primer(DNASequence):
"""
    A Primer is a short oligonucleotide DNA Sequence data structure for primers and its associated metainformation.
The Primer class is a descendant of DNA Sequence.
This means that a Primer Sequence behaves as one would expect from a regular DNA Sequence type.
Attributes
----------
seq : str
sequence represents the biological sequence as string. Not allowed to be longer than 50nts
meta : dict, optional
additional metainformation that describes the sequence in various ways
Methods
-------
count()
Analyze the composition of alphabet characters within the DNA sequence
find(motif)
        Find the occurrences of a string motif in the DNA sequence
digest(means)
Digest the Primer sequence into smaller nucleotide sequence fragments by chosen means
gc()
Analyze the guanine and cytosine content (GC) of the DNA sequence
reverse()
Reverse the Primer sequence
complement()
Complement the Primer sequence based on base pairing mapper
reverse_complement()
Reverse and Complement the Primer sequence based on base pairing
translate()
Translate Primer Sequence into a protein Sequence
melting_temp()
Calculate the theoretical melting temperature
read_*(path)
read the sequence from a file
"""
def __init__(self, sequence, **metainformation):
"""
Initialization of Primer instance
Parameters
----------
sequence : str
sequence is the biological sequence as string that gets stored as `seq` attribute.
Not allowed to be longer than 50nts
metainformation : **dict, optional
metainformation contains all other metainformation that gets passed as keyword arguments.
There is no limitation as to how many items can be passed.
Raises
------
ValueError
            If sequence is longer than 50nt
"""
if len(sequence) > 50:
raise ValueError('Sequence length is not allowed to be longer than 50nt. Use DNA Sequence class instead.')
super(Primer, self).__init__(sequence, **metainformation)
def _getstats(self):
"""
A class dependent collection of important data
Returns
-------
dict
a collection of relevant data
"""
return {'Length [bp]': self._length,
'Composition': self._counter,
'GC [%]': self.gc(ndigits=4) * 100,
'AT/GC': self.atgc(),
                'Melting Temperature [°C]': self.melting_temp()}
@staticmethod
def _isdoublestranded():
"""
Can the object be double stranded
Returns
-------
bool
"""
return False
def melting_temp(self, concentration=200, sodium=50, method='nearest-neighbor'):
"""
Calculate the theoretical melting temperature
        The theoretical melting temperature TM in °C is the temperature at which half of the strands are in the
        double-helical state and half are in the “random-coil” state. There exist several methods to calculate the TM
        that all use different strategies. Depending on the sequence, the values obtained from
        different methods can differ drastically. Please make sure that you know why you use which method. The thermodynamic
        nearest-neighbor approach is the most widely accepted method to use.
Parameters
----------
        concentration : float, optional
            Primer concentration in nM, (default is 200 nM)
        sodium : float, optional
            Na+ concentration in mM, (default is 50 mM)
method : str, optional
Choose one of the following methods to calculate the TM:
['marmur', 'wallace', 'salt-adjusted', 'nearest-neighbor'], (default is 'nearest-neighbor')
Returns
-------
float
            The theoretical melting temperature TM in °C
Raises
------
ValueError
If method is not one of the specified methods
Notes
-----
The melting temperature is defined as the temperature at which half of the strands are in the double-helical
state and half are in the “random-coil” state. It is an important parameter in Polymerase chain reactions (PCR).
It is critical to determine a proper temperature for the annealing step because efficiency and specificity are
strongly affected by the annealing temperature. This temperature must be low enough to allow for hybridization
of the primer to the strand, but high enough for the hybridization to be specific, i.e., the primer should bind
only to a perfectly complementary part of the strand, and nowhere else. If the temperature is too low, the
primer may bind imperfectly. If it is too high, the primer may not bind at all. A typical annealing temperature
is about 3–5 °C below the Tm of the primers used.
References
----------
[1] https://en.wikipedia.org/wiki/Polymerase_chain_reaction
[2] Marmur J and Doty P (1962) J Mol Biol 5:109-118
[3] Wallace RB et al. (1979) Nucleic Acids Res 6:3543-3557, PMID 158748
[4] Schildkraut et al. 1965, PMID 5889540 salt correction formulae
[5] SantaLucia 1998, PMID 9465037 thermodynamics & salt correction
Examples
--------
>>> tm = Primer('CATGCCATGGAAAAACGGGCGATTTATCC').melting_temp()
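        >>> # salt-adjusted estimate for the same primer (illustrative parameter choice)
        >>> tm_salt = Primer('CATGCCATGGAAAAACGGGCGATTTATCC').melting_temp(sodium=100, method='salt-adjusted')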
"""
n = self._counter
        # physical constants and unit conversions:
gas_constant = 1.987 # gas constant in cal/(K*mol)
salt_correction_factor = 0.114 # kcal/(K*mol) at T=310K
        concentration *= 10 ** -9  # convert from nM to mol/L
        sodium *= 10 ** -3  # convert from mM to mol/L
# ΔH° in cal/mol from http://www.pnas.org/content/95/4/1460/T2.expansion.html
formation_enthalpy = {'AA': -7900, 'AT': -7200, 'AG': -7800, 'AC': -8400,
'TA': -7200, 'TT': -7900, 'TG': -8500, 'TC': -8200,
'GA': -8200, 'GT': -8400, 'GG': -8000, 'GC': -9800,
'CA': -8500, 'CT': -7800, 'CC': -8000, 'CG': -10600,
}
initial_enthalpy = {'A': 2300, 'T': 2300, 'G': 100, 'C': 100}
# ΔS° cal/k·mol from http://www.pnas.org/content/95/4/1460/T2.expansion.html
formation_entropy = {'AA': -22.2, 'AT': -20.4, 'AG': -21.0, 'AC': -22.4,
'TA': -21.3, 'TT': -22.2, 'TG': -22.7, 'TC': -22.2,
'GA': -22.2, 'GT': -22.4, 'GG': -19.9, 'GC': -24.4,
'CA': -22.7, 'CT': -21.0, 'CC': -19.9, 'CG': -27.2,
}
initial_entropy = {'A': 4.1, 'T': 4.1, 'G': -2.8, 'C': -2.8}
# calculate according to Marmur
# not recommended for more than 13nt; assumes 50mM monovalent cations
if method == 'marmur':
if self._length > 13:
warnings.warn('not recommended for more than 13nt', UserWarning)
elif sodium != 50 * 10 ** -3:
warnings.warn('assumes 50mM monovalent cations', UserWarning)
t = 2 * (n['A'] + n['T']) + 4 * (n['G'] + n['C'])
# calculate according to Wallace
elif method == 'wallace':
t = 64.9 + 41 * (n['G'] + n['C'] - 16.4) / (n['G'] + n['C'] + n['A'] + n['T'])
# calculate according to the salt adjusted method
elif method == 'salt-adjusted':
total = n['G'] + n['C'] + n['A'] + n['T']
t = 100.5 + (41 * (n['G'] + n['C']) / total) - (820 / total) + (16.6 * math.log10(sodium))
# calculate according to the thermodynamical nearest neighbor model
elif method == 'nearest-neighbor':
# calculate the initial enthalpy and entropy
enthalpy = initial_enthalpy[self._seq[0]] + initial_enthalpy[self._seq[-1]]
entropy = initial_entropy[self._seq[0]] + initial_entropy[self._seq[-1]]
            # add the salt correction to the entropy term
entropy += salt_correction_factor / 310 * 1000 * math.log(sodium, math.e) * self._length
for i in range(self._length - 1):
enthalpy += formation_enthalpy[self._seq[i:i + 2]]
entropy += formation_entropy[self._seq[i:i + 2]]
t = enthalpy / (entropy + gas_constant * math.log(concentration / 4, math.e)) - 273.15
else:
raise ValueError('method <{}> is not implemented. Use help to see available methods'.format(method))
return round(t, 2)
if __name__ == '__main__':
pass
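    # A minimal usage sketch (illustrative only): it assumes the module-level codon tables,
    # base pairings, alphabets and mass tables defined earlier in this file.
    dna = DNASequence('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG', id='demo')
    print(dna.gc(), dna.atgc())                   # GC content and AT/GC ratio
    rna = dna.transcribe()                        # DNA -> RNA
    protein = rna.translate()                     # translation starts at the first AUG
    print(protein, protein.mass(AMINOACIDS_MONO_MASS))
    primer = Primer('CATGCCATGGAAAAACGGGCGATTTATCC')
    print(primer.melting_temp())                  # nearest-neighbor melting temperature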
|
FloBay/PyOmics
|
PyOmics/Sequence/sequences.py
|
Python
|
bsd-3-clause
| 82,389
|
[
"Dalton"
] |
1b72cdcff2ecbac940f5fcd1e83dbd203b14f553e604c4c8746ef4d50f4e153b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
'''
SigmaPiConnection
'''
from dana import *
class SigmaPiConnection(Connection):
def __init__(self, source=None, modulator=None, target=None, scale=1.0):
Connection.__init__(self, source, target)
self._scale = scale
# Get actual modulator
names = modulator.dtype.names
        if names is None:
self._actual_modulator = modulator
else:
self._actual_modulator = (modulator[names[0]])
def output(self):
src = self._actual_source
mod = self._actual_modulator
tgt = self._actual_target
if len(tgt.shape) == len(src.shape) == len(mod.shape) == 1:
            return convolve1d(src, mod[::-1])*self._scale  # flip the modulator, consistent with the 2-D case
elif len(tgt.shape) == len(src.shape) == len(mod.shape) == 2:
return convolve2d(src,mod[::-1,::-1])*self._scale
else:
            raise NotImplementedError
# 1 dimension
# -----------
n = 100
src = 1.00*gaussian((n,), 10.0/float(n), +0.5) \
+ 1.00*gaussian((n,), 5.0/float(n), -0.5) \
+ 0.05*rnd.random((n,))
cmd = gaussian((n,), 3.0/float(n), 0.25)
tgt = np.zeros((n,))
SigmaPiConnection(src,cmd,tgt,scale=0.1).propagate()
plt.subplot(3,1,1), plt.plot(src), plt.title('Input')
plt.subplot(3,1,2), plt.plot(cmd), plt.title('Command')
plt.subplot(3,1,3), plt.plot(tgt), plt.title('Output')
plt.show()
# 2 dimensions
# ------------
n = 50
src = 1.00*gaussian((n,n), 10.0/float(n), (+0.5,+0.5)) \
+ 0.50*gaussian((n,n), 5.0/float(n), (-0.5,-0.5)) \
+ 0.05*rnd.random((n,n))
cmd = gaussian((n,n), 5.0/float(n), (0.5,0.25))
tgt = np.zeros((n,n))
SigmaPiConnection(src,cmd,tgt,scale=0.1).propagate()
plt.figure(figsize=(18,6))
plt.subplot(1,3,1), plt.imshow(src), plt.title('Input')
plt.subplot(1,3,2), plt.imshow(cmd), plt.title('Command')
plt.subplot(1,3,3), plt.imshow(tgt), plt.title('Output')
plt.show()
|
rougier/dana
|
examples/sigmapi.py
|
Python
|
bsd-3-clause
| 3,695
|
[
"Gaussian"
] |
86b69b7642bfe18f388bdeb1b3a4dbd29e7251e4fe0f35769192c069b7fbaca5
|
#!/usr/bin/python
# coding: utf-8
import os
from django.conf import settings
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from sauceclient import SauceClient
from selenium import webdriver
class ProcurementTestCase(StaticLiveServerTestCase):
def setUp(self):
# this is how you set up a test to run on Sauce Labs
username = settings.SAUCELABS_USER
try:
key = os.environ["SAUCE_ACCESS_KEY"]
except KeyError:
key = settings.SAUCELABS_KEY
# print("DEBUG: [{0}] and [{1}]".format(key, settings.ON_TRAVIS_CI))
if key != '' and settings.ON_TRAVIS_CI:
desired_cap = {
'platform': "Mac OS X 10.11",
'browserName': "firefox",
'tunnel-identifier': os.environ["TRAVIS_JOB_NUMBER"],
"build": os.environ["TRAVIS_BUILD_NUMBER"],
"tags": [os.environ["TRAVIS_PYTHON_VERSION"], "CI"]
}
self.browser = webdriver.Remote(
command_executor='http://{0}:{1}@localhost:4445/wd/hub'.format(username, key),
desired_capabilities=desired_cap
)
self.sauce_client = SauceClient(username, key)
else:
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(10)
def tearDown(self):
# If all goes well, and this is remote, update...
try:
self.sauce_client.jobs.update_job(self.browser.session_id, passed=True)
except AttributeError:
pass
# AttributeError: 'ProcurementTestCase' object has no attribute 'sauce_client'
self.browser.quit()
# pass
def test_access_admin(self):
base_uri = 'http://localhost:4445/wd/hub' if settings.ON_TRAVIS_CI else self.live_server_url
print("DEBUG: TESTING: base_uri={0}".format(base_uri))
# Visit the website
home_page = self.browser.get(base_uri + '/admin/')
brand_element = self.browser.find_element_by_id('site-name')
self.assertEqual('Django administration', brand_element.text)
def test_external_access(self):
# EXTERNAL TESTING
# self.browser.implicitly_wait(10)
self.browser.get("http://www.google.com")
if "Google" not in self.browser.title:
raise Exception("Unable to load google page!")
# elem = self.browser.find_element_by_name("q")
# elem.send_keys("Sauce Labs")
# elem.submit()
# print(self.browser.title)
# Alice locates the holiday booking website
# Alice logs into the website
# Alice can see how many days of leave she has available
# Alice enters the start and end dates for time off
# Alice can see how many days are being requested
# Alice is told her request is now pending
# David is alerted to Alice's request
# David authorises Alice's request
# Alice is notified of the request confirmation
# Alice is notified of how many days of leave she has remaining
|
marshalc/guerdon
|
guerdon/holiday/tests/test_ui.py
|
Python
|
gpl-2.0
| 3,098
|
[
"VisIt"
] |
3b4655fb56f215af32061bc045fad4843c69381cdc84976fc1522d0be3ef5c57
|