| text (string) | meta (dict) |
|---|---|
from dummyprobe import DummyProbe
import subprocess
import re
import platform
import org.joda.time.DateTime as DateTime
import logging
logger = logging.getLogger(__name__)
class LinuxNetstatS(DummyProbe):
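    """Parse `netstat -s` output into per-counter documents.

    Each stats group header (e.g. "Tcp:") becomes the "class" field and
    every counter line beneath it is emitted as a metric/value document.
    """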
def initialize(self):
self.regexClass = re.compile(r'^([a-zA-Z0-9]+):$')
self.regexSubClass = re.compile(r'([a-zA-Z0-9 ,]+):$')
self.regexValueColon = re.compile(r'^[\t ]+(\w+):[\t ]+(\d+)')
self.regexValueAtStart = re.compile(r'^[\t ]+(\d+)[\t ]+(.*)')
self.regexMatchDigits = re.compile(r'(\d+)')
if (self.getInputProperty("command") != None):
self.cmd = self.getInputProperty("command")
else:
self.cmd = "netstat -s"
def tick(self):
stream = subprocess.Popen(self.cmd, shell=True, bufsize=0, stdout=subprocess.PIPE)
dt = str(DateTime())
ps = 0 #parser state
fields = []
state = 0
out = {}
nowStr = self.getCycleProperty("startdt")
for line in stream.stdout:
line = line.rstrip()
out["@timestamp"] = nowStr
out["host"] = platform.node()
matchClass = re.search(self.regexClass, line)
if (matchClass):
logger.debug("Matched stats group")
out["class"] = matchClass.group(1).lstrip()
nowStr = str(DateTime())
state = 10
logger.debug(out)
elif (state == 10):
out["value"] = None
matchColon = re.search(self.regexValueColon, line)
if (matchColon):
out["metric"] = matchColon.group(1).lstrip()
out["value"] = float(matchColon.group(2))
else:
matchValueAtStart = re.search(self.regexValueAtStart, line)
if (matchValueAtStart):
out["value"] = float(matchValueAtStart.group(1))
out["metric"] = matchValueAtStart.group(2).lstrip()
else:
matchValueAnywhere = re.search(self.regexMatchDigits, line)
if (matchValueAnywhere):
out["value"] = float(matchValueAnywhere.group(1))
out["metric"] = re.sub(self.regexMatchDigits, "", line).lstrip()
if (out["value"] != None):
self.processData(out)
| {
"repo_name": "filipealmeida/probespawner",
"path": "netstats.py",
"copies": "1",
"size": "2058",
"license": "unlicense",
"hash": -2877970635223615000,
"line_mean": 30.1818181818,
"line_max": 84,
"alpha_frac": 0.656462585,
"autogenerated": false,
"ratio": 3.013177159590044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.894520929638877,
"avg_score": 0.044886089640254714,
"num_lines": 66
} |
from dummyprobe import DummyProbe
import subprocess
import re
import platform
import org.joda.time.DateTime as DateTime
import logging
logger = logging.getLogger(__name__)
#Active Internet connections (w/o servers)
#Proto Recv-Q Send-Q Local Address Foreign Address State
class LinuxNetstatNTC(DummyProbe):
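    """Parse the `netstat -ntc` connection table into documents.

    Rows below an "Active Internet connections" header are split into the
    fields listed in initialize(), with the local and foreign addresses
    further split into ip and port.
    """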
def initialize(self):
self.regexHeader = re.compile(r'^Proto[\t ]+Recv-Q[\t ]+Send-Q[\t ]+Local Address[\t ]+Foreign Address[\t ]+State')
self.regexActiveHeader = re.compile(r'^Active (\w+)')
self.regexBoundary = re.compile(r'[\t ]+')
self.regexIpSplit = re.compile(r'(.*):(.+)$')
self.fields = ['protocol', 'receive-q', 'send-q', 'local', 'foreign', 'state', 'user', 'inode']
if (self.getInputProperty("command") != None):
self.cmd = self.getInputProperty("command")
else:
self.cmd = "netstat -ntc"
def tick(self):
stream = subprocess.Popen(self.cmd, shell=True, bufsize=0, stdout=subprocess.PIPE)
dt = str(DateTime())
ps = 0 #parser state
fields = []
state = 0
out = {}
nowStr = self.getCycleProperty("startdt")
for line in stream.stdout:
line = line.rstrip()
matchActiveHeader = re.search(self.regexActiveHeader, line)
if (matchActiveHeader):
out = {}
if (matchActiveHeader.group(1) == 'Internet'):
state = 5
else:
state = 0
elif (state == 5):
matchHeader = re.search(self.regexHeader, line)
if (matchHeader):
state = 10
out["@timestamp"] = nowStr
out["host"] = platform.node()
out["class"] = "tcpconnections"
elif (state == 10):
idx = 0
values = re.split(self.regexBoundary, line)
for value in values:
field = self.fields[idx]
if ((field == 'receive-q') or (field == 'send-q')):
values[idx] = float(value)
elif (field == 'local'):
pair = re.search(self.regexIpSplit, value)
out['localip'] = pair.group(1)
out['localport'] = float(pair.group(2))
elif (field == 'foreign'):
pair = re.search(self.regexIpSplit, value)
out['foreignip'] = pair.group(1)
out['foreignport'] = float(pair.group(2))
out[field] = values[idx]
idx+=1
self.processData(out)
| {
"repo_name": "filipealmeida/probespawner",
"path": "netstatntc.py",
"copies": "1",
"size": "2282",
"license": "unlicense",
"hash": 4101649128434704400,
"line_mean": 30.2602739726,
"line_max": 117,
"alpha_frac": 0.6393514461,
"autogenerated": false,
"ratio": 3.079622132253711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4218973578353711,
"avg_score": null,
"num_lines": null
} |
from dummyprobe import DummyProbe
import subprocess
import re
import platform
import org.joda.time.DateTime as DateTime
#top - 19:39:32 up 2 days, 21:42, 7 users, load average: 1,99, 1,34, 0,92
#Threads: 811 total, 2 running, 776 sleeping, 0 stopped, 33 zombie
#%Cpu(s): 5,7 us, 3,8 sy, 0,0 ni, 89,4 id, 1,0 wa, 0,0 hi, 0,2 si, 0,0 st
#KiB Mem: 3071096 total, 2722196 used, 348900 free, 11716 buffers
#KiB Swap: 3111932 total, 680060 used, 2431872 free. 438492 cached Mem
#
# PID PPID nTH USER PR NI VIRT RES SHR SWAP S %CPU P %MEM TIME+ WCHAN COMMAND
# 2165 2092 7 fra 20 0 770068 108952 17272 107068 R 4,3 0 3,5 93:38.70 wait_seqno cinnamon
import logging
logger = logging.getLogger(__name__)
class LinuxTopProbe(DummyProbe):
def initialize(self):
#top - 10:55:53 up 40 min, 3 users, load average: 1,79, 1,06, 0,82
self.regexHeader = re.compile(r'^top\ +-\ +(\d+:\d+:\d+)')
self.regexLoad = re.compile(r'(\d+)[\t ]+users,[\t ]+load average:[\t ]+(\d+[.,]?\d+),[\t ]+(\d+[.,]?\d+),[\t ]+(\d+[.,]?\d+)')
self.regexEmptyline = re.compile(r'^$')
self.regexTasks = re.compile(r'^Tasks:')
self.regexThreads = re.compile(r'^Threads:')
self.regexCpu = re.compile(r'^%?Cpu')
self.regexMemory = re.compile(r'Mem:')
self.regexSwap = re.compile(r'Swap:')
self.regexFields = re.compile(r'PID|PPID|USER|COMMAND')
self.regexCommaBoundary = re.compile(r',[\t ]+')
self.regexComma = re.compile(r',')
self.regexColon = re.compile(r':')
self.regexLtrim = re.compile(r'^[\t ]+')
self.regexRtrim = re.compile(r'[\t ]+$')
self.regexBoundary = re.compile(r'[\t %]+')
self.regexHeaderDesc = re.compile(r'.+:')
self.processMetrics = {'TIME+':1, '%MEM':1, 'SHR':1, 'NI':1, 'RES':1, 'SWAP':1, 'VIRT':1, 'P':1, '%CPU':1, 'CODE':1, 'DATA':1, 'USED':1, 'nDRT':1, 'nMaj':1, 'nMin':1, 'nTH':1, 'vMj':1, 'vMn':1, 'CPU':1, 'MEM':1}
if (self.getInputProperty("command") != None):
self.cmd = self.getInputProperty("command")
else:
self.cmd = "top -Hbi -n20 -d5 -w512"
def computeValue(self, value):
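        """Convert top's human-readable numbers to floats.

        The k/m/g suffixes scale by powers of 1024 and a trailing '%' is
        dropped, e.g. '2.5m' -> 2621440.0.
        """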
if (isinstance(value, str)):
if (value[-1] == 'k'):
value = float(value[:-1]) * 1024
elif (value[-1] == 'm'):
value = float(value[:-1]) * 1024 * 1024
elif (value[-1] == 'g'):
value = float(value[:-1]) * 1024 * 1024 * 1024
elif (value[-1] == '%'):
value = float(value[:-1])
else:
value = float(value)
return value
def storeHeaderLine(self, line, out):
data = {}
line = re.sub(self.regexHeaderDesc, "", line)
line = re.sub(self.regexLtrim, "", line)
line = re.sub(self.regexRtrim, "", line)
pairs = re.split(self.regexCommaBoundary, line)
for pair in pairs:
values = re.split(self.regexBoundary, pair)
            val = self.computeValue(re.sub(self.regexComma, ".", values[0]))
key = values[1]
for k in out:
data[k] = out[k]
data["metric"] = key
data["value"] = val
self.processData(data)
def tick(self):
stream = subprocess.Popen(self.cmd, shell=True, bufsize=0, stdout=subprocess.PIPE)
#stream = subprocess.Popen('cat top.command.out.txt', shell=True, bufsize=0, stdout=subprocess.PIPE)
dt = str(DateTime())
ps = 0 #parser state
fields = []
state = 0
nowStr = self.getCycleProperty("startdt")
for line in stream.stdout:
line = line.rstrip()
out = {}
out["@timestamp"] = nowStr
out["host"] = platform.node()
matchHeader = re.match(self.regexHeader, line)
if (matchHeader):
logger.debug("Matched header")
matchLoad = re.search(self.regexLoad, line)
out["class"] = "Uptime"
nowStr = str(DateTime())
if (matchLoad):
self.processData({ "@timestamp": out['@timestamp'], "host": out["host"], "class": out["class"], "metric": "users", "value": float(re.sub(self.regexComma, ".", matchLoad.group(1))) })
self.processData({ "@timestamp": out['@timestamp'], "host": out["host"], "class": out["class"], "metric": "load1", "value": float(re.sub(self.regexComma, ".", matchLoad.group(2))) })
self.processData({ "@timestamp": out['@timestamp'], "host": out["host"], "class": out["class"], "metric": "load5", "value": float(re.sub(self.regexComma, ".", matchLoad.group(3))) })
self.processData({ "@timestamp": out['@timestamp'], "host": out["host"], "class": out["class"], "metric": "load15", "value": float(re.sub(self.regexComma, ".", matchLoad.group(4))) })
state = 10
if (state == 10):
matchTasks = re.match(self.regexTasks, line)
if (matchTasks):
logger.debug("Matched Tasks")
out["class"] = "Tasks"
self.storeHeaderLine(line, out)
state = 20
if (state == 10):
matchThreads = re.match(self.regexThreads, line)
if (matchThreads):
logger.debug("Matched Threads")
out["class"] = "Threads"
self.storeHeaderLine(line, out)
state = 20
if (state == 20):
matchCpu = re.match(self.regexCpu, line)
if (matchCpu):
logger.debug("Matched Cpu")
out["class"] = "Cpu"
self.storeHeaderLine(line, out)
state = 30
elif (state == 30):
matchMem = re.search(self.regexMemory, line)
if (matchMem):
logger.debug("Matched Memory")
out["class"] = "Memory"
self.storeHeaderLine(line, out)
state = 40
elif (state == 40):
matchSwap = re.search(self.regexSwap, line)
if (matchSwap):
logger.debug("Matched Swap")
out["class"] = "Swap"
line = re.sub(r'free\.', "free,", line)
self.storeHeaderLine(line, out)
state = 50
elif (state == 50):
matchFields = re.search(self.regexFields, line)
if (matchFields):
logger.debug("Matched Fields")
line = re.sub(self.regexLtrim, "", line)
fields = re.split(self.regexBoundary, line)
state = 60
elif (state == 60):
if (re.match(self.regexEmptyline, line)):
logger.debug("Empty line found")
else:
line = re.sub(self.regexLtrim, "", line)
values = re.split(self.regexBoundary, line)
idx = 0
data = {}
out["class"] = "Process"
logger.debug(fields)
for field in fields:
if (field not in self.processMetrics):
out[field] = values[idx]
else:
if (field == 'TIME+'):
value = 0
parts = re.split(self.regexColon, re.sub(self.regexComma, ".", values[idx]))
multiplier = 1
while (len(parts) > 0):
seconds = float(parts.pop())
value = value + seconds * multiplier
multiplier = multiplier * 60
data[field] = value
else:
data[field] = self.computeValue(re.sub(self.regexComma, ".", values[idx]))
idx += 1
for metric in data:
out["metric"] = metric
out["value"] = float(data[metric])
self.processData(out)
| {
"repo_name": "filipealmeida/probespawner",
"path": "linuxtopprobe.py",
"copies": "1",
"size": "6816",
"license": "unlicense",
"hash": -1879543326747436800,
"line_mean": 37.5084745763,
"line_max": 213,
"alpha_frac": 0.6088615023,
"autogenerated": false,
"ratio": 2.823529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39323909140647056,
"avg_score": null,
"num_lines": null
} |
from fbuild.builders.c import guess_static
from fbuild.builders import find_program
from fbuild.config import c as c_test
from fbuild.target import register
from fbuild.record import Record
from fbuild.path import Path
import fbuild.db
from optparse import make_option
import re
def pre_options(parser):
group = parser.add_option_group('config options')
group.add_options((
make_option('--buildtype', help='The build type',
choices=['debug', 'release'], default='debug'),
make_option('--lua', help='Use the given Lua executable'),
make_option('--cc', help='Use the given C compiler'),
make_option('--cflag', help='Pass the given flag to the C++ compiler',
action='append', default=[]),
make_option('--use-color', help='Use colored output',
action='store_true'),
make_option('--release', help='Build in release mode',
action='store_true', default=False),
))
class Libcut_h(c_test.Test):
libcut_h = c_test.header_test('libcut.h')
class DynAsmBuilder(fbuild.db.PersistentObject):
def __init__(self, ctx, *, exe=None, defs=[]):
self.ctx = ctx
self.exe = find_program(ctx, exe or ['lua', 'luajit'])
self.defs = defs
@fbuild.db.cachemethod
def translate(self, src: fbuild.db.SRC, dst) -> fbuild.db.DST:
dst = Path.addroot(dst, self.ctx.buildroot)
dst.parent.makedirs()
cmd = [self.exe, 'dynasm/dynasm.lua']
for d in self.defs:
cmd.extend(('-D', d))
cmd.extend(('-o', dst))
cmd.append(src)
self.ctx.execute(cmd, 'dynasm', '%s -> %s' % (src, dst), color='yellow')
return dst
def get_target_arch(ctx, c):
ctx.logger.check('determining target architecture')
prog = '''
#include <stdio.h>
#include "rejit.h"
#if RJ_X86
#define ARCH "i386"
#elif RJ_X64
#define ARCH "x86_64"
#else
#error unsupported target architecture
#endif
int main() {
puts(ARCH);
return 0;
}
'''
try:
stdout, stderr = c.tempfile_run(prog, stderr=None)
except fbuild.ExecutionError as e:
ctx.logger.failed()
raise fbuild.ConfigFailed('cannot determine target architecture')
else:
arch = stdout.decode().strip()
ctx.logger.passed(arch)
return arch
def find_headerdoc(ctx):
try:
headerdoc2html = find_program(ctx, ['headerdoc2html'])
gatherheaderdoc = find_program(ctx, ['gatherheaderdoc'])
except fbuild.ConfigFailed:
ctx.logger.failed('cannot find headerdoc; will not generate docs')
return None
else:
return Record(headerdoc2html=headerdoc2html,
gatherheaderdoc=gatherheaderdoc)
@fbuild.db.caches
def configure(ctx):
flags = ['-Wall', '-Werror']+ctx.options.cflag
testflags = []
defs = []
kw = {}
if ctx.options.use_color:
flags.append('-fdiagnostics-color')
if ctx.options.release:
kw['optimize'] = True
kw['macros'] = ['NDEBUG']
else:
kw['debug'] = True
kw['macros'] = ['DEBUG']
c = guess_static(ctx, exe=ctx.options.cc, flags=flags, includes=['utf', 'src'],
platform_options=[
({'posix'}, {'external_libs+': ['rt']}),
({'gcc'}, {'flags+': ['-Wno-maybe-uninitialized']}),
({'clang'}, {'flags+': ['-Wno-unknown-warning-option']}),
],
**kw)
arch = get_target_arch(ctx, c)
if arch == 'x86_64':
defs.append('X64')
    elif re.match(r'i\d86', arch):
# X86 is implemented in the x86_64.dasc file.
arch = 'x86_64'
else:
assert 0, "get_target_arch returned invalid architecture '%s'" % arch
dasm = DynAsmBuilder(ctx, exe=ctx.options.lua, defs=defs)
if Libcut_h(c).libcut_h:
ctx.logger.passed('found libcut.h; will build tests')
tests = True
testflags.append('-std=gnu11')
else:
ctx.logger.failed('cannot find libcut.h; will not build tests')
tests = False
headerdoc = find_headerdoc(ctx)
return Record(dasm=dasm, c=c, arch=arch, tests=tests, testflags=testflags,
headerdoc=headerdoc)
def build(ctx):
rec = configure(ctx)
src = rec.dasm.translate('src/x86_64.dasc', 'codegen.c')
rejit = rec.c.build_lib('rejit', Path.glob('src/*.c') + Path.glob('utf/*.c'),
includes=['.', ctx.buildroot])
rec.c.build_exe('bench', ['bench.c'], libs=[rejit])
rec.c.build_exe('ex', ['ex.c'], libs=[rejit])
if rec.tests:
rec.c.build_exe('tst', ['tst.c'], cflags=rec.testflags, libs=[rejit])
@register()
def docs(ctx):
rec = configure(ctx)
if rec.headerdoc is None:
raise fbuild.Error("Fbuild wasn't configured with headerdoc support.")
output = ctx.buildroot / 'docs'
rejit_h = Path('src/rejit.h')
ctx.execute([rec.headerdoc.headerdoc2html, '-o', output, rejit_h],
'headerdoc2html', '%s -> %s' % (rejit_h, output),
color='compile')
ctx.execute([rec.headerdoc.gatherheaderdoc, output], 'gatherheaderdoc',
output, color='link')
| {
"repo_name": "kirbyfan64/rejit",
"path": "fbuildroot.py",
"copies": "1",
"size": "5316",
"license": "mpl-2.0",
"hash": 3925079143127997000,
"line_mean": 31.4146341463,
"line_max": 83,
"alpha_frac": 0.5949962378,
"autogenerated": false,
"ratio": 3.4452365521710955,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45402327899710954,
"avg_score": null,
"num_lines": null
} |
from __future__ import with_statement
from pprint import pprint
#from databaseprobe import DatabaseProbe
from threading import Thread
from java.lang import Thread as JThread, InterruptedException
from java.util.concurrent import ExecutionException
from java.util.concurrent import Executors, TimeUnit
from java.util.concurrent import Executors, ExecutorCompletionService
from time import sleep
import time
import datetime
import getopt, sys
import traceback
import com.xhaus.jyson.JysonCodec as json
import sys
import logging
from logging.config import fileConfig
fileConfig('probespawner.ini')
logger = logging.getLogger(__name__)
configurationLiterals = ["verbose", "help", "configuration="];
configurationHelp = ["Increase verbosity of messages", "Display help", "JSON configuration file"];
configurationFile = "some.feeder.config.json";
probeListStr = "databaseprobe.py,jmxprobe.py";
json_string = "theres is no JSON"
def usage():
print "USAGE:"
for index in range(len(configurationLiterals)):
print " --" + configurationLiterals[index] + " : " + configurationHelp[index];
def shutdown_and_await_termination(pool, timeout):
pool.shutdown()
try:
if not pool.awaitTermination(timeout, TimeUnit.SECONDS):
pool.shutdownNow()
if (not pool.awaitTermination(timeout, TimeUnit.SECONDS)):
print >> sys.stderr, "Pool did not terminate"
except InterruptedException, ex:
# (Re-)Cancel if current thread also interrupted
pool.shutdownNow()
# Preserve interrupt status
Thread.currentThread().interrupt()
try:
opts, args = getopt.getopt(sys.argv[1:], "vhc:", configurationLiterals)
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose = True
if o in ("-h", "--help"):
usage()
elif o in ("-c", "--configuration"):
configurationFile = a
#TODO: split and load libs
else:
assert False, "Unknown option"
try:
with open(configurationFile) as data_file:
json_string = data_file.read()
except EnvironmentError, err:
print str(err)
usage()
sys.exit(3)
try:
config = json.loads(json_string.decode('utf-8'))
except:
print "JSON from file '" + configurationFile + "' is malformed."
e = sys.exc_info()[0]
print str(e)
sys.exit(4)
pool = Executors.newFixedThreadPool(len(config["input"]))
ecs = ExecutorCompletionService(pool)
def scheduler(roots):
for inputConfig in roots:
yield inputConfig
def getClassByName(module, className):
if not module:
if className.startswith("services."):
className = className.split("services.")[1]
l = className.split(".")
m = __services__[l[0]]
return getClassByName(m, ".".join(l[1:]))
elif "." in className:
l = className.split(".")
m = getattr(module, l[0])
return getClassByName(m, ".".join(l[1:]))
else:
return getattr(module, className)
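# For example, getClassByName(probemodule, "LinuxTopProbe") returns that class
# object; dotted names such as "pkg.mod.Cls" are resolved one attribute at a
# time.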
for inputConfig in scheduler(config["input"]):
outputs = {}
for output in config[inputConfig]["output"]:
outputs[output] = config[output]
module = config[inputConfig]['probemodule']['module']
name = config[inputConfig]['probemodule']['name']
#TODO: consider input singletons
#from config[inputConfig]['probemodule']['module'] import config[inputConfig]['probemodule']['name']
probemodule = __import__(module, globals(), locals(), [''], -1)
probeclass = getClassByName(probemodule, name)
config[inputConfig]["__inputname__"] = inputConfig
obj = probeclass(config[inputConfig], outputs)
ecs.submit(obj)
workingProbes = len(config["input"])
#TODO: handle signals for stopping, thread exits, etc.
while workingProbes > 0:
result = "No result"
try:
result = ecs.take().get()
except InterruptedException, ex:
traceback.print_exc()
pprint(ex)
except ExecutionException, ex:
traceback.print_exc()
pprint(ex)
print result
workingProbes -= 1
print "shutting threadpool down..."
shutdown_and_await_termination(pool, 5)
print "done"
sys.exit(1)
| {
"repo_name": "filipealmeida/probespawner",
"path": "probespawner.py",
"copies": "1",
"size": "4245",
"license": "unlicense",
"hash": -2014425140416496000,
"line_mean": 29.9854014599,
"line_max": 104,
"alpha_frac": 0.6815076561,
"autogenerated": false,
"ratio": 3.675324675324675,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9731380660437823,
"avg_score": 0.02509033419737038,
"num_lines": 137
} |
from __future__ import with_statement
import os.path
import threading
import sys
import logging
from logging.config import fileConfig
from java.util.concurrent import Callable
from com.ziclix.python.sql import PyConnection
from com.ziclix.python.sql import zxJDBC
import com.xhaus.jyson.JysonCodec as json
import org.apache.tomcat.jdbc.pool as dbpool
import java.util.Properties as Properties
import traceback
from dummyprobe import DummyProbe
import org.joda.time.DateTime as DateTime
logger = logging.getLogger(__name__)
class DatabaseProbe(DummyProbe):
def initialize(self):
        #Instantiate tomcat's connection pool
p = dbpool.PoolProperties()
p.setUrl(self.getInputProperty("url"))
p.setDriverClassName(self.getInputProperty("driverClassName"))
p.setUsername(self.getInputProperty("username"))
p.setPassword(self.getInputProperty("password"))
#TODO: check encoding and charset select * from nls_database_parameters where parameter='NLS_NCHAR_CHARACTERSET' - {"PARAMETER":"NLS_NCHAR_CHARACTERSET","VALUE":"AL16UTF16"}
try:
p.setMinIdle(int(self.getInputProperty("minIdle")))
except:
p.setMinIdle(2)
try:
p.setMaxIdle(int(self.getInputProperty("maxIdle")))
except:
p.setMaxIdle(2)
try:
p.setMaxAge(int(self.getInputProperty("maxAge")))
except:
p.setMaxAge(86400)
try:
p.setValidationQuery(self.getInputProperty("validationQuery"))
except:
logger.debug("Dude, no validation query for connection pool")
try:
p.setInitSQL(self.getInputProperty("initSQL"))
except:
logger.debug("Dude, no initial query for connection pool")
if self.getInputProperty("dbProperties") != None:
dbProperties = Properties()
for prop in self.getInputProperty("dbProperties"):
value = str(self.getInputProperty("dbProperties")[prop])
dbProperties.setProperty(prop, value)
logger.debug("Dude, database property found: %s = %s", prop, value)
p.setDbProperties(dbProperties)
self.metrics = {}
if self.getInputProperty("metrics") != None:
for metric in self.getInputProperty("metrics"):
self.metrics[metric] = 1
self.terms = {}
if self.getInputProperty("terms") != None:
for term in self.getInputProperty("terms"):
self.terms[term] = 1
self.jdbcPoolProperties = p
try:
self.interval = int(self.getInputProperty("interval"))
except:
self.interval = 60
try:
#open file if exists
if isinstance(self.getInputProperty("sql"), basestring) and os.path.isfile(self.getInputProperty("sql")):
with open(self.getInputProperty("sql"), "r") as ins:
self.sql = []
for line in ins:
self.sql.append({ "statement": line.rstrip() })
else:
self.sql = self.getInputProperty("sql")
except:
self.sql = []
idx = 0
for phrase in self.sql:
if "id" not in phrase:
phrase["id"] = "q" + str(idx)
idx += 1
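        # Illustrative "sql" shapes accepted above (inferred from this code,
        # not from external documentation): either a file path with one
        # statement per line, or a list such as
        #   [{"id": "q0",
        #     "statement": "SELECT * FROM t WHERE ts > ?",
        #     "parameter": ["$cycle.qlaststart"]}]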
logger.debug("Dude, queries setup: %s", self.sql)
logger.debug("Dude, connection pool setup: %s", p)
logger.debug("Dude, connection pool dbProperties: %s", p.getDbProperties())
p.setJdbcInterceptors('org.apache.tomcat.jdbc.pool.interceptor.ConnectionState;' + 'org.apache.tomcat.jdbc.pool.interceptor.StatementFinalizer')
self.datasource = dbpool.DataSource()
self.datasource.setPoolProperties(self.jdbcPoolProperties)
self.cycle["queryParameters"] = {}
def startQueryCycle(self, queryId):
self.runtime[queryId + "jodaStart"] = DateTime()
self.cycle["queryParameters"][queryId + "qstart"] = self.runtime[queryId + "jodaStart"].getMillis()
self.cycle["queryParameters"][queryId + "qstartdt"] = str(self.runtime[queryId + "jodaStart"])
def finishQueryCycle(self, queryId):
self.runtime[queryId + "jodaEnd"] = DateTime()
self.cycle["queryParameters"][queryId + "qend"] = self.runtime[queryId + "jodaEnd"].getMillis()
self.cycle["queryParameters"][queryId + "qenddt"] = str(self.runtime[queryId + "jodaEnd"])
self.cycle["queryParameters"][queryId + "qlaststart"] = self.cycle["queryParameters"][queryId + "qstart"]
self.cycle["queryParameters"][queryId + "qlaststartdt"] = self.cycle["queryParameters"][queryId + "qstartdt"]
self.cycle["queryParameters"][queryId + "qelapsed"] = self.runtime[queryId + "jodaEnd"].getMillis() - self.runtime[queryId + "jodaStart"].getMillis()
logger.info("Finished query %s cycle in %d", queryId, self.cycle["queryParameters"][queryId + "qelapsed"])
def getQueryParameter(self, queryId, paramName):
if queryId + paramName in self.cycle["queryParameters"]:
return self.cycle["queryParameters"][queryId + paramName]
else:
return None
def tick(self):
for phrase in self.sql:
self.startQueryCycle(phrase["id"])
preparedStatementParams = []
preparedStatementParamsDict = {}
logger.info(phrase["statement"])
numrows = 0
conn = PyConnection(self.datasource.getConnection())
try:
with conn.cursor(1) as cursor:
#TODO: review, index out of range very possible
if "parameter" in phrase:
logger.debug("Dude, got parameters!!")
for parameter in phrase["parameter"]:
strlist = parameter.split("$cycle.")
                            if len(strlist) > 1:
if strlist[1] in ["qend","qstart","qlaststart","qenddt","qstartdt","qlaststartdt","qelapsed"]:
value = self.getQueryParameter(phrase["id"], strlist[1])
if value == None:
logger.debug("Dude, couldn't fetch local parameter %s = %s, going global", parameter, value)
value = self.getCycleProperty(strlist[1][1:])
preparedStatementParams.append(value)
preparedStatementParamsDict[parameter] = value
logger.debug("Dude, got local parameter %s = %s", parameter, value)
else:
value = self.getCycleProperty(strlist[1])
if value == None:
value = phrase[strlist[1]]
preparedStatementParams.append(value)
preparedStatementParamsDict[parameter] = value
logger.debug("Dude, got global parameter %s = %s", parameter, value)
logger.debug(strlist[1])
logger.debug("Dude, preparing statement: %s", phrase["id"])
query = cursor.prepare(phrase["statement"])
logger.debug("Dude, executing statement: %s", phrase["id"])
cursor.execute(query, preparedStatementParams)
row = None
logger.debug("Dude, starting fetch for statement: %s", phrase["id"])
if cursor.description != None:
fields = [i[0] for i in cursor.description]
row = cursor.fetchone()
else:
fields = []
conn.commit()
self.processData({ '@timestamp':self.getQueryParameter(phrase["id"], "qstart"), "statement": phrase["statement"], "parameters": preparedStatementParamsDict } )
#TODO: process data with commit timestamp and whatnot
while row is not None:
idx = 0
out = {}
rowDict = {}
metrics = {}
terms = {}
for field in fields:
if isinstance(row[idx], str):
if self.getInputProperty("ignoreFieldIfEmptyString") and len(row[idx]) == 0:
logger.warning("Ignoring key %s due to empty value", field)
else:
rowDict[field] = row[idx]
elif isinstance(row[idx], unicode):
if self.getInputProperty("ignoreFieldIfEmptyString") and len(row[idx]) == 0:
logger.warning("Ignoring key %s due to empty value", field)
else:
rowDict[field] = row[idx]
else:
rowDict[field] = row[idx]
idx = idx + 1
for key in rowDict:
if key in self.metrics:
metrics[key] = rowDict[key]
elif key in self.terms:
terms[key] = rowDict[key]
else:
out[key] = rowDict[key]
self.processData(out)
for key in metrics:
try:
out["metric"] = key
if self.getInputProperty("decimalMark"):
metrics[key] = metrics[key].replace(self.getInputProperty("decimalMark"), ".")
out["value"] = float(metrics[key])
self.processData(out)
except Exception, ex:
logger.warning("Failure to parse %s as float for metric %s", key, metrics[key])
#self.processData(out)
if 'value' in out:
del out['value']
for key in terms:
out["metric"] = key
out["term"] = str(terms[key])
self.processData(out)
row = cursor.fetchone()
query.close()
assert query.closed
except Exception, ex:
logger.debug("\n _ _ _ \n __| |_ _ __| | ___| |\n / _` | | | |/ _` |/ _ \ |\n| (_| | |_| | (_| | __/_|\n \__,_|\__,_|\__,_|\___(_)\n")
logger.debug(ex)
raise
finally:
conn.close()
self.finishQueryCycle(phrase["id"])
| {
"repo_name": "filipealmeida/probespawner",
"path": "cooldbprobe.py",
"copies": "1",
"size": "11251",
"license": "unlicense",
"hash": 1964503222536436200,
"line_mean": 49.4529147982,
"line_max": 183,
"alpha_frac": 0.511421207,
"autogenerated": false,
"ratio": 4.609176566980746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5620597773980746,
"avg_score": null,
"num_lines": null
} |
import mozdevice
import mozlog
import unittest
from sut import MockAgent
class MkDirsTest(unittest.TestCase):
def test_mkdirs(self):
subTests = [{'cmds': [('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('info os', 'android'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/baz', 'FALSE'),
('mkdr /mnt/sdcard/baz',
'/mnt/sdcard/baz successfully created'),
('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('mkdr /mnt/sdcard/baz/boop',
'/mnt/sdcard/baz/boop successfully created')],
'expectException': False},
{'cmds': [('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('info os', 'android'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/baz', 'FALSE'),
('mkdr /mnt/sdcard/baz',
'##AGENT-WARNING## Could not create the directory /mnt/sdcard/baz')],
'expectException': True},
]
for subTest in subTests:
a = MockAgent(self, commands=subTest['cmds'])
exceptionThrown = False
try:
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=mozlog.DEBUG)
d.mkDirs('/mnt/sdcard/baz/boop/bip')
except mozdevice.DMError:
exceptionThrown = True
self.assertEqual(exceptionThrown, subTest['expectException'])
a.wait()
def test_repeated_path_part(self):
"""
Ensure that all dirs are created when last path part also found
earlier in the path (bug 826492).
"""
cmds = [('isdir /mnt/sdcard/foo', 'FALSE'),
('info os', 'android'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/foo', 'FALSE'),
('mkdr /mnt/sdcard/foo',
'/mnt/sdcard/foo successfully created')]
a = MockAgent(self, commands=cmds)
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=mozlog.DEBUG)
d.mkDirs('/mnt/sdcard/foo/foo')
a.wait()
def test_mkdirs_on_root(self):
cmds = [('isdir /', 'TRUE')]
a = MockAgent(self, commands=cmds)
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=mozlog.DEBUG)
d.mkDirs('/foo')
a.wait()
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "vladikoff/fxa-mochitest",
"path": "tests/mozbase/mozdevice/tests/sut_mkdir.py",
"copies": "3",
"size": "3002",
"license": "mpl-2.0",
"hash": -6885986192066412000,
"line_mean": 38.5,
"line_max": 100,
"alpha_frac": 0.4570286476,
"autogenerated": false,
"ratio": 4.024128686327078,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5981157333927077,
"avg_score": null,
"num_lines": null
} |
import platform
from random import randint
import atexit
import errno
#import fcntl
import os
import random
import re
import signal
import socket
import subprocess
import sys
import threading
import time
import sys
import logging
logger = logging.getLogger(__name__)
ALIVE = True
class OpenTSDB():
def __init__(self, config):
#TODO: eliminate message count ASAP, check work with RabbitMQ features
global ALIVE
self.messagecount = 0
self.config = config
if "host" in self.config:
self.host = self.config["host"]
else:
self.host = None
if "port" in self.config:
self.port = self.config["port"]
else:
self.port = None
if "username" in self.config:
self.username = self.config["username"]
else:
self.username = None
if "password" in self.config:
self.password = self.config["password"]
else:
self.password = None
if "metricPrefix" in self.config:
self.metricPrefix = self.config["metricPrefix"]
else:
self.metricPrefix = "probespawner"
self.tags = []
if "tags" in self.config:
self.tags = self.config["tags"]
if "blacklist" in self.config:
self.blacklist = self.config["blacklist"]
self.addresses = []
if "addresses" in self.config:
for address in self.config["addresses"]:
logger.info(address)
self.addresses.append(address.split(":"))
if (self.host != None and self.port != None):
self.addresses.append((self.host, self.port))
self.host = None;
self.port = None;
self.metrics = {}
if "metrics" in self.config:
for metric in self.config["metrics"]:
self.metrics[metric] = 1
self.tags.append("node=" + platform.node())
self.initialize();
#TODO: create blacklist, check tcollector.py
def pickConnection(self):
self.host, self.port = self.addresses[randint(0, len(self.addresses) - 1)]
logger.info('Selected connection: %s:%d', self.host, self.port);
def blacklistConnection(self):
return False
def verifyConnection(self):
if self.tsd is None:
return False
if self.lastVerify > time.time() - 60:
return True
if self.reconnectInterval > 0 and self.timeReconnect < time.time() - self.reconnectInterval:
try:
self.tsd.close()
except socket.error, msg:
pass # not handling that
self.timeReconnect = time.time()
return False
logger.info("Testing connection life")
try:
self.tsd.sendall('version\n')
except socket.error, msg:
self.tsd = None
self.blacklistConnection()
return False
bufsize = 4096
while ALIVE:
try:
buf = self.tsd.recv(bufsize)
except socket.error, msg:
logger.warning('Socket error %s:%d: %s',self.host, self.port, msg)
self.tsd = None
self.blacklistConnection()
return False
if len(buf) == bufsize:
continue
break
logger.info("Connection verified")
self.lastVerify = time.time()
return True
def makeConnection(self):
try_delay = 1
while ALIVE:
if self.verifyConnection():
return True
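            # Randomized exponential backoff between connection attempts,
            # kept below roughly ten minutes.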
try_delay *= 1 + random.random()
if try_delay > 600:
try_delay *= 0.5
logger.info('SenderThread blocking %0.2f seconds', try_delay)
time.sleep(try_delay)
self.pickConnection()
try:
addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
except socket.gaierror, e:
if e[0] in (socket.EAI_AGAIN, socket.EAI_NONAME,socket.EAI_NODATA):
logger.info('DNS resolution failure: %s: %s', self.host, e)
continue
raise
for family, socktype, proto, canonname, sockaddr in addresses:
try:
self.tsd = socket.socket(family, socktype, proto)
self.tsd.settimeout(15)
self.tsd.connect(sockaddr)
logger.info('Connection to %s was successful'%(str(sockaddr)))
break
except socket.error, msg:
logger.warning('Connection attempt failed to %s:%d: %s',self.host, self.port, msg)
self.tsd.close()
self.tsd = None
if not self.tsd:
logger.error('Failed to connect to %s:%d', self.host, self.port)
self.blacklistConnection()
def initialize(self):
self.tsd = None
self.lastVerify = time.time() - 60
self.reconnectInterval = 86400
self.timeReconnect = 0
return True
#TODO: handle exceptions on sendall
def writeDocument(self, data, force):
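        """Emit one telnet-style OpenTSDB line per metric present in data:

            put <metricPrefix>.<key> <epoch_millis> <value> <tag> <tag>...

        Keys of data not listed in self.metrics become key=value tags
        (after the character cleanup below).
        """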
self.makeConnection()
try:
out = ''
#out = "".join("put %s\n" % self.add_tags_to_line(line) for line in self.sendq)
#if it's not a metric, it's a tag
tags = []
for tag in self.tags:
tags.append(tag)
for key in data:
if key not in self.metrics:
if key == "@timestamp":
continue
if key in self.blacklist:
continue
val = str(data[key])
val = val.replace(":", "/")
val = val.replace("=", "/")
val = val.replace(",", "/")
key_str = key + "=" + val
key_str = key_str.replace(" ", "_")
key_str = key_str.replace("@", "")
tags.append(key_str)
tags_str = " ".join(tags)
time_str = str(int(time.time() * 1000))
for key in data:
if key in self.metrics:
out = "put " + self.metricPrefix + "." + key + " " + time_str + " " + str(data[key]) + " " + tags_str + "\n"
self.tsd.sendall(out)
self.messagecount = self.messagecount + 1
logger.debug(out)
if "metric_field" in self.config and "value_field" in self.config:
if self.config["metric_field"] in data and self.config["value_field"] in data:
out = "put " + self.metricPrefix + "." + str(data[self.config["metric_field"]]) + " " + time_str + " " + str(data[self.config["value_field"]]) + " " + tags_str + "\n"
self.tsd.sendall(out)
self.messagecount = self.messagecount + 1
logger.debug(out)
except Exception, ex:
logger.error("Some error writing document, will retry?")
logger.error(data)
logger.error(ex)
raise
#self.writeDocument(data, force)
def flush(self):
logger.info("Flushing. Total messages: %d", self.messagecount)
return True
def cleanup(self):
self.tsd.close()
self.tsd = None
return True
| {
"repo_name": "filipealmeida/probespawner",
"path": "opentsdblh.py",
"copies": "1",
"size": "6038",
"license": "unlicense",
"hash": -869015529125871700,
"line_mean": 26.3212669683,
"line_max": 171,
"alpha_frac": 0.6586618085,
"autogenerated": false,
"ratio": 3.06497461928934,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.422363642778934,
"avg_score": null,
"num_lines": null
} |
"""Any function from n inputs to m outputs"""
import logging
from itertools import zip_longest
import pypes.component
import pypes.packet
log = logging.getLogger(__name__)
def default_function(*args):
    """Pass the inputs through unchanged."""
    return args
class NMFunction(pypes.component.Component):
"""
mandatory input packet attributes:
- data: for each of the input ports
parameters:
- function: [default: merge the inputs into a list if more than one
input, then replicate over all the outputs]
output packet attributes:
- data: each of the M outputs goes to an output port
"""
# defines the type of component we're creating.
__metatype__ = 'TRANSFORMER'
def __init__(self, n=1, m=1):
# initialize parent class
pypes.component.Component.__init__(self)
# Optionally add/remove component ports
# self.remove_output('out')
self._n = n
self._m = m
self._in_ports = ["in"]
self._out_ports = ["out"]
if n > 1:
self._in_ports += ["in{0}".format(i)
for i in range(1, n)]
for port in self._in_ports:
self.add_input(port, 'input')
if m > 1:
self._out_ports += ["out{0}".format(i)
for i in range(1, m)]
for port in self._out_ports:
self.add_output(port, 'output')
# Setup any user parameters required by this component
# 2nd arg is the default value, 3rd arg is optional list of choices
self.set_parameter('function', default_function)
# log successful initialization message
log.debug('Component Initialized: %s', self.__class__.__name__)
def run(self):
# Define our components entry point
while True:
function = self.get_parameter('function')
name = function.__name__
packets = [self.receive(port)
for port in self._in_ports]
try:
args = [packet.get("data")
for packet in packets]
log.debug("%s: args %s", name, args)
results = function(*args)
log.debug("%s: results %s", name, results)
if self._m == 1:
packet = packets[0]
packet.set("data", results[0])
self.send("out", packet)
elif self._m > 1 and len(results) <= self._m:
for result, port in zip_longest(results,
self._out_ports,
fillvalue=results[-1]):
packet = pypes.packet.Packet()
for key, value in packets[0]:
packet.set(key, value)
packet.set("data", result)
log.debug("%s: sending %s to %s",
name, packet.get("data"), port)
self.send(port, packet)
else:
raise ValueError("too many results!")
except:
log.error('Component Failed: %s',
name, exc_info=True)
# yield the CPU, allowing another component to run
self.yield_ctrl()
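# Hedged illustration: with n=2, m=1 and a user-supplied function such as
#
#   def add(a, b):
#       return (a + b,)
#
# one packet arriving on each of "in" and "in1" yields a single packet on
# "out" whose "data" is the sum. The function must return a sequence, since
# its results are indexed and iterated above.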
| {
"repo_name": "Enucatl/pypes",
"path": "pypes/plugins/nm_function.py",
"copies": "1",
"size": "3356",
"license": "apache-2.0",
"hash": 5331688705115168000,
"line_mean": 33.2448979592,
"line_max": 75,
"alpha_frac": 0.4940405244,
"autogenerated": false,
"ratio": 4.584699453551913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005102040816326531,
"num_lines": 98
} |
# any
prefix_spec = '{: >25s}!!'
prefix_norm = '{} '.format(prefix_spec)
prefix_last = '{}L'.format(prefix_spec)
A1 = 'A-1'
E1 = 'E-1'
#soil
D1 = 'D-1'
D1Soil = 'D-1'
D1Interf = 'D-1'
# D1L = 'D-1'
D2Isotropic = 'D-2.Isotropic'
D2Orthotropic = 'D-2.Orthotropic'
D2Duncan = 'D-2.Duncan'
D3Duncan = 'D-3.Duncan'
D4Duncan = 'D-4.Duncan'
D2Over = 'D-2.Over'
D2Hardin = 'D-2.Hardin'
D2HardinTRIA = 'D-2.Hardin.TRIA'
D2Interface = 'D-2.Interface'
D2Composite = 'D-2.Composite'
D2MohrCoulomb = 'D-2.MohrCoulomb'
# alum
B1Alum = 'B-1.Alum'
B2AlumA = 'B-2.Alum.A'
B2AlumDWSD = 'B-2.Alum.D.WSD'
B2AlumDLRFD = 'B-2.Alum.D.LRFD'
B3AlumADLRFD = 'B-3.Alum.AD.LRFD'
# steel
B1Steel = 'B-1.Steel'
B2SteelA = 'B-2.Steel.A'
B2SteelDWSD = 'B-2.Steel.D.WSD'
B2SteelDLRFD = 'B-2.Steel.D.LRFD'
B2bSteel = 'B-2b.Steel'
B2cSteel = 'B-2c.Steel'
B2dSteel = 'B-2d.Steel'
B3SteelADLRFD = 'B-2.Steel.AD.LRFD'
# plastic
B1Plastic = 'B-1.Plastic'
B2Plastic = 'B-2.Plastic'
B3PlasticAGeneral = 'B-3.Plastic.A.General'
B3PlasticASmooth = 'B-3.Plastic.A.Smooth'
B3PlasticAProfile = 'B-3.Plastic.A.Profile'
B3bPlasticAProfile = 'B-3b.Plastic.A.Profile'
B3PlasticDWSD = 'B-3.Plastic.D.WSD'
B3PlasticDLRFD = 'B-3.Plastic.D.LRFD'
B4Plastic = 'B-4.Plastic'
| {
"repo_name": "Ricyteach/candemaker",
"path": "src/candemaker/cid/prefix/prefix.py",
"copies": "1",
"size": "1228",
"license": "bsd-2-clause",
"hash": 2238863906915841800,
"line_mean": 20.5438596491,
"line_max": 45,
"alpha_frac": 0.674267101,
"autogenerated": false,
"ratio": 1.7174825174825175,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.28917496184825175,
"avg_score": null,
"num_lines": null
} |
# #### Any project wide helper stuff sits here
from django.db import models
# #### If the sender's account currency differs, debit_number != credit_number
# and a conversion will be required.
# The following mapping holds the conversion of 1 bitcoin to the currency in
# each key. Ideally these rates would come from a third-party service API, but
# given the scope of this project the conversion rate is hardcoded with market
# values current as of 9th October, 2016.
from django.utils import timezone
CURRENCY_CONVERSION = {
"BTC": 1,
"ETH": 50.4429,
"PHP": 29848.13,
}
# #### custom Manager to return only active objects
class ActiveManager(models.Manager):
def get_queryset(self):
return super(ActiveManager, self).get_queryset().filter(is_active=True)
# #### all Models extend this class and thus have all its fields
class ModelTemplate(models.Model):
is_active = models.BooleanField(default=True)
added_dttm = models.DateTimeField(default=timezone.now, editable=False)
last_modified_dttm = models.DateTimeField(default=timezone.now)
objects = models.Manager() # The default manager.
active_objects = ActiveManager() # active objects only in queryset
class Meta:
abstract = True # making it usable only when extended
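# #### Hedged usage sketch (Wallet is illustrative, not part of this app):
# class Wallet(ModelTemplate):
#     currency = models.CharField(max_length=3)
#     balance = models.DecimalField(max_digits=20, decimal_places=8)
#
# Wallet.active_objects.all() then yields only rows with is_active=True,
# while Wallet.objects.all() yields every row.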
| {
"repo_name": "akash-dev-github/Transactions",
"path": "transactions/proj_utils.py",
"copies": "1",
"size": "1243",
"license": "mit",
"hash": 5905833292752801000,
"line_mean": 35.5588235294,
"line_max": 107,
"alpha_frac": 0.7304907482,
"autogenerated": false,
"ratio": 4.035714285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5266205033914286,
"avg_score": null,
"num_lines": null
} |
# Any recursive function can be transformed into an iterative function which
# uses an explicit stack. Here's an example:
import numpy as np


def merge_dicts(a, b):
    """Return a new dict containing the items of both a and b."""
    d = dict(a)
    d.update(b)
    return d
# Basic stack class.
class Stack(object):
def __init__(self):
self.s = []
def __len__(self):
return len(self.s)
def __repr__(self):
return '[' + ', '.join([str(x) for x in self.s]) + ']'
def top(self):
return self.s[-1]
def push(self, v):
self.s.append(v)
def pop(self):
return self.s.pop()
# This is the iterative version of the function, which uses an explicit stack.
def filter_args_iter(defs):
# Helper class for holding the state at each "frame".
class Node(object):
def __init__(self, defs, x, values):
self.defs = defs
self.x = x
self.values = values
def __repr__(self):
return 'Node({0}, {1}, {2})'.format(self.defs, self.x, self.values)
r = []
s = Stack()
s.push(Node(defs, None, {}))
while s:
t = s.pop()
if t.defs:
# "Recurse" on the next possible value of the first remaining param.
p = t.defs[0]
v = None
if t.x == None:
v = p.start
else:
v = t.x + p.step
if v < p.stop:
s.push(Node(t.defs, v, t.values))
s.push(Node(t.defs[1:], None, merge_dicts(t.values, {p.name: v})))
else:
# Otherwise, we're at a leaf node so yield the values and "return".
r.append(t.values)
return r
# This is the recursive version of the function.
def filter_args(defs):
def helper(defs, values, r):
if not defs:
# All done, return this set of values
r.append(values)
else:
# Fix value of next parameter and recurse on remaining.
p = defs[0]
for x in np.arange(p.start, p.stop, p.step):
helper(defs[1:], merge_dicts(values, {p.name : x}), r)
r = []
helper(defs, {}, r)
return r
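# A small self-check (illustrative only: ParamDef is a stand-in for whatever
# parameter-definition objects the caller uses; anything with the
# name/start/stop/step attributes read above works).
from collections import namedtuple

ParamDef = namedtuple('ParamDef', 'name start stop step')

if __name__ == '__main__':
    defs = [ParamDef('x', 0, 2, 1), ParamDef('y', 0, 2, 1)]
    # Both versions enumerate the same 2x2 parameter grid.
    print(filter_args_iter(defs))
    print(filter_args(defs))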
| {
"repo_name": "jcarreiro/jmc-python",
"path": "algorithms/recursion.py",
"copies": "1",
"size": "2054",
"license": "mit",
"hash": -2733920461054842400,
"line_mean": 28.3428571429,
"line_max": 82,
"alpha_frac": 0.517526777,
"autogenerated": false,
"ratio": 3.6161971830985915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4633723960098592,
"avg_score": null,
"num_lines": null
} |
"""Any shareable docstring components for rolling/expanding/ewm"""
from textwrap import dedent
from pandas.core.shared_docs import _shared_docs
_shared_docs = dict(**_shared_docs)
def create_section_header(header: str) -> str:
"""Create numpydoc section header"""
return "\n".join((header, "-" * len(header))) + "\n"
template_header = "Calculate the {window_method} {aggregation_description}.\n\n"
template_returns = dedent(
"""
Series or DataFrame
Return type is the same as the original object.\n
"""
).replace("\n", "", 1)
template_see_also = dedent(
"""
pandas.Series.{window_method} : Calling {window_method} with Series data.
pandas.DataFrame.{window_method} : Calling {window_method} with DataFrames.
pandas.Series.{agg_method} : Aggregating {agg_method} for Series.
pandas.DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n
"""
).replace("\n", "", 1)
args_compat = dedent(
"""
*args
For NumPy compatibility and will not have an effect on the result.\n
"""
).replace("\n", "", 1)
kwargs_compat = dedent(
"""
**kwargs
For NumPy compatibility and will not have an effect on the result.\n
"""
).replace("\n", "", 1)
kwargs_scipy = dedent(
"""
**kwargs
Keyword arguments to configure the ``SciPy`` weighted window type.\n
"""
).replace("\n", "", 1)
window_apply_parameters = dedent(
"""
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``. Can also accept a
Numba JIT function with ``engine='numba'`` specified.
.. versionchanged:: 1.0.0
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
engine : str, default None
* ``'cython'`` : Runs rolling apply through C-extensions from cython.
* ``'numba'`` : Runs rolling apply through JIT compiled code from numba.
Only available when ``raw`` is set to ``True``.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.0.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to both the ``func`` and the ``apply`` rolling aggregation.
.. versionadded:: 1.0.0
args : tuple, default None
Positional arguments to be passed into func.
kwargs : dict, default None
Keyword arguments to be passed into func.\n
"""
).replace("\n", "", 1)
numba_notes = (
"See :ref:`window.numba_engine` for extended documentation "
"and performance considerations for the Numba engine.\n\n"
)
window_agg_numba_parameters = dedent(
"""
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.3.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}``
.. versionadded:: 1.3.0\n
"""
).replace("\n", "", 1)
| {
"repo_name": "datapythonista/pandas",
"path": "pandas/core/window/doc.py",
"copies": "3",
"size": "4151",
"license": "bsd-3-clause",
"hash": -5941213573762472000,
"line_mean": 33.8823529412,
"line_max": 87,
"alpha_frac": 0.6131052758,
"autogenerated": false,
"ratio": 4.002892960462874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010171515432156274,
"num_lines": 119
} |
""" Anything to do with threading and background maintainance """
from concurrent.futures import ThreadPoolExecutor
import threading
import time
from .const import Config
from .log import l
from .hashing import int2bytes
pool = ThreadPoolExecutor(max_workers=Config.WORKERS)
class ThreadPoolMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
def __init__(self):
self.idle = threading.Event()
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
try:
self.idle.clear()
self.finish_request(request, client_address)
self.shutdown_request(request)
except: # noqa
l.exception("Exception in request handler")
self.handle_error(request, client_address)
self.shutdown_request(request)
finally:
self.idle.set()
def process_request(self, request, client_address):
"""Submit a new job to process the request."""
pool.submit(self.process_request_thread, request, client_address)
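# Hedged usage sketch: like the stdlib socketserver.ThreadingMixIn, the class
# above is meant to be combined with a server base class, e.g.
#
#   import socketserver
#   class PooledServer(ThreadPoolMixIn, socketserver.UDPServer):
#       pass
#
# The UDPServer base is illustrative; any BaseServer subclass whose
# process_request this overrides should work.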
def run_check_firewalled(dht):
""" Refresh the buckets by finding nodes near that bucket """
def task():
""" Run the task """
try:
dht.stop.wait(Config.SLEEP_WAIT)
while dht.firewalled:
dht.boot_peer.fw_ping(dht, dht.peer.id)
l.info("Executed firewall check")
if dht.stop.wait(Config.FIREWALL_CHECK):
return
except: # noqa
l.exception("run_check_firewalled failed")
raise
finally:
l.info("run_check_firewalled ended")
t = threading.Thread(target=task)
t.setDaemon(True)
t.start()
return t
def run_bucket_refresh(dht): # noqa
""" Refresh the buckets by finding nodes near that bucket """
def refresh_bucket(x):
""" Refresh a single bucket """
id_ = int2bytes(2 ** x)
dht.iterative_find_nodes(id_)
def task():
""" Run the task """
try:
while True:
for x in range(Config.ID_BITS):
refresh_bucket(x)
l.info("Refreshed bucket %d", x)
if dht.firewalled:
f = 20
else:
f = 1
if dht.stop.wait(Config.BUCKET_REFRESH * f):
return
except: # noqa
l.exception("run_bucket_refresh failed")
raise
finally:
l.info("run_bucket_refresh ended")
t = threading.Thread(target=task)
t.setDaemon(True)
t.start()
return t
def run_rpc_cleanup(dht):
""" Remove stale RPC from rpc_states dict """
def task():
""" Run the task """
try:
while True:
dht.stop.wait(Config.RPC_TIMEOUT)
with dht.rpc_states as states:
now = time.time()
remove = []
for key in states.keys():
start = states[key][0]
if (now - start) > Config.RPC_TIMEOUT:
remove.append(key)
l.info("Found %d stale rpc states", len(remove))
for key in remove:
del states[key]
if dht.stop.is_set():
return
except: # noqa
l.exception("run_rpc_cleanup failed")
raise
finally:
l.info("run_rpc_cleanup ended")
t = threading.Thread(target=task)
t.setDaemon(True)
t.start()
return t
| {
"repo_name": "ganwell/dht3k",
"path": "dht3k/threads.py",
"copies": "1",
"size": "3870",
"license": "mit",
"hash": -6429798671365571000,
"line_mean": 28.7692307692,
"line_max": 73,
"alpha_frac": 0.5312661499,
"autogenerated": false,
"ratio": 4.271523178807947,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008170736431605996,
"num_lines": 130
} |
""" Any utility functions.
This module holds the owner data along with a handful of
command specific functions and helpers.
"""
import logging
import re
import shlex
from enum import Enum
from functools import wraps
from io import BytesIO
import aiohttp
import discord
from asyncio import subprocess as sub
member_mention_pattern = re.compile(r"<@!?(?P<id>\d+)>")
channel_mention_pattern = re.compile(r"<#(?P<id>\d+)>")
markdown_code_pattern = re.compile(r"^(?P<capt>`*)(?:[a-z]+\n)?(?P<code>.+)(?P=capt)$", flags=re.DOTALL)
http_url_pattern = re.compile(r"(?P<protocol>https?://)(?P<host>[a-z0-9-]+\.[a-z0-9-.]+/?)(?P<sub>\S+)?", flags=re.IGNORECASE)
identifier_prefix = re.compile(r"[a-zA-Z_]")
client = None # Declare the Client. For python 3.6: client: discord.Client
def set_client(c: discord.Client):
""" Assign the client to a variable. """
global client
client = c
class Annotate(Enum):
""" Command annotation enum.
Annotate a command argument with one of these to get the commented result.
"""
Content = 1 # Return all the content after command and/or arguments with Message.content
LowerContent = 2 # Same as above but returns the contents in lowercase
CleanContent = 3 # Return all the content after command and/or arguments with Message.clean_content
LowerCleanContent = 4 # Same as above but returns the contents in lowercase
User = Member = 5 # Return a member (uses utils.find_member with steps=3)
Channel = 6 # Return a channel (uses utils.find_channel with steps=3)
VoiceChannel = 7 # Return a voice channel (uses utils.find_channel with steps=3 and channel_type="voice")
Self = 8 # Used as a default for Member/Channel annotations and returns the message.author/message.channel
Code = 9 # Get formatted code (like Content but extracts any code)
def int_range(f: int=None, t: int=None):
""" Return a helper function for checking if a str converted to int is in the
specified range, f (from) - t (to).
:param f: From: where the range starts. -inf if omitted.
:param t: To: where the range ends. +inf if omitted.
"""
def wrapped(arg: str):
# Convert to int and return None if unsuccessful
try:
num = int(arg)
except ValueError:
return None
# Compare the lowest and highest numbers
        if (f is not None and num < f) or (t is not None and num > t):
return None
# The string given is converted to a number and fits the criteria
return num
return wrapped
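# For example, int_range(1, 100) returns a converter that maps "42" -> 42 and
# both "0" and "foo" -> None, suitable as a command argument annotation.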
def choice(*options: str, ignore_case: bool=True):
""" Return a helper function for checking if the argument is either of the
given options.
:param options: Any number of strings to choose from.
:param ignore_case: Do not compare case-sensitively.
"""
def wrapped(arg: str):
# Compare lowercased version
if ignore_case:
return arg if arg.lower() in [s.lower() for s in options] else None
else:
return arg if arg in options else None
return wrapped
def placeholder(_: str):
""" Return False. Using this as a command argument annotation will always fail
the command. Useful for groups.
"""
return False
async def confirm(message: discord.Message, text: str, timeout: int=10):
""" Have the message author confirm their action. """
await client.send_message(message.channel, text + " [{}{}]".format(str(timeout) + "s " if timeout else "", "yes/no"))
reply = await client.wait_for_message(timeout, author=message.author, channel=message.channel,
check=lambda m: m.content.lower() in ("y", "yes", "n", "no"))
if reply and reply.content.lower() in ("y", "yes"):
return True
return False
def permission(*perms: str):
""" Decorator that runs the command only if the author has the specified permissions.
perms must be a string matching any property of discord.Permissions.
NOTE: this function is deprecated. Use the command 'permissions' attribute instead.
"""
def decorator(func):
@wraps(func)
async def wrapped(message: discord.Message, *args, **kwargs):
member_perms = message.author.permissions_in(message.channel)
if all(getattr(member_perms, perm, False) for perm in perms):
await func(message, *args, **kwargs)
return wrapped
return decorator
def role(*roles: str):
""" Decorator that runs the command only if the author has the specified Roles.
roles must be a string representing a role's name.
NOTE: this function is deprecated. Use the command 'roles' attribute instead.
"""
def decorator(func):
@wraps(func)
async def wrapped(message: discord.Message, *args, **kwargs):
member_roles = [r.name for r in message.author.roles[1:]]
if any(r in member_roles for r in roles):
await func(message, *args, **kwargs)
return wrapped
return decorator
async def subprocess(*args, pipe=None, carriage_return=False):
""" Run a subprocess and return the output.
:param args: Arguments to be passed to the subprocess
:param pipe: Any optional input for the stdin.
:param carriage_return: When True, carriage returns, \r, are not removed from the result.
"""
process = await sub.create_subprocess_exec(*args, stdout=sub.PIPE, stdin=sub.PIPE)
result, _ = await process.communicate(input=bytes(pipe, encoding="utf-8") if pipe else None)
result = result.decode("utf-8")
# There were some problems with the carriage_return in windows, so by default they're removed
if not carriage_return:
result = result.replace("\r", "")
return result
async def retrieve_page(url: str, head=False, call=None, headers=None, **params):
""" Download and return a website with aiohttp.
:param url: Download url as str.
:param head: Whether to send a HEAD request instead of a GET request.
:param call: Any attribute coroutine to call before returning. Eg: "text" would return await response.text().
This may also be a coroutine with the response as parameter.
:param headers: A dict of any additional headers.
:param params: Any additional url parameters.
:return: The byte-like file OR whatever return value of the attribute set in call.
"""
async with aiohttp.ClientSession(loop=client.loop) as session:
coro = session.head if head else session.get
async with coro(url, params=params, headers=headers or {}) as response:
if call is not None:
if type(call) is str:
attr = getattr(response, call)
return await attr()
else:
return await call(response)
else:
return response
async def retrieve_headers(url: str, headers=None, **params):
""" Retrieve the headers from a URL.
:param url: URL as str.
:param headers: A dict of any additional headers.
:param params: Any additional url parameters.
:return: Headers as a dict.
"""
head = await retrieve_page(url, head=True, headers=headers, **params)
return head.headers
async def retrieve_html(url: str, headers=None, **params):
""" Retrieve the html from a URL.
:param url: URL as str.
:param headers: A dict of any additional headers.
:param params: Any additional url parameters.
:return: HTML as str.
"""
return await retrieve_page(url, call="text", headers=headers, **params)
async def download_file(url: str, bytesio=False, headers=None, **params):
""" Download and return a byte-like object of a file.
:param url: Download url as str.
:param bytesio: Convert this object to BytesIO before returning.
:param headers: A dict of any additional headers.
:param params: Any additional url parameters.
:return: The byte-like file.
"""
file_bytes = await retrieve_page(url, call="read", headers=headers, **params)
return BytesIO(file_bytes) if bytesio else file_bytes
async def _convert_json(response):
""" Converts the aiohttp ClientResponse object to JSON.
:param response: The ClientResponse object.
:raises: ValueError if the returned data was not of type application/json
:returns: The parsed json of the response
"""
if "Content-Type" in response.headers and "application/json" not in response.headers["Content-Type"]:
raise ValueError("The response from {} does not have application/json mimetype".format(response.url))
return await response.json()
async def download_json(url: str, headers=None, **params):
""" Download and return a json file.
:param url: Download url as str.
:param headers: A dict of any additional headers.
:param params: Any additional url parameters.
:raises: ValueError if the returned data was not of type application/json
:return: A JSON representation of the downloaded file.
"""
return await retrieve_page(url, call=_convert_json, headers=headers, **params)
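# Usage sketch (the endpoint and parameter are hypothetical):
#     data = await download_json("https://example.com/api", token="abc")
#     # -> parsed JSON, or ValueError when the response is not application/json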
def convert_image_object(image, format: str="PNG", **params):
""" Saves a PIL.Image.Image object to BytesIO buffer. Effectively
returns the byte-like object for sending through discord.Client.send_file.
:param image: PIL.Image.Image: object to convert.
:param format: The image format, defaults to PNG.
:param params: Any additional parameters sent to the writer.
:return: BytesIO: the image object in bytes.
"""
buffer = BytesIO()
image.save(buffer, format, **params)
buffer.seek(0)
return buffer
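# Usage sketch, assuming Pillow is installed (illustrative only):
#     from PIL import Image
#     image = Image.new("RGB", (64, 64), "white")
#     buffer = convert_image_object(image)  # PNG-encoded BytesIO, rewound to 0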
def find_member(server: discord.Server, name, steps=3, mention=True):
""" Find any member by their name or a formatted mention.
Steps define the depth at which to search. More steps equal
less accurate checks.
+--------+------------------+
| step | function |
+--------+------------------+
| 0 | perform no check |
| 1 | name is equal |
| 2 | name starts with |
| 3 | name is in |
+--------+------------------+
:param server: discord.Server to look through for members.
:param name: display_name as a string or mention to find.
:param steps: int from 0-3 to specify search depth.
:param mention: bool, check for mentions.
:return: discord.Member
"""
member = None
# Return a member from mention
found_mention = member_mention_pattern.search(name)
if found_mention and mention:
member = server.get_member(found_mention.group("id"))
return member
name = name.lower()
# Steps to check, higher values equal more fuzzy checks
checks = [lambda m: m.name.lower() == name or m.display_name.lower() == name,
lambda m: m.name.lower().startswith(name) or m.display_name.lower().startswith(name),
lambda m: name in m.display_name.lower() or name in m.name.lower()]
for i in range(min(steps, len(checks))):
member = discord.utils.find(checks[i], server.members)
if member:
break
# Return the found member or None
return member
def find_channel(server: discord.Server, name, steps=3, mention=True, channel_type="text"):
""" Find any channel by its name or a formatted mention.
Steps define the depth at which to search. More steps equal
less accurate checks.
+--------+------------------+
| step | function |
+--------+------------------+
| 0 | perform no check |
| 1 | name is equal |
| 2 | name starts with |
| 3 | name is in |
+--------+------------------+
:param server: discord.Server to look through for channels.
:param name: name as a string or mention to find.
:param steps: int from 0-3 to specify search depth.
:param mention: check for mentions.
:param channel_type: what type of channel we're looking for. Can be str or discord.ChannelType.
:return: discord.Channel
"""
channel = None
# We want to allow both str and discord.ChannelType, so try converting str and handle exceptions
if type(channel_type) is str:
try:
channel_type = getattr(discord.ChannelType, channel_type)
except AttributeError:
raise TypeError("channel_type (str) must be an attribute of discord.ChannelType")
elif type(channel_type) is not discord.ChannelType:
raise TypeError("channel_type must be discord.ChannelType or a str of a discord.ChannelType attribute")
# Return a member from mention
found_mention = channel_mention_pattern.search(name)
if found_mention and mention and channel_type is discord.ChannelType.text:
channel = server.get_channel(found_mention.group("id"))
if not channel:
# Steps to check, higher values equal more fuzzy checks
checks = [lambda c: c.name.lower() == name.lower() and c.type is channel_type,
lambda c: c.name.lower().startswith(name.lower()) and c.type is channel_type,
lambda c: name.lower() in c.name.lower() and c.type is channel_type]
for i in range(min(steps, len(checks))):
channel = discord.utils.find(checks[i], server.channels)
if channel:
break
# Return the found channel or None
return channel
def format_exception(e: Exception):
""" Returns a formatted string as Exception: e """
return type(e).__name__ + ": " + str(e)
def format_syntax_error(e: Exception):
""" Returns a formatted string of a SyntaxError.
Stolen from https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py#L24-L25
"""
return "{0.text}\n{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__).replace("\n\n", "\n")
def format_objects(*objects, attr=None, dec: str="", sep: str=None):
""" Return a formatted string of objects (User, Member, Channel or Server) using
the given decorator and the given separator.
:param objects: Any object with attributes, preferably User, Member, Channel or Server.
:param attr: The attribute to get from any object. Defaults to object names.
:param dec: String to decorate around each object.
:param sep: Separator between each argument.
:return: str: the formatted objects.
"""
if not objects:
return
first_object = objects[0]
if attr is None:
if isinstance(first_object, discord.User):
attr = "display_name"
elif isinstance(first_object, discord.Channel) or isinstance(first_object, discord.Role):
attr = "mention"
sep = " "
elif isinstance(first_object, discord.Server):
attr = "name"
sep = sep if sep is not None else ", "
return sep.join(dec + getattr(m, attr) + dec for m in objects)
def get_formatted_code(code: str):
""" Format code from markdown format. This will filter out markdown code
and give the executable python code, or raise an exception.
:param code: Code formatted in markdown.
:return: str: Code.
"""
code = code.strip(" \n")
match = markdown_code_pattern.match(code)
if match:
code = match.group("code")
# Try finding the code via match, and make sure it wasn't somehow corrupt before returning
if code != "`":
return code
raise Exception("Could not format code.")
def format_code(code: str, language: str=None, *, simple: bool=False):
""" Format markdown code.
:param code: Code formatted in markdown.
:param language: Optional syntax highlighting language.
:param simple: Use single quotes, e.g `"Hello!"`
:return: str of markdown code.
"""
if simple:
return "`{}`".format(code)
else:
return "```{}\n{}```".format(language or "", code)
async def convert_to_embed(text: str, *, author: discord.Member=None, **kwargs):
""" Convert text to an embed, where urls will be embedded if the url is an image.
:param text: str to convert.
:param author: Additionally format an author.
:param kwargs: Any kwargs to be passed to discord.Embed's init function.
"""
embed = discord.Embed(**kwargs)
url = embed.Empty
# Find the first url or None
for word in text.split():
url_match = http_url_pattern.match(word)
# Handle urls
if url_match:
url = url_match.group(0)
headers = await retrieve_headers(url)
# Remove the url from the text and use it as a description
text = text.replace(url, "")
embed.description = text or None
# If the url is an image, embed it
if "Content-Type" in headers and "image" in headers["Content-Type"]:
embed.set_image(url=url)
# If the embed isn't an image, we'll just use it as the embed url
else:
embed.url = url
break
else:
embed.description = text
# Set the author if given
if author:
embed.set_author(name=author.display_name, icon_url=author.avatar_url, url=url)
return embed
def text_to_emoji(text: str):
""" Convert text to a string of regional emoji.
Text must only contain characters in the alphabet from A-Z.
:param text: text of characters in the alphabet from A-Z.
:return: str: formatted emoji unicode.
"""
regional_offset = 127397 # This number + capital letter = regional letter
return "".join(chr(ord(c) + regional_offset) for c in text.upper())
def split(text: str, maxsplit: int=-1):
""" Split a string with shlex when possible, and add support for maxsplit.
:param text: Text to split.
:param maxsplit: Number of times to split. The rest is returned without splitting.
:return: list: split text.
"""
# Generate a shlex object for eventually splitting manually
split_object = shlex.shlex(text, posix=True)
split_object.quotes = '"`'
split_object.whitespace_split = True
split_object.commenters = ""
# When the maxsplit is disabled, return the entire split object
if maxsplit == -1:
try:
return list(split_object)
except ValueError: # If there is a problem with quotes, use the regular split method
return text.split()
# Create a list for the following split keywords
maxsplit_object = []
splits = 0
# Split until we've reached the limit
while splits < maxsplit:
maxsplit_object.append(next(split_object))
splits += 1
# Add any following text without splitting
maxsplit_object.append(split_object.instream.read())
return maxsplit_object
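# Illustrative behaviour (hypothetical input): double quotes and backticks group
# words; once maxsplit tokens are taken, the remainder is appended unsplit.
#     split('say "hello there" friend')  # -> ['say', 'hello there', 'friend']
#     split('say "hello there" friend', maxsplit=1)  # -> ['say', <rest unsplit>]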
| {
"repo_name": "PcBoy111/PC-BOT-V2",
"path": "pcbot/utils.py",
"copies": "1",
"size": "18822",
"license": "mit",
"hash": 3157133388312525000,
"line_mean": 35.2658959538,
"line_max": 126,
"alpha_frac": 0.6453618106,
"autogenerated": false,
"ratio": 4.022654413336183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026056890390377284,
"num_lines": 519
} |
"""AoC 2015.01 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
def chars(file, chunkSize=4096):
"""
Take a file object, read it in chunks and iterate over it one character at a time.
Keyword arguments:
file --- a file object to iterate over
chunkSize --- buffer size for file reads (default=4096)
"""
chunk = file.read(chunkSize)
while chunk:
for char in chunk:
yield char
chunk = file.read(chunkSize)
def solver(file):
"""
Take a file object with input and solve AoC 2015.01 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
level = 0
step = 0
basement = 0
for instruction in chars(file):
if instruction == "(":
step += 1
level += 1
elif instruction == ")":
step += 1
level -= 1
if not basement and level == -1:
basement = step
return (level, basement)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Final level is {}".format(solution[0]))
if solution[1]:
print("Part B: First entered basement on step {}".format(solution[1]))
else:
print("Part B: Never entered basement")
| {
"repo_name": "kav2k/AoC",
"path": "2015/01/solver.py",
"copies": "1",
"size": "1214",
"license": "mit",
"hash": -8682508721760082000,
"line_mean": 19.9310344828,
"line_max": 85,
"alpha_frac": 0.6276771005,
"autogenerated": false,
"ratio": 3.6787878787878787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4806464979287879,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.02 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
def parseBox(line):
"""
Take a line of the form "NxNxN" and parse it into a sorted tuple (length, width, height).
Keyword arguments:
line --- a string to be parsed as box specifications
"""
dimensions = sorted(map(int, line.split("x")))
return (dimensions[2], dimensions[1], dimensions[0])
def solver(file):
"""
Take a file object with input and solve AoC 2015.02 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
paper = 0
ribbon = 0
for line in file:
(length, width, height) = parseBox(line)
paper += 2 * length * width + 2 * length * height + 3 * width * height
ribbon += 2 * (width + height) + (length * width * height)
return (paper, ribbon)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Total wrapping paper required: {} square feet.".format(solution[0]))
print("Part B: Total ribbon required: {} feet.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/02/solver.py",
"copies": "1",
"size": "1074",
"license": "mit",
"hash": 5000308988695905000,
"line_mean": 23.976744186,
"line_max": 91,
"alpha_frac": 0.6461824953,
"autogenerated": false,
"ratio": 3.4757281553398056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46219106506398056,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.03 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
def chars(file, chunkSize=4096):
"""
Take a file object, read it in chunks and iterate over it one character at a time.
Keyword arguments:
file --- a file object to iterate over
chunkSize --- buffer size for file reads (default=4096)
"""
chunk = file.read(chunkSize)
while chunk:
for char in chunk:
yield char
chunk = file.read(chunkSize)
def move(position, instruction):
"""
Take a position and offset it based on instruction, or raise an error on an invalid instruction.
Keyword arguments:
position --- current position as a tuple (x,y)
instruction --- single-character instruction to move in ["^", "v", ">", "<"]
"""
if instruction == "^":
return (position[0], position[1] + 1)
elif instruction == "v":
return (position[0], position[1] - 1)
elif instruction == ">":
return (position[0] + 1, position[1])
elif instruction == "<":
return (position[0] - 1, position[1])
else:
raise ValueError("Instruction '{}' not recognized".format(instruction))
def solver(file):
"""
Take a file object with input and solve AoC 2015.03 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
alone = set([(0, 0)])
alone_position = (0, 0)
together = set([(0, 0)])
santa_position = (0, 0)
robot_position = (0, 0)
robot_turn = False
for instruction in chars(file):
alone_position = move(alone_position, instruction)
alone.add(alone_position)
if robot_turn:
robot_position = move(robot_position, instruction)
together.add(robot_position)
else:
santa_position = move(santa_position, instruction)
together.add(santa_position)
robot_turn = not robot_turn
return (len(alone), len(together))
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Santa alone will deliver presents to {} houses.".format(solution[0]))
print("Part B: Santa and Robo-Santa will deliver presents to {} houses.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/03/solver.py",
"copies": "1",
"size": "2120",
"license": "mit",
"hash": 162078325013108350,
"line_mean": 25.5,
"line_max": 95,
"alpha_frac": 0.654245283,
"autogenerated": false,
"ratio": 3.6177474402730376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47719927232730375,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.04 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
from hashlib import md5
def solver(file, progress=True, progress_step=10000):
"""
Take a file object with input and solve AoC 2015.04 problem on the input.
Outputs progress to STDERR unless silenced.
Keyword arguments:
file --- a file object to read input from
progress --- boolean, whether to output progress to stderr (default=True)
progress_step --- integer, steps to output as progress (default=10000)
"""
key = file.readline().rstrip()  # strip the trailing newline so it is not hashed
solution_five = -1
solution_six = -1
index = 0
while True:
string = (key + str(index)).encode('ascii')
hashed = md5(string).hexdigest()
if (solution_five < 0 and hashed[0:5] == "00000"):
solution_five = index
if progress:
print("Found solution for 5", file=sys.stderr)
if (solution_six < 0 and hashed[0:6] == "000000"):
solution_six = index
if progress:
print("Found solution for 6", file=sys.stderr)
if (solution_five >= 0 and solution_six >= 0):
return (solution_five, solution_six)
if(progress and index > 0 and (index % progress_step == 0)):
sys.stderr.write("\rHashed up to {}.. ".format(index))
index += 1
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Smallest AdventCoin solution for 5 is {}.".format(solution[0]))
print("Part B: Smallest AdventCoin solution for 6 is {}.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/04/solver.py",
"copies": "1",
"size": "1509",
"license": "mit",
"hash": 5701901275762493000,
"line_mean": 25.9464285714,
"line_max": 80,
"alpha_frac": 0.6441351889,
"autogenerated": false,
"ratio": 3.6014319809069213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47455671698069213,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.05 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
import re
def nice1(string):
"""Check a string for being nice under 'old' rules."""
vowels = 0
twice = False
last = ""
for character in string:
if character in ["a", "e", "i", "o", "u"]:
vowels += 1
if last == character:
twice = True
if (last + character) in ["ab", "cd", "pq", "xy"]:
return False
last = character
return (vowels >= 3 and twice)
def nice2(string):
"""Check a string for being nice under 'new' rules."""
return re.search(r"(..).*\1", string) and re.search(r"(.).\1", string)
def solver(file):
"""
Take a file object with input and solve AoC 2015.05 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
nice_a = 0
nice_b = 0
for string in file:
if nice1(string):
nice_a += 1
if nice2(string):
nice_b += 1
return (nice_a, nice_b)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: {} string(s) are nice.".format(solution[0]))
print("Part B: {} string(s) are nice under new rules.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/05/solver.py",
"copies": "1",
"size": "1197",
"license": "mit",
"hash": 5603042090004325000,
"line_mean": 20,
"line_max": 77,
"alpha_frac": 0.5939849624,
"autogenerated": false,
"ratio": 3.2615803814713895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4355565343871389,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.06 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
import re
GRID_SIZE = 1000
def execute(lights, instruction):
"""
Take an instruction as a string and apply it to a 2D array of boolean lights.
Modifies the array in-place.
Keyword arguments:
lights --- a flattened 2D array of boolean light states
instruction --- an instruction formatted according to the challenge rules
"""
(command, x1, y1, x2, y2) = re.match(r"(.+) (\d+),(\d+) through (\d+),(\d+)", instruction).groups()
[x1, y1, x2, y2] = map(int, [x1, y1, x2, y2])
for x in range(x1, x2 + 1):
for y in range(y1, y2 + 1):
if command == "turn on":
lights[x + GRID_SIZE * y] = True
elif command == "turn off":
lights[x + GRID_SIZE * y] = False
elif command == "toggle":
lights[x + GRID_SIZE * y] = not lights[x + GRID_SIZE * y]
def execute_bright(lights, instruction):
"""
Take an instruction as a string and apply it to a 2D array of brightness-enabled lights.
Modifies the array in-place.
Keyword arguments:
lights --- a flattened 2D array of boolean light states
instruction --- an instruction formatted according to the challenge rules
"""
(command, x1, y1, x2, y2) = re.match(r"(.+) (\d+),(\d+) through (\d+),(\d+)", instruction).groups()
[x1, y1, x2, y2] = map(int, [x1, y1, x2, y2])
for y in range(y1, y2 + 1):
for x in range(x1, x2 + 1):
if command == "turn on":
lights[x + GRID_SIZE * y] += 1
elif command == "turn off" and lights[x + GRID_SIZE * y]:
lights[x + GRID_SIZE * y] -= 1
elif command == "toggle":
lights[x + GRID_SIZE * y] += 2
def solver(file, progress=True):
"""
Take a file object with input and solve AoC 2015.06 problem on the input.
Outputs progress to STDERR unless silenced.
Keyword arguments:
file --- a file object to read input from
progress --- boolean, whether to output progress to stderr (default=True)
"""
number_lit = 0
total_brightness = 0
lights = [False for _ in range(0, GRID_SIZE * GRID_SIZE)]
bright_lights = [0 for _ in range(0, GRID_SIZE * GRID_SIZE)]
counter = 0
for instruction in file:
execute(lights, instruction)
execute_bright(bright_lights, instruction)
if progress:
counter += 1
sys.stderr.write("\rProcessed {:03d} instructions..".format(counter))
if progress:
sys.stderr.write("\n")
for index in range(0, GRID_SIZE * GRID_SIZE):
if lights[index]:
number_lit += 1
total_brightness += bright_lights[index]
return (number_lit, total_brightness)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: After following instructions, {} lights are lit.".format(solution[0]))
print("Part B: After following instructions, total brightness is {}.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/06/solver.py",
"copies": "1",
"size": "2900",
"license": "mit",
"hash": 9685573776732788,
"line_mean": 28.5918367347,
"line_max": 100,
"alpha_frac": 0.6279310345,
"autogenerated": false,
"ratio": 3.2805429864253393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4408474020925339,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.07 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
import re
def parse(instruction):
"""
Take an instruction as a string and return a tuple (output wire, input specification).
Example input specifications:
* ("ID", "6") for constant input
* ("ID", "a") for direct wire input "a"
* ("NOT", "ab") for negated wire "ab"
* ("AND", "76", "xy") for bitwise-AND between constant and wire "ab"
Keyword arguments:
instruction --- an instruction formatted according to the challenge rules
"""
match = re.search("^(.*) -> ([a-z]+)$", instruction)
if match:
(input_expression, output_wire) = match.groups()
if " " not in input_expression:
input_command = ("ID", input_expression)
elif "NOT" in input_expression:
submatch = re.search(r"^NOT (\w+)$", input_expression)
if submatch:
input_command = ("NOT", submatch.group(1))
else:
raise ValueError("Illegal instruction:", instruction)
else:
submatch = re.search(r"^(\w+) ([A-Z]+) (\w+)$", input_expression)
if submatch:
input_command = (submatch.group(2), submatch.group(1), submatch.group(3))
else:
raise ValueError("Illegal instruction:", instruction)
return (output_wire, input_command)
else:
raise ValueError("Illegal instruction:", instruction)
def compute(wire_id, wire_specs):
"""
Take a wire identifier and compute its output according to wire specification.
Will overwrite specifications with computed values as caching.
Keyword arguments:
wire_id --- string, a wire identifier
wire_specs --- dictionary, mapping output wire to input specification
"""
if wire_id.isdecimal():
return int(wire_id)
if isinstance(wire_specs[wire_id], int):
return wire_specs[wire_id]
else:
command = wire_specs[wire_id][0]
gate_input1 = compute(wire_specs[wire_id][1], wire_specs)
if len(wire_specs[wire_id]) == 3:
gate_input2 = compute(wire_specs[wire_id][2], wire_specs)
if command == "ID":
gate_output = gate_input1
elif command == "NOT":
gate_output = ~ gate_input1 & (2**16 - 1)
elif command == "AND":
gate_output = gate_input1 & gate_input2
elif command == "OR":
gate_output = gate_input1 | gate_input2
elif command == "LSHIFT":
gate_output = (gate_input1 << gate_input2) & (2**16 - 1)
elif command == "RSHIFT":
gate_output = (gate_input1 >> gate_input2) & (2**16 - 1)
wire_specs[wire_id] = gate_output # Cache the result by overwriting spec with value
return gate_output
def solver(file):
"""
Take a file object with input and solve AoC 2015.07 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
wire_specs_a = {}
wire_specs_b = {}
for instruction in file:
(output_id, input_spec) = parse(instruction)
wire_specs_a[output_id] = input_spec
wire_specs_b[output_id] = input_spec
output_a = compute("a", wire_specs_a)
wire_specs_b["b"] = output_a # Overwrite spec with value
output_b = compute("a", wire_specs_b)
return (output_a, output_b)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Output on wire 'a' is {}.".format(solution[0]))
print("Part B: Output on wire 'a' after modification is {}.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/07/solver.py",
"copies": "1",
"size": "3381",
"license": "mit",
"hash": -3098581805348479000,
"line_mean": 29.1875,
"line_max": 88,
"alpha_frac": 0.6388642413,
"autogenerated": false,
"ratio": 3.503626943005181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.956502887030874,
"avg_score": 0.015492462799288061,
"num_lines": 112
} |
"""AoC 2015.08 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
def unescape(string):
"""
Take a string according to challenge specifications, and perform unescape.
Keyword arguments:
string --- a string to unescape
"""
result = ""
index = 0
while index < len(string):
if string[index] == "\\":
if string[index + 1] == "\\":
result += "\\"
index += 2
elif string[index + 1] == "\"":
result += "\""
index += 2
elif string[index + 1] == "x":
result += chr(int(string[index + 2: index + 4], base=16))
index += 4
else:
raise ValueError("Unexpected escape string:", "\\" + string[index + 1])
else:
result += string[index]
index += 1
return result
def escape(string):
"""
Take a string according to challenge specifications, and perform escape.
Keyword arguments:
string --- a string to escape
"""
result = ""
for character in string:
if character == "\"":
result += "\\\""
elif character == "\\":
result += "\\\\"
else:
result += character
return result
def solver(file):
"""
Take a file object with input and solve AoC 2015.08 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
code_length = 0
unescaped_length = 0
escaped_length = 0
for line in file:
# Strip the trailing newline so that slicing [1:-1] removes exactly the two quotes
line = line.rstrip("\n")
code_length += len(line)
unescaped_length += len(unescape(line[1:-1]))
escaped_length += len(escape(line)) + 2
return (code_length - unescaped_length, escaped_length - code_length)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Length difference between code and data is {} characters.".format(solution[0]))
print("Part B: Length difference between escaped code and code is {} characters.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/08/solver.py",
"copies": "1",
"size": "1892",
"license": "mit",
"hash": -5207844015329671000,
"line_mean": 22.3580246914,
"line_max": 104,
"alpha_frac": 0.5998942918,
"autogenerated": false,
"ratio": 3.917184265010352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5017078556810353,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.09 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
from math import factorial
import re
def permute(list, index):
"""
Return index-th permutation of a list.
Keyword arguments:
list --- the list to be permuted
index --- index in range (0, factorial(len(list)))
"""
if len(list) < 2:
return list
(rest, choice) = divmod(index, len(list))
return [list[choice]] + permute(list[0:choice] + list[choice + 1:len(list)], rest)
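# Sketch of the factorial-number-system indexing above (illustrative):
#     permute(["a", "b", "c"], 0)  # -> ['a', 'b', 'c']
#     permute(["a", "b", "c"], 1)  # -> ['b', 'a', 'c']
# Indices 0..factorial(len(list)) - 1 enumerate every permutation exactly once.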
def distance(route, graph):
"""
Calculate total distance for a given route through a graph.
Keyword arguments:
route --- a list of node names that form the route
graph --- a dictionary mapping node name pairs to distance
"""
length = 0
for i in range(1, len(route)):
length += graph[(route[i - 1], route[i])]
return length
def solver(file):
"""
Take a file object with input and solve AoC 2015.09 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
graph = {}
places = set()
for line in file:
(start, end, length) = re.match(r"(\w+) to (\w+) = (\d+)", line).groups()
places.add(start)
places.add(end)
graph[(start, end)] = int(length)
graph[(end, start)] = int(length)
places = list(places)
min_length = 100000000
max_length = 0
min_route = []
max_route = []
for route in [permute(places, index) for index in range(0, factorial(len(places)))]:
length = distance(route, graph)
if min_length > length:
min_length = length
min_route = route
if max_length < length:
max_length = length
max_route = route
return ((min_length, "->".join(min_route)), (max_length, "->".join(max_route)))
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: The length of minimal route {1} is {0}.".format(*solution[0]))
print("Part B: The length of maximal route {1} is {0}.".format(*solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/09/solver.py",
"copies": "1",
"size": "1966",
"license": "mit",
"hash": 289006748307965630,
"line_mean": 24.2051282051,
"line_max": 86,
"alpha_frac": 0.6307222787,
"autogenerated": false,
"ratio": 3.4131944444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45439167231444444,
"avg_score": null,
"num_lines": null
} |
"""AoC 2015.10 problem solver.
Takes input from STDIN by default.
(c) Alexander Kashev, 2017
"""
import sys
def look_and_say(string):
"""
Expand a string of digits according to look-and-say process in the problem.
Keyword arguments:
string --- a string to be processed
"""
result = ""
last = ""
count = 0
for digit in string:
if digit != last:
if count:
result += str(count) + last
last = digit
count = 1
else:
count += 1
if count:
result += str(count) + last
return result
def solver(file, progress=True):
"""
Take a file object with input and solve AoC 2015.10 problem on the input.
Outputs progress to STDERR unless silenced.
Keyword arguments:
file --- a file object to read input from
progress --- boolean, whether to output progress to STDERR
"""
seed = file.readline().rstrip()  # strip the newline so it is not treated as a digit
result = seed
for i in range(0, 50):
result = look_and_say(result)
if i == 39:
result40 = result
if progress:
sys.stderr.write("\rProcessed {}/{} steps..".format(i + 1, 50))
if progress:
sys.stderr.write("\n")
return (len(result40), len(result))
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Length after 40 steps is {}.".format(solution[0]))
print("Part B: Length after 50 steps is {}.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2015/10/solver.py",
"copies": "1",
"size": "1370",
"license": "mit",
"hash": -9060703970998854000,
"line_mean": 19.7575757576,
"line_max": 77,
"alpha_frac": 0.6175182482,
"autogenerated": false,
"ratio": 3.624338624338624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.963771323830169,
"avg_score": 0.02082872684738703,
"num_lines": 66
} |
"""AoC 2018.01 problem solver.
Takes input from STDIN by default.
"""
import sys
def solver(file):
"""
Take a file object with input and solve AoC 2018.02 problem on the input.
Keyword arguments:
file --- a file object to read input from
"""
input = list(map(str.strip, file.readlines()))
answer_a = 0
answer_b = ""
twice = 0
thrice = 0
for id in input:
id_twice = 0
id_thrice = 0
frequencies = {}
for letter in id:
if letter in frequencies:
frequencies[letter] += 1
else:
frequencies[letter] = 1
for freq in frequencies:
if frequencies[freq] == 2:
id_twice = 1
elif frequencies[freq] == 3:
id_thrice = 1
twice += id_twice
thrice += id_thrice
answer_a = twice * thrice
def find_b():
answer = ""
for i, id in enumerate(input):
for j in range(i):
count = 0
for k, letter in enumerate(id):
if input[j][k] != letter:
count += 1
if count == 1:
for k, letter in enumerate(id):
if input[j][k] == letter:
answer += letter
return answer
answer_b = find_b()
return (answer_a, answer_b)
if __name__ == "__main__":
solution = solver(sys.stdin)
print("Part A: Solution is {}.".format(solution[0]))
print("Part B: Solution is {}.".format(solution[1]))
| {
"repo_name": "kav2k/AoC",
"path": "2018/02/solver.py",
"copies": "1",
"size": "1397",
"license": "mit",
"hash": 7347573241521392000,
"line_mean": 19.8507462687,
"line_max": 75,
"alpha_frac": 0.5526127416,
"autogenerated": false,
"ratio": 3.582051282051282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9482727799891248,
"avg_score": 0.03038724475200686,
"num_lines": 67
} |
"""aoc_common
Common utility functions for Advent of Code solutions
"""
from __future__ import annotations
import pathlib
from itertools import islice, repeat
from typing import Iterable, TypeVar
def load_puzzle_input(day: int) -> str:
"""Return the puzzle input for the day’s puzzle"""
input_directory = pathlib.Path(__file__).parent.with_name("input")
year = input_directory.parent.name
input_filename = f"{year}-{day:02}.txt"
return input_directory.joinpath(input_filename).read_text()
def report_solution(
*,
puzzle_title: str,
part_one_solution: int | str,
part_two_solution: int | str | None = None,
) -> None:
print(puzzle_title)
print("=" * len(puzzle_title))
print(f"Part one solution: {part_one_solution}")
if part_two_solution is not None:
print(f"Part two solution: {part_two_solution}")
def split_number_by_places(number: int) -> list[int]:
places = []
while number:
places.append(number % 10)
number //= 10
return list(reversed(places))
class Sentinel:
pass
T = TypeVar("T")
def chunked(
iterable: Iterable[T], count: int, *, fill: T | Sentinel = Sentinel()
) -> Iterable[list[T]]:
"""Yield count-long chunks from iterable.
If the length of the iterable is not a multiple of count,
the final chunk will be short, unless `fill` is provided
as a keyword argument, in which case the final chunk will
be filled with the given value.
"""
iterator = iter(iterable)
while True:
chunk = list(islice(iterator, count))
if not chunk:
return
if len(chunk) < count and not isinstance(fill, Sentinel):
chunk += repeat(fill, count - len(chunk))
yield chunk
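# Usage sketch (illustrative):
#     list(chunked(range(7), 3))          # -> [[0, 1, 2], [3, 4, 5], [6]]
#     list(chunked(range(7), 3, fill=0))  # -> [[0, 1, 2], [3, 4, 5], [6, 0, 0]]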
| {
"repo_name": "robjwells/adventofcode-solutions",
"path": "2016/python/aoc_common.py",
"copies": "1",
"size": "1763",
"license": "mit",
"hash": 4749158148049386000,
"line_mean": 26.0923076923,
"line_max": 73,
"alpha_frac": 0.6456558773,
"autogenerated": false,
"ratio": 3.6459627329192545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47916186102192543,
"avg_score": null,
"num_lines": null
} |
"""aoc_common
Common utility functions for Advent of Code solutions
"""
import pathlib
from itertools import islice, repeat
from typing import Iterable, Iterator, List, Optional, TypeVar, Union
def load_puzzle_input(day: int) -> str:
"""Return the puzzle input for the day’s puzzle"""
input_directory = pathlib.Path(__file__).parent.with_name("input")
year = input_directory.parent.name
input_filename = f"{year}-{day:02}.txt"
return input_directory.joinpath(input_filename).read_text()
def report_solution(
*,
puzzle_title: str,
part_one_solution: Union[int, str],
part_two_solution: Optional[Union[int, str]] = None,
) -> None:
print(puzzle_title)
print("=" * len(puzzle_title))
print(f"Part one solution: {part_one_solution}")
if part_two_solution is not None:
print(f"Part two solution: {part_two_solution}")
def split_number_by_places(number: int) -> List[int]:
places = []
while number:
places.append(number % 10)
number //= 10
return list(reversed(places))
class Sentinel:
pass
T = TypeVar("T")
def chunked(
iterable: Iterable[T], count: int, *, fill: Union[T, Sentinel] = Sentinel()
) -> Iterable[List[T]]:
"""Yield count-long chunks from iterable.
If the length of the iterable is not a multiple of count,
the final chunk will be short, unless `fill` is provided
as a keyword argument, in which case the final chunk will
be filled with the given value.
"""
iterator = iter(iterable)
while True:
chunk = list(islice(iterator, count))
if not chunk:
return
if len(chunk) < count and not isinstance(fill, Sentinel):
chunk += repeat(fill, count - len(chunk))
yield chunk
| {
"repo_name": "robjwells/adventofcode-solutions",
"path": "2019/python/aoc_common.py",
"copies": "1",
"size": "1782",
"license": "mit",
"hash": -9144941585234930000,
"line_mean": 26.8125,
"line_max": 79,
"alpha_frac": 0.6483146067,
"autogenerated": false,
"ratio": 3.6178861788617884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9766200785561788,
"avg_score": 0,
"num_lines": 64
} |
"""AoC Day 10
Usage:
day10.py <filename>
day10.py (-h | --help)
day10.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from collections import defaultdict
class Instruction:
def __init__(self, from_bot, low_type, low_id, high_type, high_id):
self.bot = from_bot
self.low_type = low_type
self.low_id = low_id
self.high_type = high_type
self.high_id = high_id
def __str__(self):
low = self.low_type + " " + str(self.low_id)
high = self.high_type + " " + str(self.high_id)
return str(self.bot) + ":" + low + "/" + high
def assign(self, receptacle, key, micro):
if key in receptacle.keys():
receptacle[key].append(micro)
else:
receptacle[key] = [micro]
def resolve(self, bots, output):
low_micro = bots[self.bot].pop(0)
high_micro = bots[self.bot].pop()
if len(bots[self.bot]) == 0:
bots.pop(self.bot, None)
if low_micro == 17 and high_micro == 61:
print "COMPARE: Bot = " + str(self.bot)
if self.low_type == "output":
self.assign(output, self.low_id, low_micro)
else:
self.assign(bots, self.low_id, low_micro)
if self.high_type == "output":
self.assign(output, self.high_id, high_micro)
else:
self.assign(bots, self.high_id, high_micro)
def process_instructions(bots, instructions, output):
i = 0
while i < len(instructions):
bot_id = instructions[i].bot
if bot_id in bots.keys() and len(bots[bot_id]) >= 2:
bots[bot_id].sort()
instructions[i].resolve(bots, output)
instructions.pop(i)
i = 0
else:
i += 1
def parse_instructions(filename):
fn = open(filename, 'r')
bots = {}
instructions = []
output = {}
for l in fn:
tokens = l.rstrip().split()
if tokens[0] == "value":
bot_id = int(tokens[5])
value = int(tokens[1])
if bot_id in bots.keys():
bots[bot_id].append(value)
else:
bots[bot_id] = [value]
bots[bot_id].sort()
else:
i = Instruction(int(tokens[1]), tokens[5], int(tokens[6]), tokens[10], int(tokens[11]))
instructions.append(i)
process_instructions(bots, instructions, output)
while len(instructions):
process_instructions(bots, instructions, output)
return (bots, instructions)
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
(bots, instructions) = parse_instructions(arguments["<filename>"])
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day10/day10.py",
"copies": "1",
"size": "2752",
"license": "mit",
"hash": -1049943730662482800,
"line_mean": 26.797979798,
"line_max": 99,
"alpha_frac": 0.5439680233,
"autogenerated": false,
"ratio": 3.4923857868020303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.453635381010203,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 11
Usage:
day11.py
day11.py (-h | --help)
day11.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import itertools
import copy
SG = 0
SM = 1
PG = 2
PM = 3
TG = 4
TM = 5
RG = 6
RM = 7
CG = 8
CM = 9
EG = 10
EM = 11
DG = 12
DM = 13
initial_floor_plan = [ [SG, SM, PG, PM],
[CM, CG, TG, RG, RM],
[TM],
[]
]
second_floor_plan = [ [EG, EM, DG, DM, SG, SM, PG, PM],
[CM, CG, TG, RG, RM],
[TM],
[]
]
class Devices:
def compatible_generator(self,x):
return x - 1
def is_microchip(self,x):
if (x % 2) == 1:
return True
return False
def is_generator(self,x):
return not self.is_microchip(x)
def is_pair(self,x, y):
if x == y - 1 and self.is_generator(x):
return True
return False
def num_generators(self,floor):
num = 0
for d in floor:
if self.is_generator(d):
num += 1
return num
def floors_are_valid_configs(self,floors):
valid = True
for f in floors:
num_generators = self.num_generators(f)
if num_generators != 0:
for d in f:
if self.is_microchip(d) and self.compatible_generator(d) not in f:
valid = False
break
if not valid:
break
return valid
def enumerate_all_moves(floor_plan, location):
new_floor = [location - 1, location + 1]
moves = {}
for f in new_floor:
if f < 0 or f >= len(floor_plan):
continue
moves[f] = []
for (i,j) in itertools.combinations(floor_plan[location], 2):
moved_devices = copy.deepcopy(floor_plan)
moved_devices[location].remove(i)
moved_devices[location].remove(j)
moved_devices[f].append(i)
moved_devices[f].append(j)
moves[f].append(moved_devices)
if len(moves[f]) == 0 or f == location - 1:
for i in floor_plan[location]:
moved_devices = copy.deepcopy(floor_plan)
moved_devices[location].remove(i)
moved_devices[f].append(i)
moves[f].append(moved_devices)
return moves
def current_state_hash(floor_plan, floor):
for f in floor_plan:
f.sort()
modified_floor_plan = copy.deepcopy(floor_plan)
d = Devices()
for f in modified_floor_plan:
if len(f) > 1:
i = 0
while i < len(f)-1:
if d.is_pair(f[i], f[i+1]):
f[i] = 'P'
f[i+1] = 'P'
i += 2
else:
i += 1
f.sort()
return hash(str(modified_floor_plan) + str(floor))
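# Note on the 'P' substitution above: states that differ only by swapping whole
# generator/microchip pairs between elements behave identically, so canonicalising
# matched pairs collapses them into one visited-set entry and prunes the BFS.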
# Contains floor plan, current floor, moves
def min_steps(plan, num):
bfs = []
visited_states = set()
d = Devices()
bfs = [ (plan, 0, 0) ]
visited_states.add(current_state_hash(plan, 0))
while len(bfs):
(floor_plan, iterations, location) = bfs.pop(0)
if len(floor_plan[3]) == num:
print iterations
print floor_plan
break
moves = enumerate_all_moves(floor_plan, location)
for e,possible in moves.iteritems():
for m in possible:
if d.floors_are_valid_configs(m):
state = current_state_hash(m, e)
if state not in visited_states:
visited_states.add(state)
bfs.append( (m, iterations + 1, e) )
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
min_steps(initial_floor_plan, 10)
min_steps(second_floor_plan, 14)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day11/day11.py",
"copies": "1",
"size": "3997",
"license": "mit",
"hash": -6180672908468982000,
"line_mean": 23.8260869565,
"line_max": 86,
"alpha_frac": 0.4843632725,
"autogenerated": false,
"ratio": 3.630336058128974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9574645026363486,
"avg_score": 0.00801086085309771,
"num_lines": 161
} |
"""AoC Day 12
Usage:
day12.py <filename>
day12.py (-h | --help)
day12.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
class Instruction:
COPY = 0
INC = 1
DEC = 2
JNZ = 3
def __init__(self, instruction_string):
inst = instruction_string.rstrip().split()
self.instruction_string = instruction_string.rstrip()
self.value = 0
self.register = 0
if inst[0] == "cpy":
self.instruction = Instruction.COPY
self.value = inst[1]
self.register = inst[2]
elif inst[0] == "inc":
self.instruction = Instruction.INC
self.register = inst[1]
elif inst[0] == "dec":
self.instruction = Instruction.DEC
self.register = inst[1]
elif inst[0] == "jnz":
self.instruction = Instruction.JNZ
self.register = inst[1]
self.value = int(inst[2])
else:
print "ERROR: unrecognized instruction: " + inst[0]
def __str__(self):
return self.instruction_string
def decoded(self):
return (self.instruction, self.register, self.value)
class Registers:
def __init__(self, a, b, c, d):
self.registers = {'a':a, 'b':b, 'c':c, 'd':d}
def process_instruction(self, instruction, pc):
if instruction[0] == Instruction.COPY:
if instruction[2] in self.registers.keys():
self.registers[instruction[1]] = self.registers[instruction[2]]
else:
self.registers[instruction[1]] = int(instruction[2])
pc += 1
elif instruction[0] == Instruction.INC:
self.registers[instruction[1]] += 1
pc += 1
elif instruction[0] == Instruction.DEC:
self.registers[instruction[1]] -= 1
pc += 1
elif instruction[0] == Instruction.JNZ:
# The operand may be a register name or a literal; parsing as base 16 accepts
# 'a'-'d' without raising, and the register lookup below overrides that value
value = int(instruction[1], 16)
if instruction[1] in self.registers.keys():
value = self.registers[instruction[1]]
if value != 0:
pc += instruction[2]
else:
pc += 1
return pc
def __str__(self):
return str(self.registers)
def parse_instructions(filename):
fn = open(filename, 'r')
inst = []
for l in fn:
i = Instruction(l.rstrip())
inst.append(i)
pc = 0
r = Registers(0,0,0,0)
while pc < len(inst):
pc = r.process_instruction(inst[pc].decoded(), pc)
print r
pc = 0
r = Registers(0,0,1,0)
while pc < len(inst):
pc = r.process_instruction(inst[pc].decoded(), pc)
print r
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
parse_instructions(arguments["<filename>"])
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day12/day12.py",
"copies": "1",
"size": "2852",
"license": "mit",
"hash": -5091384569756763000,
"line_mean": 25.9056603774,
"line_max": 79,
"alpha_frac": 0.5354137447,
"autogenerated": false,
"ratio": 3.762532981530343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9764275213931283,
"avg_score": 0.006734302459812128,
"num_lines": 106
} |
"""AoC Day 13
Usage:
day13.py
day13.py (-h | --help)
day13.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import numpy as np
WALL = '#'
OPEN = ' '
def count_bits(num):
count = 0
while(num):
num &= num - 1
count += 1
return count
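# Kernighan's trick, traced (illustrative): num &= num - 1 clears the lowest set
# bit each pass, so the loop runs once per 1-bit.
#     count_bits(0b1011)  # 1011 -> 1010 -> 1000 -> 0000, returns 3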
def is_wall_per_equation(row, column, key):
res = column*column + 3*column + 2*column*row + row + row*row + key
wall = (count_bits(res) % 2) == 1
return wall
def build_maze(size, key):
maze = np.ndarray((size,size), dtype=np.dtype('a1'))
for r in range(size):
for c in range(size):
if is_wall_per_equation(r, c, key):
maze[r][c] = WALL
else:
maze[r][c] = OPEN
return maze
def build_locations(maze, visited, state):
potentials = [ (state[0] - 1, state[1]), (state[0] + 1, state[1]),
(state[0], state[1] - 1), (state[0], state[1] + 1)]
res = []
for p in potentials:
if p[0] < 0 or p[1] < 0:
continue
if p[0] >= len(maze) or p[1] >= len(maze):
continue
if p not in visited and maze[p[0]][p[1]] != WALL:
res.append( (p[0], p[1], state[2] + 1))
return res
def solve(maze, destination, location):
visited = set([location])  # a set makes the membership tests in build_locations O(1)
bfs = [(location[0], location[1], 0)]
while len(bfs):
state = bfs.pop(0)
if state[0] == destination[0] and state[1] == destination[1]:
print "Finished in " + str(state[2]) + " steps"
break
visited.add( (state[0], state[1]))
new_states = build_locations(maze, visited, state)
bfs.extend(new_states)
def max_visited(maze, location, steps):
visited = set()
visited.add( (location[0], location[1]) )
bfs = [(location[0], location[1], 0)]
while len(bfs):
state = bfs.pop(0)
if state[2] == steps + 1:
print "Went " + str(steps) + ". Num visited = " + str(len(visited))
break
visited.add( (state[0], state[1]))
new_states = build_locations(maze, visited, state)
bfs.extend(new_states)
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
np.set_printoptions(threshold=np.inf)
np.set_printoptions(linewidth=200)
# Test
#size = 10
#maze = build_maze(size, 10)
#solve(maze, (4, 7), (1,1))
# Actual
size = 80
maze = build_maze(size, 1358)
solve(maze, (39, 31), (1,1))
max_visited(maze, (1,1), 50)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day13/day13.py",
"copies": "1",
"size": "2563",
"license": "mit",
"hash": 6117905751567799000,
"line_mean": 23.4095238095,
"line_max": 80,
"alpha_frac": 0.5298478346,
"autogenerated": false,
"ratio": 3.0259740259740258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4055821860574026,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 14
Usage:
day14.py
day14.py (-h | --help)
day14.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import hashlib
import re
from collections import OrderedDict
def find_keys(stretch, salt, start, end, three_dict, five_dict):
three_regex = re.compile(r"(.)\1{2}")
five_regex = re.compile(r"(.)\1{4}")
count = start
while count <= end:
check = salt + str(count)
if stretch:
for i in range(2017):
m = hashlib.md5()
m.update(check)
check = m.hexdigest()
else:
m = hashlib.md5()
m.update(check)
dig = m.hexdigest()
#print str(count) + ": " + check + " = " + dig
three_m = re.search(three_regex, dig)
if three_m:
three_dict[three_m.group(0)[0]].append(count)
five_m = re.findall(five_regex, dig)
if len(five_m):
for v in five_m:
five_dict[v].append(count)
count += 1
keys = []
for (k,v) in five_dict.iteritems():
for loc in v:
for entry in three_dict[k]:
if loc > entry and loc - entry <= 1000:
keys.append( (k, entry) )
keys.sort(key=lambda tup: tup[1])
if len(keys) >= 64:
word = "" if stretch else "out"
print salt + " with" + word + " stretching" + str(keys[63])
else:
print "Not enough keys"
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
#salt = "abc" # Test
salt = "yjdafjpo"
three_dict = OrderedDict()
five_dict = OrderedDict()
for i in range(16):
hexchar = format(i, 'x')
three_dict[hexchar] = []
five_dict[hexchar] = []
find_keys(False, salt, 0, 30000, three_dict, five_dict)
three_dict = OrderedDict()
five_dict = OrderedDict()
for i in range(16):
hexchar = format(i, 'x')
three_dict[hexchar] = []
five_dict[hexchar] = []
find_keys(True, salt, 0, 30000, three_dict, five_dict)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day14/day14.py",
"copies": "1",
"size": "2131",
"license": "mit",
"hash": -5813199391896984000,
"line_mean": 22.4175824176,
"line_max": 67,
"alpha_frac": 0.5180666354,
"autogenerated": false,
"ratio": 3.398724082934609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4416790718334609,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 16
Usage:
day16.py <input> <length>
day16.py (-h | --help)
day16.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def checksum(string):
string = list(string)
while True:
i = 0
checksum = []
while i < len(string):
if string[i] == string[i+1]:
checksum.append("1")
else:
checksum.append("0")
i += 2
if len(checksum) % 2 == 1:
break
else:
string = checksum
return ''.join(checksum)
def generate_string(string, length):
length = int(length)
while len(string) < length:
a = string
b = list(string[::-1])
i = 0
while i < len(b):
if b[i] == '0':
b[i] = '1'
else:
b[i] = '0'
i += 1
string = a + '0' + ''.join(b)
return string[0:length]
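# One dragon-curve step, traced (illustrative): b is a reversed and bit-flipped,
# and the result is a + "0" + b.
#     "1"   -> a = "1",   b = "0"   -> "100"
#     "100" -> a = "100", b = "110" -> "1000110"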
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
string = generate_string(arguments["<input>"], arguments["<length>"])
print checksum(string)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day16/day16.py",
"copies": "1",
"size": "1171",
"license": "mit",
"hash": -1196758724988947200,
"line_mean": 19.1896551724,
"line_max": 73,
"alpha_frac": 0.4671221178,
"autogenerated": false,
"ratio": 3.64797507788162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9605518651620317,
"avg_score": 0.0019157088122605363,
"num_lines": 58
} |
"""AoC Day 17
Usage:
day17.py
day17.py (-h | --help)
day17.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import hashlib
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
OPEN = ' '
CLOSED = 'X'
def calculate_open_doors(passcode):
doors = []
m = hashlib.md5()
m.update(passcode.encode('utf-8'))
hashed_passcode = m.hexdigest()
for i in range(4):
# Hex digits 'b'-'f' open the door; '0'-'9' and 'a' leave it closed
if hashed_passcode[i] <= 'a':
doors.append(CLOSED)
else:
doors.append(OPEN)
return doors
def valid_location(loc):
if loc[0] < 0 or loc[0] > 3 or loc[1] < 0 or loc[1] > 3:
return False
return True
def new_location(loc, move):
if move == UP: tup = (loc[0] - 1, loc[1])
if move == DOWN: tup = (loc[0] + 1, loc[1])
if move == LEFT: tup = (loc[0], loc[1] - 1)
if move == RIGHT: tup = (loc[0], loc[1] + 1)
return tup
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
#start_passcode = "ihgpwlah"
#start_passcode = "ulqzkmiv"
start_passcode = "qtetzkpl"
path_lookup = {UP:'U', DOWN:'D', RIGHT:'R', LEFT:'L'}
vault = (3,3)
min_moves = 0xffffffff
min_moves_str = ""
max_moves = 0
state = [ ((0,0), 0, start_passcode) ]
while len(state):
(location, moves, passcode) = state.pop(0)
doors = calculate_open_doors(passcode)
i = 0
for i in range(len(doors)):
if doors[i] == OPEN:
new_loc = new_location(location, i)
if new_loc == vault:
if (moves + 1) < min_moves:
min_moves = moves + 1
min_moves_str = passcode[len(start_passcode):] + path_lookup[i]
if (moves + 1) > max_moves:
max_moves = moves + 1
continue
if valid_location(new_loc):
state.insert(0, (new_loc, moves + 1, passcode + path_lookup[i]))
print("Max = " + str(max_moves))
print("Min = " + str(min_moves) + " (" + min_moves_str + ")")
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day17/day17.py",
"copies": "1",
"size": "2128",
"license": "mit",
"hash": 1385084371173802800,
"line_mean": 23.7441860465,
"line_max": 87,
"alpha_frac": 0.5065789474,
"autogenerated": false,
"ratio": 3.1294117647058823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4135990712105882,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 18
Usage:
day18.py <filename> <num_rows>
day18.py (-h | --help)
day18.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
TRAP = '^'
SAFE = '.'
def tile_state(left, center, right):
if left == TRAP and center == TRAP and right == SAFE:
return TRAP
elif center == TRAP and right == TRAP and left == SAFE:
return TRAP
elif center == SAFE and right == SAFE and left == TRAP:
return TRAP
elif center == SAFE and left == SAFE and right == TRAP:
return TRAP
return SAFE
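# Equivalent simplification (not used above): the four rules reduce to "trap
# exactly when left != right" - the centre tile never affects the outcome.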
def generate_floor_plan(filename, num_rows):
f = open(filename, 'r')
rows = [list(f.readline().rstrip())]
while len(rows) < num_rows:
next_row = []
# Special case for left column next to wall
next_row.append(tile_state(SAFE, rows[-1][0], rows[-1][1]))
# Middle columns
for i in range(1, len(rows[-1]) - 1):
next_row.append(tile_state(rows[-1][i-1], rows[-1][i], rows[-1][i+1]))
# Special case for right column next to wall
next_row.append(tile_state(rows[-1][-2], rows[-1][-1], SAFE))
rows.append(next_row)
return rows
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
tiles = generate_floor_plan(arguments["<filename>"], int(arguments["<num_rows>"]))
safe_count = 0
for row in tiles:
for c in row:
if c == SAFE:
safe_count += 1
print("Safe: " + str(safe_count))
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day18/day18.py",
"copies": "1",
"size": "1546",
"license": "mit",
"hash": 3722772864896298000,
"line_mean": 23.935483871,
"line_max": 86,
"alpha_frac": 0.5614489004,
"autogenerated": false,
"ratio": 3.310492505353319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9353913789169073,
"avg_score": 0.0036055233168491544,
"num_lines": 62
} |
"""AoC Day 19
Usage:
day19.py <num_elves>
day19.py (-h | --help)
day19.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def move_presents_from_across(presents):
dropped = 0
num_elves = len(presents)
i = 0
while i < len(presents):
if presents[i]:
across = (i + dropped + (num_elves - dropped)//2)
if across < len(presents):
presents[across] = None
dropped += 1
else:
break
i += 1
return list(filter(None, presents[i:] + presents[:i]))
def move_presents_from_left(presents):
for i in range(0, len(presents), 2):
to_left = (i+1) % len(presents)
presents[to_left] = None
return list(filter(None, presents))
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
num_elves = int(arguments["<num_elves>"])
presents = list(range(1,int(num_elves) + 1))
while len(presents) > 1:
presents = move_presents_from_left(presents)
print("Left: " + str(presents))
presents = list(range(1,int(num_elves) + 1))
while len(presents) > 1:
presents = move_presents_from_across(presents)
print("Across: " + str(presents))
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day19/day19.py",
"copies": "1",
"size": "1293",
"license": "mit",
"hash": -7927529908033005000,
"line_mean": 23.8653846154,
"line_max": 62,
"alpha_frac": 0.5630317092,
"autogenerated": false,
"ratio": 3.145985401459854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9080168776454377,
"avg_score": 0.02576966684109541,
"num_lines": 52
} |
"""AoC Day 1
Usage:
day1.py <filename>
day1.py (-h | --help)
day1.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def get_directions(filename):
fn = open(filename, 'r')
directions = [x.strip() for x in fn.read().split(',')]
return directions
def calculate_location(directions):
direction_change = {'R':90, 'L':-90}
direction_move = {0:0, 1:1, 2:0, 3:1}
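    # direction_move maps facing / 90 (0=N, 1=E, 2=S, 3=W) to the coordinate
    # axis moved along; the sign of the step comes from `adjust` below.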
location = [0, 0]
facing = 0
for d in directions:
        turn = d[0]
        length = int(d[1:])
        facing = (facing + direction_change[turn]) % 360
        if facing >= 180:
            adjust = -1
        else:
            adjust = 1
        location[direction_move[facing / 90]] += (adjust * length)
return location
def calculate_distance(location):
return abs(location[0]) + abs(location[1])
def first_visited_twice(directions):
visited = set()
direction_change = {'R':90, 'L':-90}
direction_move = {0:0, 1:1, 2:0, 3:1}
location = [0, 0]
facing = 0
for d in directions:
        turn = d[0]
        length = int(d[1:])
        facing = (facing + direction_change[turn]) % 360
        if facing >= 180:
            adjust = -1
        else:
            adjust = 1
        intermediate_location = list(location)
        # walk every unit step, including the segment's endpoint, so corner
        # points are also checked against the visited set
        for i in range(1, length + 1):
            intermediate_location[direction_move[facing / 90]] += adjust
            loc = (intermediate_location[0], intermediate_location[1])
            if loc in visited:
                return calculate_distance(loc)
            else:
                visited.add(loc)
        location[direction_move[facing / 90]] += (adjust * length)
def distance(filename):
directions = get_directions(filename)
location = calculate_location(directions)
print calculate_distance(location)
print first_visited_twice(directions)
if __name__ == '__main__':
arguments = docopt(__doc__, version='AoC Day 1')
print arguments["<filename>"]
distance(arguments["<filename>"])
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day1/day1.py",
"copies": "1",
"size": "2085",
"license": "mit",
"hash": 4935492921131838000,
"line_mean": 23.8214285714,
"line_max": 72,
"alpha_frac": 0.5625899281,
"autogenerated": false,
"ratio": 3.6387434554973823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47013333835973825,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 20
Usage:
day20.py <filename>
day20.py (-h | --help)
day20.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def read_input_file(filename):
f = open(filename, 'r')
numbers = []
for i in f:
numbers.append(list(map(int, i.rstrip().split('-'))))
numbers.sort(key=lambda x: x[0])
return numbers
def reduce_numbers(numbers):
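    # Merge the sorted ranges in place: a range that overlaps or directly
    # abuts the previous one (start <= prev_end + 1) is absorbed into it and
    # its slot set to None; the Nones are filtered out at the end.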
prev = 0
i = 1
while i < len(numbers):
# if beginning of this entry is less than end of previous
if numbers[i][0] <= numbers[prev][1] + 1:
if numbers[i][1] <= numbers[prev][1]: # Completely inside
numbers[i] = None
elif numbers[i][1] > numbers[prev][1]: # partial
numbers[prev][1] = numbers[i][1]
numbers[i] = None
else:
prev = i
else:
prev = i
i += 1
numbers = list(filter(None, numbers))
return numbers
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
numbers = read_input_file(arguments["<filename>"])
numbers = reduce_numbers(numbers)
print("First number: " + str(numbers[0][1] + 1))
summation = 0
prev = 0
i = 1
while i < len(numbers):
summation += (numbers[i][0] - numbers[prev][1]) - 1
prev = i
i += 1
if numbers[-1][1] != 4294967295:
summation += 4294967295 - numbers[-1][1]
print("Total: " + str(summation))
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day20/day20.py",
"copies": "1",
"size": "1518",
"license": "mit",
"hash": 4984235918004002000,
"line_mean": 22.3538461538,
"line_max": 69,
"alpha_frac": 0.5309617918,
"autogenerated": false,
"ratio": 3.481651376146789,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45126131679467896,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 2
Usage:
day2.py <filename>
day2.py (-h | --help)
day2.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import numpy as np
def parse_instructions(filename):
instructions = []
fn = open(filename, 'r')
for l in fn:
instructions.append(l.strip())
return instructions
def engineered_keypad_number(location):
lookup = [ [ 0, 0, 0x1, 0, 0],
[ 0, 0x2, 0x3, 0x4, 0],
[0x5, 0x6, 0x7, 0x8, 0x9],
[ 0, 0xA, 0xB, 0xC, 0],
[ 0, 0, 0xD, 0, 0] ]
# Location stored as Row/Column
return lookup[location[0]][location[1]]
def engineered_keypad_move(direction, location):
new_location = map(sum, zip(location, direction))
if engineered_keypad_validation(new_location):
location = new_location
return location
def engineered_keypad_validation(key_loc):
if key_loc[0] < 0 or key_loc[1] < 0:
return False
if key_loc[0] > 4 or key_loc[1] > 4:
return False
if engineered_keypad_number(key_loc) == 0:
return False
return True
def standard_keypad_number(location):
lookup = [ [1, 2, 3],
[4, 5, 6],
[7, 8, 9] ]
# Location stored as Row/Column
return lookup[location[0]][location[1]]
def standard_keypad_validation(key_loc):
if key_loc < 0:
key_loc = 0
if key_loc > 2:
key_loc = 2
return key_loc
def standard_keypad_move(direction, location):
location = map(sum, zip(location, direction))
location[0] = standard_keypad_validation(location[0])
location[1] = standard_keypad_validation(location[1])
return location
def find_new_location(location, instructions, movement):
move_translation = {'U':[-1,0], 'L':[0,-1], 'D':[1,0], 'R':[0,1]}
for c in instructions:
location = movement(move_translation[c], location)
return location
if __name__ == '__main__':
arguments = docopt(__doc__, version='AoC Day 2')
instructions = parse_instructions(arguments["<filename>"])
print instructions
loc = [1,1]
answer = []
for i in instructions:
loc = find_new_location(loc, i, standard_keypad_move)
answer.append(standard_keypad_number(loc))
print "Standard Keypad: " + str(answer)
loc = [2,0]
answer = []
for i in instructions:
loc = find_new_location(loc, i, engineered_keypad_move)
answer.append(engineered_keypad_number(loc))
np.set_printoptions(formatter={'int':lambda x:hex(int(x))})
print np.array(answer)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day2/day2.py",
"copies": "1",
"size": "2838",
"license": "mit",
"hash": 123733510677874830,
"line_mean": 26.0285714286,
"line_max": 69,
"alpha_frac": 0.5916138125,
"autogenerated": false,
"ratio": 3.1923509561304835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4283964768630484,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 3
Usage:
day3.py <filename>
day3.py (-h | --help)
day3.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def is_triangle(t):
a = t[0]
b = t[1]
c = t[2]
if (a + b > c) and (a + c > b) and (b + c > a):
return True
return False
def count_col_triangles(filename):
fn = open(filename, 'r')
potentials = [ [], [], [] ]
count = 0
for l in fn:
t = [int(x) for x in l.split()]
potentials[0].append(t[0])
potentials[1].append(t[1])
potentials[2].append(t[2])
if len(potentials[0]) == 3:
for i in range(0, len(potentials)):
if is_triangle(potentials[i]):
count += 1
potentials[i] = []
return count
def count_row_triangles(filename):
fn = open(filename, 'r')
count = 0
for l in fn:
t = [int(x) for x in l.split()]
if is_triangle(t):
count += 1
fn.close()
return count
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
num_row_triangles = count_row_triangles(arguments["<filename>"])
print "Number of row triangles = " + str(num_row_triangles)
num_col_triangles = count_col_triangles(arguments["<filename>"])
print "Number of col triangles = " + str(num_col_triangles)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day3/day3.py",
"copies": "1",
"size": "1398",
"license": "mit",
"hash": -8273505580315922000,
"line_mean": 20.84375,
"line_max": 68,
"alpha_frac": 0.530758226,
"autogenerated": false,
"ratio": 3.1557562076749437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4186514433674943,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 4
Usage:
day4.py <filename>
day4.py (-h | --help)
day4.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
import re
from docopt import docopt
from collections import defaultdict
def is_real_room(name, checksum):
name = name.translate(None, "-")
freq = defaultdict(int)
for c in name:
freq[c] += 1
success = True
count = 0
    # Sort alphabetically and then by frequency (Python's sort is stable, so
    # alphabetical order holds in case of ties)
freq = sorted(freq.items(), key=lambda x: x[0])
freq = sorted(freq, key=lambda x: x[1], reverse=True)
for (k,v) in freq:
if checksum[count] == k:
count += 1
if count == 5:
break
else:
success = False
return success
def decrypt_room(name, sector_id):
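    # Caesar-shift each lowercase letter forward by sector_id (mod 26),
    # wrapping past 'z', and map '-' to a space; e.g. the AoC example
    # decrypt_room("qzmt-zixmtkozy-ivhz", 343) gives "very encrypted name".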
shift = sector_id % 26
true_name = ""
for c in name:
if c == '-':
true_name += ' '
else:
#print "BEFORE: " + str(c) + " at " + str(ord(c)) + " with shift " + str(shift)
new_char = chr(ord(c) + shift)
if new_char > 'z':
new_char = chr(ord('a') + (ord(new_char) - ord('z')) - 1)
#print "AFTER: " + str(new_char) + " at " + str(ord(new_char))
true_name += new_char
return true_name
def sum_sector_id_of_real_rooms(filename):
fn = open(filename, 'r')
north_pole_sector_id = 0
sum_ids = 0
for l in fn:
m = re.match('^([\w+-]+)-(\d+?)\[(\w+?)\]$', l)
name = m.group(1)
sector_id = int(m.group(2))
checksum = m.group(3)
if is_real_room(name, checksum):
sum_ids += sector_id
if "north" in decrypt_room(name, sector_id):
north_pole_sector_id = sector_id
return (sum_ids, north_pole_sector_id)
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
print sum_sector_id_of_real_rooms(arguments["<filename>"])
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day4/day4.py",
"copies": "1",
"size": "2020",
"license": "mit",
"hash": 1329860324608635100,
"line_mean": 24.8974358974,
"line_max": 91,
"alpha_frac": 0.5252475248,
"autogenerated": false,
"ratio": 3.263327948303716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9191902585544698,
"avg_score": 0.019334577511803397,
"num_lines": 78
} |
"""AoC Day 5
Usage:
day5.py <input>
day5.py (-h | --help)
day5.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import hashlib
def find_8ch_ordered_pw(input):
pw = ""
count = 0
while len(pw) < 8:
m = hashlib.md5()
m.update(input + str(count))
if m.hexdigest().startswith("00000"):
pw += str(m.hexdigest()[5])
count += 1
print pw
def find_8ch_position_pw(input):
pw = list(" ")
added_chars = 0
count = 0
while added_chars < 8:
m = hashlib.md5()
m.update(input + str(count))
if m.hexdigest().startswith("00000"):
pos = int(m.hexdigest()[5], 16)
c = str(m.hexdigest()[6])
if pos < 8 and pw[pos] == " ":
pw[pos] = c
added_chars += 1
count += 1
print ''.join(pw)
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
find_8ch_ordered_pw(arguments["<input>"])
find_8ch_position_pw(arguments["<input>"])
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day5/day5.py",
"copies": "1",
"size": "1100",
"license": "mit",
"hash": 4861020407500950000,
"line_mean": 20.1538461538,
"line_max": 46,
"alpha_frac": 0.5063636364,
"autogenerated": false,
"ratio": 3.3033033033033035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9189140764489627,
"avg_score": 0.024105235042735044,
"num_lines": 52
} |
"""AoC Day 6
Usage:
day6.py <filename>
day6.py (-h | --help)
day6.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def update_char_count_dict(line, count_dict):
line = line.rstrip()
for i in range(len(line)):
if line[i] in count_dict[i].keys():
count_dict[i][line[i]] += 1
else:
count_dict[i][line[i]] = 1
return count_dict
def common_chars(filename, most):
fd = open(filename, 'r')
# initialize storage
l = fd.readline().rstrip()
count_dict = [{} for _ in range(len(l))]
count_dict = update_char_count_dict(l, count_dict)
for l in fd:
count_dict = update_char_count_dict(l.rstrip(), count_dict)
ecc = ""
for e in count_dict:
res = sorted(e.items(), key=lambda x: x[1], reverse=most)
ecc += res[0][0]
return ecc
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
print common_chars(arguments["<filename>"], True)
print common_chars(arguments["<filename>"], False)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day6/day6.py",
"copies": "1",
"size": "1100",
"license": "mit",
"hash": -4889100872572302000,
"line_mean": 21,
"line_max": 67,
"alpha_frac": 0.5718181818,
"autogenerated": false,
"ratio": 3.160919540229885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4232737722029885,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 7
Usage:
day7.py <filename>
day7.py (-h | --help)
day7.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import re
def find_all_bab(ip):
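    # Slide a two-character window over ip and collect every 'xyx' triple
    # (x != y); check_network runs this over the hypernet sequences to find
    # BABs, then looks for the matching ABA in the supernet.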
bab = []
if len(ip) >= 3:
address = [ip[0], ip[1]]
for i in range(2, len(ip)):
if address[0] != address[1] and ip[i] == address[0]:
bab.append(address[0] + address[1] + ip[i])
address = [address[1], ip[i]]
return bab
def is_abba(ip):
abba = False
if len(ip) >= 4:
address = [ip[0], ip[1]]
for i in range(2, len(ip) - 1):
if address[0] != address[1] and ip[i] == address[1] and ip[i + 1] == address[0]:
abba = True
break
else:
address = [address[1], ip[i]]
return abba
def split_network(line):
supernet = []
hypernet = []
matches = re.split('(\[\w*?\])', line.rstrip())
for m in matches:
if m[0] == '[':
hypernet.append(m[1:-1])
else:
supernet.append(m)
return (supernet, hypernet)
def check_network(filename):
fd = open(filename, 'r')
sup_hyp = []
for l in fd:
res = split_network(l)
sup_hyp.append(res)
tls = 0
for (supernet, hypernet) in sup_hyp:
found_in_hypernet = False
for h in hypernet:
if is_abba(''.join(h)):
found_in_hypernet = True
                break
if not found_in_hypernet:
for s in supernet:
if is_abba(''.join(s)):
tls += 1
break
ssl = 0
for (supernet, hypernet) in sup_hyp:
bab = []
for h in hypernet:
bab.append(find_all_bab(''.join(h)))
bab = [item for sublist in bab for item in sublist]
if len(bab) > 0:
found_in_supernet = False
for b in bab:
aba = b[1] + b[0] + b[1]
for s in supernet:
if aba in s:
found_in_supernet = True
ssl += 1
break
if found_in_supernet:
break
return (tls, ssl)
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
(tls, ssl) = check_network(arguments["<filename>"])
print "TLS: " + str(tls)
print "SSL: " + str(ssl)
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day7/day7.py",
"copies": "1",
"size": "2488",
"license": "mit",
"hash": -6333497820819321000,
"line_mean": 22.4716981132,
"line_max": 92,
"alpha_frac": 0.4569935691,
"autogenerated": false,
"ratio": 3.43646408839779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9311092730620223,
"avg_score": 0.016472985375513373,
"num_lines": 106
} |
"""AoC Day 8
Usage:
day8.py <width> <tall> <filename>
day8.py (-h | --help)
day8.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import re
class Display:
def __init__(self, width, tall):
self.width = width
self.tall = tall
self.display = [(['.'] * width) for i in range(tall)]
def __str__(self):
return '\n'.join(''.join(*zip(*row)) for row in self.display)
def create_rectangle(self, width, tall):
for w in range(width):
for t in range(tall):
self.display[t][w] = '#'
def rotate(self, row_col_inst, row_col_used, amount):
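        # new[i] = prev[i - amount]; Python's negative indexing supplies the
        # wrap-around, so this rotates the row right / the column down by
        # `amount` pixels.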
if row_col_inst == 'row':
prev = []
for w in range(self.width):
prev.append(self.display[row_col_used][w])
for w in range(self.width):
self.display[row_col_used][w] = prev[w - amount % self.width]
elif row_col_inst == 'column':
prev = []
            for t in range(self.tall):
                prev.append(self.display[t][row_col_used])
            for t in range(self.tall):
                self.display[t][row_col_used] = prev[t - amount % self.tall]
else:
print "Expected either row or column, not: " + row_col_inst
def num_pixels_on(self):
count = 0
for w in range(self.width):
for t in range(self.tall):
if self.display[t][w] == '#':
count += 1
return count
def process_instructions(display, filename):
fn = open(filename, 'r')
for l in fn:
instructions = l.split()
print instructions
if instructions[0] == "rect":
size = instructions[1].split('x')
display.create_rectangle(int(size[0]), int(size[1]))
elif l.startswith("rotate"):
row_col_inst = instructions[1]
row_col_used = int(instructions[2].split('=')[1])
amount = int(instructions[4])
display.rotate(row_col_inst, row_col_used, amount)
else:
print "Unrecognized instruction: " + l
print display
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
width = int(arguments["<width>"])
tall = int(arguments["<tall>"])
d = Display(width, tall)
process_instructions(d, arguments["<filename>"])
print d.num_pixels_on()
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day8/day8.py",
"copies": "1",
"size": "2433",
"license": "mit",
"hash": -7911872293658624000,
"line_mean": 29.037037037,
"line_max": 77,
"alpha_frac": 0.5318536786,
"autogenerated": false,
"ratio": 3.588495575221239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4620349253821239,
"avg_score": null,
"num_lines": null
} |
"""AoC Day 9
Usage:
day9.py <filename>
day9.py (-h | --help)
day9.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
def decompress_length_ver2(string):
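    # Version-2 markers expand recursively, so compute the final length
    # without materializing the string: each (AxB) marker contributes B times
    # the recursive length of the next A characters.
    # e.g. decompress_length_ver2("X(8x2)(3x3)ABCY") == 20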
length = 0
i = 0
while i < len(string):
if string[i] == '(':
end = string.find(")", i)
substr = string[i+1:end]
counts = substr.split('x')
length += int(counts[1]) * decompress_length_ver2(string[end+1:end+1+int(counts[0])])
i = end + int(counts[0])
else:
length += 1
i += 1
return length
def decompress_string(string):
    result = ""
    i = 0
while i < len(string):
if string[i] == '(':
end = string.find(")", i)
substr = string[i+1:end]
counts = substr.split('x')
repeat_substr = string[end+1:end+1+int(counts[0])]
result += repeat_substr * int(counts[1])
i = end + int(counts[0])
else:
result += str(string[i])
i += 1
#print result
return len(result)
def decompress(filename):
fn = open(filename, 'r')
for l in fn:
print l.rstrip()
print "\tVersion 1 = " + str(decompress_string(l.rstrip()))
print "\tVersion 2 = " + str(decompress_length_ver2(l.rstrip()))
if __name__ == '__main__':
arguments = docopt(__doc__, version='1')
decompress(arguments["<filename>"])
| {
"repo_name": "arink/advent-of-code",
"path": "2016/day9/day9.py",
"copies": "1",
"size": "1524",
"license": "mit",
"hash": -7464098797757518000,
"line_mean": 21.0869565217,
"line_max": 97,
"alpha_frac": 0.5144356955,
"autogenerated": false,
"ratio": 3.4247191011235953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44391547966235956,
"avg_score": null,
"num_lines": null
} |
"""A one line summary of the module or program, terminated by a period.
Leave one blank line. The rest of this docstring should contain an
overall description of the module or program. Optionally, it may also
contain a brief description of exported classes and functions and/or usage
examples.
Typical usage example:
foo = ClassFoo()
bar = foo.FunctionBar()
"""
# above: "2.8.2 Modules" section example
# https://google.github.io/styleguide/pyguide.html#382-modules
# Examples from the official "Google Python Style Guide" documentation:
# * As HTML: https://google.github.io/styleguide/pyguide.html
# * Source Markdown:
# https://github.com/google/styleguide/blob/gh-pages/pyguide.md
import os
from .expected import Expectation
expectation = Expectation()
expect = expectation.expect
# module docstring expected violations:
expectation.expected.add((
os.path.normcase(__file__),
"D213: Multi-line docstring summary should start at the second line"))
# "3.8.3 Functions and Methods" section example
# https://google.github.io/styleguide/pyguide.html#383-functions-and-methods
@expect("D213: Multi-line docstring summary should start at the second line",
arg_count=3)
@expect("D401: First line should be in imperative mood "
"(perhaps 'Fetch', not 'Fetches')", arg_count=3)
@expect("D406: Section name should end with a newline "
"('Raises', not 'Raises:')", arg_count=3)
@expect("D406: Section name should end with a newline "
"('Returns', not 'Returns:')", arg_count=3)
@expect("D407: Missing dashed underline after section ('Raises')", arg_count=3)
@expect("D407: Missing dashed underline after section ('Returns')",
arg_count=3)
@expect("D413: Missing blank line after last section ('Raises')", arg_count=3)
def fetch_bigtable_rows(big_table, keys, other_silly_variable=None):
"""Fetches rows from a Bigtable.
Retrieves rows pertaining to the given keys from the Table instance
represented by big_table. Silly things may happen if
other_silly_variable is not None.
Args:
big_table: An open Bigtable Table instance.
keys: A sequence of strings representing the key of each table row
to fetch.
other_silly_variable: Another optional variable, that has a much
longer name than the other args, and which does nothing.
Returns:
A dict mapping keys to the corresponding table row data
fetched. Each row is represented as a tuple of strings. For
example:
{'Serak': ('Rigel VII', 'Preparer'),
'Zim': ('Irk', 'Invader'),
'Lrrr': ('Omicron Persei 8', 'Emperor')}
If a key from the keys argument is missing from the dictionary,
then that row was not found in the table.
Raises:
IOError: An error occurred accessing the bigtable.Table object.
"""
# "3.8.4 Classes" section example
# https://google.github.io/styleguide/pyguide.html#384-classes
@expect("D203: 1 blank line required before class docstring (found 0)")
@expect("D213: Multi-line docstring summary should start at the second line")
@expect("D406: Section name should end with a newline "
"('Attributes', not 'Attributes:')")
@expect("D407: Missing dashed underline after section ('Attributes')")
@expect("D413: Missing blank line after last section ('Attributes')")
class SampleClass:
"""Summary of class here.
Longer class information....
Longer class information....
Attributes:
likes_spam: A boolean indicating if we like SPAM or not.
eggs: An integer count of the eggs we have laid.
"""
@expect("D401: First line should be in imperative mood "
"(perhaps 'Init', not 'Inits')", arg_count=2)
def __init__(self, likes_spam=False):
"""Inits SampleClass with blah."""
if self: # added to avoid NameError when run via @expect decorator
self.likes_spam = likes_spam
self.eggs = 0
@expect("D401: First line should be in imperative mood "
"(perhaps 'Perform', not 'Performs')", arg_count=1)
def public_method(self):
"""Performs operation blah."""
| {
"repo_name": "GreenSteam/pep257",
"path": "src/tests/test_cases/canonical_google_examples.py",
"copies": "3",
"size": "4176",
"license": "mit",
"hash": 3283959215609520600,
"line_mean": 37.6666666667,
"line_max": 79,
"alpha_frac": 0.6829501916,
"autogenerated": false,
"ratio": 3.8846511627906977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 108
} |
# a one-solution labyrinth generator
# outputs: text (debug) or bitmap (TGA)
# algorithm: dumb (blind) or smart (maintains a TODO list)
# Ange Albertini, BSD Licence 2013
# output functions #############################################################
def printmap():
for j in xrange(HEIGHT):
print "".join(map[WIDTH * j: WIDTH * (j + 1)])
return
def writeTGA():
import struct
ImageIDField = 0
ColorMap = 1
ImageType = 1
PaletteOffset = 0
ColorCount = 1
ColorMapSize = 24
X = 0
Y = 0
LenX = WIDTH
LenY = HEIGHT
palette = [8, 0, 255, 255, 255]
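    # Note: the struct below packs only 16 of TGA's 18 header bytes; the
    # first two palette values (8, 0) appear to double as the missing
    # pixel-depth and image-descriptor fields, leaving (255, 255, 255) as
    # the single 24-bit color-map entry.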
r = struct.pack("<BBBHHBHHHH",
ImageIDField, ColorMap, ImageType, PaletteOffset, ColorCount,
ColorMapSize, X, Y, LenX, LenY)
r += struct.pack(("<%iB" % len(palette)), *palette)
r += "".join(map) # Warning: it's saved UPSIDE DOWN!
with open("result.tga", "wb") as f:
f.write(r)
return
################################################################################
def fill(x, y):
map[x + WIDTH * y] = FULL
return
def square(X, Y, SizeX, SizeY):
assert X % 2 == 0
assert Y % 2 == 0
assert SizeX % 2 == 1
assert SizeY % 2 == 1
for i in range(SizeX):
fill(i + X, Y)
fill(i + X, Y + SizeY - 1)
for j in range(SizeY):
fill(X, Y + j)
fill(X + SizeX - 1, Y + j)
return
def init():
square(0, 0, WIDTH, HEIGHT)
# you can also draw in advance some areas for fancy effects
square(WIDTH * 1 / 3 - 1, HEIGHT * 1 / 3 - 1, WIDTH * 1 / 3, HEIGHT * 1 / 3)
fill(1, 2) # start
fill(WIDTH - 2, HEIGHT - 3) # end
fill(2, 2) # first main point
return
# algorithms ###################################################################
def brutefill():
DELTAS = [-1, 1, WIDTH, -WIDTH]
# how many inter-wall left to draw
count = (W - 1) * (H - 1) - 1
while (count > 0):
# let's take a random 'main' point
X = random.randrange(0, W - 1)
Y = random.randrange(0, H - 1)
loc = (2 * X + 2) + WIDTH * (2 * Y + 2)
# is it already explored ?
if map[loc] == FULL:
delta = DELTAS[random.randrange(0, len(DELTAS))]
# not explored yet ?
if map[loc + delta * 2] == EMPTY:
# let's fill it
map[loc + delta * 2] = FULL
# and join both points
map[loc + delta] = FULL
count -= 1
return
def smartfill():
UP = [-1, 0]
DOWN = [1, 0]
LEFT = [0, -1]
RIGHT = [0, 1]
DIRS = [UP, DOWN, LEFT, RIGHT]
todo = [[2, 2, RIGHT], [2, 2, DOWN]]
while (todo):
X, Y, [DX, DY] = todo.pop(random.randrange(0, len(todo)))
# draw the dots
        iX = X + DX
iY = Y + DY
fill(iX, iY)
tX = iX + DX
tY = iY + DY
fill(tX, tY)
# check the directions related to the new dot
for dx, dy in DIRS:
# removing any already existing direction pointing to the new dot
if [tX + 2 * dx, tY + 2 * dy, [-dx, -dy]] in todo:
todo.remove([tX + 2 * dx, tY + 2 * dy, [-dx, -dy]])
# adding any empty pixel to be processed
if map[tX + dx * 2 + WIDTH * (tY + dy * 2)] == EMPTY:
                todo += [[tX, tY, [dx, dy]]]
return
# main #########################################################################
import random
# TGA style
EMPTY, FULL = "\0", "\xFF"
# text style
#EMPTY, FULL = " ", "*"
W = 64
H = 38
WIDTH = 2 * W + 1
HEIGHT = 2 * H + 1
map = [EMPTY] * WIDTH * HEIGHT
init()
smartfill() # brutefill()
writeTGA() # printmap()
| {
"repo_name": "angea/corkami",
"path": "misc/laby/laby.py",
"copies": "1",
"size": "3954",
"license": "bsd-2-clause",
"hash": 1656759654508526300,
"line_mean": 21.5357142857,
"line_max": 81,
"alpha_frac": 0.4367728882,
"autogenerated": false,
"ratio": 3.4472537053182215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9368621997381832,
"avg_score": 0.003080919227277967,
"num_lines": 168
} |
a = open("Q11.txt")
import numpy as np
import operator
def mulprod(x):
return reduce(operator.mul, x, 1)
grid = []
for i in a.read().strip().split('\n'):
grid.append(map(int, i.strip().split(' ')))
grid = np.array(grid)
## horizontal
max_sum = 0
for i in range(0,20):
for j in range(0,17):
cur = mulprod(grid[i, [j, j+1, j+2, j+3]])
if cur > max_sum:
max_sum = cur
elements = grid[i, [j, j+1, j+2, j+3]]
## diagonal (top-left to bottom-right)
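# grid[[i, i+1, i+2, i+3], [j, j+1, j+2, j+3]] is NumPy fancy indexing: it
# selects the four diagonal elements (i+k, j+k), not a 4x4 block.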
for i in range(0,17):
for j in range(0,17):
cur = mulprod(grid[[i, i+1, i+2, i+3], [j, j+1, j+2, j+3]])
if cur > max_sum:
max_sum = cur
elements = grid[[i, i+1, i+2, i+3], [j, j+1, j+2, j+3]]
## vertical
for i in range(0,20):
for j in range(0,17):
cur = mulprod(grid[[j, j+1, j+2, j+3], i])
if cur > max_sum:
max_sum = cur
elements = grid[[j, j+1, j+2, j+3], i]
## diagonal (bottom-left to top-right)
for i in range(3,20):
for j in range(0, 17):
cur = mulprod(grid[[i, i-1, i-2, i-3], [j, j+1, j+2, j+3]])
if cur > max_sum:
max_sum = cur
elements = grid[[i, i-1, i-2, i-3], [j, j+1, j+2, j+3]]
| {
"repo_name": "weiwang/project_euler",
"path": "python/Q11.py",
"copies": "1",
"size": "1218",
"license": "mit",
"hash": -3390894165758793000,
"line_mean": 26.0666666667,
"line_max": 67,
"alpha_frac": 0.4958949097,
"autogenerated": false,
"ratio": 2.465587044534413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8258406131421747,
"avg_score": 0.04061516456253299,
"num_lines": 45
} |
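# NB: this script appears to assume an interactive pylab-style session, so
# numpy names (unique, array, arange, savetxt, ...) are used unqualified.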
a = open("starsin_new_all_ordered.txt",'r')
a = open("test4_selfg.txt",'r')
a = open("test14.txt",'r')
a = open("test18.txt",'r')
al = a.readlines()
bl = []
name = []
for each in al:
name.append(each.split()[1])
name_ind = []
name_ind_first = []
name_u = unique(name)
name = array(name)
for each in name_u:
pick = name == each
name_ind.append(arange(0,len(name))[pick] )
name_ind_first.append(arange(0,len(name))[pick] [0])
name_ind_sort = sort(name_ind_first)
name_order = name_u[argsort(name_ind_first)]
#name_ind_sort = [np.int(a)-1 for a in name_ind_sort]
#cluster_ages = names[name_ind_sort[0:-1]]
name = array(name)
a_M107 = 14.0
a_M13 = 11.65
a_M15 = 12.0
a_M2 = 13
a_M3 = 11.4
a_M5 = 13.0
a_M53 = 12.67
a_M67 = 3.2
a_M71 = 11.0  # was 10, but some literature says 11 or 12
a_M92 = 14.
a_N188 = 5.
a_N2158 = 1.05
a_N2420 = 2.
a_N4147 = 14.
a_N5466 = 12.5
a_N6791 = 5
a_N6819 = 2.5
a_N7789 = 1.6
a_Pleiades = 0.15
d = {}
# Fill in the entries one by one
# Static lists for purpose of illustration
names = name_u
ages = [a_M107, a_M13, a_M15, a_M2, a_M3, a_M5, a_M53, a_M67, a_M71, a_M92, a_N188, a_N2158, a_N2420, a_N4147, a_N5466, a_N6791, a_N6819, a_N7789, a_Pleiades]
ages_dict = {}
for i in range(len(names)):
ages_dict[names[i]] = ages[i]
age_vals = ones(len(name))
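# Broadcast each cluster's age onto the row indices of its member stars.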
for one,two in zip(name_u, name_ind):
# one = "a_"+one
age_vals[two] = ages_dict[one]
savetxt("ages.txt", age_vals, fmt = '%s' )
n_each = [0] + list(name_ind_sort)
num_each = diff(n_each)
#array(['a_M15', 'a_M53', 'a_N5466', 'a_N4147', 'a_M13', 'a_M2', 'a_M3', 'a_M5', 'a_M107',
# 'a_M71', 'a_N2158', 'a_N2420', 'a_Pleiades', 'a_N7789', 'a_M67', 'a_N6819',
# 'a_N188', 'a_N6791']
| {
"repo_name": "mkness/TheCannon",
"path": "code/ages.py",
"copies": "1",
"size": "1723",
"license": "mit",
"hash": 5521604477395461000,
"line_mean": 25.5076923077,
"line_max": 159,
"alpha_frac": 0.5983749275,
"autogenerated": false,
"ratio": 2.008158508158508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7989210785931282,
"avg_score": 0.023464529945445213,
"num_lines": 65
} |
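# NB: like ages.py, this script appears to assume a pylab-style session
# (loadtxt, genfromtxt, and scipy's interpolate are expected in scope).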
a = open('test14_edited_own.txt', 'r')
ta,ga,feha =loadtxt('test14_edited_own.txt', usecols = (3,5,7), unpack =1)
tc,gc,fehc =loadtxt('test14_edited_own.txt', usecols = (4,6,8), unpack =1)
#a = open('test14.txt', 'r')
al = a.readlines()
name = []
bl = []
for each in al:
name.append(each.split()[1])
bl.append(each.split()[0])
bl =array(bl)
unames = unique(name)
name = array(name)
take = []
for each in unames:
take.append(name == each )
J,K = loadtxt("all_2mass_download_edit_cut.txt", usecols = (11, 19), unpack =1)
ebmv = loadtxt("all_extinction_edit_cut.txt", usecols = (3,), unpack =1)
J = array(J)
K = array(K)
JmK = J-K
ebmv = array(ebmv)
ejmk = 0.535*ebmv
JmKo = J-K-ejmk
dir1= '/Users/ness/old_laptop/workdir/Apogee/isochrones/'
#f_M107 ,f_M13, f_M15, f_M2, f_M3, f_M5, f_M53, f_M71, f_M92, f_N2158, f_N2420, f_N4147, f_N5446, f_M45, f_M67, f_N188, f_N6791, f_N6819, f_N7789 = \
# [
#names1 = ["f_M92","f_M15", "f_M53", "f_N5466", "f_N4147", "f_M2", "f_M13", "f_M3", "f_M5", "f_M107", "f_M71", "f_N2158", "f_M35", "f_N2420", "f_N188", "f_M67", "f_N7789", "f_M45","f_N6819",
#"f_N6791" ]
#metals1 = [ -2.35, -2.33,-2.06, -1.98, -1.78, -1.66, -1.58, -1.50, -1.33, -1.03, -0.82, -0.28, -0.21, -0.35, -0.03, -0.01, 0.02, 0.03, 0.09, 0.47]
names1 = ["f_M92","f_M15", "f_M53", "f_N5466", "f_N4147", "f_M2", "f_M13", "f_M3", "f_M5", "f_M107", "f_M71", "f_N2158", "f_M35", "f_N2420", "f_N188", "f_M67", "f_N7789", "f_N6819",
"f_N6791" ]
metals1 = [ -2.35, -2.33,-2.06, -1.98, -1.78, -1.66, -1.58, -1.50, -1.33, -1.03, -0.82, -0.28, -0.21, -0.35, -0.03, -0.01, 0.02, 0.09, 0.47]
feh_dict = {}
for i in range(len(names1)):
feh_dict[names1[i]] = metals1[i]
M107 = 'M107_edit.txt'
M13 = 'M13.txt'
M15 = 'M15.txt'
M2 = 'M2.txt'
M3 = 'M3.txt'
M5 = 'M5.txt'
M53 = 'M53.txt'
M71 = 'M71.txt'
M92 = 'M92.txt'
N2158 = 'N2158.txt'
N2420 = 'N2420.txt'
N4147 = 'N4147.txt'
N5466 = 'N5466.txt'
#Pleiades = 'Pleiades.txt'
M67 = 'M67.txt'
N118 = 'N118.txt'
N6791 = 'N6791.txt'
N6819 = 'N6819.txt'
N7789 = 'N7789.txt'
K = JmKo
def theta(b0,b1,b2,b3,b4,b5, feh, JmKo):
result = b0+b1*JmKo + b2*JmKo*JmKo + b3*JmKo*feh + b4*feh + b5*feh*feh
return 5040.0/result
b0, b1, b2, b3, b4, b5, Na, Nb = 0.6517, 0.6312, 0.0168, -0.0381, 0.0256, 0.0013, 145, 94
nameall = []
tall = []
fehall = []
gall = []
gaall =[]
taall = []
fehaall = []
blall = []
for each in unames:
fehinput = "f_"+each
isoinput = each+".txt"
fehval = feh_dict[(fehinput)]
takeit = array(name) == each
JmKotake = JmKo[takeit]
bltake = bl[takeit]
fehatake = feha[takeit]
fehval = fehc[takeit]
tatake = ta[takeit]
gatake = ga[takeit]
temperatures = tc[takeit]
isoreadin = dir1+isoinput
logt, g_iso = genfromtxt(isoreadin, usecols = (5,6), unpack =1)
t_iso = 10**logt
take1 = list(g_iso).index(min(g_iso))
#take1 = diff(g_iso) > 0
#nums = arange(0,len(g_iso))[take1][0]
t_iso = t_iso[0:take1]
g_iso = g_iso[0:take1]
#temperatures = theta(b0,b1,b2,b3,b4,b5,fehval , JmKotake)
#temp_take = temperatures > min(t_iso)
#temperatures2 = temperatures[temp_take]
y_new_all = []
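    # For each star's temperature, interpolate log g from the isochrone:
    # restrict the track to +/-400 K around the star (and g < 4), build an
    # interpolator g(T), and flag stars cooler than the window with the
    # 9999.9 sentinel value.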
for t_each in temperatures:
sel_t = logical_and(t_iso > t_each -400, t_iso < t_each + 400 )
sel = logical_and(g_iso < 4, sel_t)
t_pick = t_iso[sel]
g_pick = g_iso[sel]
t_new = arange(min(t_pick), max(t_pick), 1)
g_new = arange(min(g_pick), max(g_pick), 0.01)
fa = interpolate.interp1d(sort(t_pick), g_pick[argsort(t_pick)])
        if t_each > min(t_pick):
            new_ydata = fa(t_each)
        else:
            new_ydata = 9999.9
y_new_all.append(new_ydata)
#fehval2 = [fehval]*len(y_new_all)
fehval2 = fehval
each2 = [each]*len(y_new_all)
nameall.append(each2)
tall.append(temperatures)
gall.append(y_new_all)
fehall.append(fehval2)
taall.append(tatake)
gaall.append(gatake)
fehaall.append(fehatake)
blall.append(bltake)
blall = hstack((blall))
tall = hstack((tall))
gall = hstack((gall))
fehall = hstack((fehall))
taall = hstack((taall))
gaall = hstack((gaall))
fehaall = hstack((fehaall))
nameall = hstack((nameall))
g_new = [round(a, 2) for a in gall]
t_new = [round(a, 2) for a in tall]
g_new =array(g_new)
t_new =array(t_new)
arangeit = argsort(blall)
blall =array(blall)
data = zip(blall[arangeit], nameall[arangeit], taall[arangeit], t_new[arangeit], gaall[arangeit], g_new[arangeit], fehaall[arangeit], fehall[arangeit])
savetxt("mkn_labels_Atempfeh.txt", data, fmt = "%s" )
al = array(al)
| {
"repo_name": "mkness/TheCannon",
"path": "code/fitiso_apogeetempfeh.py",
"copies": "1",
"size": "4621",
"license": "mit",
"hash": -8588441183202854000,
"line_mean": 32.2446043165,
"line_max": 190,
"alpha_frac": 0.584938325,
"autogenerated": false,
"ratio": 2.0338908450704225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.31188291700704224,
"avg_score": null,
"num_lines": null
} |
# A OpenTraced client for a Python service that implements the store interface.
from __future__ import print_function
import time
import argparse
from builtins import input, range
import grpc
from jaeger_client import Config
from grpc_opentracing import open_tracing_client_interceptor, \
SpanDecorator
from grpc_opentracing.grpcext import intercept_channel
import store_pb2
class CommandExecuter(object):
def __init__(self, stub):
self._stub = stub
def _execute_rpc(self, method, via, timeout, request_or_iterator):
if via == 'future':
result = getattr(self._stub, method).future(request_or_iterator,
timeout)
return result.result()
elif via == 'with_call':
return getattr(self._stub, method).with_call(request_or_iterator,
timeout)[0]
else:
return getattr(self._stub, method)(request_or_iterator, timeout)
def do_stock_item(self, via, timeout, arguments):
if len(arguments) != 1:
print('must input a single item')
return
request = store_pb2.AddItemRequest(name=arguments[0])
self._execute_rpc('AddItem', via, timeout, request)
def do_stock_items(self, via, timeout, arguments):
if not arguments:
print('must input at least one item')
return
requests = [store_pb2.AddItemRequest(name=name) for name in arguments]
self._execute_rpc('AddItems', via, timeout, iter(requests))
def do_sell_item(self, via, timeout, arguments):
if len(arguments) != 1:
print('must input a single item')
return
request = store_pb2.RemoveItemRequest(name=arguments[0])
response = self._execute_rpc('RemoveItem', via, timeout, request)
if not response.was_successful:
print('unable to sell')
def do_sell_items(self, via, timeout, arguments):
if not arguments:
print('must input at least one item')
return
requests = [
store_pb2.RemoveItemRequest(name=name) for name in arguments
]
response = self._execute_rpc('RemoveItems', via, timeout,
iter(requests))
if not response.was_successful:
print('unable to sell')
def do_inventory(self, via, timeout, arguments):
if arguments:
print('inventory does not take any arguments')
return
if via != 'functor':
print('inventory can only be called via functor')
return
request = store_pb2.Empty()
result = self._execute_rpc('ListInventory', via, timeout, request)
for query in result:
print(query.name, '\t', query.count)
def do_query_item(self, via, timeout, arguments):
if len(arguments) != 1:
print('must input a single item')
return
request = store_pb2.QueryItemRequest(name=arguments[0])
query = self._execute_rpc('QueryQuantity', via, timeout, request)
print(query.name, '\t', query.count)
def do_query_items(self, via, timeout, arguments):
if not arguments:
print('must input at least one item')
return
if via != 'functor':
print('query_items can only be called via functor')
return
requests = [store_pb2.QueryItemRequest(name=name) for name in arguments]
result = self._execute_rpc('QueryQuantities', via, timeout,
iter(requests))
for query in result:
print(query.name, '\t', query.count)
def execute_command(command_executer, command, arguments):
via = 'functor'
timeout = None
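    # Flags come in (name, value) pairs at the front of the argument list;
    # the first non-flag token and everything after it become the command's
    # own arguments.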
for argument_index in range(0, len(arguments), 2):
argument = arguments[argument_index]
        if argument == '--via' and argument_index + 1 < len(arguments):
            via = arguments[argument_index + 1]
            if via not in ('functor', 'with_call', 'future'):
                print('invalid --via option')
                return
elif argument == '--timeout' and argument_index + 1 < len(arguments):
timeout = float(arguments[argument_index + 1])
else:
arguments = arguments[argument_index:]
break
try:
getattr(command_executer, 'do_' + command)(via, timeout, arguments)
except AttributeError:
print('unknown command: \"%s\"' % command)
INSTRUCTIONS = \
"""Enter commands to interact with the store service:
stock_item Stock a single item.
stock_items Stock one or more items.
sell_item Sell a single item.
sell_items Sell one or more items.
inventory List the store's inventory.
query_item Query the inventory for a single item.
query_items Query the inventory for one or more items.
You can also optionally provide a --via argument to instruct the RPC to be
initiated via either the functor, with_call, or future method; or provide a
--timeout argument to set a deadline for the RPC to be completed.
Example:
> stock_item apple
> stock_items --via future apple milk
> inventory
apple 2
milk 1
"""
def read_and_execute(command_executer):
print(INSTRUCTIONS)
while True:
try:
line = input('> ')
components = line.split()
if not components:
continue
command = components[0]
arguments = components[1:]
execute_command(command_executer, command, arguments)
except EOFError:
break
class StoreSpanDecorator(SpanDecorator):
def __call__(self, span, rpc_info):
span.set_tag('grpc.method', rpc_info.full_method)
span.set_tag('grpc.headers', str(rpc_info.metadata))
span.set_tag('grpc.deadline', str(rpc_info.timeout))
def run():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
parser.add_argument(
'--include_grpc_tags',
action='store_true',
help='set gRPC-specific tags on spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name='store-client')
tracer = config.initialize_tracer()
span_decorator = None
if args.include_grpc_tags:
span_decorator = StoreSpanDecorator()
tracer_interceptor = open_tracing_client_interceptor(
tracer, log_payloads=args.log_payloads, span_decorator=span_decorator)
channel = grpc.insecure_channel('localhost:50051')
channel = intercept_channel(channel, tracer_interceptor)
stub = store_pb2.StoreStub(channel)
read_and_execute(CommandExecuter(stub))
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
run()
| {
"repo_name": "yuewko/themis",
"path": "vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/store/store_client.py",
"copies": "4",
"size": "7169",
"license": "apache-2.0",
"hash": -2520362107607347000,
"line_mean": 32.9763033175,
"line_max": 80,
"alpha_frac": 0.5960384991,
"autogenerated": false,
"ratio": 4.1997656707674285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6795804169867429,
"avg_score": null,
"num_lines": null
} |
# A OpenTraced server for a Python service that implements the store interface.
from __future__ import print_function
import time
import argparse
from collections import defaultdict
from six import iteritems
import grpc
from concurrent import futures
from jaeger_client import Config
from grpc_opentracing import open_tracing_server_interceptor, \
SpanDecorator
from grpc_opentracing.grpcext import intercept_server
import store_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Store(store_pb2.StoreServicer):
def __init__(self):
self._inventory = defaultdict(int)
def AddItem(self, request, context):
self._inventory[request.name] += 1
return store_pb2.Empty()
def AddItems(self, request_iter, context):
for request in request_iter:
self._inventory[request.name] += 1
return store_pb2.Empty()
def RemoveItem(self, request, context):
new_quantity = self._inventory[request.name] - 1
if new_quantity < 0:
return store_pb2.RemoveItemResponse(was_successful=False)
self._inventory[request.name] = new_quantity
return store_pb2.RemoveItemResponse(was_successful=True)
def RemoveItems(self, request_iter, context):
response = store_pb2.RemoveItemResponse(was_successful=True)
for request in request_iter:
response = self.RemoveItem(request, context)
if not response.was_successful:
break
return response
def ListInventory(self, request, context):
for name, count in iteritems(self._inventory):
if not count:
continue
else:
yield store_pb2.QuantityResponse(name=name, count=count)
def QueryQuantity(self, request, context):
count = self._inventory[request.name]
return store_pb2.QuantityResponse(name=request.name, count=count)
def QueryQuantities(self, request_iter, context):
for request in request_iter:
count = self._inventory[request.name]
yield store_pb2.QuantityResponse(name=request.name, count=count)
class StoreSpanDecorator(SpanDecorator):
def __call__(self, span, rpc_info):
span.set_tag('grpc.method', rpc_info.full_method)
span.set_tag('grpc.headers', str(rpc_info.metadata))
span.set_tag('grpc.deadline', str(rpc_info.timeout))
def serve():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
parser.add_argument(
'--include_grpc_tags',
action='store_true',
help='set gRPC-specific tags on spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name='store-server')
tracer = config.initialize_tracer()
span_decorator = None
if args.include_grpc_tags:
span_decorator = StoreSpanDecorator()
tracer_interceptor = open_tracing_server_interceptor(
tracer, log_payloads=args.log_payloads, span_decorator=span_decorator)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
server = intercept_server(server, tracer_interceptor)
store_pb2.add_StoreServicer_to_server(Store(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
serve()
| {
"repo_name": "johnbelamaric/themis",
"path": "vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/store/store_server.py",
"copies": "4",
"size": "3747",
"license": "apache-2.0",
"hash": 3250750088118991400,
"line_mean": 29.7131147541,
"line_max": 79,
"alpha_frac": 0.6394448892,
"autogenerated": false,
"ratio": 3.977707006369427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6617151895569428,
"avg_score": null,
"num_lines": null
} |
"""aospy DataLoader objects"""
import logging
import os
import pprint
import warnings
import numpy as np
import xarray as xr
from .internal_names import (
ETA_STR,
GRID_ATTRS,
TIME_STR,
TIME_BOUNDS_STR,
)
from .utils import times, io
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
"""Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
    ``preprocess`` argument of ``xr.open_mfdataset``. It makes sure that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example for why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``.
"""
def func_wrapper(ds):
return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
return func_wrapper
def grid_attrs_to_aospy_names(data, grid_attrs=None):
"""Rename grid attributes to be consistent with aospy conventions.
Search all of the dataset's coords and dims looking for matches to known
grid attribute names; any that are found subsequently get renamed to the
aospy name as specified in ``aospy.internal_names.GRID_ATTRS``.
Also forces any renamed grid attribute that is saved as a dim without a
coord to have a coord, which facilitates subsequent slicing/subsetting.
This function does not compare to Model coordinates or add missing
coordinates from Model objects.
Parameters
----------
data : xr.Dataset
grid_attrs : dict (default None)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
xr.Dataset
Data returned with coordinates consistent with aospy
conventions
"""
if grid_attrs is None:
grid_attrs = {}
# Override GRID_ATTRS with entries in grid_attrs
attrs = GRID_ATTRS.copy()
for k, v in grid_attrs.items():
if k not in attrs:
raise ValueError(
'Unrecognized internal name, {!r}, specified for a custom '
'grid attribute name. See the full list of valid internal '
'names below:\n\n{}'.format(k, list(GRID_ATTRS.keys())))
attrs[k] = (v, )
dims_and_vars = set(data.variables).union(set(data.dims))
for name_int, names_ext in attrs.items():
data_coord_name = set(names_ext).intersection(dims_and_vars)
if data_coord_name:
data = data.rename({data_coord_name.pop(): name_int})
return set_grid_attrs_as_coords(data)
def set_grid_attrs_as_coords(ds):
"""Set available grid attributes as coordinates in a given Dataset.
Grid attributes are assumed to have their internal aospy names. Grid
attributes are set as coordinates, such that they are carried by all
selected DataArrays with overlapping index dimensions.
Parameters
----------
ds : Dataset
Input data
Returns
-------
Dataset
Dataset with grid attributes set as coordinates
"""
grid_attrs_in_ds = set(GRID_ATTRS.keys()).intersection(
set(ds.coords) | set(ds.data_vars))
ds = ds.set_coords(grid_attrs_in_ds)
return ds
def _maybe_cast_to_float64(da):
"""Cast DataArrays to np.float64 if they are of type np.float32.
Parameters
----------
da : xr.DataArray
Input DataArray
Returns
-------
DataArray
"""
if da.dtype == np.float32:
logging.warning('Datapoints were stored using the np.float32 datatype.'
'For accurate reduction operations using bottleneck, '
'datapoints are being cast to the np.float64 datatype.'
' For more information see: https://github.com/pydata/'
'xarray/issues/1346')
return da.astype(np.float64)
else:
return da
def _sel_var(ds, var, upcast_float32=True):
"""Select the specified variable by trying all possible alternative names.
Parameters
----------
ds : Dataset
Dataset possibly containing var
var : aospy.Var
Variable to find data for
upcast_float32 : bool (default True)
Whether to cast a float32 DataArray up to float64
Returns
-------
DataArray
Raises
------
    LookupError
        If the variable is not found in the Dataset under any of its names
"""
for name in var.names:
try:
da = ds[name].rename(var.name)
if upcast_float32:
return _maybe_cast_to_float64(da)
else:
return da
except KeyError:
pass
msg = '{0} not found among names: {1} in\n{2}'.format(var, var.names, ds)
raise LookupError(msg)
def _prep_time_data(ds):
"""Prepare time coordinate information in Dataset for use in aospy.
1. If the Dataset contains a time bounds coordinate, add attributes
representing the true beginning and end dates of the time interval used
to construct the Dataset
2. If the Dataset contains a time bounds coordinate, overwrite the time
coordinate values with the averages of the time bounds at each timestep
3. Decode the times into np.datetime64 objects for time indexing
Parameters
----------
ds : Dataset
Pre-processed Dataset with time coordinate renamed to
internal_names.TIME_STR
Returns
-------
Dataset
The processed Dataset
"""
ds = times.ensure_time_as_index(ds)
if TIME_BOUNDS_STR in ds:
ds = times.ensure_time_avg_has_cf_metadata(ds)
ds[TIME_STR] = times.average_time_bounds(ds)
else:
logging.warning("dt array not found. Assuming equally spaced "
"values in time, even though this may not be "
"the case")
ds = times.add_uniform_time_weights(ds)
    # Suppress the warning that enable_cftimeindex is a no-op; we keep
    # setting it for now to maintain backwards compatibility with older
    # xarray versions.
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
with xr.set_options(enable_cftimeindex=True):
ds = xr.decode_cf(ds, decode_times=True, decode_coords=False,
mask_and_scale=True)
return ds
def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
data_vars='minimal', coords='minimal',
grid_attrs=None, **kwargs):
"""Load a Dataset from a list or glob-string of files.
Datasets from files are concatenated along time,
and all grid attributes are renamed to their aospy internal names.
Parameters
----------
file_set : list or str
List of paths to files or glob-string
preprocess_func : function (optional)
Custom function to call before applying any aospy logic
to the loaded dataset
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
grid_attrs : dict
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
Dataset
"""
apply_preload_user_commands(file_set)
func = _preprocess_and_rename_grid_attrs(preprocess_func, grid_attrs,
**kwargs)
return xr.open_mfdataset(file_set, preprocess=func, concat_dim=TIME_STR,
decode_times=False, decode_coords=False,
mask_and_scale=True, data_vars=data_vars,
coords=coords)
def apply_preload_user_commands(file_set, cmd=io.dmget):
"""Call desired functions on file list before loading.
For example, on the NOAA Geophysical Fluid Dynamics Laboratory
computational cluster, data that is saved on their tape archive
must be accessed via a `dmget` (or `hsmget`) command before being used.
"""
if cmd is not None:
cmd(file_set)
def _setattr_default(obj, attr, value, default):
"""Set an attribute of an object to a value or default value."""
if value is None:
setattr(obj, attr, default)
else:
setattr(obj, attr, value)
class DataLoader(object):
"""A fundamental DataLoader object."""
def load_variable(self, var=None, start_date=None, end_date=None,
time_offset=None, grid_attrs=None, **DataAttrs):
"""Load a DataArray for requested variable and time range.
Automatically renames all grid attributes to match aospy conventions.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
            DataArray for the specified variable, date range, and interval.
"""
file_set = self._generate_file_set(var=var, start_date=start_date,
end_date=end_date, **DataAttrs)
ds = _load_data_from_disk(
file_set, self.preprocess_func, data_vars=self.data_vars,
coords=self.coords, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
)
if var.def_time:
ds = _prep_time_data(ds)
start_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], start_date)
end_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], end_date)
ds = set_grid_attrs_as_coords(ds)
da = _sel_var(ds, var, self.upcast_float32)
if var.def_time:
da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
return times.sel_time(da, start_date, end_date).load()
else:
return da.load()
def _load_or_get_from_model(self, var, start_date=None, end_date=None,
time_offset=None, model=None, **DataAttrs):
"""Load a DataArray for the requested variable and time range
Supports both access of grid attributes either through the DataLoader
or through an optionally-provided Model object. Defaults to using
the version found in the DataLoader first.
"""
grid_attrs = None if model is None else model.grid_attrs
try:
return self.load_variable(
var, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs)
except (KeyError, IOError) as e:
if var.name not in GRID_ATTRS or model is None:
raise e
else:
try:
return getattr(model, var.name)
except AttributeError:
raise AttributeError(
'Grid attribute {} could not be located either '
'through this DataLoader or in the provided Model '
'object: {}.'.format(var, model))
def recursively_compute_variable(self, var, start_date=None, end_date=None,
time_offset=None, model=None,
**DataAttrs):
"""Compute a variable recursively, loading data where needed.
An obvious requirement here is that the variable must eventually be
able to be expressed in terms of model-native quantities; otherwise the
recursion will never stop.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
model : Model
aospy Model object (optional)
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and input interval.
"""
if var.variables is None:
return self._load_or_get_from_model(
var, start_date, end_date, time_offset, model, **DataAttrs)
else:
data = [self.recursively_compute_variable(
v, start_date, end_date, time_offset, model, **DataAttrs)
for v in var.variables]
return var.func(*data).rename(var.name)
@staticmethod
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Apply specified time shift to DataArray"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
return da
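# Illustrative note (``da`` assumed): ``time_offset`` is forwarded verbatim
# to ``times.apply_time_offset`` as keyword arguments, so shifting a time
# coordinate back three hours is simply:
#
# >>> da = DataLoader._maybe_apply_time_shift(da, time_offset={'hours': -3})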
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
raise NotImplementedError(
'All DataLoaders require a _generate_file_set method')
class DictDataLoader(DataLoader):
"""A DataLoader that uses a dict mapping lists of files to string tags.
This is the simplest DataLoader; it is useful for instance if one is
dealing with raw model history files, which tend to group all variables
of a single output interval into single filesets. The
intvl_in parameter is a string description of the time frequency of the
data one is referencing (e.g. 'monthly', 'daily', '3-hourly'). In
principle, one can give it any string value.
Parameters
----------
file_map : dict
A dict mapping an input interval to a list of files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case of two sets of files, one with monthly average output, and one with
3-hourly output.
>>> file_map = {'monthly': '000[4-6]0101.atmos_month.nc',
... '3hr': '000[4-6]0101.atmos_8xday.nc'}
>>> data_loader = DictDataLoader(file_map)
If one wanted to correct a CF-incompliant units attribute on each Dataset
read in, which depended on the ``intvl_in`` of the fileset one could
define a ``preprocess_func`` which took into account the ``intvl_in``
keyword argument.
>>> def preprocess(ds, **kwargs):
... if kwargs['intvl_in'] == 'monthly':
... ds['time'].attrs['units'] = 'days since 0001-01-0000'
... if kwargs['intvl_in'] == '3hr':
... ds['time'].attrs['units'] = 'hours since 0001-01-0000'
... return ds
>>> data_loader = DictDataLoader(file_map, preprocess)
"""
def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
"""Create a new DictDataLoader."""
self.file_map = file_map
self.upcast_float32 = upcast_float32
self.data_vars = data_vars
self.coords = coords
self.preprocess_func = preprocess_func
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in))
class NestedDictDataLoader(DataLoader):
"""DataLoader that uses a nested dictionary mapping to load files.
This is the most flexible existing type of DataLoader; it allows for the
specification of different sets of files for different variables. The
intvl_in parameter is a string description of the time frequency of the
data one is referencing (e.g. 'monthly', 'daily', '3-hourly'). In
principle, one can give it any string value. The variable name
can be any variable name in your aospy object library (including
alternative names).
Parameters
----------
file_map : dict
A dict mapping intvl_in to dictionaries mapping Var
objects to lists of files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case of a set of monthly average files for large scale precipitation,
and another monthly average set of files for convective precipitation.
>>> file_map = {'monthly': {'precl': '000[4-6]0101.precl.nc',
... 'precc': '000[4-6]0101.precc.nc'}}
>>> data_loader = NestedDictDataLoader(file_map)
See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
possible function to pass as a ``preprocess_func``.
"""
def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
"""Create a new NestedDictDataLoader"""
self.file_map = file_map
self.upcast_float32 = upcast_float32
self.data_vars = data_vars
self.coords = coords
self.preprocess_func = preprocess_func
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
for name in var.names:
try:
return self.file_map[intvl_in][name]
except KeyError:
pass
raise KeyError('Files for the var {0} cannot be found for the '
               'intvl_in {1} in this '
               'NestedDictDataLoader'.format(var, intvl_in))
class GFDLDataLoader(DataLoader):
"""DataLoader for NOAA GFDL model output.
This is an example of a domain-specific custom DataLoader, designed
specifically for finding files output by the Geophysical Fluid Dynamics
Laboratory's model history file post-processing tools.
Parameters
----------
template : GFDLDataLoader
Optional argument to specify a base GFDLDataLoader to inherit
parameters from
data_direc : str
Root directory of data files
data_dur : int
Number of years included per post-processed file
data_start_date : datetime.datetime
Start date of data files
data_end_date : datetime.datetime
End date of data files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case without a template to start from.
>>> base = GFDLDataLoader(data_direc='/archive/control/pp', data_dur=5,
... data_start_date=datetime(2000, 1, 1),
... data_end_date=datetime(2010, 12, 31))
Case with a starting template.
>>> data_loader = GFDLDataLoader(base, data_direc='/archive/2xCO2/pp')
See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
possible function to pass as a ``preprocess_func``.
"""
def __init__(self, template=None, data_direc=None, data_dur=None,
data_start_date=None, data_end_date=None,
upcast_float32=None, data_vars=None, coords=None,
preprocess_func=None):
"""Create a new GFDLDataLoader"""
if template:
_setattr_default(self, 'data_direc', data_direc,
getattr(template, 'data_direc'))
_setattr_default(self, 'data_dur', data_dur,
getattr(template, 'data_dur'))
_setattr_default(self, 'data_start_date', data_start_date,
getattr(template, 'data_start_date'))
_setattr_default(self, 'data_end_date', data_end_date,
getattr(template, 'data_end_date'))
_setattr_default(self, 'upcast_float32', upcast_float32,
getattr(template, 'upcast_float32'))
_setattr_default(self, 'data_vars', data_vars,
getattr(template, 'data_vars'))
_setattr_default(self, 'coords', coords,
getattr(template, 'coords'))
_setattr_default(self, 'preprocess_func', preprocess_func,
getattr(template, 'preprocess_func'))
else:
self.data_direc = data_direc
self.data_dur = data_dur
self.data_start_date = data_start_date
self.data_end_date = data_end_date
_setattr_default(self, 'upcast_float32', upcast_float32, True)
_setattr_default(self, 'data_vars', data_vars, 'minimal')
_setattr_default(self, 'coords', coords, 'minimal')
_setattr_default(self, 'preprocess_func', preprocess_func,
lambda ds, **kwargs: ds)
@staticmethod
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data output by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6-hourly data for the month of January will have its
last time value fall in February.
"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
else:
if DataAttrs['dtype_in_time'] == 'inst':
if DataAttrs['intvl_in'].endswith('hr'):
offset = -1 * int(DataAttrs['intvl_in'][0])
else:
offset = 0
time = times.apply_time_offset(da[TIME_STR], hours=offset)
da[TIME_STR] = time
return da
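# Worked example of the branch above (values assumed): for 6-hourly
# instantaneous data, ``intvl_in`` is '6hr', so
#
# >>> offset = -1 * int('6hr'[0])
# >>> offset
# -6
#
# and every timestamp is shifted back six hours, pulling the stray February
# value back into January. Note the parse reads only the first character, so
# a hypothetical multi-digit interval such as '12hr' would yield -1, not -12.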
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
attempted_file_sets = []
for name in var.names:
file_set = self._input_data_paths_gfdl(
name, start_date, end_date, domain, intvl_in, dtype_in_vert,
dtype_in_time, intvl_out)
attempted_file_sets.append(file_set)
if all([os.path.isfile(filename) for filename in file_set]):
return file_set
raise IOError('Files for the var {0} cannot be located '
'using GFDL post-processing conventions. '
'Attempted using the following sets of paths:\n\n'
'{1}'.format(var, pprint.pformat(attempted_file_sets)))
def _input_data_paths_gfdl(self, name, start_date, end_date, domain,
intvl_in, dtype_in_vert, dtype_in_time,
intvl_out):
dtype_lbl = dtype_in_time
if intvl_in == 'daily':
domain += '_daily'
if dtype_in_vert == ETA_STR and name != 'ps':
domain += '_level'
if dtype_in_time == 'inst':
domain += '_inst'
dtype_lbl = 'ts'
if 'monthly_from_' in dtype_in_time:
dtype = dtype_in_time.replace('monthly_from_', '')
dtype_lbl = dtype
else:
dtype = dtype_in_time
dur_str = str(self.data_dur) + 'yr'
if dtype_in_time == 'av':
subdir = intvl_in + '_' + dur_str
else:
subdir = os.path.join(intvl_in, dur_str)
direc = os.path.join(self.data_direc, domain, dtype_lbl, subdir)
data_start_year = times.infer_year(self.data_start_date)
start_year = times.infer_year(start_date)
end_year = times.infer_year(end_date)
files = [os.path.join(direc, io.data_name_gfdl(
name, domain, dtype, intvl_in, year, intvl_out,
data_start_year, self.data_dur))
for year in range(start_year, end_year + 1)]
files = list(set(files))
files.sort()
return files
| {
"repo_name": "spencerkclark/aospy",
"path": "aospy/data_loader.py",
"copies": "1",
"size": "27430",
"license": "apache-2.0",
"hash": 6683652013243506000,
"line_mean": 39.0437956204,
"line_max": 79,
"alpha_frac": 0.6043018593,
"autogenerated": false,
"ratio": 4.141627661180734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245929520480734,
"avg_score": null,
"num_lines": null
} |
"""aospy DataLoader objects"""
import logging
import os
import pprint
import numpy as np
import xarray as xr
from .internal_names import (
ETA_STR,
GRID_ATTRS,
TIME_STR,
)
from .utils import times, io
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
"""Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
``preprocess`` argument of ``xr.open_mfdataset``. It ensures that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example of why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``.
"""
def func_wrapper(ds):
return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
return func_wrapper
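# Sketch of how the wrapper composes (the ``fix_time_units`` preprocessor
# below is hypothetical, not part of this module):
#
# >>> def fix_time_units(ds, **kwargs):
# ...     ds['time'].attrs['units'] = 'days since 2000-01-01'
# ...     return ds
# >>> func = _preprocess_and_rename_grid_attrs(fix_time_units)
#
# ``func(ds)`` now applies ``fix_time_units`` first and then
# ``grid_attrs_to_aospy_names``, matching the order described above.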
def grid_attrs_to_aospy_names(data, grid_attrs=None):
"""Rename grid attributes to be consistent with aospy conventions.
Search all of the dataset's coords and dims looking for matches to known
grid attribute names; any that are found subsequently get renamed to the
aospy name as specified in ``aospy.internal_names.GRID_ATTRS``.
Also forces any renamed grid attribute that is saved as a dim without a
coord to have a coord, which facilitates subsequent slicing/subsetting.
This function does not compare to Model coordinates or add missing
coordinates from Model objects.
Parameters
----------
data : xr.Dataset
grid_attrs : dict (default None)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
xr.Dataset
Data returned with coordinates consistent with aospy
conventions
"""
if grid_attrs is None:
grid_attrs = {}
# Override GRID_ATTRS with entries in grid_attrs
attrs = GRID_ATTRS.copy()
for k, v in grid_attrs.items():
if k not in attrs:
raise ValueError(
'Unrecognized internal name, {!r}, specified for a custom '
'grid attribute name. See the full list of valid internal '
'names below:\n\n{}'.format(k, list(GRID_ATTRS.keys())))
attrs[k] = (v, )
dims_and_vars = set(data.variables).union(set(data.dims))
for name_int, names_ext in attrs.items():
data_coord_name = set(names_ext).intersection(dims_and_vars)
if data_coord_name:
data = data.rename({data_coord_name.pop(): name_int})
return set_grid_attrs_as_coords(data)
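# Illustrative override (names assumed): if a model stores latitude under
# 'XLAT', the aospy-internal 'lat' entry can be pointed at it:
#
# >>> ds = grid_attrs_to_aospy_names(ds, grid_attrs={'lat': 'XLAT'})
#
# Passing an internal name absent from GRID_ATTRS raises the ValueError above.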
def set_grid_attrs_as_coords(ds):
"""Set available grid attributes as coordinates in a given Dataset.
Grid attributes are assumed to have their internal aospy names. Grid
attributes are set as coordinates, such that they are carried by all
selected DataArrays with overlapping index dimensions.
Parameters
----------
ds : Dataset
Input data
Returns
-------
Dataset
Dataset with grid attributes set as coordinates
"""
grid_attrs_in_ds = set(GRID_ATTRS.keys()).intersection(
set(ds.coords) | set(ds.data_vars))
ds = ds.set_coords(grid_attrs_in_ds)
return ds
def _maybe_cast_to_float64(da):
"""Cast DataArrays to np.float64 if they are of type np.float32.
Parameters
----------
da : xr.DataArray
Input DataArray
Returns
-------
DataArray
"""
if da.dtype == np.float32:
logging.warning('Datapoints were stored using the np.float32 datatype. '
                'For accurate reduction operations using bottleneck, '
'datapoints are being cast to the np.float64 datatype.'
' For more information see: https://github.com/pydata/'
'xarray/issues/1346')
return da.astype(np.float64)
else:
return da
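# Minimal self-contained check of the upcast (doctest-style):
#
# >>> import numpy as np
# >>> import xarray as xr
# >>> da32 = xr.DataArray(np.zeros(3, dtype=np.float32))
# >>> _maybe_cast_to_float64(da32).dtype == np.float64
# True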
def _sel_var(ds, var, upcast_float32=True):
"""Select the specified variable by trying all possible alternative names.
Parameters
----------
ds : Dataset
Dataset possibly containing var
var : aospy.Var
Variable to find data for
upcast_float32 : bool (default True)
Whether to cast a float32 DataArray up to float64
Returns
-------
DataArray
Raises
------
LookupError
If the variable is not in the Dataset
"""
for name in var.names:
try:
da = ds[name].rename(var.name)
if upcast_float32:
return _maybe_cast_to_float64(da)
else:
return da
except KeyError:
pass
msg = '{0} not found among names: {1} in\n{2}'.format(var, var.names, ds)
raise LookupError(msg)
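# Behavior sketch (Var assumed): with ``var.names`` of ('precip', 'pr'), the
# loop above returns whichever name appears first in ``ds``, renamed to
# ``var.name``; if neither is present, the LookupError above is raised.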
def _load_data_from_disk(file_set, preprocess_func=lambda ds, **kwargs: ds,
data_vars='minimal', coords='minimal',
grid_attrs=None, **kwargs):
"""Load a Dataset from a list or glob-string of files.
Datasets from files are concatenated along time,
and all grid attributes are renamed to their aospy internal names.
Parameters
----------
file_set : list or str
List of paths to files or glob-string
preprocess_func : function (optional)
Custom function to call before applying any aospy logic
to the loaded dataset
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
grid_attrs : dict
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
Dataset
"""
apply_preload_user_commands(file_set)
func = _preprocess_and_rename_grid_attrs(preprocess_func, grid_attrs,
**kwargs)
return xr.open_mfdataset(
file_set,
preprocess=func,
combine='by_coords',
decode_times=False,
decode_coords=False,
mask_and_scale=True,
data_vars=data_vars,
coords=coords,
)
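# Note on the flags above: ``decode_times=False`` defers CF-time decoding to
# aospy's own handling (see ``times.prep_time_data``), and
# ``combine='by_coords'`` assumes xarray >= 0.12.2. A minimal call sketch
# (the glob string is hypothetical):
#
# >>> ds = _load_data_from_disk('/path/to/files/*.nc', intvl_in='monthly')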
def apply_preload_user_commands(file_set, cmd=io.dmget):
"""Call desired functions on file list before loading.
For example, on the NOAA Geophysical Fluid Dynamics Laboratory
computational cluster, data that is saved on their tape archive
must be accessed via a `dmget` (or `hsmget`) command before being used.
"""
if cmd is not None:
cmd(file_set)
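# Sketch: to skip the tape-archive staging entirely (e.g. when running
# off-site), pass ``cmd=None`` and the call becomes a no-op:
#
# >>> apply_preload_user_commands(file_set, cmd=None)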
def _setattr_default(obj, attr, value, default):
"""Set an attribute of an object to a value or default value."""
if value is None:
setattr(obj, attr, default)
else:
setattr(obj, attr, value)
class DataLoader(object):
"""A fundamental DataLoader object."""
def load_variable(self, var=None, start_date=None, end_date=None,
time_offset=None, grid_attrs=None, **DataAttrs):
"""Load a DataArray for requested variable and time range.
Automatically renames all grid attributes to match aospy conventions.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and input interval.
"""
file_set = self._generate_file_set(
var=var,
start_date=start_date,
end_date=end_date,
**DataAttrs,
)
ds = _load_data_from_disk(
file_set,
self.preprocess_func,
data_vars=self.data_vars,
coords=self.coords,
start_date=start_date,
end_date=end_date,
time_offset=time_offset,
grid_attrs=grid_attrs,
**DataAttrs,
)
if var.def_time:
ds = times.prep_time_data(ds)
start_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], start_date)
end_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], end_date)
ds = set_grid_attrs_as_coords(ds)
da = _sel_var(ds, var, self.upcast_float32)
if var.def_time:
da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
return times.sel_time(da, start_date, end_date).load()
else:
return da.load()
def _load_or_get_from_model(self, var, start_date=None, end_date=None,
time_offset=None, model=None, **DataAttrs):
"""Load a DataArray for the requested variable and time range
Supports both access of grid attributes either through the DataLoader
or through an optionally-provided Model object. Defaults to using
the version found in the DataLoader first.
"""
grid_attrs = None if model is None else model.grid_attrs
try:
return self.load_variable(
var, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs)
except (KeyError, IOError) as e:
if var.name not in GRID_ATTRS or model is None:
raise e
else:
try:
return getattr(model, var.name)
except AttributeError:
raise AttributeError(
'Grid attribute {} could not be located either '
'through this DataLoader or in the provided Model '
'object: {}.'.format(var, model))
def recursively_compute_variable(self, var, start_date=None, end_date=None,
time_offset=None, model=None,
**DataAttrs):
"""Compute a variable recursively, loading data where needed.
An obvious requirement is that the variable must ultimately be
expressible in terms of model-native quantities; otherwise the
recursion will never terminate.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
model : Model
aospy Model object (optional)
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and input interval.
"""
if var.variables is None:
return self._load_or_get_from_model(
var, start_date, end_date, time_offset, model, **DataAttrs)
else:
data = [self.recursively_compute_variable(
v, start_date, end_date, time_offset, model, **DataAttrs)
for v in var.variables]
return var.func(*data).rename(var.name)
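# Recursion sketch (Vars assumed for illustration): if ``precip_total`` were
# defined with ``variables=(precl, precc)`` and ``func=lambda a, b: a + b``,
# the call would load ``precl`` and ``precc`` from disk (their ``variables``
# attribute is None) and return their sum renamed to 'precip_total'.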
@staticmethod
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Apply specified time shift to DataArray"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
return da
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
raise NotImplementedError(
'All DataLoaders require a _generate_file_set method')
class DictDataLoader(DataLoader):
"""A DataLoader that uses a dict mapping lists of files to string tags.
This is the simplest DataLoader; it is useful for instance if one is
dealing with raw model history files, which tend to group all variables
of a single output interval into single filesets. The
intvl_in parameter is a string description of the time frequency of the
data one is referencing (e.g. 'monthly', 'daily', '3-hourly'). In
principle, one can give it any string value.
Parameters
----------
file_map : dict
A dict mapping an input interval to a list of files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case of two sets of files, one with monthly average output, and one with
3-hourly output.
>>> file_map = {'monthly': '000[4-6]0101.atmos_month.nc',
... '3hr': '000[4-6]0101.atmos_8xday.nc'}
>>> data_loader = DictDataLoader(file_map)
If one wanted to correct a CF-incompliant units attribute on each Dataset
read in, which depended on the ``intvl_in`` of the fileset one could
define a ``preprocess_func`` which took into account the ``intvl_in``
keyword argument.
>>> def preprocess(ds, **kwargs):
... if kwargs['intvl_in'] == 'monthly':
... ds['time'].attrs['units'] = 'days since 0001-01-0000'
... if kwargs['intvl_in'] == '3hr':
... ds['time'].attrs['units'] = 'hours since 0001-01-0000'
... return ds
>>> data_loader = DictDataLoader(file_map, preprocess)
"""
def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
"""Create a new DictDataLoader."""
self.file_map = file_map
self.upcast_float32 = upcast_float32
self.data_vars = data_vars
self.coords = coords
self.preprocess_func = preprocess_func
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in))
class NestedDictDataLoader(DataLoader):
"""DataLoader that uses a nested dictionary mapping to load files.
This is the most flexible existing type of DataLoader; it allows for the
specification of different sets of files for different variables. The
intvl_in parameter is a string description of the time frequency of the
data one is referencing (e.g. 'monthly', 'daily', '3-hourly'). In
principle, one can give it any string value. The variable name
can be any variable name in your aospy object library (including
alternative names).
Parameters
----------
file_map : dict
A dict mapping intvl_in to dictionaries mapping Var
objects to lists of files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case of a set of monthly average files for large scale precipitation,
and another monthly average set of files for convective precipitation.
>>> file_map = {'monthly': {'precl': '000[4-6]0101.precl.nc',
... 'precc': '000[4-6]0101.precc.nc'}}
>>> data_loader = NestedDictDataLoader(file_map)
See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
possible function to pass as a ``preprocess_func``.
"""
def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
"""Create a new NestedDictDataLoader"""
self.file_map = file_map
self.upcast_float32 = upcast_float32
self.data_vars = data_vars
self.coords = coords
self.preprocess_func = preprocess_func
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
for name in var.names:
try:
return self.file_map[intvl_in][name]
except KeyError:
pass
raise KeyError('Files for the var {0} cannot be found for the '
               'intvl_in {1} in this '
               'NestedDictDataLoader'.format(var, intvl_in))
class GFDLDataLoader(DataLoader):
"""DataLoader for NOAA GFDL model output.
This is an example of a domain-specific custom DataLoader, designed
specifically for finding files output by the Geophysical Fluid Dynamics
Laboratory's model history file post-processing tools.
Parameters
----------
template : GFDLDataLoader
Optional argument to specify a base GFDLDataLoader to inherit
parameters from
data_direc : str
Root directory of data files
data_dur : int
Number of years included per post-processed file
data_start_date : datetime.datetime
Start date of data files
data_end_date : datetime.datetime
End date of data files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case without a template to start from.
>>> base = GFDLDataLoader(data_direc='/archive/control/pp', data_dur=5,
... data_start_date=datetime(2000, 1, 1),
... data_end_date=datetime(2010, 12, 31))
Case with a starting template.
>>> data_loader = GFDLDataLoader(base, data_direc='/archive/2xCO2/pp')
See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
possible function to pass as a ``preprocess_func``.
"""
def __init__(self, template=None, data_direc=None, data_dur=None,
data_start_date=None, data_end_date=None,
upcast_float32=None, data_vars=None, coords=None,
preprocess_func=None):
"""Create a new GFDLDataLoader"""
if template:
_setattr_default(self, 'data_direc', data_direc,
getattr(template, 'data_direc'))
_setattr_default(self, 'data_dur', data_dur,
getattr(template, 'data_dur'))
_setattr_default(self, 'data_start_date', data_start_date,
getattr(template, 'data_start_date'))
_setattr_default(self, 'data_end_date', data_end_date,
getattr(template, 'data_end_date'))
_setattr_default(self, 'upcast_float32', upcast_float32,
getattr(template, 'upcast_float32'))
_setattr_default(self, 'data_vars', data_vars,
getattr(template, 'data_vars'))
_setattr_default(self, 'coords', coords,
getattr(template, 'coords'))
_setattr_default(self, 'preprocess_func', preprocess_func,
getattr(template, 'preprocess_func'))
else:
self.data_direc = data_direc
self.data_dur = data_dur
self.data_start_date = data_start_date
self.data_end_date = data_end_date
_setattr_default(self, 'upcast_float32', upcast_float32, True)
_setattr_default(self, 'data_vars', data_vars, 'minimal')
_setattr_default(self, 'coords', coords, 'minimal')
_setattr_default(self, 'preprocess_func', preprocess_func,
lambda ds, **kwargs: ds)
@staticmethod
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data output by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6-hourly data for the month of January will have its
last time value fall in February.
"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
else:
if DataAttrs['dtype_in_time'] == 'inst':
if DataAttrs['intvl_in'].endswith('hr'):
offset = -1 * int(DataAttrs['intvl_in'][0])
else:
offset = 0
time = times.apply_time_offset(da[TIME_STR], hours=offset)
da[TIME_STR] = time
return da
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
attempted_file_sets = []
for name in var.names:
file_set = self._input_data_paths_gfdl(
name, start_date, end_date, domain, intvl_in, dtype_in_vert,
dtype_in_time, intvl_out)
attempted_file_sets.append(file_set)
if all([os.path.isfile(filename) for filename in file_set]):
return file_set
raise IOError('Files for the var {0} cannot be located '
'using GFDL post-processing conventions. '
'Attempted using the following sets of paths:\n\n'
'{1}'.format(var, pprint.pformat(attempted_file_sets)))
def _input_data_paths_gfdl(self, name, start_date, end_date, domain,
intvl_in, dtype_in_vert, dtype_in_time,
intvl_out):
dtype_lbl = dtype_in_time
if intvl_in == 'daily':
domain += '_daily'
if dtype_in_vert == ETA_STR and name != 'ps':
domain += '_level'
if dtype_in_time == 'inst':
domain += '_inst'
dtype_lbl = 'ts'
if 'monthly_from_' in dtype_in_time:
dtype = dtype_in_time.replace('monthly_from_', '')
dtype_lbl = dtype
else:
dtype = dtype_in_time
dur_str = str(self.data_dur) + 'yr'
if dtype_in_time == 'av':
subdir = intvl_in + '_' + dur_str
else:
subdir = os.path.join(intvl_in, dur_str)
direc = os.path.join(self.data_direc, domain, dtype_lbl, subdir)
data_start_year = times.infer_year(self.data_start_date)
start_year = times.infer_year(start_date)
end_year = times.infer_year(end_date)
files = [os.path.join(direc, io.data_name_gfdl(
name, domain, dtype, intvl_in, year, intvl_out,
data_start_year, self.data_dur))
for year in range(start_year, end_year + 1)]
files = list(set(files))
files.sort()
return files
| {
"repo_name": "spencerahill/aospy",
"path": "aospy/data_loader.py",
"copies": "1",
"size": "25915",
"license": "apache-2.0",
"hash": 3075672740497129000,
"line_mean": 37.9114114114,
"line_max": 79,
"alpha_frac": 0.5997684739,
"autogenerated": false,
"ratio": 4.135152385511409,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5234920859411409,
"avg_score": null,
"num_lines": null
} |
"""aospy.Model objects associated w/ GFDL climate models."""
import datetime
from aospy import Model
from .. import runs
am2 = Model(
name='am2',
grid_file_paths=(
('/archive/Spencer.Hill/am2/am2clim_reyoi/gfdl.ncrc2-default-prod/'
'pp/atmos/atmos.static.nc'),
('/archive/Spencer.Hill/am2/am2clim_reyoi/gfdl.ncrc2-default-prod/'
'pp/atmos_level/ts/monthly/30yr/atmos_level.198301-201212.temp.nc'),
('/archive/Spencer.Hill/am2/am2clim_reyoi/gfdl.ncrc2-default-prod/'
'pp/atmos/ts/monthly/30yr/atmos.198301-201212.temp.nc'),
),
default_start_date=datetime.datetime(1982, 1, 1),
default_end_date=datetime.datetime(2012, 12, 31),
runs={
runs.am2_cont,
runs.am2_aero,
runs.am2_atm,
runs.am2_amtm,
runs.am2_gas,
runs.am2_gtm,
runs.am2_gmtm,
runs.am2_aatl,
runs.am2_aind,
runs.am2_apac,
runs.am2_noT,
runs.am2_noT_p2K,
runs.am2_amip,
runs.am2_reyoi_cont,
runs.am2_reyoi_m0p25,
runs.am2_reyoi_m0p5,
runs.am2_reyoi_m1,
runs.am2_reyoi_m1p5,
runs.am2_reyoi_m2,
runs.am2_reyoi_m3,
runs.am2_reyoi_m4,
runs.am2_reyoi_p0p25,
runs.am2_reyoi_p0p5,
runs.am2_reyoi_p1,
runs.am2_reyoi_p1p5,
runs.am2_reyoi_p2,
runs.am2_reyoi_p3,
runs.am2_reyoi_p4,
runs.am2_reyoi_p6,
runs.am2_reyoi_p8,
runs.am2_reyoi_m6,
runs.am2_reyoi_m8,
runs.am2_reyoi_m10,
runs.am2_reyoi_m15,
runs.am2_reyoi_p10,
runs.am2_reyoi_wpwp_p2,
runs.am2_reyoi_wpwp_m2,
runs.am2_reyoi_uw,
runs.am2_reyoi_uw_p2,
runs.am2_reyoi_uw_p5,
runs.am2_reyoi_uw_p10,
runs.am2_reyoi_uw_m2,
runs.am2_reyoi_uw_m5,
runs.am2_reyoi_uw_m10,
runs.am2_reyoi_uw_lo_0p5,
runs.am2_reyoi_uw_lo_0p5_p2k,
runs.am2_reyoi_uw_lo_0p5_p4k,
runs.am2_reyoi_uw_lo_0p5_p6k,
runs.am2_reyoi_uw_lo_0p5_p8k,
runs.am2_reyoi_uw_lo_0p5_p10k,
runs.am2_reyoi_uw_lo_0p5_m2k,
runs.am2_reyoi_uw_lo_0p5_m4k,
runs.am2_reyoi_uw_lo_0p5_m6k,
runs.am2_reyoi_uw_lo_0p5_m8k,
runs.am2_reyoi_uw_lo_0p5_m10k,
runs.am2_reyoi_uw_lo_0p25,
runs.am2_reyoi_uw_lo_0p25_p2k,
runs.am2_cld_lock_cont,
runs.am2_cld_lock_p2,
runs.am2_cld_lock_sst,
runs.am2_cld_lock_cld,
runs.am2_amip1,
runs.am2_amip1_p2,
runs.am2_reynolds,
runs.am2_reynolds_p2,
runs.am2_hurrell_cont,
runs.am2_hurrell_p2,
runs.am2_cld_seed_all_p2,
runs.am2_cld_seed_np_p2,
runs.am2_cld_seed_sp_p2,
runs.am2_cld_seed_sa_p2,
runs.am2_zshen_cont,
runs.am2_atmos_heat_wpwp,
runs.am2_atmos_heat_wpwp_small,
runs.am2_reyoi_w_ice,
runs.am2_test,
},
default_runs={
runs.am2_reyoi_cont,
runs.am2_reyoi_p2
}
)
am3 = Model(
name='am3',
grid_file_paths=(
('/archive/Spencer.Hill/am3/am3clim_hurrell/gfdl.ncrc2-intel-prod-'
'openmp/pp/atmos/atmos.static.nc'),
('/archive/Spencer.Hill/am3/am3clim_hurrell/gfdl.ncrc2-intel-prod-'
'openmp/pp/atmos/ts/monthly/1yr/atmos.198101-198112.ucomp.nc'),
('/archive/Spencer.Hill/am3/am3clim_hurrell/gfdl.ncrc2-intel-prod-'
'openmp/pp/atmos_level/ts/monthly/1yr/'
'atmos_level.198101-198112.ucomp.nc')
),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2010, 12, 31),
runs={
runs.am3_cont,
runs.am3_aero,
runs.am3_atm,
runs.am3_amtm,
runs.am3_gas,
runs.am3_gtm,
runs.am3_gmtm,
runs.am3_aatl,
runs.am3_aind,
runs.am3_apac,
runs.am3_hc,
runs.am3_hp1k,
runs.am3_hp2k,
runs.am3_hp4k,
runs.am3_hp6k,
runs.am3_hp8k,
runs.am3_hp10k,
runs.am3_hm1k,
runs.am3_hm2k,
runs.am3_hm4k,
runs.am3_hm6k,
runs.am3_hm8k,
runs.am3_hm10k,
runs.am3_hm15k,
# runs.am3_amip,
runs.am3_hwpwp_p2k,
runs.am3_hc_static_veg,
runs.am3_hc_static_veg_p4k,
runs.am3_hc_static_veg_10kyr,
},
default_runs={
runs.am3_hc,
runs.am3_hp2k,
}
)
hiram = Model(
name='hiram',
grid_file_paths=(
'/archive/Yi.Ming/siena_201211/c180_hiram_clim/gfdl.ncrc2-default-prod/'
'pp/atmos/atmos.static.nc',
'/archive/Yi.Ming/siena_201211/c180_hiram_clim/gfdl.ncrc2-default-prod/'
'pp/atmos/ts/monthly/17yr/atmos.197901-199512.ucomp.nc'
),
default_start_date=datetime.datetime(1979, 1, 1),
default_end_date=datetime.datetime(1995, 12, 31),
runs={
# runs.hiram_amip,
runs.hiram_cont,
runs.hiram_aero,
runs.hiram_atm,
runs.hiram_amtm,
runs.hiram_gas,
runs.hiram_gtm,
runs.hiram_gmtm,
runs.hiram_aatl,
runs.hiram_aind,
runs.hiram_apac,
},
default_runs={
runs.hiram_cont,
runs.hiram_gtm,
}
)
sm2 = Model(
name='sm2',
description='AM2.1 atmosphere coupled to mixed-layer ocean.',
grid_file_paths=(
'/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie_rerun6.YIM/pp/'
'atmos/ts/monthly/100yr/atmos.000101-010012.vcomp.nc',
'/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie_rerun6.YIM/pp/'
'atmos/atmos.static.nc'
),
default_start_date=datetime.datetime(61, 1, 1),
default_end_date=datetime.datetime(80, 12, 31),
runs={
runs.sm2_cont,
runs.sm2_aero,
runs.sm2_gas,
runs.sm2_both,
},
)
hiram_c48 = Model(
name='hiram_mz',
description=('Low-resolution version of HiRAM used by Zhao (2014, '
             'J. Climate).'),
grid_file_paths=(
'/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0/'
'gfdl.ncrc2-intel-prod/pp/atmos/atmos.static.nc',
'/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0/'
'gfdl.ncrc2-intel-prod/pp/atmos/ts/monthly/15yr/'
'atmos.198101-199512.ucomp.nc'
),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(1995, 12, 31),
runs={
runs.hiram_c48_0,
runs.hiram_c48_0_p2K,
runs.hiram_c48_1,
runs.hiram_c48_1_p2K,
runs.hiram_c48_2,
runs.hiram_c48_2_p2K,
runs.hiram_c48_3,
runs.hiram_c48_3_p2K,
runs.hiram_c48_4,
runs.hiram_c48_4_p2K,
runs.hiram_c48_5,
runs.hiram_c48_5_p2K,
runs.hiram_c48_6,
runs.hiram_c48_6_p2K,
runs.hiram_c48_7,
runs.hiram_c48_7_p2K,
runs.hiram_c48_8,
runs.hiram_c48_8_p2K,
},
default_runs={
runs.hiram_c48_0,
runs.hiram_c48_0_p2K,
}
)
am3c90 = Model(
name='am3c90',
grid_file_paths=(
'/archive/h1g/FMS/siena_201203/c90L48_am3p10_v6_clim/gfdl.ncrc2-intel'
'-prod-openmp/pp/atmos/atmos.static.nc',
'/archive/h1g/FMS/siena_201203/c90L48_am3p10_v6_clim/gfdl.ncrc2-intel'
'-prod-openmp/pp/atmos/ts/monthly/10yr/atmos.198101-199012.ucomp.nc'
),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(1990, 12, 31),
runs={
runs.am3c90_cont,
runs.am3c90_p2K,
},
default_runs={
runs.am3c90_cont,
runs.am3c90_p2K,
}
)
am2p5 = Model(
name='am2p5',
# The atmos.static.nc in the actual AM2.5 data directories has the wrong
# horizontal resolution, so use the one from HiRAM, which matches the
# actual AM2.5 resolution.
grid_file_paths=(
'/archive/Yi.Ming/siena_201211/c180_hiram_clim/'
'gfdl.ncrc2-default-prod/pp/atmos/atmos.static.nc',
['/archive/miz/hiramdp/siena_201204/c180l32_am2_C0/gfdl.ncrc2-intel-'
'prod/pp/atmos/ts/monthly/10yr/atmos.%04d01-%04d12.ucomp.nc'
% (y1, y2) for (y1, y2) in zip((1981, 1991), (1990, 2000))]
),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
runs={
runs.am2p5_cont,
runs.am2p5_p2K,
},
default_runs={
runs.am2p5_cont,
runs.am2p5_p2K,
}
)
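# The zip in am2p5's grid_file_paths expands to two decade-long chunks:
#
# >>> list(zip((1981, 1991), (1990, 2000)))
# [(1981, 1990), (1991, 2000)]
#
# i.e. the files atmos.198101-199012.ucomp.nc and atmos.199101-200012.ucomp.nc.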
am4a1 = Model(
name='am4a1',
grid_file_paths=(
'/archive/Ming.Zhao/awg/tikal_201403/c96L48_am4a1_'
'2000climo_highsen1/gfdl.ncrc2-intel-prod-openmp/pp/atmos/'
'atmos.static.nc',
['/archive/Ming.Zhao/awg/tikal_201403/c96L48_am4a1_'
'2000climo_highsen1/gfdl.ncrc2-intel-prod-openmp/pp/atmos/'
'ts/monthly/1yr/atmos.00%02d01-00%02d12.temp.nc' % (n, n)
for n in range(2, 12)]
),
default_start_date=datetime.datetime(2, 1, 1),
default_end_date=datetime.datetime(11, 12, 31),
runs={
runs.am4_a1c,
runs.am4_a1p2k,
}
)
am4a2 = Model(
name='am4a2',
grid_file_paths=(
'/archive/cjg/awg/tikal_201403/c96L48_am4a2r1_2000climo/gfdl.ncrc2-'
'intel-prod-openmp/pp/atmos/atmos.static.nc',
['/archive/cjg/awg/tikal_201403/c96L48_am4a2r1_2000climo/gfdl.ncrc2-'
'intel-prod-openmp/pp/atmos/ts/monthly/5yr/atmos.00%02d01-00%02d12.'
'ucomp.nc' % (y1, y2) for (y1, y2) in zip((2, 7), (6, 11))]
),
default_start_date=datetime.datetime(2, 1, 1),
default_end_date=datetime.datetime(11, 12, 31),
runs={
runs.am4_a2c,
runs.am4_a2p2k,
}
)
am4c1 = Model(
name='am4c1',
grid_file_paths=(
'/archive/miz/tikal_201409_awgUpdates_mom6_2014.08.29/'
'c96L48_am4c1r2_2000climo/gfdl.ncrc2-intel-prod-openmp/pp/'
'atmos/atmos.static.nc',
'/archive/miz/tikal_201409_awgUpdates_mom6_2014.08.29/'
'c96L48_am4c1r2_2000climo/gfdl.ncrc2-intel-prod-openmp/pp/'
'atmos/ts/monthly/10yr/atmos.000101-001012.temp.nc'
),
default_start_date=datetime.datetime(1, 1, 1),
default_end_date=datetime.datetime(10, 12, 31),
runs={
runs.am4_c1c,
runs.am4_c1p2k,
}
)
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/models/gfdl_models.py",
"copies": "1",
"size": "10250",
"license": "apache-2.0",
"hash": -2242017832286169600,
"line_mean": 30.1550151976,
"line_max": 81,
"alpha_frac": 0.5857560976,
"autogenerated": false,
"ratio": 2.377638598932962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.846264877553103,
"avg_score": 0.00014918420038641453,
"num_lines": 329
} |
"""aospy.Model objects corresponding to CMIP5 data."""
import datetime
import os
from aospy.model import Model
from .. import runs
root_dir = '/archive/pcmdi/repo/CMIP5/output/'
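# Sketch of how ``root_dir`` is meant to combine with each institute/model
# pair when the commented-out ``data_direc`` lines below are re-enabled
# (illustrative; the resolved path depends on filesystem symlinks):
#
# >>> data_direc = os.path.realpath(os.path.join(root_dir, 'BCC/BCC-CSM1-1'))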
# BCC
bcc_csm1 = Model(
name='bcc_csm1-1',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'BCC/BCC-CSM1-1')),
grid_file_paths=[
'/archive/pcmdi/repo/CMIP5/output/BCC/BCC-CSM1-1/historical/fx/atmos/'
'fx/r0i0p0/v1/orog/orog_fx_bcc-csm1-1_historical_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/BCC/BCC-CSM1-1/historical/fx/atmos/'
'fx/r0i0p0/v1/sftlf/sftlf_fx_bcc-csm1-1_historical_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/BCC/BCC-CSM1-1/historical/fx/atmos/'
'fx/r0i0p0/v1/areacella/areacella_fx_bcc-csm1-1_historical_r0i0p0.nc',
],
# data_dur=30,
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# BNU
bnu_esm = Model(
name='bnu_esm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'BNU/BNU-ESM')),
runs=[runs.amip],
default_runs=False
)
# CCCma
cccma_canam4 = Model(
name='cccma_canam4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CCCma/CanAM4')),
repo_version=0,
# data_dur=30,
# data_start_date=datetime.datetime(1950, 1, 1),
# data_end_date=datetime.datetime(2009, 12, 31),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cccma_cancm4 = Model(
name='cccma_cancm4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CCCma/CanCM4')),
runs=[runs.amip],
default_runs=False
)
cccma_canesm2 = Model(
name='cccma_canesm2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CCCma/CanESM2')),
runs=[runs.amip],
default_runs=False
)
# CMCC
cmcc_cesm = Model(
name='cmcc-cesm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CMCC/CMCC-CESM')),
runs=[runs.amip],
default_runs=False
)
cmcc_cm = Model(
name='cmcc-cm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CMCC/CMCC-CM')),
runs=[runs.amip],
default_runs=False
)
cmcc_cms = Model(
name='cmcc-cms',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CMCC/CMCC-CMS')),
runs=[runs.amip],
default_runs=False
)
# CNRM-CERFACS
cnrm_cm5 = Model(
name='cnrm-cm5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CNRM-CERFACS/CNRM-CM5')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cnrm_cm5_2 = Model(
name='cnrm-cm5-2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CNRM-CERFACS/CNRM-CM5-2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# COLA-CFS
cola_cfsv2 = Model(
name='cola-cfsv2-2011',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'COLA/CFSv2-2011')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# CSIRO-BOM
csiro_bom_access1_0 = Model(
name='csiro-bom-access1-0',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CSIRO-BOM/CSIRO-ACCESS1-0')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
csiro_bom_access1_3 = Model(
name='csiro-bom-access1-3',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CSIRO-BOM/CSIRO-ACCESS1-3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# CSIRO-QCCCE
csiro_qccce_mk3_6_0 = Model(
name='csiro-qccce-mk3-6-0',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'CSIRO-QCCCE/CSIRO-Mk3-6-0')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# FIO
fio_esm = Model(
name='fio-esm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'FIO/FIO-ESM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# ICHEC
ichec_ec_earth = Model(
name='ichec_ec_earth',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'ICHEC/EC-EARTH')),
repo_ens_mem='r3i1p1',
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# INM
inm_cm4 = Model(
name='inm-cm4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'INM/INM-CM4')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# INPE
inpe_hadgem2_es = Model(
name='inpe-hadgem2-es',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'INPE/HadGEM2-ES')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# IPSL
ipsl_cm5a_lr = Model(
name='ipsl-cm5a-lr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'IPSL/IPSL-CM5A-LR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
ipsl_cm5a_mr = Model(
name='ipsl-cm5a-mr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'IPSL/IPSL-CM5A-MR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
ipsl_cm5b_lr = Model(
name='ipsl-cm5b-lr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'IPSL/IPSL-CM5B-LR')),
grid_file_paths=[
'/archive/pcmdi/repo/CMIP5/output/IPSL/IPSL-CM5B-LR/piControl/fx/'
'atmos/fx/r0i0p0/v20120430/orog/'
'orog_fx_IPSL-CM5B-LR_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/IPSL/IPSL-CM5B-LR/piControl/fx/'
'atmos/fx/r0i0p0/v20120430/areacella/'
'areacella_fx_IPSL-CM5B-LR_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/IPSL/IPSL-CM5B-LR/piControl/fx/'
'atmos/fx/r0i0p0/v20120430/sftlf/'
'sftlf_fx_IPSL-CM5B-LR_piControl_r0i0p0.nc',
],
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# LASG-CESS
lasg_cess_fgoals_g2 = Model(
name='lasg-cess-fgoals-g2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'LASG-CESS/FGOALS-g2')),
repo_version=0,
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# LASG-IAP
lasg_iap_fgoals_g1 = Model(
name='lasg-iap-fgoals-g1',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'LASG-IAP/FGOALS-g1')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
lasg_iap_fgoals_s2 = Model(
name='lasg-iap-fgoals-s2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'LASG-IAP/FGOALS-s2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MIROC
miroc4h = Model(
name='miroc4h',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC4h')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
miroc5 = Model(
name='miroc5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC5')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
miroc_esm = Model(
name='miroc-esm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC-ESM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
miroc_esm_chem = Model(
name='miroc-esm-chem',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MIROC/MIROC-ESM-CHEM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MOHC (Met Office Hadley Centre)
mohc_hadcm3 = Model(
name='mohc_hadcm3',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadCM3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mohc_hadgem2_a = Model(
name='mohc_hadgem2a',
description='',
# data_dir_struc='gfdl_repo',
repo_version=1,
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadGEM2-A')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mohc_hadgem2_cc = Model(
name='mohc_hadgem2cc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadGEM2-CC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mohc_hadgem2_es = Model(
name='hadgem2-es',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MOHC/HadGEM2-ES')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MPI-M
mpi_m_esm_lr = Model(
name='mpi-esm-lr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MPI-M/MPI-ESM-LR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mpi_m_esm_mr = Model(
name='mpi-esm-mr',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MPI-M/MPI-ESM-MR')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mpi_m_esm_p = Model(
name='mpi-esm-p',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MPI-M/MPI-ESM-P')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# MRI
mri_agcm3_2h = Model(
name='mri-agcm3-2h',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-AGCM3-2H')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mri_agcm3_2s = Model(
name='mri-agcm3-2s',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-AGCM3-2S')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mri_cgcm3 = Model(
name='mri-cgcm3',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-CGCM3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
mri_esm1 = Model(
name='mri-esm1',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'MRI/MRI-ESM1')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NASA-GISS
nasa_giss_e2_h = Model(
name='giss-e2-h',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-H')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
nasa_giss_e2_h_cc = Model(
name='giss-e2-h-cc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-H-CC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
nasa_giss_e2_r = Model(
name='giss-e2-r',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-R')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
nasa_giss_e2_r_cc = Model(
name='giss-e2-r-cc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GISS/GISS-E2-R-CC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NASA-GMAO
nasa_gmao_geos_5 = Model(
name='gmao-geos-5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NASA-GMAO/GEOS-5')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NCAR
ncar_ccsm4 = Model(
name='ncar-ccsm4',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCAR/CCSM4')),
grid_file_paths=[
'/archive/pcmdi/repo/CMIP5/output/NCAR/CCSM4/piControl/fx/atmos/fx/'
'r0i0p0/v20120413/orog/orog_fx_CCSM4_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/NCAR/CCSM4/piControl/fx/atmos/fx/'
'r0i0p0/v20120413/sftlf/sftlf_fx_CCSM4_piControl_r0i0p0.nc',
'/archive/pcmdi/repo/CMIP5/output/NCAR/CCSM4/piControl/fx/atmos/fx/'
'r0i0p0/v20120213/areacella/areacella_fx_CCSM4_piControl_r0i0p0.nc',
],
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NCC
ncc_noresm1_m = Model(
name='ncc-noresm1-m',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCC/NorESM1-M')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
ncc_noresm1_me = Model(
name='ncc-noresm1-me',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCC/NorESM1-me')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NCEP
ncep_cfsv2_2011 = Model(
name='ncep_cfsv2-2011',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NCEP/CFSv2-2011')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NIMR-KMA
nimr_kma_hadgem2_ao = Model(
name='nimr-kma-hadgem2-ao',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NIMR-KMA/HadGEM2-AO')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NOAA-GFDL
gfdl_cm2_1 = Model(
name='gfdl_cm2.1',
description='NOAA GFDL CM2.1 AOGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-CM2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_cm3 = Model(
name='gfdl_cm3',
description='NOAA GFDL CM3 AOGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-CM3')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_esm2m = Model(
name='gfdl_esm2m',
description='NOAA GFDL ESM2M earth-system model',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-ESM2M')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_esm2g = Model(
name='gfdl_esm2g',
description='NOAA GFDL ESM2G earth-system model',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-ESM2G')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_hiram_c180 = Model(
name='gfdl_hiram-c180',
description='NOAA GFDL HIRAM-C180 AGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-HIRAM-C180')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
gfdl_hiram_c360 = Model(
name='gfdl_hiram-c360',
description='NOAA GFDL HIRAM-C360 AGCM',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NOAA-GFDL/GFDL-HIRAM-C360')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# NSF-DOE-NCAR
cesm1_bgc = Model(
name='cesm1-bgc',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-BGC')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_cam5 = Model(
name='ncar_cesm1_cam5',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-CAM5')),
grid_file_paths=['/archive/s1h/cmip5/cam5_land_mask/cam5_land_mask.nc'],
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_cam5_1_fv2 = Model(
name='cesm1-cam5-1-fv2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-CAM5-1-FV2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_fastchem = Model(
name='cesm1-fastchem',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-FASTCHEM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
cesm1_waccm = Model(
name='cesm1-waccm',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'NSF-DOE-NCAR/CESM1-WACCM')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# SMHI
smhi_ec_earth = Model(
name='smhi_ec_earth',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'SMHI/EC-EARTH')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
# UNSW
unsw_csiro_mk3l_1_2 = Model(
name='unsw-csiro-mk3l-1-2',
description='',
# data_dir_struc='gfdl_repo',
# data_direc=os.path.realpath(os.path.join(root_dir, 'UNSW/CSIRO-Mk3L-1-2')),
runs=[runs.amip, runs.amip4K],
default_runs=[runs.amip, runs.amip4K]
)
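# Editorial sketch (hedged; not part of the original module): a minimal
# name -> Model registry for the NOAA-GFDL entries above, assuming aospy's
# Model stores its `name` keyword as a `.name` attribute.
_gfdl_models = [gfdl_cm2_1, gfdl_cm3, gfdl_esm2m, gfdl_esm2g,
                gfdl_hiram_c180, gfdl_hiram_c360]
gfdl_models_by_name = {model.name: model for model in _gfdl_models}
# e.g. gfdl_models_by_name['gfdl_cm3'] would retrieve the CM3 Model above.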
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/models/cmip5_models.py",
"copies": "1",
"size": "18439",
"license": "apache-2.0",
"hash": 5292037132025496000,
"line_mean": 29.7829716194,
"line_max": 91,
"alpha_frac": 0.6409783611,
"autogenerated": false,
"ratio": 2.3147125282450416,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3455690889345041,
"avg_score": null,
"num_lines": null
} |
"""aospy Model objects corresponding to observational & renalayses data."""
import datetime
from aospy.model import Model
from .. import runs
# Precipitation
cru = Model(
name='cru',
description='Univ. East Anglia Climate Research Unit obs',
grid_file_paths=('/archive/Spencer.Hill/obs/HadCRU/3.22/'
'cru_ts3.22.1901.2013.pre.dat.nc',),
data_dur=113,
data_start_date=datetime.datetime(1901, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
runs=[runs.cru_v322],
default_runs=[runs.cru_v322]
)
prec_l = Model(
name='prec_l',
description='NOAA PRECipitation REConstruction over Land (PREC/L)',
grid_file_paths=('/archive/Spencer.Hill/obs/PREC_L/20150212/'
'precip.mon.mean.1x1.nc',),
data_dur=64,
data_start_date=datetime.datetime(1948, 1, 1),
data_end_date=datetime.datetime(2012, 12, 31),
# runs=[runs.prec_l_0p5deg, runs.prec_l_1deg, runs.prec_l_2p5deg]
runs=[runs.prec_l_1deg],
default_runs=[runs.prec_l_1deg]
)
gpcp = Model(
name='gpcp',
description=('Global Precipitation Climatology Project: '
'http://www.gewex.org/gpcp.html'),
grid_file_paths=([
'/archive/pcmdi/repo/obs4MIPs/NASA-GSFC/GPCP/atmos/'
'mon/v20130401/pr_GPCP-SG_L3_v2.2_' + yrs + '.nc' for yrs in
('197901-197912', '198001-198912', '199001-199912',
'200001-200912', '201001-201312')
],),
data_dur=10,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
runs=[runs.gpcp_v2p2],
default_runs=[runs.gpcp_v2p2]
)
cmap = Model(
name='cmap',
description='CPC Merged Analysis of Precipitation',
grid_file_paths=('/archive/Spencer.Hill/obs/CMAP/standard/'
'precip.mon.mean.nc',),
data_dur=36,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2014, 12, 31),
runs=[runs.cmap_standard, runs.cmap_enhanced],
default_runs=[runs.cmap_standard]
)
trmm = Model(
name='trmm',
description=('Tropical Rainfall Measuring Mission: '
'http://trmm.gsfc.nasa.gov/'),
grid_file_paths=(['/archive/pcmdi/repo/obs4MIPs/NASA-GSFC/TRMM/atmos/'
'mon/v20130204/pr_TRMM-L3_v7A_' + yrs + '.nc' for yrs
in ('200001-200912', '201001-201009')],),
data_dur=10,
data_start_date=datetime.datetime(2000, 1, 1),
data_end_date=datetime.datetime(2010, 9, 30),
runs=[runs.trmm_v7a],
default_runs=[runs.trmm_v7a]
)
udel = Model(
name='udel',
description='U. Delaware gridded land data from station obs',
grid_file_paths=('/archive/Spencer.Hill/obs/U_Del/precip.mon.total.v301.nc',),
data_dur=111,
data_start_date=datetime.datetime(1900, 1, 1),
runs=[runs.udel_v201, runs.udel_v301],
default_runs=[runs.udel_v301]
)
# Radiation
ceres = Model(
name='ceres',
grid_file_paths=(
'/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF/atmos/mon/v20140402/CERES-EBAF/'
'rsut_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
),
data_dur=14,
data_start_date=datetime.datetime(2000, 1, 1),
data_end_date=datetime.datetime(2013, 3, 31),
runs=[runs.ceres_ebaf, runs.ceres_ebaf_sfc],
default_runs=[runs.ceres_ebaf]
)
# Reanalyses
era = Model(
name='era',
description='ERA reanalyses',
grid_file_paths=('/archive/pcmdi/repo/ana4MIPs/ECMWF/ERA-Interim/atmos/'
                     'mon/v20140416/wap_Amon_reanalysis_IFS-Cy31r2_197901-197912.nc',),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
runs=[runs.era_i],
default_runs=[runs.era_i]
)
merra = Model(
name='merra',
description='MERRA reanalyses',
grid_file_paths=(
# ['/archive/pcmdi/repo/ana4MIPs/NASA-GMAO/MERRA/atmos/'
# 'mon/v20140624/hfss_Amon_reanalysis_MERRA_' + yrs +
# '.nc' for yrs in [str(yr) + '01-' + str(yr) + '12'
# for yr in range(1979, 2012)]],
['/archive/pcmdi/repo/ana4MIPs/NASA-GMAO/MERRA/atmos/'
'mon/v20140624/wap_Amon_reanalysis_MERRA_' + yrs +
'.nc' for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2011, 12, 31),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
runs=[runs.merra],
default_runs=[runs.merra]
)
cfsr = Model(
name='cfsr',
description='NCEP CFSR reanalyses',
grid_file_paths=(
['/archive/pcmdi/repo/ana4MIPs/NOAA-NCEP/CFSR/atmos/'
'mon/v20140822/zg_Amon_reanalysis_CFSR_' + yrs +
'.nc' for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2014)]],
# ('/archive/pcmdi/repo/ana4MIPs/NOAA-NCEP/CFSR/atmos/'
# 'mon/v20140822/rlut_Amon_reanalysis_CFSR_201201-201212.nc'),
),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
runs=[runs.cfsr],
default_runs=[runs.cfsr]
)
jra = Model(
name='jra',
description='JRA-25 reanalyses',
grid_file_paths=('/archive/pcmdi/repo/ana4MIPs/JMA/JRA-25/atmos/mon/'
'v20140408/va_Amon_reanalysis_JRA-25_197901-201312.nc',),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
runs=[runs.jra25]
)
# Evapotranspiration
landflux = Model(
name='landflux-eval',
description='LandFlux-EVAL evapotranspiration data, 1989-2005',
grid_file_paths=('/archive/Spencer.Hill/obs/LandFlux-EVAL/'
'LandFluxEVAL.merged.89-05.monthly.all.nc',),
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
runs=[runs.lfe_all, runs.lfe_diag, runs.lfe_lsm, runs.lfe_rean],
default_runs=[runs.lfe_all]
)
landflux95 = Model(
name='landflux-eval95',
description='LandFlux-EVAL evapotranspiration data, 1989-1995',
grid_file_paths=('/archive/Spencer.Hill/obs/LandFlux-EVAL/'
'LandFluxEVAL.merged.89-95.monthly.all.nc',),
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
runs=[runs.lfe95_all, runs.lfe95_diag, runs.lfe95_lsm, runs.lfe95_rean],
default_runs=[runs.lfe95_all]
)
# SST
hadisst = Model(
name='hadisst',
description='HadISST: Hadley Centre SST and sea ice obs datasets',
grid_file_paths=('/archive/Spencer.Hill/obs/HadISST/HadISST_sst.nc',),
data_dur=1,
data_start_date=datetime.datetime(2005, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
runs=[runs.hadisst1],
default_runs=[runs.hadisst1]
)
hurrell = Model(
name='hurrell',
description='Hurrell SST observational dataset',
grid_file_paths=('/archive/Spencer.Hill/obs/Hurrell/'
'sst.climo.1981-2000.data.nc',),
data_dur=1,
data_start_date=datetime.datetime(2000, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
runs=[runs.hurrell],
default_runs=[runs.hurrell]
)
reynolds_oi = Model(
name='reynolds_oi',
description='Reynolds OI SST observational dataset',
grid_file_paths=('/archive/Spencer.Hill/obs/ReynoldsOI/reyoi_sst.data.nc',),
data_dur=19,
data_start_date=datetime.datetime(1981, 11, 1),
data_end_date=datetime.datetime(1999, 1, 31),
default_start_date=datetime.datetime(1982, 1, 1),
default_end_date=datetime.datetime(1998, 12, 31),
runs=[runs.reynolds_oi],
default_runs=[runs.reynolds_oi]
)
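# Editorial sketch (hedged; not part of the original module): length in
# years of a Model's default analysis window, assuming aospy's Model keeps
# the `default_start_date`/`default_end_date` keywords as attributes.
def _default_window_years(model):
    """Number of calendar years in a Model's default analysis window."""
    return model.default_end_date.year - model.default_start_date.year + 1
# e.g. _default_window_years(era) == 20 for the 1981-2000 defaults above.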
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/models/obs_models.py",
"copies": "1",
"size": "8015",
"license": "apache-2.0",
"hash": 6435464503015499000,
"line_mean": 35.598173516,
"line_max": 86,
"alpha_frac": 0.6348097318,
"autogenerated": false,
"ratio": 2.7105174163003043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8842811168101017,
"avg_score": 0.0005031959998575002,
"num_lines": 219
} |
"""aospy.Run objects for observational data."""
import datetime
from aospy.run import Run
from aospy.data_loader import NestedDictDataLoader
# CRU
cru_v322 = Run(
name='v3.22',
description='CRU v3.22',
data_direc='/archive/Spencer.Hill/obs/HadCRU/3.22',
data_dur=113,
data_start_date=datetime.datetime(1901, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
data_files={
'precip': 'cru_ts3.22.1901.2013.pre.dat.nc',
'cld_amt': 'cru_ts3.22.1901.2013.cld.dat.nc',
'diurnal_temp_range': 'cru_ts3.22.1901.2013.dtr.dat.nc',
'ground_frost_freq': 'cru_ts3.22.1901.2013.frs.dat.nc',
'pet': 'cru_ts3.22.1901.2013.pet.dat.nc',
't_surf_min': 'cru_ts3.22.1901.2013.tmn.dat.nc',
't_surf_max': 'cru_ts3.22.1901.2013.tmx.dat.nc',
't_surf': 'cru_ts3.22.1901.2013.tmp.dat.nc',
'vap_pres': 'cru_ts3.22.1901.2013.vap.dat.nc',
'wet_day_freq': 'cru_ts3.22.1901.2013.wet.dat.nc'
}
)
# PREC/L
prec_l_0p5deg = Run(
name='0.5deg',
description='PREC/L 0.5x0.5 degree resolution',
data_direc='/archive/Spencer.Hill/obs/PREC_L/20150212',
data_dur=64,
data_start_date=datetime.datetime(1948, 1, 1),
data_end_date=datetime.datetime(2011, 12, 31),
data_files={'precip': 'precip.mon.mean.0.5x0.5.nc'}
)
prec_l_1deg = Run(
name='1deg',
description='PREC/L 1x1 degree resolution',
data_direc='/archive/Spencer.Hill/obs/PREC_L/20150212',
data_dur=67,
data_start_date=datetime.datetime(1948, 1, 1),
data_end_date=datetime.datetime(2014, 12, 31),
data_files={'precip': 'precip.mon.mean.1x1.nc'}
)
prec_l_2p5deg = Run(
name='2.5deg',
description='PREC/L 2.5x2.5 degree resolution',
data_direc='/archive/Spencer.Hill/obs/PREC_L/20150212',
data_dur=67,
data_start_date=datetime.datetime(1948, 1, 1),
data_end_date=datetime.datetime(2014, 12, 31),
data_files={'precip': 'precip.mon.mean.2.5x2.5.nc'}
)
# CERES
ceres_ebaf = Run(
name='ebaf',
description='CERES EBAF',
data_direc=('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/CERES-EBAF/'
'atmos/mon/v20140402/CERES-EBAF'),
data_dur=14,
data_start_date=datetime.datetime(2000, 3, 1),
data_end_date=datetime.datetime(2013, 10, 31),
data_suffix='_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
data_files={
'swdn_toa': 'rsdt_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
'swup_toa': 'rsut_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
'swup_toa_clr': 'rsutcs_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
'olr': 'rlut_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
'olr_clr': 'rlutcs_CERES-EBAF_L3B_Ed2-8_200003-201310.nc',
'swdn_sfc': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rsds_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
'swdn_sfc_clr': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rsdscs_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
'swup_sfc': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rsus_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
'swup_sfc_clr': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rsuscs_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
'lwdn_sfc': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rlds_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
'lwdn_sfc_clr': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rldscs_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
'lwup_sfc': ('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/'
'CERES-EBAF_Surface/atmos/mon/v20140402/'
'rlus_CERES-EBAF_L3B_Ed2-7_200003-201303.nc'),
}
)
ceres_ebaf_sfc = Run(
name='ebaf-sfc',
description='CERES EBAF-surface',
data_direc=('/archive/pcmdi/repo/obs4MIPs/NASA-LaRC/CERES-EBAF_Surface/'
'atmos/mon/v20140402'),
data_dur=14,
data_start_date=datetime.datetime(2000, 3, 1),
data_end_date=datetime.datetime(2013, 3, 31),
data_suffix='_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
data_files={
'swdn_sfc': 'rsds_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
'swdn_sfc_clr': 'rsdscs_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
'swup_sfc': 'rsus_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
'swup_sfc_clr': 'rsuscs_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
'lwdn_sfc': 'rlds_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
'lwdn_sfc_clr': 'rldscs_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
'lwup_sfc': 'rlus_CERES-EBAF_L3B_Ed2-7_200003-201303.nc',
}
)
# GPCP
gpcp_v2p2 = Run(
name='v2p2',
description=('GPCP v2.2 gridded precipitation, from blend of '
'satellite and station gauge data.'),
data_direc='/archive/pcmdi/repo/obs4MIPs/NASA-GSFC/GPCP/atmos/',
data_dur=10,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
data_files={'monthly':
['mon/v20130401/pr_GPCP-SG_L3_v2.2_' + yrs + '.nc' for yrs in
('197901-197912', '198001-198912', '199001-199912',
'200001-200912', '201001-201312')],
'pentad': 'day/v20121003/'}
)
# TRMM
trmm_v7a = Run(
name='v7a',
description='TRMM v7 gridded precipitation, from satellite data',
data_direc='/archive/pcmdi/repo/obs4MIPs/NASA-GSFC/TRMM/atmos/',
data_dur=2,
data_start_date=datetime.datetime(2000, 1, 1),
data_end_date=datetime.datetime(2010, 9, 30),
data_files={'monthly': ['mon/v20130204/pr_TRMM-L3_v7A_' + yrs + '.nc'
for yrs in ('200001-200912', '201001-201009')]}
)
# CMAP
cmap_standard = Run(
name='standard',
description=('CMAP standard version, which does not include NCEP '
'reanalysis data to fill in gaps.'),
data_direc='/archive/Spencer.Hill/obs/CMAP/standard',
data_dur=36,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2014, 12, 31),
data_files={'monthly': 'precip.mon.mean.nc',
'pentad': 'precip.pentad.mean.nc'}
)
cmap_enhanced = Run(
name='enhanced',
description=('CMAP enhanced version, which includes NCEP reanalysis '
'data to fill in gaps.'),
data_direc='/archive/Spencer.Hill/obs/CMAP/enhanced',
data_dur=36,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2014, 12, 31),
data_files={'monthly': 'precip.mon.mean.nc',
'pentad': 'precip.pentad.mean.nc'}
)
# U. Delaware
udel_v201 = Run(
name='v201',
description='U. Delaware version 2.01',
data_direc='/archive/Spencer.Hill/obs/U_Del',
data_dur=109,
data_start_date=datetime.datetime(1900, 1, 1),
data_end_date=datetime.datetime(2008, 12, 31),
data_files={'precip': 'precip.mon.total.v201.nc',
't_surf': 'air.mon.total.v201.nc'}
)
udel_v301 = Run(
name='v301',
description='U. Delaware version 3.01',
data_direc='/archive/Spencer.Hill/obs/U_Del',
data_dur=111,
data_start_date=datetime.datetime(1900, 1, 1),
data_end_date=datetime.datetime(2010, 12, 31),
data_files={'precip': 'precip.mon.total.v301.nc',
't_surf': 'air.mon.total.v301.nc'}
)
# ERA-Interim
era_i = Run(
name='interim',
description='',
data_direc=('/archive/pcmdi/repo/ana4MIPs/ECMWF/ERA-Interim/atmos/'
'mon/v20140416'),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
# data_dir_struc='one_dir',
data_dir_struc='gfdl_repo',
data_files={
'cld_amt': 'cl_*.nc',
'evap': 'evspsbl_*.nc',
'hght': 'zg_*.nc',
'lwdn_sfc': 'rlds_*.nc',
'lwup_sfc': 'rlus_*.nc',
'olr': 'rlut_*.nc',
'olr_clr': 'rlutcs_*.nc',
'omega': 'wap_*.nc',
'precip': 'pr_*.nc',
'ps': 'ps_*.nc',
'rh': 'hur_*.nc',
'shflx': 'hfss_*.nc',
'slp': 'psl_*.nc',
'sphum': 'hus_*.nc',
'swdn_sfc': 'rsds_*.nc',
'swdn_toa': 'rsdt_*.nc',
'swup_sfc': 'rsus_*.nc',
# 'swup_toa': 'rsut_*.nc',
't_surf': 'tas_*.nc',
'temp': 'ta_*.nc',
'ucomp': 'ua_*.nc',
'vcomp': 'va_*.nc',
'wvp': 'prw_*.nc',
}
)
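# Editorial note (hedged): unlike the explicit per-file mappings elsewhere
# in this module, `era_i` pairs data_dir_struc='gfdl_repo' with glob-style
# patterns ('*' wildcards), which aospy presumably expands against the
# repository directory layout when the data are loaded.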
# MERRA
merra = Run(
name='merra',
description='',
data_direc=('/archive/pcmdi/repo/ana4MIPs/NASA-GMAO/MERRA/atmos/mon/'
'v20140624'),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2011, 12, 31),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
data_files={
'cld_amt': ['cl_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'evap': ['evspsbl_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'hght': ['zg_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'lwdn_sfc': ['rlds_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'lwdn_sfc_clr': ['rldscs_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'lwup_sfc': ['rlus_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'olr': ['rlut_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'olr_clr': ['rlutcs_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'omega': ['wap_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'precip': ['pr_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'ps': ['ps_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'rh': ['hur_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'shflx': ['hfss_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'slp': ['psl_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'sphum': ['hus_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'swdn_sfc': ['rsds_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
# 'swup_sfc': ['rsus_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
# ['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'swdn_toa': ['rsdt_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'swup_toa': ['rsut_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'swup_toa_clr': ['rsutcs_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'temp': ['ta_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'ucomp': ['ua_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]],
'vcomp': ['va_Amon_reanalysis_MERRA_' + yrs + '.nc' for yrs in
['%s01-%s12' % (yr, yr) for yr in range(1979, 2012)]],
'wvp': ['prw_Amon_reanalysis_MERRA_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2012)]]
}
)
# NCEP CFSR
cfsr = Run(
name='cfsr',
description='',
data_direc=('/archive/pcmdi/repo/ana4MIPs/NOAA-NCEP/CFSR/atmos/'
'mon/v20140822'),
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
default_start_date=datetime.datetime(1981, 1, 1),
default_end_date=datetime.datetime(2000, 12, 31),
data_files={
        'cld_amt': ['cl_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                    ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'hght': ['zg_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                 ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'omega': ['wap_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                  ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'rh': ['hur_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
               ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'sphum': ['hus_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                  ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'temp': ['ta_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                 ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'ucomp': ['ua_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                  ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
        'vcomp': ['va_Amon_reanalysis_CFSR_' + yrs + '.nc' for yrs in
                  ['%s01-%s12' % (yr, yr) for yr in range(1979, 2014)]],
'evap': 'evspsbl_Amon_reanalysis_CFSR_197901-201112.nc',
'lwdn_sfc': 'rlds_Amon_reanalysis_CFSR_197901-201112.nc',
'lwdn_sfc_clr': 'rldscs_Amon_reanalysis_CFSR_197901-201112.nc',
'lwup_sfc': 'rlus_Amon_reanalysis_CFSR_197901-201112.nc',
'olr': 'rlut_Amon_reanalysis_CFSR_197901-201112.nc',
'olr_clr': 'rlutcs_Amon_reanalysis_CFSR_197901-201112.nc',
'precip': 'pr_Amon_reanalysis_CFSR_197901-201112.nc',
'ps': 'ps_Amon_reanalysis_CFSR_197901-201112.nc',
'shflx': 'hfss_Amon_reanalysis_CFSR_197901-201112.nc',
'slp': 'psl_Amon_reanalysis_CFSR_197901-201112.nc',
'swdn_sfc': 'rsds_Amon_reanalysis_CFSR_197901-201112.nc',
'swdn_sfc_clr': 'rsdscs_Amon_reanalysis_CFSR_197901-201112.nc',
'swdn_toa': 'rsdt_Amon_reanalysis_CFSR_197901-201112.nc',
'swup_sfc': 'rsus_Amon_reanalysis_CFSR_197901-201112.nc',
'swup_sfc_clr': 'rsuscs_Amon_reanalysis_CFSR_197901-201112.nc',
'swup_toa': 'rsut_Amon_reanalysis_CFSR_197901-201112.nc',
'swup_toa_clr': 'rsutcs_Amon_reanalysis_CFSR_197901-201112.nc',
't_surf': 'tas_Amon_reanalysis_CFSR_197901-201112.nc',
'wvp': 'prw_Amon_reanalysis_CFSR_197901-201112.nc',
}
)
# JMA JRA-25
jra25 = Run(
name='jra-25',
    description='Japan Meteorological Agency reanalyses',
data_direc='/archive/pcmdi/repo/ana4MIPs/JMA/JRA-25/atmos/mon/v20140408',
data_dur=1,
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(2013, 12, 31),
data_files={'monthly': ['va_Amon_reanalysis_JRA-25_' + yrs + '.nc'
for yrs in [str(yr) + '01-' + str(yr) + '12'
for yr in range(1979, 2014)]]}
)
# LandFlux-EVAL 1989-2005
lfe_all = Run(
name='all',
description='LandFlux-EVAL 1989-2005 using all products',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-05.monthly.all.nc',
'annual': 'LandFluxEVAL.merged.89-05.yearly.all.nc'}
)
lfe_diag = Run(
name='diagnostic',
description='LandFlux-EVAL 1989-2005 using only diagnostic products',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-05.monthly.diagnostic.nc',
'annual': 'LandFluxEVAL.merged.89-05.yearly.diagnostic.nc'}
)
lfe_lsm = Run(
name='lsm',
description='LandFlux-EVAL 1989-2005 using land surface models only',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-05.monthly.lsm.nc',
'annual': 'LandFluxEVAL.merged.89-05.yearly.lsm.nc'}
)
lfe_rean = Run(
name='reanalyses',
description='LandFlux-EVAL 1989-2005 using reanalyses only',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-05.monthly.reanalyses.nc',
                'annual': 'LandFluxEVAL.merged.89-05.yearly.reanalyses.nc'}
)
# LandFlux-EVAL 1989-1995
lfe95_all = Run(
name='all',
description='LandFlux-EVAL 1989-1995 using all products',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-95.monthly.all.nc',
'annual': 'LandFluxEVAL.merged.89-95.yearly.all.nc'}
)
lfe95_diag = Run(
name='diagnostic',
description='LandFlux-EVAL 1989-1995 using diagnostic products only',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-95.monthly.diagnostic.nc',
'annual': 'LandFluxEVAL.merged.89-95.yearly.diagnostic.nc'}
)
lfe95_lsm = Run(
name='lsm',
description='LandFlux-EVAL 1989-1995 using land surface models only',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-95.monthly.lsm.nc',
'annual': 'LandFluxEVAL.merged.89-95.yearly.lsm.nc'}
)
lfe95_rean = Run(
name='reanalyses',
description='LandFlux-EVAL 1989-1995 using reanalyses only',
data_direc='/archive/Spencer.Hill/obs/LandFlux-EVAL',
data_dur=17,
data_start_date=datetime.datetime(1989, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
data_files={'monthly': 'LandFluxEVAL.merged.89-95.monthly.reanalyses.nc',
                'annual': 'LandFluxEVAL.merged.89-95.yearly.reanalyses.nc'}
)
# SST datasets
hadisst1 = Run(
name='hadisst1',
description='HadISST1 product; SST data only',
data_direc='/archive/Spencer.Hill/obs/HadISST',
data_dur=1,
data_start_date=datetime.datetime(2005, 1, 1),
data_end_date=datetime.datetime(2005, 12, 31),
data_files={'monthly': '/archive/Spencer.Hill/obs/HadISST/HadISST_sst.nc'}
)
hurrell = Run(
name='hurrell',
description='Hurrell SST product',
data_direc='/archive/Spencer.Hill/obs/Hurrell',
data_dur=1,
data_start_date=datetime.datetime(2000, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
data_files={'monthly':
'/archive/Spencer.Hill/obs/Hurrell/sst.climo.1981-2000.data.nc'}
)
reynolds_oi = Run(
name='reynolds_oi',
description='Reynolds OI SST observational dataset',
data_direc='/archive/Spencer.Hill/obs/ReynoldsOI',
data_dur=19,
data_start_date=datetime.datetime(1981, 11, 1),
data_end_date=datetime.datetime(1999, 1, 31),
data_files={'monthly':
'/archive/Spencer.Hill/obs/ReynoldsOI/reyoi_sst.data.nc'}
)
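# Editorial sketch (hedged; not part of the original module): the `merra`
# and `cfsr` entries above repeat the same per-year filename comprehension
# for every variable; a hypothetical helper could generate those lists:
def _yearly_files(var, reanalysis, first_year, last_year):
    """Build one ana4MIPs monthly filename per year, e.g.
    'wap_Amon_reanalysis_MERRA_197901-197912.nc'."""
    return ['%s_Amon_reanalysis_%s_%d01-%d12.nc' % (var, reanalysis, yr, yr)
            for yr in range(first_year, last_year + 1)]
# e.g. _yearly_files('wap', 'MERRA', 1979, 2011) reproduces the 'omega'
# entry of the `merra` Run above.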
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/runs/obs_runs.py",
"copies": "1",
"size": "21332",
"license": "apache-2.0",
"hash": 2071836269344356600,
"line_mean": 42.7131147541,
"line_max": 79,
"alpha_frac": 0.5732233265,
"autogenerated": false,
"ratio": 2.6062309102015884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3679454236701588,
"avg_score": null,
"num_lines": null
} |
"""aospy.Run objects for simulations from the GFDL HiRAM model."""
import datetime
from aospy import Run
from aospy.data_loader import GFDLDataLoader
hiram_cont = Run(
name='cont',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs '
'and sea ice repeated annually, with PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim/'
'gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1979, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
),
)
hiram_aero = Run(
name='aero',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and '
'sea ice repeated annually, overlaid with annual cycle of '
'equilibrium SST anomalies from a PI-to-PD aerosols '
'simulation of AM2.1 with a mixed layer ocean. '
'PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero/'
'gfdl.ncrc2-default-prod/pp'),
),
)
hiram_atm = Run(
name='aero_tm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual tropical mean equilibrium '
'SST anomaly from a PI-to-PD aerosols simulation of AM2.1 with a '
'mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_'
'trop_mean/gfdl.ncrc2-default-prod/pp'),
),
)
hiram_amtm = Run(
name='aero_mtm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, subtracting annual tropical mean equilibrium SST '
'anomaly from a PI-to-PD aerosols simulation of AM2.1 with a mixed '
'layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_m_'
'trop_mean/gfdl.ncrc2-default-prod/pp'),
),
)
hiram_apac = Run(
name='aero_pac',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid in Pacific Ocean only with annual cycle '
'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_pac/'
'gfdl.ncrc2-default-prod/pp'),
),
)
hiram_aatl = Run(
name='aero_atl',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid in Atlantic Ocean only with annual cycle '
'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_atl/'
'gfdl.ncrc2-default-prod/pp'),
),
)
hiram_aind = Run(
name='aero_ind',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid in Indian Ocean only with annual cycle '
'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
        'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_ind/'
'gfdl.ncrc2-default-prod/pp'),
),
)
hiram_gas = Run(
name='gas',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and '
'sea ice repeated annually, overlaid with annual cycle of '
'equilibrium SST anomalies from a PI-to-PD WMGG and ozone '
'simulation of AM2.1 with a mixed layer ocean. '
'PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_gas_rerun2/'
'gfdl.ncrc2-default-prod/pp'),
),
)
hiram_gtm = Run(
name='gas_tm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea '
'ice repeated annually, overlaid with annual tropical mean '
'equilibrium SST anomaly from a PI-to-PD WMGG and ozone simulation '
'of AM2.1 with a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_gas_'
'trop_mean/gfdl.ncrc2-default-prod/pp'),
),
)
hiram_gmtm = Run(
name='gas_mtm',
description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
        'repeated annually, overlaid with annual cycle of equilibrium SST '
        'anomalies minus their annual tropical mean from a PI-to-PD WMGG and '
        'ozone simulation of AM2.1 with a mixed layer ocean. PD atmospheric '
        'composition.'
),
data_loader=GFDLDataLoader(
template=hiram_cont.data_loader,
data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_gas_m_'
'trop_mean/gfdl.ncrc2-default-prod/pp'),
),
)
# hiram_amip = Run(
# name='amip',
# ens_mem_prefix='/archive/hrao/ornl/cmip5/c180_hiram_',
# ens_mem_ext=['H1', 'H3'],
# ens_mem_suffix='/pp',
# data_dur=5,
# data_start_date=datetime.datetime(1979, 1, 1),
# data_end_date=datetime.datetime(2008, 12, 31),
# data_dir_struc='gfdl'
# )
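# Editorial sketch (hedged; not part of the original module): the
# aero_pac/aero_atl/aero_ind runs above differ only in a basin suffix in
# `data_direc`, so they could be built programmatically; `_basin_aero_run`
# is a hypothetical helper following the module's own Run/GFDLDataLoader
# usage.
def _basin_aero_run(basin):
    return Run(
        name='aero_' + basin,
        data_loader=GFDLDataLoader(
            template=hiram_cont.data_loader,
            data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_'
                        '%s/gfdl.ncrc2-default-prod/pp' % basin),
        ),
    )
# e.g. _basin_aero_run('pac') mirrors `hiram_apac` (minus its description).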
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/runs/hiram_runs.py",
"copies": "1",
"size": "6123",
"license": "apache-2.0",
"hash": 6103408727792456000,
"line_mean": 37.5094339623,
"line_max": 79,
"alpha_frac": 0.639229136,
"autogenerated": false,
"ratio": 2.940922190201729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4080151326201729,
"avg_score": null,
"num_lines": null
} |
"""aospy.Run objects for simulations from various GFDL models."""
import datetime
from aospy import Run
from aospy.data_loader import GFDLDataLoader
# SM2.1
sm2_cont = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie'
'_rerun6.YIM/pp'),
data_dur=20,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(120, 12, 31),
),
)
sm2_aero = Run(
name='aero',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie2'
'_rerun6.YIM/pp'),
data_dur=100,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(100, 12, 31),
),
)
sm2_gas = Run(
name='gas',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie3'
'_rerun8.YIM/pp'),
data_dur=5,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(80, 12, 31),
),
)
sm2_both = Run(
name='both',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie4'
'_rerun6.YIM/pp'),
data_dur=100,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(100, 12, 31),
),
)
# c48-HiRAM
hiram_c48_0 = Run(
name='ming0',
data_loader=GFDLDataLoader(
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0/'
'gfdl.ncrc2-intel-prod/pp'),
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
),
)
hiram_c48_0_p2K = Run(
name='ming0_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0'
'_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_1 = Run(
name='ming1',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0b/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_1_p2K = Run(
name='ming1_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0b_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_2 = Run(
name='ming2',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0e/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_2_p2K = Run(
name='ming2_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0e_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_3 = Run(
name='ming3',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0f/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_3_p2K = Run(
name='ming3_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0f_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_4 = Run(
name='ming4',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0c/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_4_p2K = Run(
name='ming4_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0c_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_5 = Run(
name='ming5',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X01/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_5_p2K = Run(
name='ming5_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X01_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_6 = Run(
name='ming6',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X02/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_6_p2K = Run(
name='ming6_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X02_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_7 = Run(
name='ming7',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X03/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_7_p2K = Run(
name='ming7_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X03_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_8 = Run(
name='ming8',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X04/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_8_p2K = Run(
name='ming8_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X04_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
# AM3_c90
am3c90_cont = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/h1g/FMS/siena_201203/c90L48_am3p10_v6_clim/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(1990, 12, 31),
),
)
am3c90_p2K = Run(
name='p2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/h1g/FMS/siena_201203/c90L48_am3p10_v6_clim_p2k/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(1990, 12, 31),
),
)
# AM2.5
am2p5_cont = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/hiramdp/siena_201204/c180l32_am2_C0/'
'gfdl.ncrc2-intel-prod/pp'),
data_dur=10,
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
),
)
am2p5_p2K = Run(
name='p2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/hiramdp/siena_201204/c180l32_am2_C0_p2K/'
'gfdl.ncrc2-intel-prod/pp'),
data_dur=10,
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
),
)
# AM4 prototypes
am4_a1c = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/Ming.Zhao/awg/tikal_201403/c96L48_am4a1_'
'2000climo_highsen1/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_a1p2k = Run(
name='+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Ming.Zhao/awg/tikal_201403/c96L48_am4a1_'
'2000climo_highsen1_p2K/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_a2c = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/cjg/awg/tikal_201403/c96L48_am4a2r1_'
'2000climo/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_a2p2k = Run(
name='+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/cjg/awg/tikal_201403/c96L48_am4a2r1_'
'2000climo_p2K/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_c1c = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/tikal_201409_awgUpdates_mom6_2014.08.29/'
'c96L48_am4c1r2_2000climo/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_c1p2k = Run(
name='+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/tikal_201409_awgUpdates_mom6_2014.08.29/'
'c96L48_am4c1r2_2000climo_p2K/gfdl.ncrc2-intel-prod-'
'openmp/pp'),
),
)
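# Editorial sketch (hedged; not part of the original module): every c48-HiRAM
# experiment above comes as a control/'+2K' pair whose directories differ
# only by a '_p2K' suffix on the experiment name; `_c48_pair` is a
# hypothetical factory reproducing that pattern.
def _c48_pair(name, expt):
    base = ('/archive/Ming.Zhao/hiramdp/siena_201204/%s/'
            'gfdl.ncrc2-intel-prod/pp')
    cont = Run(name=name, data_loader=GFDLDataLoader(
        template=hiram_c48_0.data_loader, data_direc=base % expt))
    p2k = Run(name=name + '_p2K', data_loader=GFDLDataLoader(
        template=hiram_c48_0.data_loader, data_direc=base % (expt + '_p2K')))
    return cont, p2k
# e.g. _c48_pair('ming1', 'c48l32_him_X0b') mirrors hiram_c48_1 and
# hiram_c48_1_p2K above.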
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/runs/gfdl_runs.py",
"copies": "1",
"size": "8778",
"license": "apache-2.0",
"hash": -8567861114673584000,
"line_mean": 30.1276595745,
"line_max": 79,
"alpha_frac": 0.5926179084,
"autogenerated": false,
"ratio": 2.429559922502076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35221778309020757,
"avg_score": null,
"num_lines": null
} |
"""aospy.Run objects for the CMIP5 Project."""
import datetime
from aospy import Run
onepctCO2 = Run(
name='1pctCO2',
description='Coupled; 1% increase in CO2 per year from PI',
)
abrupt4xCO2 = Run(
name='abrupt4xCO2',
description='Coupled; Instantaneous 4X CO2 increase',
)
amip = Run(
name='amip',
description='Atmosphere only',
# data_direc='mon/atmos/Amon/r1i1p1',
# data_dir_struc='gfdl_repo',
default_start_date=datetime.datetime(1979, 1, 1),
default_end_date=datetime.datetime(2008, 12, 31),
)
amip4K = Run(
name='amip4K',
description='Atmosphere only',
# data_direc='mon/atmos/Amon/r1i1p1',
# data_dir_struc='gfdl_repo',
default_start_date=datetime.datetime(1979, 1, 1),
default_end_date=datetime.datetime(2008, 12, 31),
)
amip4xCO2 = Run(
name='amip4xCO2',
description='Atmosphere only',
default_start_date=datetime.datetime(1979, 1, 1),
default_end_date=datetime.datetime(2008, 12, 31),
)
amipFuture = Run(
name='amipFuture',
description='Atmosphere only',
default_start_date=datetime.datetime(1979, 1, 1),
default_end_date=datetime.datetime(2008, 12, 31),
)
historical = Run(
name='historical',
description='Coupled',
)
rcp26 = Run(
name='rcp26',
description='Coupled',
)
rcp45 = Run(
name='rcp45',
description='Coupled',
)
rcp60 = Run(
name='rcp60',
description='Coupled',
)
rcp85 = Run(
    name='rcp85',
description='Coupled',
)
sstClim = Run(
name='sstClim',
description='Atmosphere only',
)
sstClim4xCO2 = Run(
name='sstClim4xCO2',
description='Atmosphere only',
)
aquaControl = Run(
name='aquaControl',
description='Atmosphere only',
)
aqua4K = Run(
name='aqua4K',
description='Atmosphere only',
)
aqua4xCO2 = Run(
name='aqua4xCO2',
description='Atmosphere only',
)
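# Editorial sketch (hedged; not part of the original module): convenience
# groupings of the Run objects above, following their 'Coupled' vs.
# 'Atmosphere only' descriptions.
coupled_runs = [onepctCO2, abrupt4xCO2, historical, rcp26, rcp45, rcp60,
                rcp85]
atmos_only_runs = [amip, amip4K, amip4xCO2, amipFuture, sstClim,
                   sstClim4xCO2, aquaControl, aqua4K, aqua4xCO2]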
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/runs/cmip5_runs.py",
"copies": "1",
"size": "1857",
"license": "apache-2.0",
"hash": -6651167933789053000,
"line_mean": 21.9259259259,
"line_max": 63,
"alpha_frac": 0.6596661282,
"autogenerated": false,
"ratio": 2.8394495412844036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8999115669484403,
"avg_score": 0,
"num_lines": 81
} |
"""aospy.Run objects library for simulations in GFDL AM2.1 model."""
import datetime
from aospy import Run
from aospy.data_loader import GFDLDataLoader
_old_runs_dataloader = GFDLDataLoader(
data_dur=16,
data_start_date=datetime.datetime(1983, 1, 1),
data_end_date=datetime.datetime(1998, 12, 31)
)
_new_runs_dataloader = GFDLDataLoader(
data_dur=30,
data_start_date=datetime.datetime(1983, 1, 1),
data_end_date=datetime.datetime(2012, 12, 31)
)
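# Editorial note (hedged): judging from usage throughout this module,
# GFDLDataLoader's `template` keyword copies the template loader's settings
# (here, data_dur and the 1983 start date) and overrides only the keywords
# passed explicitly -- e.g. `am2_noT` below keeps the template's start date
# but supplies its own data_dur and data_end_date.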
am2_cont = Run(
name='cont',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea '
'ice repeated annually, with PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/yim/siena_201203/m45_am2p14_1990/'
'gfdl.ncrc2-intel-prod/pp'),
)
)
am2_aero = Run(
name='aero',
description=(
"""1981-2000 HadISST climatological annual cycle of SSTs and sea ice
repeated annually, overlaid with annual cycle of equilibrium SST
anomalies from a PI-to-PD aerosols simulation of AM2.1 with a mixed
layer ocean. PD atmospheric composition."""
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_aero/'
'gfdl.ncrc2-intel-prod/pp'),
)
)
am2_atm = Run(
name='aero_tm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual tropical mean equilibrium '
'SST anomaly from a PI-to-PD aerosols simulation of AM2.1 with a '
'mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim'
'_aero_trop_mean/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_amtm = Run(
name='aero_mtm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, subtracting annual tropical mean equilibrium SST '
'anomaly from a PI-to-PD aerosols simulation of AM2.1 with a mixed '
'layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_aero_m'
'_trop_mean_fixed2/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_apac = Run(
name='aero_pac',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid in Pacific Ocean only with annual cycle '
'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_aero_'
'pac/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_aatl = Run(
name='aero_atl',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid in Atlantic Ocean only with annual '
'cycle of equilibrium SST anomalies from a PI-to-PD aerosols '
'simulation of AM2.1 with a mixed layer ocean. '
'PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_aero_'
'atl/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_aind = Run(
name='aero_ind',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid in Indian Ocean only with annual cycle '
'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_aero_'
'ind/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_gas = Run(
name='gas',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual cycle of equilibrium SST '
'anomalies from a PI-to-PD WMGG and ozone simulation of AM2.1 with '
'a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_gas/'
'gfdl.ncrc2-intel-prod/pp'),
)
)
am2_gtm = Run(
name='gas_tm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual tropical mean equilibrium '
'SST anomaly from a PI-to-PD WMGG and ozone simulation of AM2.1 with '
'a mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_gas_'
'trop_mean/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_gmtm = Run(
name='gas_mtm',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual cycle of equilibrium SST '
'anomalies minus their annual tropical mean from a PI-to-PD WMGG and '
'ozone simulation of AM2.1 with a mixed layer ocean. '
'PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/siena_201203/m45_am2p14_1990_clim_gas_'
'm_trop_mean/gfdl.ncrc2-intel-prod/pp'),
)
)
am2_noT = Run(
name='noTok',
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc='/archive/miz/GCM/miz_cess_noT/cess/am2_cess/pp',
data_dur=5,
data_end_date=datetime.datetime(1987, 12, 31)
)
)
am2_noT_p2K = Run(
name='noTok_p2K',
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc='/archive/miz/GCM/miz_cess_noT/cess+2/am2_cess+2/pp',
data_dur=5,
data_end_date=datetime.datetime(1987, 12, 31)
)
)
am2_amip = Run(
name='amip',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/AM2.1_1870-2004/AM2.1_1870-2004-'
'HGlob-SST-ICE-AllForc_B1-_B10_ens/pp'),
data_dur=5,
data_start_date=datetime.datetime(1870, 1, 1),
data_end_date=datetime.datetime(1999, 12, 31),
)
)
am2_reyoi_cont = Run(
name='reyoi_cont',
description='PI atmos and Reynolds OI climatological SSTs',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_m0p25 = Run(
name='reyoi-0.25K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-0p25K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m0p5 = Run(
name='reyoi-0.5K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-0p5K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m1 = Run(
name='reyoi-1K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-1K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m1p5 = Run(
name='reyoi-1.5K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-1p5K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m2 = Run(
name='reyoi-2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-2K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_m3 = Run(
name='reyoi-3K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-3K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m4 = Run(
name='reyoi-4K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-4K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m6 = Run(
name='reyoi-6K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-6K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m8 = Run(
name='reyoi-8K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-8K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m10 = Run(
name='reyoi-10K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-10K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_m15 = Run(
name='reyoi-15K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi-15K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p0p25 = Run(
name='reyoi+0.25K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+0p25K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p0p5 = Run(
name='reyoi+0.5K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+0p5K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p1 = Run(
name='reyoi+1K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+1K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p1p5 = Run(
name='reyoi+1.5K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+1p5K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p2 = Run(
name='reyoi+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+2K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_p3 = Run(
name='reyoi+3K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+3K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p4 = Run(
name='reyoi+4K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+4K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p6 = Run(
name='reyoi+6K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+6K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p8 = Run(
name='reyoi+8K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+8K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_p10 = Run(
name='reyoi+10K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi+10K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_wpwp_p2 = Run(
name='reyoi_wpwp+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_wpwp+2K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_wpwp_m2 = Run(
name='reyoi_wpwp-2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_wpwp-2K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_uw = Run(
name='reyoi_uw_lo_0p75',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_uw_p2 = Run(
name='reyoi_uw_lo_0p75_p2k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS and +2K SST'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1982, 1, 1),
),
default_start_date=datetime.datetime(1983, 1, 1)
)
am2_reyoi_uw_p5 = Run(
name='reyoi_uw_lo_0p75_p5k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS and +5K SST'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv+5K/'
'gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1982, 1, 1)
),
default_start_date=datetime.datetime(1983, 1, 1)
)
am2_reyoi_uw_p10 = Run(
name='reyoi_uw_lo_0p75_p10k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS and +10K SST'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv+10K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_uw_m2 = Run(
name='reyoi_uw_lo_0p75_m2k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS and -2K SST'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv-2K/'
'gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1982, 1, 1)
),
default_start_date=datetime.datetime(1983, 1, 1),
)
am2_reyoi_uw_m5 = Run(
name='reyoi_uw_lo_0p75_m5k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS and -5K SST'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv-5K/'
'gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1982, 1, 1)
),
default_start_date=datetime.datetime(1983, 1, 1),
)
am2_reyoi_uw_m10 = Run(
name='reyoi_uw_lo_0p75_m10k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
        'but using UW shallow convection scheme rather than RAS and -10K SST'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_conv-10K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5 = Run(
name='reyoi_uw_landocean0.5',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_lofactor0.5/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_p2k = Run(
name='reyoi_uw_landocean0.5+2K',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and +2K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_lofactor0.5'
'+2K/gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_p4k = Run(
name='reyoi_uw_lo_0p5_p4k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and +4K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw+4K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_p6k = Run(
name='reyoi_uw_lo_0p5_p6k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and +6K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw+6K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_p8k = Run(
name='reyoi_uw_lo_0p5_p8k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and +8K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw+8K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_p10k = Run(
name='reyoi_uw_lo_0p5_p10k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and +10K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw+10K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_m2k = Run(
name='reyoi_uw_lo_0p5_m2k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and -2K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw-2K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_m4k = Run(
name='reyoi_uw_lo_0p5_m4k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and -4K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw-4K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_m6k = Run(
name='reyoi_uw_lo_0p5_m6k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and -6K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw-6K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_m8k = Run(
name='reyoi_uw_lo_0p5_m8k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and -8K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw-8K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p5_m10k = Run(
name='reyoi_uw_lo_0p5_m10k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.5 and -10K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw-10K/'
'gfdl.ncrc3-intel-prod/pp'),
)
)
am2_reyoi_uw_lo_0p25 = Run(
name='reyoi_uw_lo_0p25',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.25'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_lofactor0.25/'
'gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1982, 1, 1),
),
default_start_date=datetime.datetime(1983, 1, 1),
)
am2_reyoi_uw_lo_0p25_p2k = Run(
name='reyoi_uw_lo_0p25_p2k',
description=(
'Same NOAA OI SST dataset climatology used in other `am2_reyoi` runs, '
'but using UW shallow convection scheme rather than RAS, and with the '
        'land-ocean entrainment rate ratio set to 0.25 and +2K SSTs'
),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_uw_lofactor0.25'
'+2K/gfdl.ncrc2-default-prod/pp'),
data_start_date=datetime.datetime(1982, 1, 1)
),
default_start_date=datetime.datetime(1983, 1, 1),
)
am2_cld_lock_cont = Run(
name='cld_lock_cont',
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/quickstart/m45_am2p14_1990_nocre_1995/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_cld_lock_cld = Run(
name='cld_lock+2Kcld',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Yi.Ming/quickstart/m45_am2p14_1990_nocre_1995_'
'p2K_fix2/gfdl.ncrc2-default-prod/pp'),
)
)
am2_cld_lock_sst = Run(
name='cld_lock+2Ksst',
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/quickstart/m45_am2p14_1990_nocre_1995_'
'p2K_fix1/gfdl.ncrc2-default-prod/pp'),
)
)
am2_cld_lock_p2 = Run(
name='cld_lock+2K',
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/quickstart/m45_am2p14_1990_nocre_1995'
'_p2K/gfdl.ncrc2-default-prod/pp'),
)
)
am2_hurrell_cont = Run(
name='hurrell_cont',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_hurrell/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_hurrell_p2 = Run(
name='hurrell+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_hurrell+2K/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_reynolds = Run(
name='reynolds_cont',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reynoldsEOF/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_reynolds_p2 = Run(
name='reynolds+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reynoldsEOF+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1,
)
)
am2_amip1 = Run(
name='amip1_cont',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_amip1/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_amip1_p2 = Run(
name='amip1+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_amip1+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_cld_seed_all_p2 = Run(
name='cld_seed_all+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_cld_seed_all+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_cld_seed_np_p2 = Run(
name='cld_seed_np+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_cld_seed_np+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_cld_seed_sp_p2 = Run(
name='cld_seed_sp+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_cld_seed_sp+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_cld_seed_sa_p2 = Run(
name='cld_seed_sa+2K',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_cld_seed_sa+2K/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1
)
)
am2_zshen_cont = Run(
name='zshen_cont',
description="Control run for Zhaoyi Shen's simulations",
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Zhaoyi.Shen/quickstart/m45_am2p14_1990_base/'
'gfdl.ncrc2-default-prod/pp'),
)
)
am2_atmos_heat_wpwp = Run(
name='atmos_heat_wpwp',
description=(
"Created by Zhaoyi Shen. Prescribed PD SSTs (standard AM2 control "
"simulation) but with a prescribed heating in the upper troposphere "
"above the Indo-Pacific Warm Pool, defined as 10S-10N,90E-150E."
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Zhaoyi.Shen/quickstart/m45_am2p14_1990_bc_l8_'
'WPWP/gfdl.ncrc2-default-prod-openmp/pp'),
)
)
am2_atmos_heat_wpwp_small = Run(
name='atmos_heat_wpwp_small',
description=(
"Created by Zhaoyi Shen. Prescribed PD SSTs (standard AM2 control "
"simulation) but with a prescribed heating in the upper troposphere "
"above the Indo-Pacific Warm Pool, defined as 5S-5N, 105E-125E."
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Zhaoyi.Shen/quickstart/m45_am2p14_1990_bc_l8_'
'WPWPs/gfdl.ncrc2-default-prod/pp'),
)
)
am2_reyoi_w_ice = Run(
name='reyoi_w_ice_file',
description='Standard climatological OI SSTs run but including ice file',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi_with_ice_file/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=1,
data_start_date=datetime.datetime(1982, 1, 1),
data_end_date=datetime.datetime(2012, 12, 31)
),
default_start_date=datetime.datetime(1983, 1, 1)
)
am2_test = Run(
name='test',
description='Dummy/testing run',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am2/am2test/'
'gfdl.ncrc2-default-prod/pp'),
data_dur=2,
data_end_date=datetime.datetime(1984, 12, 31),
)
)
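# Illustrative only (not part of this module): downstream aospy code
# typically gathers these Run objects into a Model, e.g.:
#
# from aospy import Model
# am2 = Model(name='am2', runs=[am2_test, am2_reyoi_w_ice])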
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/runs/am2_runs.py",
"copies": "1",
"size": "29101",
"license": "apache-2.0",
"hash": -5432856399557605000,
"line_mean": 34.3596597813,
"line_max": 79,
"alpha_frac": 0.6197037902,
"autogenerated": false,
"ratio": 2.718957301691115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8838661091891116,
"avg_score": 0,
"num_lines": 823
} |
"""aospy.Run objects library for simulations in the GFDL AM3 model."""
import datetime
from aospy import Run
from aospy.data_loader import GFDLDataLoader
_old_runs_dataloader = GFDLDataLoader(
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
data_dur=20
)
_new_runs_dataloader = GFDLDataLoader(
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(2010, 12, 31),
data_dur=30
)
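# Illustrative only (not an archive run): ``template`` lets a GFDLDataLoader
# inherit another loader's settings and override individual fields, as the
# template-based runs in this module do. A minimal sketch:
_example_loader = GFDLDataLoader(
    template=_new_runs_dataloader,  # inherits the 1981-2010 date range
    data_dur=1,                     # overrides only the file duration
)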
am3_cont = Run(
name='cont',
description=('1981-2000 Hurrell climatological annual cycle of SSTs and '
'sea ice, with PD atmospheric composition.'),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/fms/siena_201211/c48L48_am3p10/'
'gfdl.ncrc2-intel-prod/pp'),
)
)
am3_aero = Run(
name='aero',
description=(
'1981-2000 Hurrell climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual cycle of equilibrium SST '
'anomalies from a PI-to-PD aerosols simulation of AM2.1 with a mixed '
'layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
        data_direc=('/archive/Yi.Ming/fms/siena_201211/c48L48_am3p10_aero/'
                    'gfdl.ncrc2-intel-prod/pp'),
),
)
am3_atm = Run(
name='aero_tm',
description=(
'1981-2000 Hurrell climatological annual cycle of SSTs and sea ice '
'repeated annually, overlaid with annual tropical mean equilibrium '
'SST anomaly from a PI-to-PD aerosols simulation of AM2.1 with a '
'mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
        data_direc=('/archive/Yi.Ming/fms/siena_201211/'
                    'c48L48_am3p10_aero_trop_mean/gfdl.ncrc2-intel-prod/pp'),
),
)
am3_amtm = Run(
name='aero_mtm',
description=(
'1981-2000 Hurrell climatological annual cycle of SSTs and sea ice '
'repeated annually, subtracting annual tropical mean equilibrium SST '
'anomaly from a PI-to-PD aerosols simulation of AM2.1 with a mixed '
'layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
        data_direc=('/archive/Yi.Ming/fms/siena_201211/c48L48_am3p10_aero_m_'
                    'trop_mean_fixed/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_apac = Run(
name='aero_pac',
description=(
"""1981-2000 Hurrell climatological annual cycle of SSTs and sea ice
repeated annually, overlaid in Pacific Ocean only with annual cycle of
equilibrium SST anomalies from a PI-to-PD aerosols simulation of AM2.1
with a mixed layer ocean. PD atmospheric composition."""
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/fms/siena_201211/'
'c48L48_am3p10_aero_pac/gfdl.ncrc2-intel-prod/pp'),
),
)
am3_aatl = Run(
name='aero_atl',
description=(
"""1981-2000 Hurrell climatological annual cycle of SSTs and sea ice
repeated annually, overlaid in Atlantic Ocean only with annual cycle of
equilibrium SST anomalies from a PI-to-PD aerosols simulation of AM2.1
with a mixed layer ocean. PD atmospheric composition."""
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/fms/siena_201211/'
'c48L48_am3p10_aero_atl/gfdl.ncrc2-intel-prod/pp'),
),
)
am3_aind = Run(
name='aero_ind',
description=(
"""1981-2000 Hurrell climatological annual cycle of SSTs and sea ice
repeated annually, overlaid in Indian Ocean only with annual cycle of
equilibrium SST anomalies from a PI-to-PD aerosols simulation of AM2.1
with a mixed layer ocean. PD atmospheric composition."""
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
data_direc=('/archive/Yi.Ming/fms/siena_201211/'
'c48L48_am3p10_aero_ind/gfdl.ncrc2-intel-prod/pp'),
),
)
am3_gas = Run(
name='gas',
description=(
"""1981-2000 Hurrell climatological annual cycle of SSTs and sea ice
repeated annually, overlaid with annual cycle of equilibrium SST
anomalies from a PI-to-PD WMGG and ozone simulation of AM2.1 with a
mixed layer ocean. PD atmospheric composition."""
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
        data_direc=('/archive/Yi.Ming/fms/siena_201211/c48L48_am3p10_gas/'
                    'gfdl.ncrc2-intel-prod/pp'),
),
)
am3_gtm = Run(
name='gas_tm',
description=(
"""1981-2000 Hurrell climatological annual cycle of SSTs and sea ice
repeated annually, overlaid with annual tropical mean equilibrium SST
anomaly from a PI-to-PD WMGG and ozone simulation of AM2.1 with a mixed
layer ocean. PD atmospheric composition."""
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
        data_direc=('/archive/Yi.Ming/fms/siena_201211/c48L48_am3p10_gas_trop'
                    '_mean/gfdl.ncrc2-intel-prod/pp'),
),
)
am3_gmtm = Run(
name='gas_mtm',
description=(
'1981-2000 Hurrell climatological annual cycle of SSTs and '
'sea ice repeated annually, overlaid with annual cycle of '
'equilibrium SST anomalies minus their annual tropical mean '
'from a PI-to-PD WMGG and ozone simulation of AM2.1 with a '
'mixed layer ocean. PD atmospheric composition.'
),
data_loader=GFDLDataLoader(
template=_old_runs_dataloader,
        data_direc=('/archive/Yi.Ming/fms/siena_201211/c48L48_am3p10_gas_m_'
                    'trop_mean/gfdl.ncrc2-intel-prod/pp'),
),
)
# am3_amip = Run(
# name='amip',
# # ens_mem_prefix='/archive/lwh/fms/riga_201104/c48L48_am3p9_',
# ens_mem_ext=['ext', 'ext2', 'ext3'],
# ens_mem_suffix='/gfdl.intel-prod/pp',
# data_dur=136,
# data_start_date=datetime.datetime(1870, 1, 1),
# data_end_date=datetime.datetime(2005, 12, 31),
# default_end_date=datetime.datetime(2004, 12, 31),
# )
am3_hc = Run(
name='hurrell_cont',
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell/'
'gfdl.ncrc3-intel-prod-openmp/pp'),
),
)
am3_hp1k = Run(
name='hurrell+1K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell+1K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hp2k = Run(
name='hurrell+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell+2K/'
'gfdl.ncrc3-intel-prod-openmp/pp'),
),
)
am3_hp4k = Run(
name='hurrell+4K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell+4K/'
'gfdl.ncrc3-intel-prod-openmp/pp'),
),
)
am3_hp6k = Run(
name='hurrell+6K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell+6K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hp8k = Run(
name='hurrell+8K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell+8K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hp10k = Run(
name='hurrell+10K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell+10K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm1k = Run(
name='hurrell-1K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-1K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm2k = Run(
name='hurrell-2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-2K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm4k = Run(
name='hurrell-4K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-4K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm6k = Run(
name='hurrell-6K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-6K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm8k = Run(
name='hurrell-8K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-8K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm10k = Run(
name='hurrell-10K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-10K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hm15k = Run(
name='hurrell-15K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell-15K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
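# Illustrative only: the hurrell+/-NK runs above follow a single pattern, so
# the ncrc2-built members could equivalently be generated in a loop (the
# control, +2K, and +4K runs use ncrc3 paths and are kept explicit):
#
# hurrell_perturbed = {
#     offset: Run(
#         name='hurrell%s' % offset,
#         data_loader=GFDLDataLoader(
#             data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell%s/'
#                         'gfdl.ncrc2-intel-prod-openmp/pp' % offset),
#         ),
#     )
#     for offset in ('+1K', '+6K', '+8K', '+10K', '-1K', '-2K', '-4K',
#                    '-6K', '-8K', '-10K', '-15K')
# }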
am3_hwpwp_p2k = Run(
name='hurrell_wpwp+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell_wpwp+2K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
data_dur=1,
),
)
am3_hc_static_veg = Run(
name='hurrell_static_veg_cont',
description=('Climatological SST annual cycle from Hurrell dataset '
'repeated annually, with static year 2000 vegetation'),
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell_static_veg/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hc_static_veg_p4k = Run(
name='hurrell_static_veg+4K',
description='Hurrell climatological SSTs w/ uniform +4K and static veg',
data_loader=GFDLDataLoader(
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell_static_veg+4K/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am3_hc_static_veg_10kyr = Run(
name='hurrell_static_veg_10kyr',
description=('Hurrell climatological SSTs w/ 10 ka obliquity and '
'precession and static year 2000 vegetation'),
data_loader=GFDLDataLoader(
template=_new_runs_dataloader,
data_direc=('/archive/Spencer.Hill/am3/am3clim_hurrell_static_'
'veg_10kyr_obliq_prec/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
| {
"repo_name": "spencerahill/aospy-obj-lib",
"path": "aospy_user/runs/am3_runs.py",
"copies": "1",
"size": "10614",
"license": "apache-2.0",
"hash": -1755003917876972000,
"line_mean": 34.4983277592,
"line_max": 79,
"alpha_frac": 0.6277557942,
"autogenerated": false,
"ratio": 2.6966463414634148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38244021356634145,
"avg_score": null,
"num_lines": null
} |
# APACHE 2.0 LICENSE
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class GameWithKeys:
    """Wraps a store game with flags for which platforms a key exists on."""
    def __init__(self, game, is_steam, is_uplay, is_origin):
self.id = game.pk
self.name = game.name
self.price = game.price
self.description = game.description
self.img_location = game.img_name
self.rating = game.rating
self.has_steam = is_steam
self.has_uplay = is_uplay
self.has_origin = is_origin
    # Note: the accessors below share names with the instance attributes set
    # in __init__, so on instances the attribute values shadow these methods.
    def name(self):
        return self.name
def price(self):
return self.price
def id(self):
return self.id
def img_location(self):
return self.img_location
    def description(self):
        return self.description
def rating(self):
return self.rating
def has_steam(self):
return self.has_steam
def has_origin(self):
return self.has_origin
def has_uplay(self):
return self.has_uplay
def __str__(self):
return self.name
def __repr__(self):
return self.name
    def __cmp__(self, other):
        # Python 2 ordering hook; cmp() and __cmp__ were removed in Python 3.
        return cmp(self.name, other.name)
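# Illustrative usage only (``Game`` stands in for whatever model supplies
# pk/name/price/description/img_name/rating, e.g. a Django model):
#
# game = Game.objects.get(pk=1)
# entry = GameWithKeys(game, is_steam=True, is_uplay=False, is_origin=False)
# print(entry)      # prints the game's name via __str__
# entry.has_steam   # True; instance attribute set in __init__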
| {
"repo_name": "sonictt1/material_game_store",
"path": "WebStoreObjects.py",
"copies": "1",
"size": "1918",
"license": "apache-2.0",
"hash": -6641149519570625000,
"line_mean": 27.0606060606,
"line_max": 62,
"alpha_frac": 0.6402502607,
"autogenerated": false,
"ratio": 3.930327868852459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070578129552459,
"avg_score": null,
"num_lines": null
} |
"""Apache Beam pipeline to run the prover.
This Beam pipeline and its DoFns run the prover and create proof logs.
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import apache_beam as beam
from apache_beam.metrics import Metrics
from tensorflow import logging
from typing import List, Optional, Text, Tuple
from google.protobuf import text_format
from deepmath.public import build_data
from deepmath.deephol import deephol_pb2
from deepmath.deephol import io_util
from deepmath.deephol import prover
from deepmath.deephol.deephol_loop import options_pb2
from deepmath.deephol.deephol_loop import prooflog_to_tfexamples_lib
from deepmath.deephol.deephol_loop.missing import recordio
from deepmath.deephol.deephol_loop.missing import runner
from deepmath.deephol.deephol_loop.missing import sstableio
from deepmath.proof_assistant import proof_assistant_pb2
class ProverDoFn(beam.DoFn):
"""Beam DoFn for our prover."""
def __init__(self, prover_options: deephol_pb2.ProverOptions):
self.prover_options = prover_options
self.proven_counter = Metrics.counter(self.__class__, 'proven')
self.attempted_counter = Metrics.counter(self.__class__, 'attempted')
self.failed_counter = Metrics.counter(self.__class__, 'failed')
self.accepts_counter = Metrics.counter(self.__class__, 'accepts task')
self.rejected_counter = Metrics.counter(self.__class__, 'rejected task')
self.does_not_accept_counter = Metrics.counter(self.__class__,
'does not accept task')
self.timeout_counter = Metrics.counter(self.__class__, 'timeout')
  def start_bundle(self):
    # Create the prover once per bundle, after the DoFn has reached the
    # worker, so the prover object itself never needs to be pickled.
    self.prover = prover.create_prover(self.prover_options)
def process(self, task: proof_assistant_pb2.ProverTask
) -> List[deephol_pb2.ProofLog]:
logging.info('Processing task:\n%s', text_format.MessageToString(task))
self.attempted_counter.inc()
if self.prover.accept_tasks:
self.accepts_counter.inc()
else:
self.does_not_accept_counter.inc()
proof_log = self.prover.prove(task)
timed_out = self.prover.timed_out()
if proof_log.rejected:
self.rejected_counter.inc()
if not proof_log.error_message:
self.proven_counter.inc()
else:
logging.info('Failed proof with "%s"', proof_log.error_message)
self.failed_counter.inc()
if timed_out:
self.timeout_counter.inc()
proof_log.build_data = build_data.BuildData()
return [proof_log]
def make_pipeline(prover_tasks: List[proof_assistant_pb2.ProverTask],
prover_options: deephol_pb2.ProverOptions, path_output: str):
"""A simple create-process-write Beam pipeline for proving theorems."""
def pipeline(root):
logs = (
root
| 'Create' >> beam.Create(prover_tasks)
| 'Prove' >> beam.ParDo(ProverDoFn(prover_options)))
_ = logs | 'Write' >> recordio.WriteToRecordIO(
file_path_prefix=path_output,
coder=beam.coders.ProtoCoder(deephol_pb2.ProofLog))
return logs
return pipeline
def key_value_of_proto(proto):
  """Serializes a proto and returns (hex hash of the bytes, the bytes)."""
  value = proto.SerializeToString()
  key = hash(value)
  return ('%x' % key, value)
class ProofLogToTFExamplesDoFn(beam.DoFn):
"""DoFn for converting proof logs to tf examples."""
def __init__(self, tactics_filename: str,
theorem_db: proof_assistant_pb2.TheoremDatabase,
scrub_parameters):
options = options_pb2.ConvertorOptions(
tactics_path=tactics_filename, scrub_parameters=scrub_parameters)
self.converter = prooflog_to_tfexamples_lib.create_processor(
options=options, theorem_database=theorem_db)
def start_bundle(self):
pass
  def process(self,
              proof_log: deephol_pb2.ProofLog) -> List[Tuple[Text, bytes]]:
return [
key_value_of_proto(example)
for example in self.converter.process_proof_log(proof_log)
]
def training_examples_pipeline(
proof_logs,
tactics_filename: Text,
theorem_db: proof_assistant_pb2.TheoremDatabase,
examples_sstables: List[Text],
scrub_parameters: options_pb2.ConvertorOptions.ScrubParametersEnum,
):
"""Create the pipeline to convert ProofLogs to Examples.
Args:
proof_logs: beam node for the proof logs.
tactics_filename: Name for the tactics file.
theorem_db: Theorem database file.
examples_sstables: List of strings with sstable pattern to write the
examples to.
scrub_parameters: Theorem parameters to scrub during examples generation.
"""
examples = proof_logs | ('ConvertToTFExamples' >> beam.ParDo(
ProofLogToTFExamplesDoFn(
str(tactics_filename), theorem_db, scrub_parameters)))
for i, examples_sstable in enumerate(examples_sstables):
examples_prefix = examples_sstable
    num_shards = None
logging.info('sstable: %s', examples_sstable)
if '@' in examples_sstable:
examples_prefix, num_shards = examples_sstable.split('@')
num_shards = int(num_shards)
_ = examples | ('WriteExamples%d' % i) >> (
sstableio.WriteToSSTable(
file_path_prefix=examples_prefix,
num_shards=num_shards,
key_coder=beam.coders.BytesCoder(),
value_coder=beam.coders.BytesCoder()))
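# Illustrative only: an entry in ``examples_sstables`` may carry a shard
# count after '@'; e.g. 'examples@8' splits above into the file prefix
# 'examples' and num_shards=8, while a pattern without '@' leaves
# num_shards=None so the writer chooses the sharding.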
def run_pipeline(examples_sstable: Optional[Text],
scrub_parameters: Optional[Text],
prover_tasks: List[proof_assistant_pb2.ProverTask],
prover_options: deephol_pb2.ProverOptions, path_output: str):
"""Create and run simple prover pipeline."""
prover.cache_embeddings(prover_options)
prover_pipeline = make_pipeline(prover_tasks, prover_options, path_output)
pipeline = prover_pipeline
if examples_sstable:
theorem_db = io_util.load_theorem_database_from_file(
str(prover_options.path_theorem_database))
def examples_pipeline(root):
"""Examples pipeline."""
scrub_str_enum_map = {
'NOTHING':
options_pb2.ConvertorOptions.NOTHING,
'TESTING':
options_pb2.ConvertorOptions.TESTING,
'VALIDATION_AND_TESTING':
options_pb2.ConvertorOptions.VALIDATION_AND_TESTING,
}
training_examples_pipeline(
proof_logs=prover_pipeline(root),
tactics_filename=prover_options.path_tactics,
theorem_db=theorem_db,
examples_sstables=[examples_sstable],
scrub_parameters=scrub_str_enum_map[scrub_parameters])
pipeline = examples_pipeline
runner.Runner().run(pipeline).wait_until_finish()
def program_started():
runner.program_started()
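# Illustrative driver only (all paths are placeholders):
#
# options = deephol_pb2.ProverOptions()
# text_format.Merge(open('/tmp/prover_options.textpb').read(), options)
# task = proof_assistant_pb2.ProverTask()
# text_format.Merge(open('/tmp/task.textpb').read(), task)
# run_pipeline(examples_sstable=None, scrub_parameters=None,
#              prover_tasks=[task], prover_options=options,
#              path_output='/tmp/proof_logs')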
| {
"repo_name": "tensorflow/deepmath",
"path": "deepmath/deephol/deephol_loop/prover_runner.py",
"copies": "1",
"size": "6658",
"license": "apache-2.0",
"hash": 2790716001988461000,
"line_mean": 35.7845303867,
"line_max": 79,
"alpha_frac": 0.6880444578,
"autogenerated": false,
"ratio": 3.4143589743589744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4602403432158974,
"avg_score": null,
"num_lines": null
} |
"""Apache configurator based on the Augeas Configurator."""
# pylint: disable=too-many-lines
import filecmp
import itertools
import logging
import os
import re
import shutil
import socket
import subprocess
import zope.interface
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import dvsni
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
    State of Configurator: This code has been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest', "
"retrieving the Apache2 version number, and initialization "
"parameters.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.CLI_DEFAULTS["dismod"],
            help="Path to the Apache 'a2dismod' binary.")
add("init-script", default=constants.CLI_DEFAULTS["init_script"],
help="Path to the Apache init script (used for server "
"reload/restart).")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict()
# Outstanding challenges
self._chall_out = set()
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self._enhance_func = {"redirect": self._enable_redirect}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Verify Apache is installed
for exe in (self.conf("ctl"), self.conf("enmod"),
self.conf("dismod"), self.conf("init-script")):
if not le_util.exe_exists(exe):
raise errors.NoInstallationError
# Make sure configuration is valid
self.config_test()
self.parser = parser.ApacheParser(
self.aug, self.conf("server-root"), self.conf("ctl"))
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Set Version
if self.version is None:
self.version = self.get_version()
if self.version < (2, 2):
            raise errors.NotSupportedError(
                "Apache Version %s not supported." % str(self.version))
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
temp_install(self.mod_ssl_conf)
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None): # pylint: disable=unused-argument
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies that
it has located the three directives and finally modifies them to point
to the correct destination. After the certificate is installed, the
VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within letsencrypt though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
vhost = self.choose_vhost(domain)
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
path = {}
path["cert_path"] = self.parser.find_dir(
"SSLCertificateFile", None, vhost.path)
path["cert_key"] = self.parser.find_dir(
"SSLCertificateKeyFile", None, vhost.path)
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
            # Raise a "can't find all of the directives" error
logger.warn(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
raise errors.PluginError(
"Unable to find cert and/or key directives")
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
# Assign the final directives; order is maintained in find_dir
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
if not path["chain_path"]:
self.parser.add_dir(
vhost.path, "SSLCertificateChainFile", chain_path)
else:
self.aug.set(path["chain_path"][-1], chain_path)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
# Make sure vhost is enabled
if not vhost.enabled:
self.enable_site(vhost)
def choose_vhost(self, target_name):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
:param str target_name: domain name
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self.assoc[target_name] = vhost
return vhost
return self._choose_vhost_from_list(target_name)
def _choose_vhost_from_list(self, target_name):
# Select a vhost from a list
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is None:
logger.error(
"No vhost exists with servername or alias of: %s. "
"No vhost was selected. Please specify servernames "
"in the Apache config", target_name)
raise errors.PluginError("No vhost selected")
elif not vhost.ssl:
addrs = self._get_proposed_addrs(vhost, "443")
# TODO: Conflicts is too conservative
if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
vhost = self.make_vhost_ssl(vhost)
else:
logger.error(
"The selected vhost would conflict with other HTTPS "
"VirtualHosts within Apache. Please select another "
"vhost or add ServerNames to your configuration.")
raise errors.PluginError(
"VirtualHost not able to be selected.")
self.assoc[target_name] = vhost
return vhost
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 4 - Servername SSL
# Points 3 - Address name with SSL
# Points 2 - Servername no SSL
# Points 1 - Address name with no SSL
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if target_name in vhost.get_names():
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 2
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
reasonable_vhosts = self._non_default_vhosts()
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
for vhost in self.vhosts:
all_names.update(vhost.get_names())
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
return all_names
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
# Take the final ServerName as each overrides the previous
servername_match = self.parser.find_dir(
"ServerName", None, start=host.path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=host.path, exclude=False)
for alias in serveralias_match:
host.aliases.add(self.parser.get_arg(alias))
if servername_match:
# Get last ServerName as each overwrites the previous
host.name = self.parser.get_arg(servername_match[-1])
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
addrs = set()
args = self.aug.match(path + "/arg")
for arg in args:
addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
is_ssl = False
if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
is_ssl = True
filename = get_file_path(path)
is_enabled = self.is_site_enabled(filename)
vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled)
self._add_servernames(vhost)
return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search sites-available, httpd.conf for possible virtual hosts
paths = self.aug.match(
("/files%s/sites-available//*[label()=~regexp('%s')]" %
(self.parser.root, parser.case_i("VirtualHost"))))
vhs = []
for path in paths:
vhs.append(self._create_vhost(path))
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param letsencrypt_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~letsencrypt_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
def prepare_server_https(self, port, temp=False):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port.
:param str port: Port to listen on
"""
if "ssl_module" not in self.parser.modules:
self.enable_mod("ssl", temp=temp)
# Check for Listen <port>
# Note: This could be made to also look for ip:443 combo
if not self.parser.find_dir("Listen", port):
logger.debug("No Listen %s directive found. Setting the "
"Apache Server to Listen on port %s", port, port)
if port == "443":
args = [port]
else:
# Non-standard ports should specify https protocol
args = [port, "https"]
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["listen"]), "Listen", args)
self.save_notes += "Added Listen %s directive to %s\n" % (
port, self.parser.loc["listen"])
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~letsencrypt_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
ssl_fp = self._get_ssl_vhost_path(avail_fp)
self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)
# Reload augeas to take into account the new vhost
self.aug.load()
# Get Vhost augeas path for new vhost
vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(ssl_fp, parser.case_i("VirtualHost")))
if len(vh_p) != 1:
logger.error("Error: should only be one vhost in %s", avail_fp)
raise errors.PluginError("Only one vhost per file is allowed")
else:
# This simplifies the process
vh_p = vh_p[0]
# Update Addresses
self._update_ssl_vhosts_addrs(vh_p)
# Add directives
self._add_dummy_ssl_directives(vh_p)
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
# We know the length is one because of the assertion above
# Create the Vhost object
ssl_vhost = self._create_vhost(vh_p)
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; For these reasons... this is tacked
# on after fully creating the new vhost
# Now check if addresses need to be added as NameBasedVhost addrs
# This is for compliance with versions of Apache < 2.4
self._add_name_vhost_if_necessary(ssl_vhost)
return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param str avail_fp: Pointer to the original available non-ssl vhost
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
self.reverter.register_file_creation(False, ssl_fp)
try:
with open(avail_fp, "r") as orig_file:
with open(ssl_fp, "w") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
for line in orig_file:
new_file.write(line)
new_file.write("</IfModule>\n")
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_name_vhost_if_necessary(self, vhost):
"""Add NameVirtualHost Directives if necessary for new vhost.
NameVirtualHosts was a directive in Apache < 2.4
https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost
:param vhost: New virtual host that was recently created.
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr == addr for test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
if need_to_save:
self.save()
############################################################################
# Enhancements
############################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
try:
func(self.choose_vhost(domain), options)
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
raise
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
Adds Redirect directive to the port 80 equivalent of ssl_vhost
First the function attempts to find the vhost with equivalent
ip addresses that serves on non-ssl ports
The function then adds the directive
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:raises .errors.PluginError: If no viable HTTP host can be created or
used for the redirect.
"""
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
# Check if redirection already exists
self._verify_no_redirects(general_vh)
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
self.parser.add_dir(general_vh.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _verify_no_redirects(self, vhost):
"""Checks to see if existing redirect is in place.
        Checks to see if the virtualhost already contains a rewrite or
        redirect directive.
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises errors.PluginError: When another redirection exists
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
redirect_path = self.parser.find_dir("Redirect", None, start=vhost.path)
if redirect_path:
# "Existing Redirect directive for virtualhost"
raise errors.PluginError("Existing Redirect present on HTTP vhost.")
if rewrite_path:
# "No existing redirection for virtualhost"
if len(rewrite_path) != len(constants.REWRITE_HTTPS_ARGS):
raise errors.PluginError("Unknown Existing RewriteRule")
for match, arg in itertools.izip(
rewrite_path, constants.REWRITE_HTTPS_ARGS):
if self.aug.get(match) != arg:
raise errors.PluginError("Unknown Existing RewriteRule")
raise errors.PluginError(
"Let's Encrypt has already enabled redirection")
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
        The new vhost is appended to ``self.vhosts``; nothing is returned.
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
self.vhosts.append(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
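    # Illustrative only: for an ssl vhost at 1.2.3.4:443 named example.com
    # with one alias, _get_redirect_config_str below renders roughly:
    #
    #   <VirtualHost 1.2.3.4:80>
    #   ServerName example.com
    #   ServerAlias www.example.com
    #   ServerSignature Off
    #
    #   RewriteEngine On
    #   RewriteRule <args from constants.REWRITE_HTTPS_ARGS>
    #
    #   ErrorLog /var/log/apache2/redirect.error.log
    #   LogLevel warn
    #   </VirtualHost>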
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog /var/log/apache2/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(constants.REWRITE_HTTPS_ARGS)))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(
self.parser.root, "sites-available", redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use
"""Return all addrs of vhost with the port replaced with the specified.
        :param obj.VirtualHost vhost: Original vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def get_all_certs_keys(self):
"""Find all existing keys, certs from configuration.
Retrieve all certs and keys set in VirtualHosts on the Apache server
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: list
"""
c_k = set()
for vhost in self.vhosts:
if vhost.ssl:
cert_path = self.parser.find_dir(
"SSLCertificateFile", None,
start=vhost.path, exclude=False)
key_path = self.parser.find_dir(
"SSLCertificateKeyFile", None,
start=vhost.path, exclude=False)
if cert_path and key_path:
cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
key = os.path.abspath(self.parser.get_arg(key_path[-1]))
c_k.add((cert, key, get_file_path(cert_path[-1])))
else:
logger.warning(
"Invalid VirtualHost configuration - %s", vhost.filep)
return c_k
def is_site_enabled(self, avail_fp):
"""Checks to see if the given site is enabled.
.. todo:: fix hardcoded sites-enabled, check os.path.samefile
:param str avail_fp: Complete file path of available site
:returns: Success
:rtype: bool
"""
enabled_dir = os.path.join(self.parser.root, "sites-enabled")
for entry in os.listdir(enabled_dir):
try:
if filecmp.cmp(avail_fp, os.path.join(enabled_dir, entry)):
return True
except OSError:
pass
return False
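# Note on the comparison above (editorial sketch; paths are hypothetical):
# filecmp.cmp defaults to shallow=True, so an enabled symlink that points
# at avail_fp compares equal via os.stat() signatures without reading the
# file contents:
#
# import filecmp, os
# os.symlink("/etc/apache2/sites-available/a.conf",
# "/etc/apache2/sites-enabled/a.conf")
# filecmp.cmp("/etc/apache2/sites-available/a.conf",
# "/etc/apache2/sites-enabled/a.conf") # -> True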
def enable_site(self, vhost):
"""Enables an available site, Apache restart required.
.. note:: Does not make sure that the site correctly works or that all
modules are enabled appropriately.
.. todo:: This function should number subdomains before the domain vhost
.. todo:: Make sure link is not broken...
:param vhost: vhost to enable
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if self.is_site_enabled(vhost.filep):
return
if "/sites-available/" in vhost.filep:
enabled_path = ("%s/sites-enabled/%s" %
(self.parser.root, os.path.basename(vhost.filep)))
self.reverter.register_file_creation(False, enabled_path)
os.symlink(vhost.filep, enabled_path)
vhost.enabled = True
logger.info("Enabling available site: %s", vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
else:
raise errors.NotSupportedError(
"Unsupported filesystem layout. "
"sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
"""Enables module in Apache.
Both enables and restarts Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
:raises .errors.NotSupportedError: If the filesystem layout is not
supported.
:raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
run.
"""
# Support Debian specific setup
avail_path = os.path.join(self.parser.root, "mods-available")
enabled_path = os.path.join(self.parser.root, "mods-enabled")
if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
raise errors.NotSupportedError(
"Unsupported directory layout. You may try to enable mod %s "
"and try again." % mod_name)
deps = _get_mod_deps(mod_name)
# Enable all dependencies
for dep in deps:
if (dep + "_module") not in self.parser.modules:
self._enable_mod_debian(dep, temp)
self._add_parser_mod(dep)
note = "Enabled dependency of %s module - %s" % (mod_name, dep)
if not temp:
self.save_notes += note + os.linesep
logger.debug(note)
# Enable actual module
self._enable_mod_debian(mod_name, temp)
self._add_parser_mod(mod_name)
if not temp:
self.save_notes += "Enabled %s module in Apache\n" % mod_name
logger.info("Enabled Apache %s module", mod_name)
# Modules can enable additional config files. Variables may be defined
# within these new configuration sections.
# Restart is not necessary as DUMP_RUN_CFG uses latest config.
self.parser.update_runtime_variables(self.conf("ctl"))
def _add_parser_mod(self, mod_name):
"""Shortcut for updating parser modules."""
self.parser.modules.add(mod_name + "_module")
self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
"""Assumes mods-available, mods-enabled layout."""
# Generate reversal command.
# Try to be safe here... check that we can probably reverse before
# applying enmod command
if not le_util.exe_exists(self.conf("dismod")):
raise errors.MisconfigurationError(
"Unable to find a2dismod, please make sure a2enmod and "
"a2dismod are configured correctly for letsencrypt.")
self.reverter.register_undo_command(
temp, [self.conf("dismod"), mod_name])
le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
"""Restarts apache server.
.. todo:: This function will be converted to use reload
:raises .errors.MisconfigurationError: If unable to restart due
to a configuration problem, or if the restart subprocess
cannot be run.
"""
return apache_restart(self.conf("init-script"))
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
le_util.run_script([self.conf("ctl"), "configtest"])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" % self.conf("ctl"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
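# Standalone sketch of the parsing above (the banner text is a
# hypothetical example of `apachectl -v` output, not captured here):
#
# import re
# banner = "Server version: Apache/2.4.7 (Ubuntu)"
# regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
# matches = regex.findall(banner) # -> ['2.4.7']
# tuple(int(i) for i in matches[0].split(".")) # -> (2, 4, 7)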
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.DVSNI]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
If this turns out not to be the case in the future, cleanup and
outstanding challenges will have to be designed better.
"""
self._chall_out.update(achalls)
responses = [None] * len(achalls)
apache_dvsni = dvsni.ApacheDvsni(self)
for i, achall in enumerate(achalls):
if isinstance(achall, achallenges.DVSNI):
# Currently also have dvsni hold associated index
# of the challenge. This helps to put all of the responses back
# together when they are all complete.
apache_dvsni.add_chall(achall, i)
sni_response = apache_dvsni.perform()
if sni_response:
# Must restart in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(sni_response):
responses[apache_dvsni.indices[i]] = resp
return responses
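# Worked sketch of the index bookkeeping above (hypothetical values): if
# achalls == [A, B, C] and only A and C are DVSNI challenges, then
# apache_dvsni.indices == [0, 2] and sni_response == [resp_A, resp_C];
# the final loop writes resp_A into responses[0] and resp_C into
# responses[2], leaving responses[1] as None for the unsupported type.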
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
self.parser.init_modules()
def _get_mod_deps(mod_name):
"""Get known module dependencies.
.. note:: This does not need to be accurate in order for the client to
run. This simply keeps things clean if the user decides to revert
changes.
.. warning:: If all deps are not included, it may cause incorrect parsing
behavior, due to enable_mod's shortcut for updating the parser's
currently defined modules (`.ApacheConfigurator._add_parser_mod`)
This would only present a major problem in extremely atypical
configs that use ifmod for the missing deps.
"""
deps = {
"ssl": ["setenvif", "mime", "socache_shmcb"]
}
return deps.get(mod_name, [])
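# Usage sketch of the lookup above:
# _get_mod_deps("ssl") # -> ["setenvif", "mime", "socache_shmcb"]
# _get_mod_deps("rewrite") # -> [] (modules without recorded dependencies)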
def apache_restart(apache_init_script):
"""Restarts the Apache Server.
:param str apache_init_script: Path to the Apache init script.
.. todo:: Try to use reload instead. (This caused timing problems before)
.. todo:: On failure, this should be a recovery_routine call with another
restart. This will confuse and inhibit developers from testing code
though. This change should happen after
the ApacheConfigurator has been thoroughly tested. The function will
need to be moved into the class again. Perhaps
this version can live on... for testing purposes.
:raises .errors.MisconfigurationError: If unable to restart due to a
configuration problem, or if the restart subprocess cannot be run.
"""
try:
proc = subprocess.Popen([apache_init_script, "restart"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (OSError, ValueError):
logger.fatal(
"Unable to restart the Apache process with %s", apache_init_script)
raise errors.MisconfigurationError(
"Unable to restart Apache process with %s" % apache_init_script)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
# Enter recovery routine...
logger.error("Apache Restart Failed!\n%s\n%s", stdout, stderr)
raise errors.MisconfigurationError(
"Error while restarting Apache:\n%s\n%s" % (stdout, stderr))
def get_file_path(vhost_path):
"""Get file path from augeas_vhost_path.
Takes in Augeas path and returns the file name
:param str vhost_path: Augeas virtual host path
:returns: filename of vhost
:rtype: str
"""
# Strip off /files
avail_fp = vhost_path[6:]
# This can be optimized...
while True:
# Cast both to lowercase to be case insensitive
find_if = avail_fp.lower().find("/ifmodule")
if find_if != -1:
avail_fp = avail_fp[:find_if]
continue
find_vh = avail_fp.lower().find("/virtualhost")
if find_vh != -1:
avail_fp = avail_fp[:find_vh]
continue
break
return avail_fp
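# Worked example of the stripping above (hypothetical Augeas path):
#
# p = "/files/etc/apache2/sites-available/a.conf/IfModule/VirtualHost"
# get_file_path(p) # -> "/etc/apache2/sites-available/a.conf"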
def temp_install(options_ssl):
"""Temporary install for convenience."""
# WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY
# THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER
# AND TAKEN OUT BEFORE RELEASE, INSTEAD
# SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM.
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(options_ssl):
shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| {
"repo_name": "g1franc/lets-encrypt-preview",
"path": "letsencrypt-apache/letsencrypt_apache/configurator.py",
"copies": "1",
"size": "47978",
"license": "apache-2.0",
"hash": 5661686477286642000,
"line_mean": 37.0475812847,
"line_max": 90,
"alpha_frac": 0.5995456251,
"autogenerated": false,
"ratio": 4.198652314693271,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298197939793271,
"avg_score": null,
"num_lines": null
} |
"""Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import filecmp
import itertools
import logging
import os
import re
import shutil
import socket
import time
import zope.component
import zope.interface
from acme import challenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import tls_sni_01
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in them; the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
State of Configurator: This code has been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest', "
"retrieving the Apache2 version number, and initialization "
"parameters.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.CLI_DEFAULTS["dismod"],
help="Path to the Apache 'a2enmod' binary.")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
le_util.add_deprecated_argument(add, "init-script", 1)
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict()
# Outstanding challenges
self._chall_out = set()
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self._enhance_func = {"redirect": self._enable_redirect,
"ensure-http-header": self._set_http_header}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Verify Apache is installed
for exe in (self.conf("ctl"), self.conf("enmod"), self.conf("dismod")):
if not le_util.exe_exists(exe):
raise errors.NoInstallationError
# Make sure configuration is valid
self.config_test()
self.parser = parser.ApacheParser(
self.aug, self.conf("server-root"), self.conf("ctl"))
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Set Version
if self.version is None:
self.version = self.get_version()
if self.version < (2, 2):
raise errors.NotSupportedError(
"Apache Version %s not supported.", str(self.version))
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
install_ssl_options_conf(self.mod_ssl_conf)
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None): # pylint: disable=unused-argument
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies that
it has located the three directives and finally modifies them to point
to the correct destination. After the certificate is installed, the
VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within letsencrypt though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
vhost = self.choose_vhost(domain)
self._clean_vhost(vhost)
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
path = {"cert_path": self.parser.find_dir("SSLCertificateFile", None, vhost.path),
"cert_key": self.parser.find_dir("SSLCertificateKeyFile", None, vhost.path)}
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
# Throw a "can't find all of the directives" error
logger.warn(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
raise errors.PluginError(
"Unable to find cert and/or key directives")
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
logger.debug("Apache version is %s",
".".join(str(i) for i in self.version))
if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
# install SSLCertificateFile, SSLCertificateKeyFile,
# and SSLCertificateChainFile directives
set_cert_path = cert_path
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
self.parser.add_dir(vhost.path,
"SSLCertificateChainFile", chain_path)
else:
raise errors.PluginError("--chain-path is required for your version of Apache")
else:
if not fullchain_path:
raise errors.PluginError("Please provide the --fullchain-path\
option pointing to your full chain file")
set_cert_path = fullchain_path
self.aug.set(path["cert_path"][-1], fullchain_path)
self.aug.set(path["cert_key"][-1], key_path)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
set_cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
# Make sure vhost is enabled
if not vhost.enabled:
self.enable_site(vhost)
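# Hedged summary of the version branch above (paths are illustrative):
# Apache < 2.4.8 cannot read intermediates from SSLCertificateFile, so
# the leaf certificate and the chain are installed separately:
# SSLCertificateFile /path/cert.pem
# SSLCertificateKeyFile /path/privkey.pem
# SSLCertificateChainFile /path/chain.pem
# Apache >= 2.4.8 accepts the concatenated chain directly:
# SSLCertificateFile /path/fullchain.pem
# SSLCertificateKeyFile /path/privkey.pem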
def choose_vhost(self, target_name, temp=False):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
The returned vhost is guaranteed to have TLS enabled unless temp is
True. If temp is True, there is no such guarantee and the result is
not cached.
:param str target_name: domain name
:param bool temp: whether the vhost is only used temporarily
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if temp:
return vhost
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self.assoc[target_name] = vhost
return vhost
return self._choose_vhost_from_list(target_name, temp)
def _choose_vhost_from_list(self, target_name, temp=False):
# Select a vhost from a list
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is None:
logger.error(
"No vhost exists with servername or alias of: %s. "
"No vhost was selected. Please specify servernames "
"in the Apache config", target_name)
raise errors.PluginError("No vhost selected")
elif temp:
return vhost
elif not vhost.ssl:
addrs = self._get_proposed_addrs(vhost, "443")
# TODO: Conflicts is too conservative
if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
vhost = self.make_vhost_ssl(vhost)
else:
logger.error(
"The selected vhost would conflict with other HTTPS "
"VirtualHosts within Apache. Please select another "
"vhost or add ServerNames to your configuration.")
raise errors.PluginError(
"VirtualHost not able to be selected.")
self.assoc[target_name] = vhost
return vhost
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 4 - Servername SSL
# Points 3 - Address name with SSL
# Points 2 - Servername no SSL
# Points 1 - Address name with no SSL
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if vhost.modmacro is True:
continue
if target_name in vhost.get_names():
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 2
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
vhosts = self._non_default_vhosts()
# remove mod_macro hosts from reasonable vhosts
reasonable_vhosts = [vh for vh
in vhosts if vh.modmacro is False]
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
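# Worked example of the scoring above (hypothetical vhosts): for target
# "example.com", a non-SSL vhost with ServerName example.com scores 2;
# an SSL vhost whose address literal is example.com scores 1 + 2 = 3;
# an SSL vhost with ServerName example.com scores 2 + 2 = 4 and wins.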
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
vhost_macro = []
for vhost in self.vhosts:
all_names.update(vhost.get_names())
if vhost.modmacro:
vhost_macro.append(vhost.filep)
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
if len(vhost_macro) > 0:
zope.component.getUtility(interfaces.IDisplay).notification(
"Apache mod_macro seems to be in use in file(s):\n{0}"
"\n\nUnfortunately mod_macro is not yet supported".format(
"\n ".join(vhost_macro)))
return all_names
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
# Take the final ServerName as each overrides the previous
servername_match = self.parser.find_dir(
"ServerName", None, start=host.path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=host.path, exclude=False)
for alias in serveralias_match:
serveralias = self.parser.get_arg(alias)
if not host.modmacro:
host.aliases.add(serveralias)
if servername_match:
# Get last ServerName as each overwrites the previous
servername = self.parser.get_arg(servername_match[-1])
if not host.modmacro:
host.name = servername
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
addrs = set()
args = self.aug.match(path + "/arg")
for arg in args:
addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
is_ssl = False
if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
is_ssl = True
# "SSLEngine on" might be set outside of <VirtualHost>
# Treat vhosts with port 443 as ssl vhosts
for addr in addrs:
if addr.get_port() == "443":
is_ssl = True
filename = get_file_path(path)
is_enabled = self.is_site_enabled(filename)
macro = False
if "/macro/" in path.lower():
macro = True
vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
is_enabled, modmacro=macro)
self._add_servernames(vhost)
return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search sites-available, httpd.conf for possible virtual hosts
paths = self.aug.match(
("/files%s/sites-available//*[label()=~regexp('%s')]" %
(self.parser.root, parser.case_i("VirtualHost"))))
vhs = []
for path in paths:
vhs.append(self._create_vhost(path))
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param letsencrypt_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~letsencrypt_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
def prepare_server_https(self, port, temp=False):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port.
:param str port: Port to listen on
"""
if "ssl_module" not in self.parser.modules:
self.enable_mod("ssl", temp=temp)
# Check for Listen <port>
# Note: This could be made to also look for ip:443 combo
listens = [self.parser.get_arg(x).split()[0] for x in self.parser.find_dir("Listen")]
# In case no Listens are set (which really is a broken apache config)
if not listens:
listens = ["80"]
for listen in listens:
# For any listen statement, check if the machine also listens on Port 443.
# If not, add such a listen statement.
if len(listen.split(":")) == 1:
# It's listening on all interfaces
if port not in listens:
if port == "443":
args = [port]
else:
# Non-standard ports should specify https protocol
args = [port, "https"]
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["listen"]), "Listen", args)
self.save_notes += "Added Listen %s directive to %s\n" % (
port, self.parser.loc["listen"])
listens.append(port)
else:
# The Listen statement specifies an ip
_, ip = listen[::-1].split(":", 1)
ip = ip[::-1]
if "%s:%s" % (ip, port) not in listens:
if port == "443":
args = ["%s:%s" % (ip, port)]
else:
# Non-standard ports should specify https protocol
args = ["%s:%s" % (ip, port), "https"]
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["listen"]), "Listen", args)
self.save_notes += "Added Listen %s:%s directive to %s\n" % (
ip, port, self.parser.loc["listen"])
listens.append("%s:%s" % (ip, port))
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~letsencrypt_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
ssl_fp = self._get_ssl_vhost_path(avail_fp)
self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)
# Reload augeas to take into account the new vhost
self.aug.load()
# Get Vhost augeas path for new vhost
vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(ssl_fp, parser.case_i("VirtualHost")))
if len(vh_p) != 1:
logger.error("Error: should only be one vhost in %s", avail_fp)
raise errors.PluginError("Currently, we only support "
"configurations with one vhost per file")
else:
# This simplifies the process
vh_p = vh_p[0]
# Update Addresses
self._update_ssl_vhosts_addrs(vh_p)
# Add directives
self._add_dummy_ssl_directives(vh_p)
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
# We know the length is one because of the assertion above
# Create the Vhost object
ssl_vhost = self._create_vhost(vh_p)
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; For these reasons... this is tacked
# on after fully creating the new vhost
# Now check if addresses need to be added as NameBasedVhost addrs
# This is for compliance with versions of Apache < 2.4
self._add_name_vhost_if_necessary(ssl_vhost)
return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param str avail_fp: Pointer to the original available non-ssl vhost
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
self.reverter.register_file_creation(False, ssl_fp)
try:
with open(avail_fp, "r") as orig_file:
with open(ssl_fp, "w") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
for line in orig_file:
new_file.write(line)
new_file.write("</IfModule>\n")
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _clean_vhost(self, vhost):
# remove duplicated or conflicting ssl directives
self._deduplicate_directives(vhost.path,
["SSLCertificateFile", "SSLCertificateKeyFile"])
# remove all problematic directives
self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None, vh_path, False)) > 1:
directive_path = self.parser.find_dir(directive, None, vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None, vh_path, False)) > 0:
directive_path = self.parser.find_dir(directive, None, vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_name_vhost_if_necessary(self, vhost):
"""Add NameVirtualHost Directives if necessary for new vhost.
NameVirtualHosts was a directive in Apache < 2.4
https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost
:param vhost: New virtual host that was recently created.
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr == addr for test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
if need_to_save:
self.save()
############################################################################
# Enhancements
############################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect", "ensure-http-header"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
try:
func(self.choose_vhost(domain), options)
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
raise
def _set_http_header(self, ssl_vhost, header_substring):
"""Enables header that is identified by header_substring on ssl_vhost.
If the header identified by header_substring is not already set,
a new Header directive is placed in ssl_vhost's configuration with
arguments from: constants.HTTP_HEADER[header_substring]
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
:type header_substring: str
:raises .errors.PluginError: If no viable HTTP host can be created or
set with header header_substring.
"""
if "headers_module" not in self.parser.modules:
self.enable_mod("headers")
# Check if selected header is already set
self._verify_no_matching_http_header(ssl_vhost, header_substring)
# Add directives to server
self.parser.add_dir(ssl_vhost.path, "Header",
constants.HEADER_ARGS[header_substring])
self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
(header_substring, ssl_vhost.filep))
self.save()
logger.info("Adding %s header to ssl vhost in %s", header_substring,
ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
"""Checks to see if an there is an existing Header directive that
contains the string header_substring.
:param ssl_vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
:type str
:returns: boolean
:rtype: (bool)
:raises errors.PluginEnhancementAlreadyPresent When header
header_substring exists
"""
header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path)
if header_path:
# "Existing Header directive for virtualhost"
pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
Adds Redirect directive to the port 80 equivalent of ssl_vhost
First the function attempts to find the vhost with equivalent
ip addresses that serves on non-ssl ports
The function then adds the directive
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`)
:raises .errors.PluginError: If no viable HTTP host can be created or
used for the redirect.
"""
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
# Check if redirection already exists
self._verify_no_redirects(general_vh)
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
self.parser.add_dir(general_vh.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _verify_no_redirects(self, vhost):
"""Checks to see if existing redirect is in place.
Checks to see if the virtualhost already contains a rewrite or redirect.
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises errors.PluginEnhancementAlreadyPresent: When the exact
letsencrypt redirection RewriteRule exists in virtual host.
errors.PluginError: When there exist directives that may hint at
other redirection. (TODO: We should not throw a PluginError,
but that's for another PR.)
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
redirect_path = self.parser.find_dir("Redirect", None, start=vhost.path)
if redirect_path:
# "Existing Redirect directive for virtualhost"
raise errors.PluginError("Existing Redirect present on HTTP vhost.")
if rewrite_path:
# "No existing redirection for virtualhost"
if len(rewrite_path) != len(constants.REWRITE_HTTPS_ARGS):
raise errors.PluginError("Unknown Existing RewriteRule")
for match, arg in itertools.izip(
rewrite_path, constants.REWRITE_HTTPS_ARGS):
if self.aug.get(match) != arg:
raise errors.PluginError("Unknown Existing RewriteRule")
raise errors.PluginEnhancementAlreadyPresent(
"Let's Encrypt has already enabled redirection")
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~letsencrypt_apache.obj.VirtualHost`)
:rtype: tuple
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
self.vhosts.append(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog /var/log/apache2/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(constants.REWRITE_HTTPS_ARGS)))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(
self.parser.root, "sites-available", redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure the file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use
"""Return all addrs of vhost with the port replaced with the specified.
:param obj.VirtualHost ssl_vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def get_all_certs_keys(self):
"""Find all existing keys, certs from configuration.
Retrieve all certs and keys set in VirtualHosts on the Apache server
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: list
"""
c_k = set()
for vhost in self.vhosts:
if vhost.ssl:
cert_path = self.parser.find_dir(
"SSLCertificateFile", None,
start=vhost.path, exclude=False)
key_path = self.parser.find_dir(
"SSLCertificateKeyFile", None,
start=vhost.path, exclude=False)
if cert_path and key_path:
cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
key = os.path.abspath(self.parser.get_arg(key_path[-1]))
c_k.add((cert, key, get_file_path(cert_path[-1])))
else:
logger.warning(
"Invalid VirtualHost configuration - %s", vhost.filep)
return c_k
def is_site_enabled(self, avail_fp):
"""Checks to see if the given site is enabled.
.. todo:: fix hardcoded sites-enabled, check os.path.samefile
:param str avail_fp: Complete file path of available site
:returns: Success
:rtype: bool
"""
enabled_dir = os.path.join(self.parser.root, "sites-enabled")
for entry in os.listdir(enabled_dir):
try:
if filecmp.cmp(avail_fp, os.path.join(enabled_dir, entry)):
return True
except OSError:
pass
return False
def enable_site(self, vhost):
"""Enables an available site, Apache reload required.
.. note:: Does not make sure that the site correctly works or that all
modules are enabled appropriately.
.. todo:: This function should number subdomains before the domain vhost
.. todo:: Make sure link is not broken...
:param vhost: vhost to enable
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if self.is_site_enabled(vhost.filep):
return
if "/sites-available/" in vhost.filep:
enabled_path = ("%s/sites-enabled/%s" %
(self.parser.root, os.path.basename(vhost.filep)))
self.reverter.register_file_creation(False, enabled_path)
os.symlink(vhost.filep, enabled_path)
vhost.enabled = True
logger.info("Enabling available site: %s", vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
else:
raise errors.NotSupportedError(
"Unsupported filesystem layout. "
"sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
"""Enables module in Apache.
Both enables and reloads Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
:raises .errors.NotSupportedError: If the filesystem layout is not
supported.
:raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
run.
"""
# Support Debian specific setup
avail_path = os.path.join(self.parser.root, "mods-available")
enabled_path = os.path.join(self.parser.root, "mods-enabled")
if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
raise errors.NotSupportedError(
"Unsupported directory layout. You may try to enable mod %s "
"and try again." % mod_name)
deps = _get_mod_deps(mod_name)
# Enable all dependencies
for dep in deps:
if (dep + "_module") not in self.parser.modules:
self._enable_mod_debian(dep, temp)
self._add_parser_mod(dep)
note = "Enabled dependency of %s module - %s" % (mod_name, dep)
if not temp:
self.save_notes += note + os.linesep
logger.debug(note)
# Enable actual module
self._enable_mod_debian(mod_name, temp)
self._add_parser_mod(mod_name)
if not temp:
self.save_notes += "Enabled %s module in Apache\n" % mod_name
logger.info("Enabled Apache %s module", mod_name)
# Modules can enable additional config files. Variables may be defined
# within these new configuration sections.
# Reload is not necessary as DUMP_RUN_CFG uses latest config.
self.parser.update_runtime_variables(self.conf("ctl"))
def _add_parser_mod(self, mod_name):
"""Shortcut for updating parser modules."""
self.parser.modules.add(mod_name + "_module")
self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
"""Assumes mods-available, mods-enabled layout."""
# Generate reversal command.
# Try to be safe here... check that we can probably reverse before
# applying enmod command
if not le_util.exe_exists(self.conf("dismod")):
raise errors.MisconfigurationError(
"Unable to find a2dismod, please make sure a2enmod and "
"a2dismod are configured correctly for letsencrypt.")
self.reverter.register_undo_command(
temp, [self.conf("dismod"), mod_name])
le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
"""Runs a config test and reloads the Apache server.
:raises .errors.MisconfigurationError: If either the config test
or reload fails.
"""
self.config_test()
self._reload()
def _reload(self):
"""Reloads the Apache server.
:raises .errors.MisconfigurationError: If reload fails
"""
try:
le_util.run_script([self.conf("ctl"), "-k", "graceful"])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
le_util.run_script([self.conf("ctl"), "configtest"])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" % self.conf("ctl"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.TLSSNI01]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
If this turns out not to be the case in the future, cleanup and
outstanding challenges will have to be designed better.
"""
self._chall_out.update(achalls)
responses = [None] * len(achalls)
chall_doer = tls_sni_01.ApacheTlsSni01(self)
for i, achall in enumerate(achalls):
# Currently also have chall_doer hold associated index of the
# challenge. This helps to put all of the responses back together
# when they are all complete.
chall_doer.add_chall(achall, i)
sni_response = chall_doer.perform()
if sni_response:
# Must reload in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# TODO: Remove this dirty hack. We need to determine a reliable way
# of identifying when the new configuration is being used.
time.sleep(3)
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(sni_response):
responses[chall_doer.indices[i]] = resp
return responses
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
self.parser.init_modules()
def _get_mod_deps(mod_name):
"""Get known module dependencies.
.. note:: This does not need to be accurate in order for the client to
run. This simply keeps things clean if the user decides to revert
changes.
.. warning:: If all deps are not included, it may cause incorrect parsing
behavior, due to enable_mod's shortcut for updating the parser's
currently defined modules (`.ApacheConfigurator._add_parser_mod`)
This would only present a major problem in extremely atypical
configs that use ifmod for the missing deps.
"""
deps = {
"ssl": ["setenvif", "mime", "socache_shmcb"]
}
return deps.get(mod_name, [])
def get_file_path(vhost_path):
"""Get file path from augeas_vhost_path.
Takes in Augeas path and returns the file name
:param str vhost_path: Augeas virtual host path
:returns: filename of vhost
:rtype: str
"""
# Strip off /files
avail_fp = vhost_path[6:]
# This can be optimized...
while True:
# Cast all to lowercase to be case insensitive
find_if = avail_fp.lower().find("/ifmodule")
if find_if != -1:
avail_fp = avail_fp[:find_if]
continue
find_vh = avail_fp.lower().find("/virtualhost")
if find_vh != -1:
avail_fp = avail_fp[:find_vh]
continue
find_macro = avail_fp.lower().find("/macro")
if find_macro != -1:
avail_fp = avail_fp[:find_macro]
continue
break
return avail_fp
def install_ssl_options_conf(options_ssl):
"""
Copy Let's Encrypt's SSL options file into the system's config dir if
required.
"""
# XXX if we ever try to enforce a local privilege boundary (eg, running
# letsencrypt for unprivileged users via setuid), this function will need
# to be modified.
# XXX if the user is in security-autoupdate mode, we should be willing to
# overwrite the options_ssl file at least if it's unmodified:
# https://github.com/letsencrypt/letsencrypt/issues/1123
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(options_ssl):
shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| {
"repo_name": "FuzzyHobbit/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/configurator.py",
"copies": "1",
"size": "54609",
"license": "apache-2.0",
"hash": 5659179938704364000,
"line_mean": 37.9507845934,
"line_max": 95,
"alpha_frac": 0.5928876193,
"autogenerated": false,
"ratio": 4.198431613746444,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5291319233046444,
"avg_score": null,
"num_lines": null
} |
"""Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import filecmp
import logging
import os
import re
import shutil
import socket
import time
import zope.component
import zope.interface
from acme import challenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import tls_sni_01
from letsencrypt_apache import obj
from letsencrypt_apache import parser
from collections import defaultdict
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in them; the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
@zope.interface.implementer(interfaces.IAuthenticator, interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
State of Configurator: This code has been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("enmod", default=constants.os_constant("enmod"),
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.os_constant("dismod"),
help="Path to the Apache 'a2dismod' binary.")
add("le-vhost-ext", default=constants.os_constant("le_vhost_ext"),
help="SSL vhost configuration extension.")
add("server-root", default=constants.os_constant("server_root"),
help="Apache server root directory.")
add("vhost-root", default=constants.os_constant("vhost_root"),
help="Apache server VirtualHost configuration root")
add("challenge-location",
default=constants.os_constant("challenge_location"),
help="Directory path for challenge configuration.")
add("handle-modules", default=constants.os_constant("handle_mods"),
help="Let installer handle enabling required modules for you." +
"(Only Ubuntu/Debian currently)")
add("handle-sites", default=constants.os_constant("handle_sites"),
help="Let installer handle enabling sites for you." +
"(Only Ubuntu/Debian currently)")
le_util.add_deprecated_argument(add, argument_name="ctl", nargs=1)
le_util.add_deprecated_argument(
add, argument_name="init-script", nargs=1)
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict()
# Outstanding challenges
self._chall_out = set()
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self._enhance_func = {"redirect": self._enable_redirect,
"ensure-http-header": self._set_http_header}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir,
constants.MOD_SSL_CONF_DEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Verify Apache is installed
if not le_util.exe_exists(constants.os_constant("restart_cmd")[0]):
raise errors.NoInstallationError
# Make sure configuration is valid
self.config_test()
# Set Version
if self.version is None:
self.version = self.get_version()
if self.version < (2, 2):
raise errors.NotSupportedError(
"Apache Version %s not supported.", str(self.version))
if not self._check_aug_version():
raise errors.NotSupportedError(
"Apache plugin support requires libaugeas0 and augeas-lenses "
"version 1.2.0 or higher, please make sure you have you have "
"those installed.")
self.parser = parser.ApacheParser(
self.aug, self.conf("server-root"), self.conf("vhost-root"),
self.version)
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
install_ssl_options_conf(self.mod_ssl_conf)
def _check_aug_version(self):
""" Checks that we have recent enough version of libaugeas.
If augeas version is recent enough, it will support case insensitive
regexp matching"""
self.aug.set("/test/path/testing/arg", "aRgUMeNT")
try:
matches = self.aug.match(
"/test//*[self::arg=~regexp('argument', 'i')]")
except RuntimeError:
self.aug.remove("/test/path")
return False
self.aug.remove("/test/path")
return matches
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None):
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies
that it has located the three directives and finally modifies them
to point to the correct destination. After the certificate is
installed, the VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within letsencrypt though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
vhost = self.choose_vhost(domain)
self._clean_vhost(vhost)
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
path = {"cert_path": self.parser.find_dir("SSLCertificateFile",
None, vhost.path),
"cert_key": self.parser.find_dir("SSLCertificateKeyFile",
None, vhost.path)}
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
            # Can't find all of the required directives; raise an error
logger.warn(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
raise errors.PluginError(
"Unable to find cert and/or key directives")
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
logger.debug("Apache version is %s",
".".join(str(i) for i in self.version))
if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
# install SSLCertificateFile, SSLCertificateKeyFile,
# and SSLCertificateChainFile directives
set_cert_path = cert_path
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
self.parser.add_dir(vhost.path,
"SSLCertificateChainFile", chain_path)
else:
raise errors.PluginError("--chain-path is required for your "
"version of Apache")
else:
if not fullchain_path:
raise errors.PluginError("Please provide the --fullchain-path\
option pointing to your full chain file")
set_cert_path = fullchain_path
self.aug.set(path["cert_path"][-1], fullchain_path)
self.aug.set(path["cert_key"][-1], key_path)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
set_cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
# Make sure vhost is enabled if distro with enabled / available
if self.conf("handle-sites"):
if not vhost.enabled:
self.enable_site(vhost)
def choose_vhost(self, target_name, temp=False):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
The returned vhost is guaranteed to have TLS enabled unless temp is
True. If temp is True, there is no such guarantee and the result is
not cached.
:param str target_name: domain name
:param bool temp: whether the vhost is only used temporarily
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if temp:
return vhost
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self._add_servername_alias(target_name, vhost)
self.assoc[target_name] = vhost
return vhost
return self._choose_vhost_from_list(target_name, temp)
def _choose_vhost_from_list(self, target_name, temp=False):
# Select a vhost from a list
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is None:
logger.error(
"No vhost exists with servername or alias of: %s. "
"No vhost was selected. Please specify servernames "
"in the Apache config", target_name)
raise errors.PluginError("No vhost selected")
elif temp:
return vhost
elif not vhost.ssl:
addrs = self._get_proposed_addrs(vhost, "443")
# TODO: Conflicts is too conservative
if not any(vhost.enabled and vhost.conflicts(addrs) for
vhost in self.vhosts):
vhost = self.make_vhost_ssl(vhost)
else:
logger.error(
"The selected vhost would conflict with other HTTPS "
"VirtualHosts within Apache. Please select another "
"vhost or add ServerNames to your configuration.")
raise errors.PluginError(
"VirtualHost not able to be selected.")
self._add_servername_alias(target_name, vhost)
self.assoc[target_name] = vhost
return vhost
def included_in_wildcard(self, names, target_name):
"""Helper function to see if alias is covered by wildcard"""
target_name = target_name.split(".")[::-1]
wildcards = [domain.split(".")[1:] for domain in names if domain.startswith("*")]
for wildcard in wildcards:
if len(wildcard) > len(target_name):
continue
for idx, segment in enumerate(wildcard[::-1]):
if segment != target_name[idx]:
break
else:
# https://docs.python.org/2/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops
return True
return False
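    # Worked example for the method above (comment added; values are
    # hypothetical): with names = ["*.example.com"], the target
    # "www.example.com" reverses to ["com", "example", "www"] while the
    # wildcard tail reverses to ["com", "example"]; every wildcard
    # segment matches, so the loop's else clause returns True. A target
    # of "www.other.com" breaks on "other" != "example" and yields False.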
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 6 - Servername SSL
# Points 5 - Wildcard SSL
# Points 4 - Address name with SSL
# Points 3 - Servername no SSL
# Points 2 - Wildcard no SSL
# Points 1 - Address name with no SSL
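        # Example of the scoring (comment added for clarity): an SSL
        # vhost whose ServerName equals target_name scores 3 + 3 = 6 and
        # beats a non-SSL wildcard match, which only scores 2.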
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if vhost.modmacro is True:
continue
names = vhost.get_names()
if target_name in names:
points = 3
elif self.included_in_wildcard(names, target_name):
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 3
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
vhosts = self._non_default_vhosts()
# remove mod_macro hosts from reasonable vhosts
reasonable_vhosts = [vh for vh
in vhosts if vh.modmacro is False]
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
vhost_macro = []
for vhost in self.vhosts:
all_names.update(vhost.get_names())
if vhost.modmacro:
vhost_macro.append(vhost.filep)
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
if len(vhost_macro) > 0:
zope.component.getUtility(interfaces.IDisplay).notification(
"Apache mod_macro seems to be in use in file(s):\n{0}"
"\n\nUnfortunately mod_macro is not yet supported".format(
"\n ".join(vhost_macro)))
return all_names
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
# Take the final ServerName as each overrides the previous
servername_match = self.parser.find_dir(
"ServerName", None, start=host.path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=host.path, exclude=False)
for alias in serveralias_match:
serveralias = self.parser.get_arg(alias)
if not host.modmacro:
host.aliases.add(serveralias)
if servername_match:
# Get last ServerName as each overwrites the previous
servername = self.parser.get_arg(servername_match[-1])
if not host.modmacro:
host.name = servername
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
addrs = set()
args = self.aug.match(path + "/arg")
for arg in args:
addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
is_ssl = False
if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
is_ssl = True
# "SSLEngine on" might be set outside of <VirtualHost>
# Treat vhosts with port 443 as ssl vhosts
for addr in addrs:
if addr.get_port() == "443":
is_ssl = True
filename = get_file_path(path)
if self.conf("handle-sites"):
is_enabled = self.is_site_enabled(filename)
else:
is_enabled = True
macro = False
if "/macro/" in path.lower():
macro = True
vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
is_enabled, modmacro=macro)
self._add_servernames(vhost)
return vhost
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search base config, and all included paths for VirtualHosts
vhs = []
vhost_paths = {}
for vhost_path in self.parser.parser_paths.keys():
paths = self.aug.match(
("/files%s//*[label()=~regexp('%s')]" %
(vhost_path, parser.case_i("VirtualHost"))))
for path in paths:
new_vhost = self._create_vhost(path)
realpath = os.path.realpath(new_vhost.filep)
if realpath not in vhost_paths.keys():
vhs.append(new_vhost)
vhost_paths[realpath] = new_vhost.filep
elif realpath == new_vhost.filep:
# Prefer "real" vhost paths instead of symlinked ones
# ex: sites-enabled/vh.conf -> sites-available/vh.conf
# remove old (most likely) symlinked one
vhs = [v for v in vhs if v.filep != vhost_paths[realpath]]
vhs.append(new_vhost)
vhost_paths[realpath] = realpath
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param letsencrypt_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~letsencrypt_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
def prepare_server_https(self, port, temp=False):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port.
:param str port: Port to listen on
"""
self.prepare_https_modules(temp)
# Check for Listen <port>
# Note: This could be made to also look for ip:443 combo
listens = [self.parser.get_arg(x).split()[0] for
x in self.parser.find_dir("Listen")]
# In case no Listens are set (which really is a broken apache config)
if not listens:
listens = ["80"]
if port in listens:
return
for listen in listens:
# For any listen statement, check if the machine also listens on
# Port 443. If not, add such a listen statement.
if len(listen.split(":")) == 1:
                # It's listening on all interfaces
if port not in listens:
if port == "443":
args = [port]
else:
# Non-standard ports should specify https protocol
args = [port, "https"]
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["listen"]), "Listen", args)
self.save_notes += "Added Listen %s directive to %s\n" % (
port, self.parser.loc["listen"])
listens.append(port)
else:
# The Listen statement specifies an ip
_, ip = listen[::-1].split(":", 1)
ip = ip[::-1]
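                # Sketch of the reversed split above (comment added):
                # "127.0.0.1:8080"[::-1].split(":", 1) yields
                # ("0808", "1.0.0.721"), so re-reversing the second
                # piece recovers "127.0.0.1"; splitting on the *last*
                # colon this way also keeps bracketed IPv6 intact.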
if "%s:%s" % (ip, port) not in listens:
if port == "443":
args = ["%s:%s" % (ip, port)]
else:
# Non-standard ports should specify https protocol
args = ["%s:%s" % (ip, port), "https"]
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["listen"]), "Listen", args)
self.save_notes += ("Added Listen %s:%s directive to "
"%s\n") % (ip, port,
self.parser.loc["listen"])
listens.append("%s:%s" % (ip, port))
def prepare_https_modules(self, temp):
"""Helper method for prepare_server_https, taking care of enabling
needed modules
:param boolean temp: If the change is temporary
"""
if self.conf("handle-modules"):
if self.version >= (2, 4) and ("socache_shmcb_module" not in
self.parser.modules):
self.enable_mod("socache_shmcb", temp=temp)
if "ssl_module" not in self.parser.modules:
self.enable_mod("ssl", temp=temp)
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~letsencrypt_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``letsencrypt_apache.constants.os_constant("le_vhost_ext")``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
ssl_fp = self._get_ssl_vhost_path(avail_fp)
self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)
# Reload augeas to take into account the new vhost
self.aug.load()
# Get Vhost augeas path for new vhost
vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(ssl_fp, parser.case_i("VirtualHost")))
if len(vh_p) != 1:
logger.error("Error: should only be one vhost in %s", avail_fp)
raise errors.PluginError("Currently, we only support "
"configurations with one vhost per file")
else:
# This simplifies the process
vh_p = vh_p[0]
# Update Addresses
self._update_ssl_vhosts_addrs(vh_p)
# Add directives
self._add_dummy_ssl_directives(vh_p)
self.save()
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
# We know the length is one because of the assertion above
# Create the Vhost object
ssl_vhost = self._create_vhost(vh_p)
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; For these reasons... this is tacked
# on after fully creating the new vhost
# Now check if addresses need to be added as NameBasedVhost addrs
# This is for compliance with versions of Apache < 2.4
self._add_name_vhost_if_necessary(ssl_vhost)
return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
def _sift_line(self, line):
"""Decides whether a line should be copied to a SSL vhost.
A canonical example of when sifting a line is required:
When the http vhost contains a RewriteRule that unconditionally
redirects any request to the https version of the same site.
e.g:
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [L,QSA,R=permanent]
Copying the above line to the ssl vhost would cause a
redirection loop.
:param str line: a line extracted from the http vhost.
:returns: True - don't copy line from http vhost to SSL vhost.
:rtype: bool
"""
if not line.lstrip().startswith("RewriteRule"):
return False
# According to: http://httpd.apache.org/docs/2.4/rewrite/flags.html
# The syntax of a RewriteRule is:
# RewriteRule pattern target [Flag1,Flag2,Flag3]
# i.e. target is required, so it must exist.
target = line.split()[2].strip()
# target may be surrounded with quotes
if target[0] in ("'", '"') and target[0] == target[-1]:
target = target[1:-1]
# Sift line if it redirects the request to a HTTPS site
return target.startswith("https://")
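    # Illustration (comment added; rule text hypothetical): for the line
    # 'RewriteRule ^ "https://example.com/" [L,R]', split()[2] picks the
    # quoted target, the surrounding quotes are stripped, and the
    # https:// prefix marks the line to be sifted (commented out in the
    # new SSL vhost).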
def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param str avail_fp: Pointer to the original available non-ssl vhost
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
self.reverter.register_file_creation(False, ssl_fp)
sift = False
try:
with open(avail_fp, "r") as orig_file:
with open(ssl_fp, "w") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
for line in orig_file:
if self._sift_line(line):
if not sift:
new_file.write(
"# Some rewrite rules in this file were "
"were disabled on your HTTPS site,\n"
"# because they have the potential to "
"create redirection loops.\n")
sift = True
new_file.write("# " + line)
else:
new_file.write(line)
new_file.write("</IfModule>\n")
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
if sift:
reporter = zope.component.getUtility(interfaces.IReporter)
reporter.add_message(
"Some rewrite rules copied from {0} were disabled in the "
"vhost for your HTTPS site located at {1} because they have "
"the potential to create redirection loops.".format(avail_fp,
ssl_fp),
reporter.MEDIUM_PRIORITY)
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
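    # For instance (illustrative, assuming obj.Addr semantics): a vhost
    # argument of "*:80" is rewritten in place to "*:443" and the new
    # Addr lands in the returned ssl_addrs set.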
def _clean_vhost(self, vhost):
# remove duplicated or conflicting ssl directives
self._deduplicate_directives(vhost.path,
["SSLCertificateFile",
"SSLCertificateKeyFile"])
# remove all problematic directives
self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 1:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 0:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_servername_alias(self, target_name, vhost):
fp = vhost.filep
vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(fp, parser.case_i("VirtualHost")))
if not vh_p:
return
vh_path = vh_p[0]
if (self.parser.find_dir("ServerName", target_name, start=vh_path, exclude=False)
or self.parser.find_dir("ServerAlias", target_name, start=vh_path, exclude=False)):
return
if not self.parser.find_dir("ServerName", None, start=vh_path, exclude=False):
self.parser.add_dir(vh_path, "ServerName", target_name)
else:
self.parser.add_dir(vh_path, "ServerAlias", target_name)
self._add_servernames(vhost)
def _add_name_vhost_if_necessary(self, vhost):
"""Add NameVirtualHost Directives if necessary for new vhost.
NameVirtualHosts was a directive in Apache < 2.4
https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost
:param vhost: New virtual host that was recently created.
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
# In Apache 2.2, when a NameVirtualHost directive is not
# set, "*" and "_default_" will conflict when sharing a port
addrs = set((addr,))
if addr.get_addr() in ("*", "_default_"):
addrs.update(obj.Addr((a, addr.get_port(),))
for a in ("*", "_default_"))
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr in addrs for
test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
break
if need_to_save:
self.save()
######################################################################
# Enhancements
######################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect", "ensure-http-header"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
try:
func(self.choose_vhost(domain), options)
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
raise
def _set_http_header(self, ssl_vhost, header_substring):
"""Enables header that is identified by header_substring on ssl_vhost.
If the header identified by header_substring is not already set,
a new Header directive is placed in ssl_vhost's configuration with
arguments from: constants.HTTP_HEADER[header_substring]
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type header_substring: str
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`)
:raises .errors.PluginError: If no viable HTTP host can be created or
set with header header_substring.
"""
if "headers_module" not in self.parser.modules:
self.enable_mod("headers")
# Check if selected header is already set
self._verify_no_matching_http_header(ssl_vhost, header_substring)
# Add directives to server
self.parser.add_dir(ssl_vhost.path, "Header",
constants.HEADER_ARGS[header_substring])
self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
(header_substring, ssl_vhost.filep))
self.save()
logger.info("Adding %s header to ssl vhost in %s", header_substring,
ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
"""Checks to see if an there is an existing Header directive that
contains the string header_substring.
:param ssl_vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type header_substring: str
:returns: boolean
:rtype: (bool)
:raises errors.PluginEnhancementAlreadyPresent When header
header_substring exists
"""
header_path = self.parser.find_dir("Header", None,
start=ssl_vhost.path)
if header_path:
# "Existing Header directive for virtualhost"
pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
Adds Redirect directive to the port 80 equivalent of ssl_vhost
First the function attempts to find the vhost with equivalent
ip addresses that serves on non-ssl ports
The function then adds the directive
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`)
:raises .errors.PluginError: If no viable HTTP host can be created or
used for the redirect.
"""
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
# Check if LetsEncrypt redirection already exists
self._verify_no_letsencrypt_redirect(general_vh)
# Note: if code flow gets here it means we didn't find the exact
# letsencrypt RewriteRule config for redirection. Finding
# another RewriteRule is likely to be fine in most or all cases,
# but redirect loops are possible in very obscure cases; see #1620
# for reasoning.
if self._is_rewrite_exists(general_vh):
logger.warn("Added an HTTP->HTTPS rewrite in addition to "
"other RewriteRules; you may wish to check for "
"overall consistency.")
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
if not self._is_rewrite_engine_on(general_vh):
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
names = ssl_vhost.get_names()
for idx, name in enumerate(names):
args = ["%{SERVER_NAME}", "={0}".format(name), "[OR]"]
if idx == len(names) - 1:
args.pop()
self.parser.add_dir(general_vh.path, "RewriteCond", args)
if self.get_version() >= (2, 3, 9):
self.parser.add_dir(general_vh.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS_WITH_END)
else:
self.parser.add_dir(general_vh.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _verify_no_letsencrypt_redirect(self, vhost):
"""Checks to see if a redirect was already installed by letsencrypt.
Checks to see if virtualhost already contains a rewrite rule that is
identical to Letsencrypt's redirection rewrite rule.
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises errors.PluginEnhancementAlreadyPresent: When the exact
            letsencrypt redirection RewriteRule exists in the virtual host.
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
# There can be other RewriteRule directive lines in vhost config.
# rewrite_args_dict keys are directive ids and the corresponding value
# for each is a list of arguments to that directive.
rewrite_args_dict = defaultdict(list)
pat = r'.*(directive\[\d+\]).*'
for match in rewrite_path:
m = re.match(pat, match)
if m:
dir_id = m.group(1)
rewrite_args_dict[dir_id].append(match)
if rewrite_args_dict:
redirect_args = [constants.REWRITE_HTTPS_ARGS,
constants.REWRITE_HTTPS_ARGS_WITH_END]
for matches in rewrite_args_dict.values():
if [self.aug.get(x) for x in matches] in redirect_args:
raise errors.PluginEnhancementAlreadyPresent(
"Let's Encrypt has already enabled redirection")
def _is_rewrite_exists(self, vhost):
"""Checks if there exists a RewriteRule directive in vhost
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: True if a RewriteRule directive exists.
:rtype: bool
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
return bool(rewrite_path)
def _is_rewrite_engine_on(self, vhost):
"""Checks if a RewriteEngine directive is on
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
rewrite_engine_path = self.parser.find_dir("RewriteEngine", "on",
start=vhost.path)
if rewrite_engine_path:
return self.parser.get_arg(rewrite_engine_path[0])
return False
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~letsencrypt_apache.obj.VirtualHost`)
:rtype: tuple
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
self.vhosts.append(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
rewrite_rule_args = []
if self.get_version() >= (2, 3, 9):
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
else:
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog /var/log/apache2/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for
addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(rewrite_rule_args)))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(self.conf("vhost-root"),
redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
# Third filter - if none with same names, return generic
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost, generic=True):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"):
"""Return all addrs of vhost with the port replaced with the specified.
:param obj.VirtualHost ssl_vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
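    # e.g. (illustrative) a vhost with addrs {"1.2.3.4:80", "[::]:80"}
    # yields {"1.2.3.4:443", "[::]:443"} for port="443", assuming
    # Addr.get_addr_obj swaps only the port component.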
def get_all_certs_keys(self):
"""Find all existing keys, certs from configuration.
Retrieve all certs and keys set in VirtualHosts on the Apache server
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: list
"""
c_k = set()
for vhost in self.vhosts:
if vhost.ssl:
cert_path = self.parser.find_dir(
"SSLCertificateFile", None,
start=vhost.path, exclude=False)
key_path = self.parser.find_dir(
"SSLCertificateKeyFile", None,
start=vhost.path, exclude=False)
if cert_path and key_path:
cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
key = os.path.abspath(self.parser.get_arg(key_path[-1]))
c_k.add((cert, key, get_file_path(cert_path[-1])))
else:
logger.warning(
"Invalid VirtualHost configuration - %s", vhost.filep)
return c_k
def is_site_enabled(self, avail_fp):
"""Checks to see if the given site is enabled.
.. todo:: fix hardcoded sites-enabled, check os.path.samefile
:param str avail_fp: Complete file path of available site
:returns: Success
:rtype: bool
"""
enabled_dir = os.path.join(self.parser.root, "sites-enabled")
if not os.path.isdir(enabled_dir):
error_msg = ("Directory '{0}' does not exist. Please ensure "
"that the values for --apache-handle-sites and "
"--apache-server-root are correct for your "
"environment.".format(enabled_dir))
raise errors.ConfigurationError(error_msg)
for entry in os.listdir(enabled_dir):
try:
if filecmp.cmp(avail_fp, os.path.join(enabled_dir, entry)):
return True
except OSError:
pass
return False
def enable_site(self, vhost):
"""Enables an available site, Apache reload required.
.. note:: Does not make sure that the site correctly works or that all
modules are enabled appropriately.
.. todo:: This function should number subdomains before the domain
vhost
.. todo:: Make sure link is not broken...
:param vhost: vhost to enable
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if self.is_site_enabled(vhost.filep):
return
if "/sites-available/" in vhost.filep:
enabled_path = ("%s/sites-enabled/%s" %
(self.parser.root, os.path.basename(vhost.filep)))
self.reverter.register_file_creation(False, enabled_path)
os.symlink(vhost.filep, enabled_path)
vhost.enabled = True
logger.info("Enabling available site: %s", vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
else:
raise errors.NotSupportedError(
"Unsupported filesystem layout. "
"sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
"""Enables module in Apache.
Both enables and reloads Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
:raises .errors.NotSupportedError: If the filesystem layout is not
supported.
:raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
run.
"""
# Support Debian specific setup
avail_path = os.path.join(self.parser.root, "mods-available")
enabled_path = os.path.join(self.parser.root, "mods-enabled")
if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
raise errors.NotSupportedError(
"Unsupported directory layout. You may try to enable mod %s "
"and try again." % mod_name)
deps = _get_mod_deps(mod_name)
# Enable all dependencies
for dep in deps:
if (dep + "_module") not in self.parser.modules:
self._enable_mod_debian(dep, temp)
self._add_parser_mod(dep)
note = "Enabled dependency of %s module - %s" % (mod_name, dep)
if not temp:
self.save_notes += note + os.linesep
logger.debug(note)
# Enable actual module
self._enable_mod_debian(mod_name, temp)
self._add_parser_mod(mod_name)
if not temp:
self.save_notes += "Enabled %s module in Apache\n" % mod_name
logger.info("Enabled Apache %s module", mod_name)
# Modules can enable additional config files. Variables may be defined
# within these new configuration sections.
# Reload is not necessary as DUMP_RUN_CFG uses latest config.
self.parser.update_runtime_variables()
def _add_parser_mod(self, mod_name):
"""Shortcut for updating parser modules."""
self.parser.modules.add(mod_name + "_module")
self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
"""Assumes mods-available, mods-enabled layout."""
# Generate reversal command.
# Try to be safe here... check that we can probably reverse before
# applying enmod command
if not le_util.exe_exists(self.conf("dismod")):
raise errors.MisconfigurationError(
"Unable to find a2dismod, please make sure a2enmod and "
"a2dismod are configured correctly for letsencrypt.")
self.reverter.register_undo_command(
temp, [self.conf("dismod"), mod_name])
le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
"""Runs a config test and reloads the Apache server.
:raises .errors.MisconfigurationError: If either the config test
or reload fails.
"""
self.config_test()
self._reload()
def _reload(self):
"""Reloads the Apache server.
:raises .errors.MisconfigurationError: If reload fails
"""
try:
le_util.run_script(constants.os_constant("restart_cmd"))
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
le_util.run_script(constants.os_constant("conftest_cmd"))
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = le_util.run_script(
constants.os_constant("version_cmd"))
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" %
constants.os_constant("version_cmd"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
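    # Example (comment added): stdout of "Server version: Apache/2.4.7
    # (Ubuntu)" makes the regex capture "2.4.7", returned as (2, 4, 7);
    # zero or multiple captures raise the PluginError above.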
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.TLSSNI01]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
        If this turns out not to be the case in the future, cleanup and
        outstanding challenge handling will have to be designed better.
"""
self._chall_out.update(achalls)
responses = [None] * len(achalls)
chall_doer = tls_sni_01.ApacheTlsSni01(self)
for i, achall in enumerate(achalls):
# Currently also have chall_doer hold associated index of the
# challenge. This helps to put all of the responses back together
# when they are all complete.
chall_doer.add_chall(achall, i)
sni_response = chall_doer.perform()
if sni_response:
# Must reload in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# TODO: Remove this dirty hack. We need to determine a reliable way
# of identifying when the new configuration is being used.
time.sleep(3)
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(sni_response):
responses[chall_doer.indices[i]] = resp
return responses
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
self.parser.init_modules()
def _get_mod_deps(mod_name):
"""Get known module dependencies.
.. note:: This does not need to be accurate in order for the client to
run. This simply keeps things clean if the user decides to revert
changes.
.. warning:: If all deps are not included, it may cause incorrect parsing
behavior, due to enable_mod's shortcut for updating the parser's
currently defined modules (`.ApacheConfigurator._add_parser_mod`)
This would only present a major problem in extremely atypical
configs that use ifmod for the missing deps.
"""
deps = {
"ssl": ["setenvif", "mime"]
}
return deps.get(mod_name, [])
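# Usage sketch for the helper above (illustrative):
#
#     >>> _get_mod_deps("ssl")
#     ['setenvif', 'mime']
#     >>> _get_mod_deps("rewrite")
#     []
#
# Unknown modules fall back to an empty list via dict.get, so callers
# can iterate over the result unconditionally.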
def get_file_path(vhost_path):
"""Get file path from augeas_vhost_path.
Takes in Augeas path and returns the file name
:param str vhost_path: Augeas virtual host path
:returns: filename of vhost
:rtype: str
"""
# Strip off /files
avail_fp = vhost_path[6:]
# This can be optimized...
while True:
# Cast all to lowercase to be case insensitive
find_if = avail_fp.lower().find("/ifmodule")
if find_if != -1:
avail_fp = avail_fp[:find_if]
continue
find_vh = avail_fp.lower().find("/virtualhost")
if find_vh != -1:
avail_fp = avail_fp[:find_vh]
continue
find_macro = avail_fp.lower().find("/macro")
if find_macro != -1:
avail_fp = avail_fp[:find_macro]
continue
break
return avail_fp
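# Worked example (hypothetical path): for
# "/files/etc/apache2/sites-enabled/site.conf/IfModule/VirtualHost"
# the leading "/files" is stripped and the loop trims the trailing
# "/IfModule/VirtualHost" labels, leaving
# "/etc/apache2/sites-enabled/site.conf".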
def install_ssl_options_conf(options_ssl):
"""
Copy Let's Encrypt's SSL options file into the system's config dir if
required.
"""
# XXX if we ever try to enforce a local privilege boundary (eg, running
# letsencrypt for unprivileged users via setuid), this function will need
# to be modified.
# XXX if the user is in security-autoupdate mode, we should be willing to
# overwrite the options_ssl file at least if it's unmodified:
# https://github.com/letsencrypt/letsencrypt/issues/1123
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(options_ssl):
shutil.copyfile(constants.os_constant("MOD_SSL_CONF_SRC"), options_ssl)
| {
"repo_name": "thanatos/lets-encrypt-preview",
"path": "letsencrypt-apache/letsencrypt_apache/configurator.py",
"copies": "1",
"size": "65352",
"license": "apache-2.0",
"hash": 2488028904338446000,
"line_mean": 38.7276595745,
"line_max": 125,
"alpha_frac": 0.5788652222,
"autogenerated": false,
"ratio": 4.255241567912488,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00002689531360806026,
"num_lines": 1645
} |
"""Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import fnmatch
import logging
import os
import re
import socket
import time
import zope.component
import zope.interface
from acme import challenges
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.plugins import common
from certbot.plugins.util import path_surgery
from certbot_apache import augeas_configurator
from certbot_apache import constants
from certbot_apache import display_ops
from certbot_apache import tls_sni_01
from certbot_apache import obj
from certbot_apache import parser
from collections import defaultdict
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This cannot be completely remedied with regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in them; the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
@zope.interface.implementer(interfaces.IAuthenticator, interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
    State of Configurator: This code has been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~certbot.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~certbot_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~certbot_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
description = "Apache Web Server plugin - Beta"
@classmethod
def add_parser_arguments(cls, add):
add("enmod", default=constants.os_constant("enmod"),
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.os_constant("dismod"),
help="Path to the Apache 'a2dismod' binary.")
add("le-vhost-ext", default=constants.os_constant("le_vhost_ext"),
help="SSL vhost configuration extension.")
add("server-root", default=constants.os_constant("server_root"),
help="Apache server root directory.")
add("vhost-root", default=None,
help="Apache server VirtualHost configuration root")
add("logs-root", default=constants.os_constant("logs_root"),
help="Apache server logs directory")
add("challenge-location",
default=constants.os_constant("challenge_location"),
help="Directory path for challenge configuration.")
add("handle-modules", default=constants.os_constant("handle_mods"),
help="Let installer handle enabling required modules for you." +
"(Only Ubuntu/Debian currently)")
add("handle-sites", default=constants.os_constant("handle_sites"),
help="Let installer handle enabling sites for you." +
"(Only Ubuntu/Debian currently)")
util.add_deprecated_argument(add, argument_name="ctl", nargs=1)
util.add_deprecated_argument(
add, argument_name="init-script", nargs=1)
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict()
# Outstanding challenges
self._chall_out = set()
# Maps enhancements to vhosts we've enabled the enhancement for
self._enhanced_vhosts = defaultdict(set)
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self.vhostroot = None
self._enhance_func = {"redirect": self._enable_redirect,
"ensure-http-header": self._set_http_header,
"staple-ocsp": self._enable_ocsp_stapling}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir,
constants.MOD_SSL_CONF_DEST)
@property
def updated_mod_ssl_conf_digest(self):
"""Full absolute path to digest of updated SSL configuration file."""
return os.path.join(self.config.config_dir, constants.UPDATED_MOD_SSL_CONF_DIGEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Perform the actual Augeas initialization to be able to react
try:
self.init_augeas()
except ImportError:
raise errors.NoInstallationError("Problem in Augeas installation")
# Verify Apache is installed
restart_cmd = constants.os_constant("restart_cmd")[0]
if not util.exe_exists(restart_cmd):
if not path_surgery(restart_cmd):
raise errors.NoInstallationError(
'Cannot find Apache control command {0}'.format(restart_cmd))
# Make sure configuration is valid
self.config_test()
# Set Version
if self.version is None:
self.version = self.get_version()
logger.debug('Apache version is %s',
'.'.join(str(i) for i in self.version))
if self.version < (2, 2):
raise errors.NotSupportedError(
"Apache Version %s not supported.", str(self.version))
if not self._check_aug_version():
raise errors.NotSupportedError(
"Apache plugin support requires libaugeas0 and augeas-lenses "
"version 1.2.0 or higher, please make sure you have you have "
"those installed.")
# Parse vhost-root if defined on cli
if not self.conf("vhost-root"):
self.vhostroot = constants.os_constant("vhost_root")
else:
self.vhostroot = os.path.abspath(self.conf("vhost-root"))
self.parser = parser.ApacheParser(
self.aug, self.conf("server-root"), self.conf("vhost-root"),
self.version, configurator=self)
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
install_ssl_options_conf(self.mod_ssl_conf, self.updated_mod_ssl_conf_digest)
# Prevent two Apache plugins from modifying a config at once
try:
util.lock_dir_until_exit(self.conf("server-root"))
except (OSError, errors.LockError):
logger.debug("Encountered error:", exc_info=True)
raise errors.PluginError(
"Unable to lock %s", self.conf("server-root"))
def _check_aug_version(self):
""" Checks that we have recent enough version of libaugeas.
If augeas version is recent enough, it will support case insensitive
regexp matching"""
self.aug.set("/test/path/testing/arg", "aRgUMeNT")
try:
matches = self.aug.match(
"/test//*[self::arg=~regexp('argument', 'i')]")
except RuntimeError:
self.aug.remove("/test/path")
return False
self.aug.remove("/test/path")
return matches
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None):
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies
that it has located the three directives and finally modifies them
to point to the correct destination. After the certificate is
installed, the VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within certbot though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
# Choose vhost before (possible) enabling of mod_ssl, to keep the
# vhost choice namespace similar with the pre-validation one.
vhost = self.choose_vhost(domain)
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
# Add directives and remove duplicates
self._add_dummy_ssl_directives(vhost.path)
self._clean_vhost(vhost)
path = {"cert_path": self.parser.find_dir("SSLCertificateFile",
None, vhost.path),
"cert_key": self.parser.find_dir("SSLCertificateKeyFile",
None, vhost.path)}
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
            # Cannot find all of the required directives; raise an error
logger.warning(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
raise errors.PluginError(
"Unable to find cert and/or key directives")
logger.info("Deploying Certificate for %s to VirtualHost %s", domain, vhost.filep)
if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
# install SSLCertificateFile, SSLCertificateKeyFile,
# and SSLCertificateChainFile directives
set_cert_path = cert_path
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
self.parser.add_dir(vhost.path,
"SSLCertificateChainFile", chain_path)
else:
raise errors.PluginError("--chain-path is required for your "
"version of Apache")
else:
if not fullchain_path:
raise errors.PluginError("Please provide the --fullchain-path\
option pointing to your full chain file")
set_cert_path = fullchain_path
self.aug.set(path["cert_path"][-1], fullchain_path)
self.aug.set(path["cert_key"][-1], key_path)
# Enable the new vhost if needed
if not vhost.enabled:
self.enable_site(vhost)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
set_cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
def choose_vhost(self, target_name, temp=False):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
The returned vhost is guaranteed to have TLS enabled unless temp is
True. If temp is True, there is no such guarantee and the result is
not cached.
:param str target_name: domain name
:param bool temp: whether the vhost is only used temporarily
:returns: ssl vhost associated with name
:rtype: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if temp:
return vhost
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self._add_servername_alias(target_name, vhost)
self.assoc[target_name] = vhost
return vhost
return self._choose_vhost_from_list(target_name, temp)
def _choose_vhost_from_list(self, target_name, temp=False):
# Select a vhost from a list
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is None:
logger.error(
"No vhost exists with servername or alias of %s. "
"No vhost was selected. Please specify ServerName or ServerAlias "
"in the Apache config, or split vhosts into separate files.",
target_name)
raise errors.PluginError("No vhost selected")
elif temp:
return vhost
elif not vhost.ssl:
addrs = self._get_proposed_addrs(vhost, "443")
# TODO: Conflicts is too conservative
            if not any(other.enabled and other.conflicts(addrs) for
                       other in self.vhosts):
vhost = self.make_vhost_ssl(vhost)
else:
logger.error(
"The selected vhost would conflict with other HTTPS "
"VirtualHosts within Apache. Please select another "
"vhost or add ServerNames to your configuration.")
raise errors.PluginError(
"VirtualHost not able to be selected.")
self._add_servername_alias(target_name, vhost)
self.assoc[target_name] = vhost
return vhost
def included_in_wildcard(self, names, target_name):
"""Is target_name covered by a wildcard?
:param names: server aliases
:type names: `collections.Iterable` of `str`
:param str target_name: name to compare with wildcards
:returns: True if target_name is covered by a wildcard,
otherwise, False
:rtype: bool
"""
# use lowercase strings because fnmatch can be case sensitive
target_name = target_name.lower()
for name in names:
name = name.lower()
# fnmatch treats "[seq]" specially and [ or ] characters aren't
# valid in Apache but Apache doesn't error out if they are present
if "[" not in name and fnmatch.fnmatch(target_name, name):
return True
return False
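    # Illustrative (hypothetical) calls, given fnmatch's "*" semantics:
    #   included_in_wildcard(["*.example.com"], "www.example.com") -> True
    #   included_in_wildcard(["*.example.com"], "example.com")     -> False
    # because the target must still end with ".example.com" for "*" to match.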
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 6 - Servername SSL
# Points 5 - Wildcard SSL
# Points 4 - Address name with SSL
# Points 3 - Servername no SSL
# Points 2 - Wildcard no SSL
# Points 1 - Address name with no SSL
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if vhost.modmacro is True:
continue
names = vhost.get_names()
if target_name in names:
points = 3
elif self.included_in_wildcard(names, target_name):
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 3
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
vhosts = self._non_default_vhosts()
# remove mod_macro hosts from reasonable vhosts
reasonable_vhosts = [vh for vh
in vhosts if vh.modmacro is False]
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
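    # Worked example of the scoring above (hypothetical vhosts): for the
    # target "www.example.com", an SSL vhost with ServerAlias "*.example.com"
    # scores 2 (wildcard) + 3 (ssl) = 5 and beats a non-SSL vhost whose
    # ServerName is exactly "www.example.com", which scores only 3.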
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
vhost_macro = []
for vhost in self.vhosts:
all_names.update(vhost.get_names())
if vhost.modmacro:
vhost_macro.append(vhost.filep)
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
if len(vhost_macro) > 0:
zope.component.getUtility(interfaces.IDisplay).notification(
"Apache mod_macro seems to be in use in file(s):\n{0}"
"\n\nUnfortunately mod_macro is not yet supported".format(
"\n ".join(vhost_macro)), force_interactive=True)
return util.get_filtered_names(all_names)
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _get_vhost_names(self, path):
"""Helper method for getting the ServerName and
ServerAlias values from vhost in path
:param path: Path to read ServerName and ServerAliases from
:returns: Tuple including ServerName and `list` of ServerAlias strings
"""
servername_match = self.parser.find_dir(
"ServerName", None, start=path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=path, exclude=False)
serveraliases = []
for alias in serveralias_match:
serveralias = self.parser.get_arg(alias)
serveraliases.append(serveralias)
servername = None
if servername_match:
# Get last ServerName as each overwrites the previous
servername = self.parser.get_arg(servername_match[-1])
return (servername, serveraliases)
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~certbot_apache.obj.VirtualHost`
"""
servername, serveraliases = self._get_vhost_names(host.path)
for alias in serveraliases:
if not host.modmacro:
host.aliases.add(alias)
if not host.modmacro:
host.name = servername
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~certbot_apache.obj.VirtualHost`
"""
addrs = set()
try:
args = self.aug.match(path + "/arg")
except RuntimeError:
logger.warning("Encountered a problem while parsing file: %s, skipping", path)
return None
for arg in args:
addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
is_ssl = False
if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
is_ssl = True
# "SSLEngine on" might be set outside of <VirtualHost>
# Treat vhosts with port 443 as ssl vhosts
for addr in addrs:
if addr.get_port() == "443":
is_ssl = True
filename = get_file_path(self.aug.get("/augeas/files%s/path" % get_file_path(path)))
if filename is None:
return None
macro = False
if "/macro/" in path.lower():
macro = True
vhost_enabled = self.parser.parsed_in_original(filename)
vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
vhost_enabled, modmacro=macro)
self._add_servernames(vhost)
return vhost
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~certbot_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search base config, and all included paths for VirtualHosts
file_paths = {}
internal_paths = defaultdict(set)
vhs = []
# Make a list of parser paths because the parser_paths
# dictionary may be modified during the loop.
for vhost_path in list(self.parser.parser_paths):
paths = self.aug.match(
("/files%s//*[label()=~regexp('%s')]" %
(vhost_path, parser.case_i("VirtualHost"))))
paths = [path for path in paths if
"virtualhost" in os.path.basename(path).lower()]
for path in paths:
new_vhost = self._create_vhost(path)
if not new_vhost:
continue
internal_path = get_internal_aug_path(new_vhost.path)
realpath = os.path.realpath(new_vhost.filep)
if realpath not in file_paths:
file_paths[realpath] = new_vhost.filep
internal_paths[realpath].add(internal_path)
vhs.append(new_vhost)
elif (realpath == new_vhost.filep and
realpath != file_paths[realpath]):
# Prefer "real" vhost paths instead of symlinked ones
# ex: sites-enabled/vh.conf -> sites-available/vh.conf
# remove old (most likely) symlinked one
new_vhs = []
for v in vhs:
if v.filep == file_paths[realpath]:
internal_paths[realpath].remove(
get_internal_aug_path(v.path))
else:
new_vhs.append(v)
vhs = new_vhs
file_paths[realpath] = realpath
internal_paths[realpath].add(internal_path)
vhs.append(new_vhost)
elif internal_path not in internal_paths[realpath]:
internal_paths[realpath].add(internal_path)
vhs.append(new_vhost)
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param certbot_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~certbot_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
def prepare_server_https(self, port, temp=False):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port.
:param str port: Port to listen on
"""
# If nonstandard port, add service definition for matching
if port != "443":
port_service = "%s %s" % (port, "https")
else:
port_service = port
self.prepare_https_modules(temp)
# Check for Listen <port>
# Note: This could be made to also look for ip:443 combo
listens = [self.parser.get_arg(x).split()[0] for
x in self.parser.find_dir("Listen")]
# In case no Listens are set (which really is a broken apache config)
if not listens:
listens = ["80"]
# Listen already in place
if self._has_port_already(listens, port):
return
listen_dirs = set(listens)
for listen in listens:
# For any listen statement, check if the machine also listens on
# Port 443. If not, add such a listen statement.
if len(listen.split(":")) == 1:
# Its listening to all interfaces
if port not in listen_dirs and port_service not in listen_dirs:
listen_dirs.add(port_service)
else:
# The Listen statement specifies an ip
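                # Reversing the string before splitting isolates everything
                # before the last ":" as the address part, e.g.:
                #   "1.2.3.4:80"[::-1].split(":", 1) -> ["08", "4.3.2.1"]
                # so ip[::-1] restores "1.2.3.4"; the same trick keeps
                # bracketed IPv6 addresses such as "[2001:db8::1]:80" intact.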
_, ip = listen[::-1].split(":", 1)
ip = ip[::-1]
if "%s:%s" % (ip, port_service) not in listen_dirs and (
"%s:%s" % (ip, port_service) not in listen_dirs):
listen_dirs.add("%s:%s" % (ip, port_service))
self._add_listens(listen_dirs, listens, port)
def _add_listens(self, listens, listens_orig, port):
"""Helper method for prepare_server_https to figure out which new
listen statements need adding
:param set listens: Set of all needed Listen statements
:param list listens_orig: List of existing listen statements
:param string port: Port number we're adding
"""
# Add service definition for non-standard ports
if port != "443":
port_service = "%s %s" % (port, "https")
else:
port_service = port
new_listens = listens.difference(listens_orig)
if port in new_listens or port_service in new_listens:
# We have wildcard, skip the rest
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(self.parser.loc["listen"]),
"Listen", port_service.split(" "))
self.save_notes += "Added Listen %s directive to %s\n" % (
port_service, self.parser.loc["listen"])
else:
for listen in new_listens:
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(self.parser.loc["listen"]),
"Listen", listen.split(" "))
self.save_notes += ("Added Listen %s directive to "
"%s\n") % (listen,
self.parser.loc["listen"])
def _has_port_already(self, listens, port):
"""Helper method for prepare_server_https to find out if user
already has an active Listen statement for the port we need
:param list listens: List of listen variables
:param string port: Port in question
"""
if port in listens:
return True
# Check if Apache is already listening on a specific IP
for listen in listens:
if len(listen.split(":")) > 1:
# Ugly but takes care of protocol def, eg: 1.1.1.1:443 https
if listen.split(":")[-1].split(" ")[0] == port:
return True
def prepare_https_modules(self, temp):
"""Helper method for prepare_server_https, taking care of enabling
needed modules
:param boolean temp: If the change is temporary
"""
if self.conf("handle-modules"):
if self.version >= (2, 4) and ("socache_shmcb_module" not in
self.parser.modules):
self.enable_mod("socache_shmcb", temp=temp)
if "ssl_module" not in self.parser.modules:
self.enable_mod("ssl", temp=temp)
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~certbot_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``certbot_apache.constants.os_constant("le_vhost_ext")``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
ssl_fp = self._get_ssl_vhost_path(avail_fp)
orig_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(self._escape(ssl_fp),
parser.case_i("VirtualHost")))
self._copy_create_ssl_vhost_skeleton(nonssl_vhost, ssl_fp)
# Reload augeas to take into account the new vhost
self.aug.load()
# Get Vhost augeas path for new vhost
new_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(self._escape(ssl_fp),
parser.case_i("VirtualHost")))
vh_p = self._get_new_vh_path(orig_matches, new_matches)
if not vh_p:
# The vhost was not found on the currently parsed paths
# Make Augeas aware of the new vhost
self.parser.parse_file(ssl_fp)
# Try to search again
new_matches = self.aug.match(
"/files%s//* [label()=~regexp('%s')]" %
(self._escape(ssl_fp),
parser.case_i("VirtualHost")))
vh_p = self._get_new_vh_path(orig_matches, new_matches)
if not vh_p:
raise errors.PluginError(
"Could not reverse map the HTTPS VirtualHost to the original")
# Update Addresses
self._update_ssl_vhosts_addrs(vh_p)
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
        # vh_p is guaranteed to be a valid path at this point; a missing
        # path would have raised PluginError above
        # Create the Vhost object
ssl_vhost = self._create_vhost(vh_p)
ssl_vhost.ancestor = nonssl_vhost
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; For these reasons... this is tacked
# on after fully creating the new vhost
# Now check if addresses need to be added as NameBasedVhost addrs
# This is for compliance with versions of Apache < 2.4
self._add_name_vhost_if_necessary(ssl_vhost)
return ssl_vhost
def _get_new_vh_path(self, orig_matches, new_matches):
""" Helper method for make_vhost_ssl for matching augeas paths. Returns
VirtualHost path from new_matches that's not present in orig_matches.
        Paths are normalized, because Augeas leaves the index out for paths
        with only a single directive of a given key """
orig_matches = [i.replace("[1]", "") for i in orig_matches]
for match in new_matches:
if match.replace("[1]", "") not in orig_matches:
# Return the unmodified path
return match
return None
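    # Example: a file with a single <VirtualHost> may be reported by Augeas
    # as ".../VirtualHost" before a second one is added, and as
    # ".../VirtualHost[1]" afterwards; stripping "[1]" makes both spellings
    # compare equal, so only the genuinely new ".../VirtualHost[2]" is
    # returned.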
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
""" Get a file path for SSL vhost, uses user defined path as priority,
but if the value is invalid or not defined, will fall back to non-ssl
vhost filepath.
:param str non_ssl_vh_fp: Filepath of non-SSL vhost
:returns: Filepath for SSL vhost
:rtype: str
"""
if self.conf("vhost-root") and os.path.exists(self.conf("vhost-root")):
# Defined by user on CLI
fp = os.path.join(os.path.realpath(self.vhostroot),
os.path.basename(non_ssl_vh_fp))
else:
# Use non-ssl filepath
fp = os.path.realpath(non_ssl_vh_fp)
if fp.endswith(".conf"):
return fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return fp + self.conf("le_vhost_ext")
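    # e.g. with a le_vhost_ext of "-le-ssl.conf" (illustrative value),
    # "/etc/apache2/sites-available/example.conf" maps to
    # "/etc/apache2/sites-available/example-le-ssl.conf".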
def _sift_rewrite_rule(self, line):
"""Decides whether a line should be copied to a SSL vhost.
A canonical example of when sifting a line is required:
When the http vhost contains a RewriteRule that unconditionally
redirects any request to the https version of the same site.
e.g:
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [L,QSA,R=permanent]
Copying the above line to the ssl vhost would cause a
redirection loop.
:param str line: a line extracted from the http vhost.
:returns: True - don't copy line from http vhost to SSL vhost.
:rtype: bool
"""
if not line.lower().lstrip().startswith("rewriterule"):
return False
# According to: http://httpd.apache.org/docs/2.4/rewrite/flags.html
# The syntax of a RewriteRule is:
# RewriteRule pattern target [Flag1,Flag2,Flag3]
# i.e. target is required, so it must exist.
target = line.split()[2].strip()
# target may be surrounded with quotes
if target[0] in ("'", '"') and target[0] == target[-1]:
target = target[1:-1]
# Sift line if it redirects the request to a HTTPS site
return target.startswith("https://")
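    # Illustrative inputs (hypothetical):
    #   'RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [L,R=301]' -> True
    #   'RewriteRule ^/old$ /new [R=302]'                              -> False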
def _copy_create_ssl_vhost_skeleton(self, vhost, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param obj.VirtualHost vhost: Original VirtualHost object
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
if os.path.exists(ssl_fp):
notes = "Appended new VirtualHost directive to file %s" % ssl_fp
files = set()
files.add(ssl_fp)
self.reverter.add_to_checkpoint(files, notes)
else:
self.reverter.register_file_creation(False, ssl_fp)
sift = False
try:
orig_contents = self._get_vhost_block(vhost)
ssl_vh_contents, sift = self._sift_rewrite_rules(orig_contents)
with open(ssl_fp, "a") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
new_file.write("\n".join(ssl_vh_contents))
# The content does not include the closing tag, so add it
new_file.write("</VirtualHost>\n")
new_file.write("</IfModule>\n")
# Add new file to augeas paths if we're supposed to handle
# activation (it's not included as default)
if not self.parser.parsed_in_current(ssl_fp):
self.parser.parse_file(ssl_fp)
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
if sift:
reporter = zope.component.getUtility(interfaces.IReporter)
reporter.add_message(
"Some rewrite rules copied from {0} were disabled in the "
"vhost for your HTTPS site located at {1} because they have "
"the potential to create redirection loops.".format(
vhost.filep, ssl_fp), reporter.MEDIUM_PRIORITY)
self.aug.set("/augeas/files%s/mtime" % (self._escape(ssl_fp)), "0")
self.aug.set("/augeas/files%s/mtime" % (self._escape(vhost.filep)), "0")
def _sift_rewrite_rules(self, contents):
""" Helper function for _copy_create_ssl_vhost_skeleton to prepare the
new HTTPS VirtualHost contents. Currently disabling the rewrites """
result = []
sift = False
contents = iter(contents)
comment = ("# Some rewrite rules in this file were "
"disabled on your HTTPS site,\n"
"# because they have the potential to create "
"redirection loops.\n")
for line in contents:
            is_cond = line.lower().lstrip().startswith("rewritecond")
            is_rule = line.lower().lstrip().startswith("rewriterule")
            if not (is_cond or is_rule):
result.append(line)
continue
# A RewriteRule that doesn't need filtering
            if is_rule and not self._sift_rewrite_rule(line):
result.append(line)
continue
# A RewriteRule that does need filtering
            if is_rule and self._sift_rewrite_rule(line):
if not sift:
result.append(comment)
sift = True
result.append("# " + line)
continue
# We save RewriteCond(s) and their corresponding
# RewriteRule in 'chunk'.
# We then decide whether we comment out the entire
# chunk based on its RewriteRule.
chunk = []
            if is_cond:
chunk.append(line)
line = next(contents)
# RewriteCond(s) must be followed by one RewriteRule
while not line.lower().lstrip().startswith("rewriterule"):
chunk.append(line)
line = next(contents)
# Now, current line must start with a RewriteRule
chunk.append(line)
if self._sift_rewrite_rule(line):
if not sift:
result.append(comment)
sift = True
result.append('\n'.join(
['# ' + l for l in chunk]))
continue
else:
result.append('\n'.join(chunk))
continue
return result, sift
def _get_vhost_block(self, vhost):
""" Helper method to get VirtualHost contents from the original file.
This is done with help of augeas span, which returns the span start and
end positions
:returns: `list` of VirtualHost block content lines without closing tag
"""
try:
span_val = self.aug.span(vhost.path)
except ValueError:
logger.fatal("Error while reading the VirtualHost %s from "
"file %s", vhost.name, vhost.filep, exc_info=True)
raise errors.PluginError("Unable to read VirtualHost from file")
span_filep = span_val[0]
span_start = span_val[5]
span_end = span_val[6]
with open(span_filep, 'r') as fh:
fh.seek(span_start)
vh_contents = fh.read(span_end-span_start).split("\n")
self._remove_closing_vhost_tag(vh_contents)
return vh_contents
def _remove_closing_vhost_tag(self, vh_contents):
"""Removes the closing VirtualHost tag if it exists.
This method modifies vh_contents directly to remove the closing
tag. If the closing vhost tag is found, everything on the line
after it is also removed. Whether or not this tag is included
in the result of span depends on the Augeas version.
:param list vh_contents: VirtualHost block contents to check
"""
for offset, line in enumerate(reversed(vh_contents)):
if line:
line_index = line.lower().find("</virtualhost>")
if line_index != -1:
content_index = len(vh_contents) - offset - 1
vh_contents[content_index] = line[:line_index]
break
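    # e.g. a final line of "    </VirtualHost>  # end" is truncated in place
    # to "    ", since some Augeas versions include the closing tag (and
    # anything after it on the same line) in the reported span.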
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _clean_vhost(self, vhost):
# remove duplicated or conflicting ssl directives
self._deduplicate_directives(vhost.path,
["SSLCertificateFile",
"SSLCertificateKeyFile"])
# remove all problematic directives
self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 1:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 0:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_servername_alias(self, target_name, vhost):
vh_path = vhost.path
sname, saliases = self._get_vhost_names(vh_path)
if target_name == sname or target_name in saliases:
return
if self._has_matching_wildcard(vh_path, target_name):
return
if not self.parser.find_dir("ServerName", None,
start=vh_path, exclude=False):
self.parser.add_dir(vh_path, "ServerName", target_name)
else:
self.parser.add_dir(vh_path, "ServerAlias", target_name)
self._add_servernames(vhost)
def _has_matching_wildcard(self, vh_path, target_name):
"""Is target_name already included in a wildcard in the vhost?
:param str vh_path: Augeas path to the vhost
:param str target_name: name to compare with wildcards
:returns: True if there is a wildcard covering target_name in
the vhost in vhost_path, otherwise, False
:rtype: bool
"""
matches = self.parser.find_dir(
"ServerAlias", start=vh_path, exclude=False)
aliases = (self.aug.get(match) for match in matches)
return self.included_in_wildcard(aliases, target_name)
def _add_name_vhost_if_necessary(self, vhost):
"""Add NameVirtualHost Directives if necessary for new vhost.
NameVirtualHosts was a directive in Apache < 2.4
https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost
:param vhost: New virtual host that was recently created.
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
"""
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
# In Apache 2.2, when a NameVirtualHost directive is not
# set, "*" and "_default_" will conflict when sharing a port
addrs = set((addr,))
if addr.get_addr() in ("*", "_default_"):
addrs.update(obj.Addr((a, addr.get_port(),))
for a in ("*", "_default_"))
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr in addrs for
test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
break
if need_to_save:
self.save()
def _escape(self, fp):
fp = fp.replace(",", "\\,")
fp = fp.replace("[", "\\[")
fp = fp.replace("]", "\\]")
fp = fp.replace("|", "\\|")
fp = fp.replace("=", "\\=")
fp = fp.replace("(", "\\(")
fp = fp.replace(")", "\\)")
fp = fp.replace("!", "\\!")
return fp
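    # e.g. _escape("/etc/apache2/sites/foo[1].conf") returns
    # "/etc/apache2/sites/foo\[1\].conf", so the brackets are not parsed as
    # an Augeas path-expression index.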
######################################################################
# Enhancements
######################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect", "ensure-http-header", "staple-ocsp"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~certbot.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~certbot.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
try:
func(self.choose_vhost(domain), options)
except errors.PluginError:
logger.warning("Failed %s for %s", enhancement, domain)
raise
def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
"""Enables OCSP Stapling
In OCSP, each client (e.g. browser) would have to query the
OCSP Responder to validate that the site certificate was not revoked.
        Enabling OCSP Stapling allows the web server to query the OCSP
        Responder itself and staple the response to the offered certificate
        during the TLS handshake, so clients do not have to query the OCSP
        Responder directly.
OCSP Stapling enablement on Apache implicitly depends on
SSLCertificateChainFile being set by other code.
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~certbot_apache.obj.VirtualHost`)
"""
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_module" not in self.parser.modules:
self.enable_mod("socache_shmcb")
# Check if there's an existing SSLUseStapling directive on.
use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
"on", start=ssl_vhost.path)
if not use_stapling_aug_path:
self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")
ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))
# Check if there's an existing SSLStaplingCache directive.
stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
None, ssl_vhost_aug_path)
# We'll simply delete the directive, so that we'll have a
# consistent OCSP cache path.
if stapling_cache_aug_path:
self.aug.remove(
re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))
self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
"SSLStaplingCache",
["shmcb:/var/run/apache2/stapling_cache(128000)"])
msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
ssl_vhost.filep)
self.save_notes += msg
self.save()
logger.info(msg)
def _set_http_header(self, ssl_vhost, header_substring):
"""Enables header that is identified by header_substring on ssl_vhost.
If the header identified by header_substring is not already set,
a new Header directive is placed in ssl_vhost's configuration with
arguments from: constants.HTTP_HEADER[header_substring]
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type header_substring: str
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~certbot_apache.obj.VirtualHost`)
:raises .errors.PluginError: If no viable HTTP host can be created or
set with header header_substring.
"""
if "headers_module" not in self.parser.modules:
self.enable_mod("headers")
# Check if selected header is already set
self._verify_no_matching_http_header(ssl_vhost, header_substring)
# Add directives to server
self.parser.add_dir(ssl_vhost.path, "Header",
constants.HEADER_ARGS[header_substring])
self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
(header_substring, ssl_vhost.filep))
self.save()
logger.info("Adding %s header to ssl vhost in %s", header_substring,
ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
"""Checks to see if an there is an existing Header directive that
contains the string header_substring.
:param ssl_vhost: vhost to check
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type header_substring: str
:returns: boolean
:rtype: (bool)
        :raises errors.PluginEnhancementAlreadyPresent: When header
            header_substring exists
"""
header_path = self.parser.find_dir("Header", None,
start=ssl_vhost.path)
if header_path:
# "Existing Header directive for virtualhost"
pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
        Adds a Redirect directive to the port 80 equivalent of ssl_vhost.
        First the function attempts to find the vhost with equivalent
        ip addresses that serves on non-ssl ports; it then adds the
        redirect directive.
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:raises .errors.PluginError: If no viable HTTP host can be created or
used for the redirect.
"""
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
if general_vh in self._enhanced_vhosts["redirect"]:
logger.debug("Already enabled redirect for this vhost")
return
# Check if Certbot redirection already exists
self._verify_no_certbot_redirect(general_vh)
# Note: if code flow gets here it means we didn't find the exact
# certbot RewriteRule config for redirection. Finding
# another RewriteRule is likely to be fine in most or all cases,
# but redirect loops are possible in very obscure cases; see #1620
# for reasoning.
if self._is_rewrite_exists(general_vh):
logger.warning("Added an HTTP->HTTPS rewrite in addition to "
"other RewriteRules; you may wish to check for "
"overall consistency.")
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
if not self._is_rewrite_engine_on(general_vh):
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
names = ssl_vhost.get_names()
for idx, name in enumerate(names):
args = ["%{SERVER_NAME}", "={0}".format(name), "[OR]"]
if idx == len(names) - 1:
args.pop()
self.parser.add_dir(general_vh.path, "RewriteCond", args)
self._set_https_redirection_rewrite_rule(general_vh)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
self._enhanced_vhosts["redirect"].add(general_vh)
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _set_https_redirection_rewrite_rule(self, vhost):
if self.get_version() >= (2, 3, 9):
self.parser.add_dir(vhost.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS_WITH_END)
else:
self.parser.add_dir(vhost.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
def _verify_no_certbot_redirect(self, vhost):
"""Checks to see if a redirect was already installed by certbot.
Checks to see if virtualhost already contains a rewrite rule that is
identical to Certbot's redirection rewrite rule.
        For a graceful transition to new rewrite rules for HTTPS redirection,
        we delete certbot's old rewrite rules and set the new one instead.
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:raises errors.PluginEnhancementAlreadyPresent: When the exact
            certbot redirection RewriteRule exists in virtual host.
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
# There can be other RewriteRule directive lines in vhost config.
# rewrite_args_dict keys are directive ids and the corresponding value
# for each is a list of arguments to that directive.
rewrite_args_dict = defaultdict(list)
pat = r'(.*directive\[\d+\]).*'
for match in rewrite_path:
m = re.match(pat, match)
if m:
dir_path = m.group(1)
rewrite_args_dict[dir_path].append(match)
if rewrite_args_dict:
redirect_args = [constants.REWRITE_HTTPS_ARGS,
constants.REWRITE_HTTPS_ARGS_WITH_END]
for dir_path, args_paths in rewrite_args_dict.items():
arg_vals = [self.aug.get(x) for x in args_paths]
# Search for past redirection rule, delete it, set the new one
if arg_vals in constants.OLD_REWRITE_HTTPS_ARGS:
self.aug.remove(dir_path)
self._set_https_redirection_rewrite_rule(vhost)
self.save()
raise errors.PluginEnhancementAlreadyPresent(
"Certbot has already enabled redirection")
if arg_vals in redirect_args:
raise errors.PluginEnhancementAlreadyPresent(
"Certbot has already enabled redirection")
def _is_rewrite_exists(self, vhost):
"""Checks if there exists a RewriteRule directive in vhost
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: True if a RewriteRule directive exists.
:rtype: bool
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
return bool(rewrite_path)
def _is_rewrite_engine_on(self, vhost):
"""Checks if a RewriteEngine directive is on
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
"""
rewrite_engine_path_list = self.parser.find_dir("RewriteEngine", "on",
start=vhost.path)
if rewrite_engine_path_list:
for re_path in rewrite_engine_path_list:
# A RewriteEngine directive may also be included in per
# directory .htaccess files. We only care about the VirtualHost.
if 'virtualhost' in re_path.lower():
return self.parser.get_arg(re_path)
return False
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~certbot_apache.obj.VirtualHost`)
:rtype: tuple
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(self._escape(redirect_filepath)))
self.vhosts.append(new_vhost)
self._enhanced_vhosts["redirect"].add(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
rewrite_rule_args = []
if self.get_version() >= (2, 3, 9):
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
else:
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog %s/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for
addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(rewrite_rule_args),
self.conf("logs-root")))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(self.vhostroot,
redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
# Add new include to configuration if it doesn't exist yet
if not self.parser.parsed_in_current(redirect_filepath):
self.parser.parse_file(redirect_filepath)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
if ssl_vhost.ancestor:
return ssl_vhost.ancestor
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
# Third filter - if none with same names, return generic
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost, generic=True):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"):
"""Return all addrs of vhost with the port replaced with the specified.
        :param obj.VirtualHost vhost: Original vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def enable_site(self, vhost):
"""Enables an available site, Apache reload required.
.. note:: Does not make sure that the site correctly works or that all
modules are enabled appropriately.
.. todo:: This function should number subdomains before the domain
vhost
.. todo:: Make sure link is not broken...
:param vhost: vhost to enable
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if vhost.enabled:
return
# Handle non-debian systems
if not self.conf("handle-sites"):
if not self.parser.parsed_in_original(vhost.filep):
# Add direct include to root conf
self.parser.add_include(self.parser.loc["default"], vhost.filep)
vhost.enabled = True
return
enabled_path = ("%s/sites-enabled/%s" %
(self.parser.root, os.path.basename(vhost.filep)))
self.reverter.register_file_creation(False, enabled_path)
try:
os.symlink(vhost.filep, enabled_path)
except OSError as err:
if os.path.islink(enabled_path) and os.path.realpath(
enabled_path) == vhost.filep:
# Already in shape
vhost.enabled = True
return
else:
logger.warning(
"Could not symlink %s to %s, got error: %s", enabled_path,
vhost.filep, err.strerror)
errstring = ("Encountered error while trying to enable a " +
"newly created VirtualHost located at {0} by " +
"linking to it from {1}")
raise errors.NotSupportedError(errstring.format(vhost.filep,
enabled_path))
vhost.enabled = True
logger.info("Enabling available site: %s", vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
def enable_mod(self, mod_name, temp=False):
"""Enables module in Apache.
Both enables and reloads Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
:raises .errors.NotSupportedError: If the filesystem layout is not
supported.
:raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
run.
"""
# Support Debian specific setup
avail_path = os.path.join(self.parser.root, "mods-available")
enabled_path = os.path.join(self.parser.root, "mods-enabled")
if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
raise errors.NotSupportedError(
"Unsupported directory layout. You may try to enable mod %s "
"and try again." % mod_name)
deps = _get_mod_deps(mod_name)
# Enable all dependencies
for dep in deps:
if (dep + "_module") not in self.parser.modules:
self._enable_mod_debian(dep, temp)
self._add_parser_mod(dep)
note = "Enabled dependency of %s module - %s" % (mod_name, dep)
if not temp:
self.save_notes += note + os.linesep
logger.debug(note)
# Enable actual module
self._enable_mod_debian(mod_name, temp)
self._add_parser_mod(mod_name)
if not temp:
self.save_notes += "Enabled %s module in Apache\n" % mod_name
logger.info("Enabled Apache %s module", mod_name)
# Modules can enable additional config files. Variables may be defined
# within these new configuration sections.
# Reload is not necessary as DUMP_RUN_CFG uses latest config.
self.parser.update_runtime_variables()
def _add_parser_mod(self, mod_name):
"""Shortcut for updating parser modules."""
self.parser.modules.add(mod_name + "_module")
self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
"""Assumes mods-available, mods-enabled layout."""
# Generate reversal command.
# Try to be safe here... check that we can probably reverse before
# applying enmod command
if not util.exe_exists(self.conf("dismod")):
raise errors.MisconfigurationError(
"Unable to find a2dismod, please make sure a2enmod and "
"a2dismod are configured correctly for certbot.")
self.reverter.register_undo_command(
temp, [self.conf("dismod"), mod_name])
util.run_script([self.conf("enmod"), mod_name])
def restart(self):
"""Runs a config test and reloads the Apache server.
:raises .errors.MisconfigurationError: If either the config test
or reload fails.
"""
self.config_test()
self._reload()
def _reload(self):
"""Reloads the Apache server.
:raises .errors.MisconfigurationError: If reload fails
"""
try:
util.run_script(constants.os_constant("restart_cmd"))
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
util.run_script(constants.os_constant("conftest_cmd"))
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = util.run_script(constants.os_constant("version_cmd"))
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" %
constants.os_constant("version_cmd"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
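    # e.g. output containing "Server version: Apache/2.4.7 (Ubuntu)" yields
    # the tuple (2, 4, 7).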
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.TLSSNI01]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
        If this turns out not to be the case in the future, cleanup and
        outstanding challenges will have to be designed better.
"""
self._chall_out.update(achalls)
responses = [None] * len(achalls)
chall_doer = tls_sni_01.ApacheTlsSni01(self)
for i, achall in enumerate(achalls):
# Currently also have chall_doer hold associated index of the
# challenge. This helps to put all of the responses back together
# when they are all complete.
chall_doer.add_chall(achall, i)
sni_response = chall_doer.perform()
if sni_response:
# Must reload in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# TODO: Remove this dirty hack. We need to determine a reliable way
# of identifying when the new configuration is being used.
time.sleep(3)
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(sni_response):
responses[chall_doer.indices[i]] = resp
return responses
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
self.parser.init_modules()
def _get_mod_deps(mod_name):
"""Get known module dependencies.
.. note:: This does not need to be accurate in order for the client to
run. This simply keeps things clean if the user decides to revert
changes.
.. warning:: If all deps are not included, it may cause incorrect parsing
behavior, due to enable_mod's shortcut for updating the parser's
currently defined modules (`.ApacheConfigurator._add_parser_mod`)
This would only present a major problem in extremely atypical
configs that use ifmod for the missing deps.
"""
deps = {
"ssl": ["setenvif", "mime"]
}
return deps.get(mod_name, [])
def get_file_path(vhost_path):
"""Get file path from augeas_vhost_path.
Takes in Augeas path and returns the file name
:param str vhost_path: Augeas virtual host path
:returns: filename of vhost
:rtype: str
"""
if not vhost_path or not vhost_path.startswith("/files/"):
return None
return _split_aug_path(vhost_path)[0]
def get_internal_aug_path(vhost_path):
"""Get the Augeas path for a vhost with the file path removed.
:param str vhost_path: Augeas virtual host path
:returns: Augeas path to vhost relative to the containing file
:rtype: str
"""
return _split_aug_path(vhost_path)[1]
def _split_aug_path(vhost_path):
"""Splits an Augeas path into a file path and an internal path.
After removing "/files", this function splits vhost_path into the
file path and the remaining Augeas path.
:param str vhost_path: Augeas virtual host path
:returns: file path and internal Augeas path
:rtype: `tuple` of `str`
"""
# Strip off /files
file_path = vhost_path[6:]
internal_path = []
# Remove components from the end of file_path until it becomes valid
while not os.path.exists(file_path):
file_path, _, internal_path_part = file_path.rpartition("/")
internal_path.append(internal_path_part)
return file_path, "/".join(reversed(internal_path))
def install_ssl_options_conf(options_ssl, options_ssl_digest):
"""Copy Certbot's SSL options file into the system's config dir if required."""
# XXX if we ever try to enforce a local privilege boundary (eg, running
# certbot for unprivileged users via setuid), this function will need
# to be modified.
return common.install_version_controlled_file(options_ssl, options_ssl_digest,
constants.os_constant("MOD_SSL_CONF_SRC"), constants.ALL_SSL_OPTIONS_HASHES)
| {
"repo_name": "jsha/letsencrypt",
"path": "certbot-apache/certbot_apache/configurator.py",
"copies": "1",
"size": "79961",
"license": "apache-2.0",
"hash": 2641867605399568000,
"line_mean": 38.4285009862,
"line_max": 92,
"alpha_frac": 0.5836720401,
"autogenerated": false,
"ratio": 4.21313030191264,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00024278891159593607,
"num_lines": 2028
} |
"""Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import itertools
import logging
import os
import re
import shutil
import socket
import subprocess
import zope.interface
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import dvsni
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
    State of Configurator: This code has been tested and built for Ubuntu
    14.04 Apache 2.4, and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest', "
"retrieving the Apache2 version number, and initialization "
"parameters.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.CLI_DEFAULTS["dismod"],
help="Path to the Apache 'a2enmod' binary.")
add("init-script", default=constants.CLI_DEFAULTS["init_script"],
help="Path to the Apache init script (used for server "
"reload/restart).")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict()
# Outstanding challenges
self._chall_out = set()
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self._enhance_func = {"redirect": self._enable_redirect}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Make sure configuration is valid
self.config_test()
self.parser = parser.ApacheParser(
self.aug, self.conf("server-root"), self.conf("ctl"))
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Set Version
if self.version is None:
self.version = self.get_version()
if self.version < (2, 2):
raise errors.NotSupportedError(
"Apache Version %s not supported." % str(self.version))
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
temp_install(self.mod_ssl_conf)
def deploy_cert(self, domain, cert_path, key_path, chain_path=None):
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies that
it has located the three directives and finally modifies them to point
to the correct destination. After the certificate is installed, the
VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within letsencrypt though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
vhost = self.choose_vhost(domain)
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
path = {}
path["cert_path"] = self.parser.find_dir(
"SSLCertificateFile", None, vhost.path)
path["cert_key"] = self.parser.find_dir(
"SSLCertificateKeyFile", None, vhost.path)
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
# Throw a "can't find all of the directives" error
logger.warn(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
raise errors.PluginError(
"Unable to find cert and/or key directives")
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
# Assign the final directives; order is maintained in find_dir
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
if not path["chain_path"]:
self.parser.add_dir(
vhost.path, "SSLCertificateChainFile", chain_path)
else:
self.aug.set(path["chain_path"][-1], chain_path)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
# Make sure vhost is enabled
if not vhost.enabled:
self.enable_site(vhost)
def choose_vhost(self, target_name):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
:param str target_name: domain name
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self.assoc[target_name] = vhost
return vhost
return self._choose_vhost_from_list(target_name)
def _choose_vhost_from_list(self, target_name):
# Select a vhost from a list
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is None:
logger.error(
"No vhost exists with servername or alias of: %s. "
"No vhost was selected. Please specify servernames "
"in the Apache config", target_name)
raise errors.PluginError("No vhost selected")
elif not vhost.ssl:
addrs = self._get_proposed_addrs(vhost, "443")
# TODO: Conflicts is too conservative
if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
vhost = self.make_vhost_ssl(vhost)
else:
logger.error(
"The selected vhost would conflict with other HTTPS "
"VirtualHosts within Apache. Please select another "
"vhost or add ServerNames to your configuration.")
raise errors.PluginError(
"VirtualHost not able to be selected.")
self.assoc[target_name] = vhost
return vhost
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 4 - Servername SSL
# Points 3 - Address name with SSL
# Points 2 - Servername no SSL
# Points 1 - Address name with no SSL
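# e.g. a vhost whose ServerName matches target_name and has SSLEngine on
# scores 2 + 2 = 4, beating an address-only, non-SSL match that scores 1.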
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if target_name in vhost.get_names():
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 2
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
reasonable_vhosts = self._non_default_vhosts()
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
for vhost in self.vhosts:
all_names.update(vhost.get_names())
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
return all_names
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
# Take the final ServerName as each overrides the previous
servername_match = self.parser.find_dir(
"ServerName", None, start=host.path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=host.path, exclude=False)
for alias in serveralias_match:
host.aliases.add(self.parser.get_arg(alias))
if servername_match:
# Get last ServerName as each overwrites the previous
host.name = self.parser.get_arg(servername_match[-1])
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
addrs = set()
args = self.aug.match(path + "/arg")
for arg in args:
addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
is_ssl = False
if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
is_ssl = True
filename = get_file_path(path)
is_enabled = self.is_site_enabled(filename)
vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled)
self._add_servernames(vhost)
return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search sites-available, httpd.conf for possible virtual hosts
paths = self.aug.match(
("/files%s/sites-available//*[label()=~regexp('%s')]" %
(self.parser.root, parser.case_i("VirtualHost"))))
vhs = []
for path in paths:
vhs.append(self._create_vhost(path))
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param letsencrypt_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~letsencrypt_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
def prepare_server_https(self, port):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port.
:param str port: Port to listen on
"""
if "ssl_module" not in self.parser.modules:
logger.info("Loading mod_ssl into Apache Server")
self.enable_mod("ssl")
# Check for Listen <port>
# Note: This could be made to also look for ip:443 combo
if not self.parser.find_dir("Listen", port):
logger.debug("No Listen %s directive found. Setting the "
"Apache Server to Listen on port %s", port, port)
if port == "443":
args = [port]
else:
# Non-standard ports should specify https protocol
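# e.g. "8443" yields args of ["8443", "https"], producing the directive
# "Listen 8443 https", while the default "443" keeps the bare "Listen 443".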
args = [port, "https"]
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["listen"]), "Listen", args)
self.save_notes += "Added Listen %s directive to %s\n" % (
port, self.parser.loc["listen"])
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~letsencrypt_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
ssl_fp = self._get_ssl_vhost_path(avail_fp)
self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)
# Reload augeas to take into account the new vhost
self.aug.load()
# Get Vhost augeas path for new vhost
vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(ssl_fp, parser.case_i("VirtualHost")))
if len(vh_p) != 1:
logger.error("Error: should only be one vhost in %s", avail_fp)
raise errors.PluginError("Only one vhost per file is allowed")
else:
# This simplifies the process
vh_p = vh_p[0]
# Update Addresses
self._update_ssl_vhosts_addrs(vh_p)
# Add directives
self._add_dummy_ssl_directives(vh_p)
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
# We know the length is one because of the assertion above
# Create the Vhost object
ssl_vhost = self._create_vhost(vh_p)
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; for these reasons, this is tacked
# on after fully creating the new vhost
# Now check if addresses need to be added as NameBasedVhost addrs
# This is for compliance with versions of Apache < 2.4
self._add_name_vhost_if_necessary(ssl_vhost)
return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
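# e.g. "example.conf" becomes "example" + le_vhost_ext; with the commonly
# used default extension "-le-ssl.conf" that is "example-le-ssl.conf".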
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param str avail_fp: Pointer to the original available non-ssl vhost
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
self.reverter.register_file_creation(False, ssl_fp)
try:
with open(avail_fp, "r") as orig_file:
with open(ssl_fp, "w") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
for line in orig_file:
new_file.write(line)
new_file.write("</IfModule>\n")
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_name_vhost_if_necessary(self, vhost):
"""Add NameVirtualHost Directives if necessary for new vhost.
NameVirtualHosts was a directive in Apache < 2.4
https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost
:param vhost: New virtual host that was recently created.
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr == addr for test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
if need_to_save:
self.save()
############################################################################
# Enhancements
############################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
try:
func(self.choose_vhost(domain), options)
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
raise
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
Adds Redirect directive to the port 80 equivalent of ssl_vhost
First the function attempts to find the vhost with equivalent
ip addresses that serves on non-ssl ports
The function then adds the directive
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:raises .errors.PluginError: If no viable HTTP host can be created or
used for the redirect.
"""
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
# Check if redirection already exists
self._verify_no_redirects(general_vh)
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
self.parser.add_dir(general_vh.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _verify_no_redirects(self, vhost):
"""Checks to see if existing redirect is in place.
Checks to see if virtualhost already contains a rewrite or redirect
returns boolean, integer
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises errors.PluginError: When another redirection exists
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
redirect_path = self.parser.find_dir("Redirect", None, start=vhost.path)
if redirect_path:
# "Existing Redirect directive for virtualhost"
raise errors.PluginError("Existing Redirect present on HTTP vhost.")
if rewrite_path:
# "No existing redirection for virtualhost"
if len(rewrite_path) != len(constants.REWRITE_HTTPS_ARGS):
raise errors.PluginError("Unknown Existing RewriteRule")
for match, arg in itertools.izip(
rewrite_path, constants.REWRITE_HTTPS_ARGS):
if self.aug.get(match) != arg:
raise errors.PluginError("Unknown Existing RewriteRule")
raise errors.PluginError(
"Let's Encrypt has already enabled redirection")
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~letsencrypt_apache.obj.VirtualHost`)
:rtype: tuple
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
self.vhosts.append(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog /var/log/apache2/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(constants.REWRITE_HTTPS_ARGS)))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(
self.parser.root, "sites-available", redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use
"""Return all addrs of vhost with the port replaced with the specified.
:param obj.VirtualHost ssl_vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def get_all_certs_keys(self):
"""Find all existing keys, certs from configuration.
Retrieve all certs and keys set in VirtualHosts on the Apache server
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: list
"""
c_k = set()
for vhost in self.vhosts:
if vhost.ssl:
cert_path = self.parser.find_dir(
"SSLCertificateFile", None,
start=vhost.path, exclude=False)
key_path = self.parser.find_dir(
"SSLCertificateKeyFile", None,
start=vhost.path, exclude=False)
if cert_path and key_path:
cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
key = os.path.abspath(self.parser.get_arg(key_path[-1]))
c_k.add((cert, key, get_file_path(cert_path[-1])))
else:
logger.warning(
"Invalid VirtualHost configuration - %s", vhost.filep)
return c_k
def is_site_enabled(self, avail_fp):
"""Checks to see if the given site is enabled.
.. todo:: fix hardcoded sites-enabled, check os.path.samefile
:param str avail_fp: Complete file path of available site
:returns: Success
:rtype: bool
"""
enabled_dir = os.path.join(self.parser.root, "sites-enabled")
for entry in os.listdir(enabled_dir):
if os.path.realpath(os.path.join(enabled_dir, entry)) == avail_fp:
return True
return False
def enable_site(self, vhost):
"""Enables an available site, Apache restart required.
.. todo:: This function should number subdomains before the domain vhost
.. todo:: Make sure link is not broken...
:param vhost: vhost to enable
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if self.is_site_enabled(vhost.filep):
return
if vhost.ssl:
# TODO: Make this based on addresses
self.prepare_server_https("443")
if self.save_notes:
self.save()
if "/sites-available/" in vhost.filep:
enabled_path = ("%s/sites-enabled/%s" %
(self.parser.root, os.path.basename(vhost.filep)))
self.reverter.register_file_creation(False, enabled_path)
os.symlink(vhost.filep, enabled_path)
vhost.enabled = True
logger.info("Enabling available site: %s", vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
else:
raise errors.NotSupportedError(
"Unsupported filesystem layout. "
"sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
"""Enables module in Apache.
Both enables and restarts Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
:raises .errors.NotSupportedError: If the filesystem layout is not
supported.
:raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
run.
"""
# Support Debian specific setup
if (not os.path.isdir(os.path.join(self.parser.root, "mods-available"))
or not os.path.isdir(
os.path.join(self.parser.root, "mods-enabled"))):
raise errors.NotSupportedError(
"Unsupported directory layout. You may try to enable mod %s "
"and try again." % mod_name)
self._enable_mod_debian(mod_name, temp)
self.save_notes += "Enabled %s module in Apache" % mod_name
logger.debug("Enabled Apache %s module", mod_name)
# Modules can enable additional config files. Variables may be defined
# within these new configuration sections.
# Restart is not necessary as DUMP_RUN_CFG uses latest config.
self.parser.update_runtime_variables(self.conf("ctl"))
self.parser.modules.add(mod_name + "_module")
self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
"""Assumes mods-available, mods-enabled layout."""
# Generate reversal command.
# Try to be safe here... check that we can probably reverse before
# applying enmod command
if not le_util.exe_exists(self.conf("dismod")):
raise errors.MisconfigurationError(
"Unable to find a2dismod, please make sure a2enmod and "
"a2dismod are configured correctly for letsencrypt.")
self.reverter.register_undo_command(
temp, [self.conf("dismod"), mod_name])
le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
"""Restarts apache server.
.. todo:: This function will be converted to using reload
:raises .errors.MisconfigurationError: If unable to restart due to a
configuration problem, or if the restart subprocess cannot be run.
"""
return apache_restart(self.conf("init-script"))
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
le_util.run_script([self.conf("ctl"), "configtest"])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (e.g. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" % self.conf("ctl"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.DVSNI]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
If this turns out not to be the case in the future, cleanup and
outstanding challenges will have to be designed better.
"""
self._chall_out.update(achalls)
responses = [None] * len(achalls)
apache_dvsni = dvsni.ApacheDvsni(self)
for i, achall in enumerate(achalls):
if isinstance(achall, achallenges.DVSNI):
# Currently also have dvsni hold associated index
# of the challenge. This helps to put all of the responses back
# together when they are all complete.
apache_dvsni.add_chall(achall, i)
sni_response = apache_dvsni.perform()
if sni_response:
# Must restart in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(sni_response):
responses[apache_dvsni.indices[i]] = resp
return responses
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
def apache_restart(apache_init_script):
"""Restarts the Apache Server.
:param str apache_init_script: Path to the Apache init script.
.. todo:: Try to use reload instead. (This caused timing problems before)
.. todo:: On failure, this should be a recovery_routine call with another
restart. This will confuse and inhibit developers from testing code
though. This change should happen after
the ApacheConfigurator has been thoroughly tested. The function will
need to be moved into the class again. Perhaps
this version can live on... for testing purposes.
:raises .errors.MisconfigurationError: If unable to restart due to a
configuration problem, or if the restart subprocess cannot be run.
"""
try:
proc = subprocess.Popen([apache_init_script, "restart"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (OSError, ValueError):
logger.fatal(
"Unable to restart the Apache process with %s", apache_init_script)
raise errors.MisconfigurationError(
"Unable to restart Apache process with %s" % apache_init_script)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
# Enter recovery routine...
logger.error("Apache Restart Failed!\n%s\n%s", stdout, stderr)
raise errors.MisconfigurationError(
"Error while restarting Apache:\n%s\n%s" % (stdout, stderr))
def get_file_path(vhost_path):
"""Get file path from augeas_vhost_path.
Takes in Augeas path and returns the file name
:param str vhost_path: Augeas virtual host path
:returns: filename of vhost
:rtype: str
"""
# Strip off /files
avail_fp = vhost_path[6:]
# This can be optimized...
while True:
# Cast both to lowercase to be case insensitive
find_if = avail_fp.lower().find("/ifmodule")
if find_if != -1:
avail_fp = avail_fp[:find_if]
continue
find_vh = avail_fp.lower().find("/virtualhost")
if find_vh != -1:
avail_fp = avail_fp[:find_vh]
continue
break
return avail_fp
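# Quick illustrative self-check with a hypothetical Augeas path; it runs
# only when this module is executed directly, never on import.
if __name__ == "__main__":
_demo = "/files/etc/apache2/sites-available/demo.conf/IfModule/VirtualHost"
assert get_file_path(_demo) == "/etc/apache2/sites-available/demo.conf"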
def temp_install(options_ssl):
"""Temporary install for convenience."""
# WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY
# THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER
# AND TAKEN OUT BEFORE RELEASE, INSTEAD
# SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM.
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(options_ssl):
shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| {
"repo_name": "ruo91/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/configurator.py",
"copies": "2",
"size": "46229",
"license": "apache-2.0",
"hash": -6891212435212530000,
"line_mean": 36.9548440066,
"line_max": 90,
"alpha_frac": 0.599775033,
"autogenerated": false,
"ratio": 4.198056665455867,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020333294953011076,
"num_lines": 1218
} |
apache_conf = """
# Sample apache config for Fabulous django deployment.
NameVirtualHost *:80
<VirtualHost *:80>
WSGIDaemonProcess %(project_name)s \
processes=2 threads=5 display-name=%(project_name)s
#
# URL mapping
#
WSGIScriptAlias / %(conf_path)s/wsgi.py
Alias /media/ %(media_path)s
#
# Filesystem permissions (necessary for above to work)
#
<Directory />
Order deny,allow
deny from all
</Directory>
<Directory %(conf_path)s>
WSGIProcessGroup %(project_name)s
Order allow,deny
allow from all
</Directory>
<Directory %(media_path)s>
Order allow,deny
allow from all
</Directory>
<Directory %(data_path)s>
Order deny,allow
deny from all
</Directory>
</VirtualHost>
"""
wsgi_script = """
import os
import sys
# Add project to our path
sys.path.insert(0, '%(django_project_path)s')
# Activate virtualenv support
activate_this = '%(virtualenv_path)s/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
os.environ['DJANGO_SETTINGS_MODULE'] = '%(django_settings_module)s'
os.environ['PYTHON_EGG_CACHE'] = '%(egg_cache_path)s'
# Preload django environment
settings = __import__(os.environ['DJANGO_SETTINGS_MODULE'])
import django.core.management
django.core.management.setup_environ(settings)
utility = django.core.management.ManagementUtility()
command = utility.fetch_command('runserver')
command.validate()
import django.conf, django.utils
django.utils.translation.activate(django.conf.settings.LANGUAGE_CODE)
# Hand off to django handler
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
"""
| {
"repo_name": "ryanwitt/django-liberace",
"path": "liberace/templates.py",
"copies": "1",
"size": "1716",
"license": "bsd-2-clause",
"hash": -8779147127072716000,
"line_mean": 23.5142857143,
"line_max": 69,
"alpha_frac": 0.6882284382,
"autogenerated": false,
"ratio": 3.5527950310559007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47410234692559006,
"avg_score": null,
"num_lines": null
} |
"""ApacheDVSNI"""
import logging
import os
from letsencrypt_apache import parser
class ApacheDvsni(object):
"""Class performs DVSNI challenges within the Apache configurator.
:ivar configurator: ApacheConfigurator object
:type configurator: :class:`~apache.configurator.ApacheConfigurator`
:ivar list achalls: Annotated :class:`~letsencrypt.achallenges.DVSNI`
challenges.
:param list indices: Meant to hold indices of challenges in a
larger array. ApacheDvsni is capable of solving many challenges
at once which causes an indexing issue within ApacheConfigurator
who must return all responses in order. Imagine ApacheConfigurator
maintaining state about where all of the SimpleHTTPS Challenges,
Dvsni Challenges belong in the response array. This is an optional
utility.
:param str challenge_conf: location of the challenge config file
"""
VHOST_TEMPLATE = """\
<VirtualHost {vhost}>
ServerName {server_name}
UseCanonicalName on
SSLStrictSNIVHostCheck on
LimitRequestBody 1048576
Include {ssl_options_conf_path}
SSLCertificateFile {cert_path}
SSLCertificateKeyFile {key_path}
DocumentRoot {document_root}
</VirtualHost>
"""
def __init__(self, configurator):
self.configurator = configurator
self.achalls = []
self.indices = []
self.challenge_conf = os.path.join(
configurator.config.config_dir, "le_dvsni_cert_challenge.conf")
# self.completed = 0
def add_chall(self, achall, idx=None):
"""Add challenge to DVSNI object to perform at once.
:param achall: Annotated DVSNI challenge.
:type achall: :class:`letsencrypt.achallenges.DVSNI`
:param int idx: index to challenge in a larger array
"""
self.achalls.append(achall)
if idx is not None:
self.indices.append(idx)
def perform(self):
"""Peform a DVSNI challenge."""
if not self.achalls:
return []
# Save any changes to the configuration as a precaution
# About to make temporary changes to the config
self.configurator.save()
addresses = []
default_addr = "*:443"
for achall in self.achalls:
vhost = self.configurator.choose_vhost(achall.domain)
if vhost is None:
logging.error(
"No vhost exists with servername or alias of: %s",
achall.domain)
logging.error("No _default_:443 vhost exists")
logging.error("Please specify servernames in the Apache config")
return None
# TODO - @jdkasten review this code to make sure it makes sense
self.configurator.make_server_sni_ready(vhost, default_addr)
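# Note: the for/else below reaches the else branch only when the loop
# completes without break, i.e. when the vhost has no "_default_" address.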
for addr in vhost.addrs:
if "_default_" == addr.get_addr():
addresses.append([default_addr])
break
else:
addresses.append(list(vhost.addrs))
responses = []
# Create all of the challenge certs
for achall in self.achalls:
responses.append(self._setup_challenge_cert(achall))
# Setup the configuration
self._mod_config(addresses)
# Save reversible changes
self.configurator.save("SNI Challenge", True)
return responses
def _setup_challenge_cert(self, achall, s=None):
# pylint: disable=invalid-name
"""Generate and write out challenge certificate."""
cert_path = self.get_cert_file(achall)
# Register the path before you write out the file
self.configurator.reverter.register_file_creation(True, cert_path)
cert_pem, response = achall.gen_cert_and_response(s)
# Write out challenge cert
with open(cert_path, "w") as cert_chall_fd:
cert_chall_fd.write(cert_pem)
return response
def _mod_config(self, ll_addrs):
"""Modifies Apache config files to include challenge vhosts.
Result: Apache config includes virtual servers for issued challs
:param list ll_addrs: list of list of
:class:`letsencrypt.plugins.apache.obj.Addr` to apply
"""
# TODO: Use ip address of existing vhost instead of relying on FQDN
config_text = "<IfModule mod_ssl.c>\n"
for idx, lis in enumerate(ll_addrs):
config_text += self._get_config_text(self.achalls[idx], lis)
config_text += "</IfModule>\n"
self._conf_include_check(self.configurator.parser.loc["default"])
self.configurator.reverter.register_file_creation(
True, self.challenge_conf)
with open(self.challenge_conf, "w") as new_conf:
new_conf.write(config_text)
def _conf_include_check(self, main_config):
"""Adds DVSNI challenge conf file into configuration.
Adds DVSNI challenge include file if it does not already exist
within mainConfig
:param str main_config: file path to main user apache config file
"""
if len(self.configurator.parser.find_dir(
parser.case_i("Include"), self.challenge_conf)) == 0:
# print "Including challenge virtual host(s)"
self.configurator.parser.add_dir(
parser.get_aug_path(main_config),
"Include", self.challenge_conf)
def _get_config_text(self, achall, ip_addrs):
"""Chocolate virtual server configuration text
:param achall: Annotated DVSNI challenge.
:type achall: :class:`letsencrypt.achallenges.DVSNI`
:param list ip_addrs: addresses of challenged domain
:class:`list` of type :class:`~apache.obj.Addr`
:returns: virtual host configuration text
:rtype: str
"""
ips = " ".join(str(i) for i in ip_addrs)
document_root = os.path.join(
self.configurator.config.config_dir, "dvsni_page/")
# TODO: Python docs are not clear on how multiline string literal
# newlines are parsed on different platforms. At least on
# Linux (Debian sid), when source file uses CRLF, Python still
# parses it as "\n"... c.f.:
# https://docs.python.org/2.7/reference/lexical_analysis.html
return self.VHOST_TEMPLATE.format(
vhost=ips, server_name=achall.nonce_domain,
ssl_options_conf_path=self.configurator.parser.loc["ssl_options"],
cert_path=self.get_cert_file(achall), key_path=achall.key.file,
document_root=document_root).replace("\n", os.linesep)
def get_cert_file(self, achall):
"""Returns standardized name for challenge certificate.
:param achall: Annotated DVSNI challenge.
:type achall: :class:`letsencrypt.achallenges.DVSNI`
:returns: certificate file name
:rtype: str
"""
return os.path.join(
self.configurator.config.work_dir, achall.nonce_domain + ".crt")
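# Illustration only (all values hypothetical): how the class template above
# is rendered; _get_config_text() performs the same substitution with real
# challenge data. This runs only when the module is executed directly.
if __name__ == "__main__":
print(ApacheDvsni.VHOST_TEMPLATE.format(
vhost="*:443", server_name="abc123.acme.invalid",
ssl_options_conf_path="/etc/apache2/options-ssl.conf",
cert_path="/tmp/demo.crt", key_path="/tmp/demo.key",
document_root="/tmp/dvsni_page/"))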
| {
"repo_name": "felixrieseberg/lets-encrypt-preview",
"path": "letsencrypt_apache/dvsni.py",
"copies": "1",
"size": "7071",
"license": "apache-2.0",
"hash": 4050047115574304000,
"line_mean": 34.1791044776,
"line_max": 80,
"alpha_frac": 0.6284825343,
"autogenerated": false,
"ratio": 4.017613636363636,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019234561435835766,
"num_lines": 201
} |
"""ApacheDVSNI"""
import os
from letsencrypt.plugins import common
from letsencrypt_apache import obj
from letsencrypt_apache import parser
class ApacheDvsni(common.Dvsni):
"""Class performs DVSNI challenges within the Apache configurator.
:ivar configurator: ApacheConfigurator object
:type configurator: :class:`~apache.configurator.ApacheConfigurator`
:ivar list achalls: Annotated :class:`~letsencrypt.achallenges.DVSNI`
challenges.
:param list indices: Meant to hold indices of challenges in a
larger array. ApacheDvsni is capable of solving many challenges
at once which causes an indexing issue within ApacheConfigurator
who must return all responses in order. Imagine ApacheConfigurator
maintaining state about where all of the SimpleHTTP Challenges,
Dvsni Challenges belong in the response array. This is an optional
utility.
:param str challenge_conf: location of the challenge config file
"""
VHOST_TEMPLATE = """\
<VirtualHost {vhost}>
ServerName {server_name}
UseCanonicalName on
SSLStrictSNIVHostCheck on
LimitRequestBody 1048576
Include {ssl_options_conf_path}
SSLCertificateFile {cert_path}
SSLCertificateKeyFile {key_path}
DocumentRoot {document_root}
</VirtualHost>
"""
def __init__(self, *args, **kwargs):
super(ApacheDvsni, self).__init__(*args, **kwargs)
self.challenge_conf = os.path.join(
self.configurator.conf("server-root"),
"le_dvsni_cert_challenge.conf")
def perform(self):
"""Perform a DVSNI challenge."""
if not self.achalls:
return []
# Save any changes to the configuration as a precaution
# About to make temporary changes to the config
self.configurator.save()
# Prepare the server for HTTPS
self.configurator.prepare_server_https(
str(self.configurator.config.dvsni_port), True)
responses = []
# Create all of the challenge certs
for achall in self.achalls:
responses.append(self._setup_challenge_cert(achall))
# Setup the configuration
dvsni_addrs = self._mod_config()
self.configurator.make_addrs_sni_ready(dvsni_addrs)
# Save reversible changes
self.configurator.save("SNI Challenge", True)
return responses
def _mod_config(self):
"""Modifies Apache config files to include challenge vhosts.
Result: Apache config includes virtual servers for issued challs
:returns: All DVSNI addresses used
:rtype: set
"""
dvsni_addrs = set()
config_text = "<IfModule mod_ssl.c>\n"
for achall in self.achalls:
achall_addrs = self.get_dvsni_addrs(achall)
dvsni_addrs.update(achall_addrs)
config_text += self._get_config_text(achall, achall_addrs)
config_text += "</IfModule>\n"
self._conf_include_check(self.configurator.parser.loc["default"])
self.configurator.reverter.register_file_creation(
True, self.challenge_conf)
with open(self.challenge_conf, "w") as new_conf:
new_conf.write(config_text)
return dvsni_addrs
def get_dvsni_addrs(self, achall):
"""Return the Apache addresses needed for DVSNI."""
vhost = self.configurator.choose_vhost(achall.domain)
# TODO: Checkout _default_ rules.
dvsni_addrs = set()
default_addr = obj.Addr(("*", str(self.configurator.config.dvsni_port)))
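# e.g. an existing "1.2.3.4:80" address is rewritten to
# "1.2.3.4:<dvsni_port>" by get_sni_addr(), while "_default_" collapses
# to the wildcard default_addr above.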
for addr in vhost.addrs:
if "_default_" == addr.get_addr():
dvsni_addrs.add(default_addr)
else:
dvsni_addrs.add(
addr.get_sni_addr(self.configurator.config.dvsni_port))
return dvsni_addrs
def _conf_include_check(self, main_config):
"""Adds DVSNI challenge conf file into configuration.
Adds DVSNI challenge include file if it does not already exist
within mainConfig
:param str main_config: file path to main user apache config file
"""
if len(self.configurator.parser.find_dir(
parser.case_i("Include"), self.challenge_conf)) == 0:
# print "Including challenge virtual host(s)"
self.configurator.parser.add_dir(
parser.get_aug_path(main_config),
"Include", self.challenge_conf)
def _get_config_text(self, achall, ip_addrs):
"""Chocolate virtual server configuration text
:param achall: Annotated DVSNI challenge.
:type achall: :class:`letsencrypt.achallenges.DVSNI`
:param list ip_addrs: addresses of challenged domain
:class:`list` of type `~.obj.Addr`
:returns: virtual host configuration text
:rtype: str
"""
ips = " ".join(str(i) for i in ip_addrs)
document_root = os.path.join(
self.configurator.config.work_dir, "dvsni_page/")
# TODO: Python docs are not clear on how multiline string literal
# newlines are parsed on different platforms. At least on
# Linux (Debian sid), when source file uses CRLF, Python still
# parses it as "\n"... c.f.:
# https://docs.python.org/2.7/reference/lexical_analysis.html
return self.VHOST_TEMPLATE.format(
vhost=ips,
server_name=achall.gen_response(achall.account_key).z_domain,
ssl_options_conf_path=self.configurator.mod_ssl_conf,
cert_path=self.get_cert_path(achall),
key_path=self.get_key_path(achall),
document_root=document_root).replace("\n", os.linesep)
| {
"repo_name": "rlustin/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/dvsni.py",
"copies": "28",
"size": "5735",
"license": "apache-2.0",
"hash": 6575487269193468000,
"line_mean": 32.5380116959,
"line_max": 80,
"alpha_frac": 0.6359197908,
"autogenerated": false,
"ratio": 3.904016337644656,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""ApacheDVSNI"""
import os
from letsencrypt.plugins import common
from letsencrypt_apache import parser
class ApacheDvsni(common.Dvsni):
"""Class performs DVSNI challenges within the Apache configurator.
:ivar configurator: ApacheConfigurator object
:type configurator: :class:`~apache.configurator.ApacheConfigurator`
:ivar list achalls: Annotated :class:`~letsencrypt.achallenges.DVSNI`
challenges.
:param list indices: Meant to hold indices of challenges in a
larger array. ApacheDvsni is capable of solving many challenges
at once which causes an indexing issue within ApacheConfigurator
who must return all responses in order. Imagine ApacheConfigurator
maintaining state about where all of the SimpleHTTP Challenges,
Dvsni Challenges belong in the response array. This is an optional
utility.
:param str challenge_conf: location of the challenge config file
"""
VHOST_TEMPLATE = """\
<VirtualHost {vhost}>
ServerName {server_name}
UseCanonicalName on
SSLStrictSNIVHostCheck on
LimitRequestBody 1048576
Include {ssl_options_conf_path}
SSLCertificateFile {cert_path}
SSLCertificateKeyFile {key_path}
DocumentRoot {document_root}
</VirtualHost>
"""
def perform(self):
"""Peform a DVSNI challenge."""
if not self.achalls:
return []
# Save any changes to the configuration as a precaution
# About to make temporary changes to the config
self.configurator.save()
addresses = []
default_addr = "*:443"
for achall in self.achalls:
vhost = self.configurator.choose_vhost(achall.domain)
# TODO - @jdkasten review this code to make sure it makes sense
self.configurator.make_server_sni_ready(vhost, default_addr)
for addr in vhost.addrs:
if "_default_" == addr.get_addr():
addresses.append([default_addr])
break
else:
addresses.append(list(vhost.addrs))
responses = []
# Create all of the challenge certs
for achall in self.achalls:
responses.append(self._setup_challenge_cert(achall))
# Setup the configuration
self._mod_config(addresses)
# Save reversible changes
self.configurator.save("SNI Challenge", True)
return responses
def _mod_config(self, ll_addrs):
"""Modifies Apache config files to include challenge vhosts.
Result: Apache config includes virtual servers for issued challs
:param list ll_addrs: list of list of `~.common.Addr` to apply
"""
# TODO: Use ip address of existing vhost instead of relying on FQDN
config_text = "<IfModule mod_ssl.c>\n"
for idx, lis in enumerate(ll_addrs):
config_text += self._get_config_text(self.achalls[idx], lis)
config_text += "</IfModule>\n"
self._conf_include_check(self.configurator.parser.loc["default"])
self.configurator.reverter.register_file_creation(
True, self.challenge_conf)
with open(self.challenge_conf, "w") as new_conf:
new_conf.write(config_text)
def _conf_include_check(self, main_config):
"""Adds DVSNI challenge conf file into configuration.
Adds DVSNI challenge include file if it does not already exist
within mainConfig
:param str main_config: file path to main user apache config file
"""
if len(self.configurator.parser.find_dir(
parser.case_i("Include"), self.challenge_conf)) == 0:
# print "Including challenge virtual host(s)"
self.configurator.parser.add_dir(
parser.get_aug_path(main_config),
"Include", self.challenge_conf)
def _get_config_text(self, achall, ip_addrs):
"""Chocolate virtual server configuration text
:param achall: Annotated DVSNI challenge.
:type achall: :class:`letsencrypt.achallenges.DVSNI`
:param list ip_addrs: addresses of challenged domain
:class:`list` of type `~.common.Addr`
:returns: virtual host configuration text
:rtype: str
"""
ips = " ".join(str(i) for i in ip_addrs)
document_root = os.path.join(
self.configurator.config.work_dir, "dvsni_page/")
# TODO: Python docs are not clear on how multiline string literal
# newlines are parsed on different platforms. At least on
# Linux (Debian sid), when source file uses CRLF, Python still
# parses it as "\n"... c.f.:
# https://docs.python.org/2.7/reference/lexical_analysis.html
return self.VHOST_TEMPLATE.format(
vhost=ips, server_name=achall.nonce_domain,
ssl_options_conf_path=self.configurator.parser.loc["ssl_options"],
cert_path=self.get_cert_path(achall),
key_path=self.get_key_path(achall),
document_root=document_root).replace("\n", os.linesep)
| {
"repo_name": "stewnorriss/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/dvsni.py",
"copies": "4",
"size": "5114",
"license": "apache-2.0",
"hash": 2907816355525054000,
"line_mean": 33.7891156463,
"line_max": 78,
"alpha_frac": 0.6384434885,
"autogenerated": false,
"ratio": 4.05229793977813,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6690741428278131,
"avg_score": null,
"num_lines": null
} |
"""ApacheLibcloud datastore."""
from datetime import datetime
from io import BytesIO
from itertools import islice
from cloud_browser.app_settings import settings
from cloud_browser.cloud import base, errors
from cloud_browser.common import SEP, requires
###############################################################################
# Constants / Conditional Imports
###############################################################################
try:
import libcloud # pylint: disable=F0401
except ImportError:
libcloud = None # pylint: disable=C0103
DATE_FORMAT = "%a, %d %b %Y %H:%M:%S %Z"
###############################################################################
# Classes
###############################################################################
class ApacheLibcloudExceptionWrapper(errors.CloudExceptionWrapper):
"""ApacheLibcloud :mod:`cloudfiles` exception translator."""
@classmethod
@requires(libcloud, "libcloud")
def lazy_translations(cls):
"""Lazy translations."""
types = libcloud.storage.types
return {
types.ContainerDoesNotExistError: errors.NoContainerException,
types.ObjectDoesNotExistError: errors.NoObjectException,
}
class ApacheLibcloudObject(base.CloudObject):
"""ApacheLibcloud object wrapper."""
def _get_object(self):
"""Return native storage object."""
return self.container.native_container.get_object(self.name)
def _read(self):
"""Return contents of object."""
stream = self.native_obj.as_stream()
content = BytesIO()
content.writelines(stream)
content.seek(0)
return content.read()
@classmethod
def from_libcloud(cls, container, obj):
"""Create object from `libcloud.storage.base.Object`."""
try:
last_modified = obj.extra["last_modified"]
last_modified = datetime.strptime(last_modified, DATE_FORMAT)
except (KeyError, ValueError):
last_modified = None
return cls(
container,
name=obj.name,
size=obj.size,
content_encoding=obj.extra.get("content_encoding"),
content_type=obj.extra.get("content_type"),
last_modified=last_modified,
obj_type=cls.type_cls.FILE,
)
class ApacheLibcloudContainer(base.CloudContainer):
"""ApacheLibcloud container wrapper."""
#: Storage object child class.
obj_cls = ApacheLibcloudObject
#: Exception translations.
wrap_libcloud_errors = ApacheLibcloudExceptionWrapper()
def _get_container(self):
"""Return native container object."""
return self.conn.native_conn.get_container(self.name)
@wrap_libcloud_errors
def get_objects(
self, path, marker=None, limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT
):
"""Get objects."""
client = self.conn.native_conn
path = path.rstrip(SEP) + SEP if path else ""
dirs = set()
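# The generator below yields files directly under `path` as FILE objects
# and one SUBDIR pseudo-object per distinct first-level subdirectory,
# using the `dirs` set to deduplicate.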
def get_files_and_directories(items):
for item in items:
suffix = item.name[len(path) :]
subdirs = suffix.split(SEP)
is_file = len(subdirs) == 1
if is_file:
yield self.obj_cls.from_libcloud(self, item)
continue
subdir = subdirs[0]
if subdir in dirs:
continue
dirs.add(subdir)
yield self.obj_cls(
self,
name=(path + SEP + subdir).lstrip(SEP),
obj_type=self.obj_cls.type_cls.SUBDIR,
)
objs = client.iterate_container_objects(self.native_container, path)
objs = get_files_and_directories(objs)
objs = islice(objs, limit)
return list(objs)
@wrap_libcloud_errors
def get_object(self, path):
"""Get single object."""
obj = self.native_container.get_object(path)
return self.obj_cls.from_libcloud(self, obj)
@classmethod
def from_libcloud(cls, conn, container):
"""Create container from `libcloud.storage.base.Container`."""
return cls(conn, container.name)
class ApacheLibcloudConnection(base.CloudConnection):
"""ApacheLibcloud connection wrapper."""
#: Container child class.
cont_cls = ApacheLibcloudContainer
#: Exception translations.
wrap_libcloud_errors = ApacheLibcloudExceptionWrapper()
def __init__(
self, provider, account, secret_key, host=None, port=None, secure=True
):
"""Initializer."""
super(ApacheLibcloudConnection, self).__init__(account, secret_key)
self.provider = provider
self.host = host
self.port = port
self.secure = secure
@wrap_libcloud_errors
@requires(libcloud, "libcloud")
def _get_connection(self):
"""Return native connection object."""
driver = libcloud.get_driver(libcloud.DriverType.STORAGE, self.provider.lower())
return driver(
self.account,
self.secret_key,
host=self.host,
port=self.port,
secure=self.secure,
)
@wrap_libcloud_errors
def _get_containers(self):
"""Return available containers."""
return [
self.cont_cls.from_libcloud(self, container)
for container in self.native_conn.iterate_containers()
]
@wrap_libcloud_errors
def _get_container(self, path):
"""Return single container."""
container = self.native_conn.get_container(path)
return self.cont_cls.from_libcloud(self, container)
| {
"repo_name": "ryan-roemer/django-cloud-browser",
"path": "cloud_browser/cloud/apache_libcloud.py",
"copies": "1",
"size": "5718",
"license": "mit",
"hash": 7292140827018596000,
"line_mean": 30.4175824176,
"line_max": 88,
"alpha_frac": 0.5772997552,
"autogenerated": false,
"ratio": 4.541699761715647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5618999516915648,
"avg_score": null,
"num_lines": null
} |
from flask.ext.restful import reqparse
########################################################################################################################
# #
# Common parser for images #
# #
########################################################################################################################
depictor_base_arg_parser = reqparse.RequestParser()
# If we should reparse connectivity, aromaticity, stereochemistry, hydrogens and formal charges
depictor_base_arg_parser.add_argument('reparse', type=bool, default=False, location='args')
# If we should keep the molecule title (if using an SDF or other file format with titles)
depictor_base_arg_parser.add_argument('keeptitle', type=bool, default=False, location='args')
# The title location (top or bottom), if we have a title
depictor_base_arg_parser.add_argument('titleloc', type=str, default='top', location='args')
# The image format (png, svg, pdf, etc.)
depictor_base_arg_parser.add_argument('format', type=str, default='png', location='args')
# The molecule title
depictor_base_arg_parser.add_argument('title', type=str, default='', location='args')
# If the molecule string is gzipped and then base64-encoded
depictor_base_arg_parser.add_argument('gz', type=bool, default=False, location='args')
# If bond widths should scale with the size of the image
depictor_base_arg_parser.add_argument('scalebonds', type=bool, default=False, location='args')
# Background color of image
depictor_base_arg_parser.add_argument('background', type=str, default="#ffffff00", location='args')
# Debug mode
depictor_base_arg_parser.add_argument('debug', type=bool, default=False, location='args')
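

# Illustrative sketch, not part of the original module: composing a depiction
# query string from the arguments declared above. The route mentioned in the
# comment is the one used by the package's test suite; the values here are
# placeholders.
if __name__ == '__main__':
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2
    params = {
        'format': 'svg',            # render as SVG instead of the PNG default
        'title': 'aspirin',         # explicit image title
        'titleloc': 'bottom',       # draw the title under the structure
        'background': '#ffffff00',  # transparent background
        'debug': 'true',            # return JSON errors instead of error images
    }
    # e.g. GET /v1/depict/structure/smiles?val=c1ccccc1&<query below>
    print(urlencode(params))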
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/resources/depict/base.py",
"copies": "1",
"size": "2812",
"license": "apache-2.0",
"hash": -8475538827221935000,
"line_mean": 57.5833333333,
"line_max": 120,
"alpha_frac": 0.6081081081,
"autogenerated": false,
"ratio": 4.326153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5434261954253846,
"avg_score": null,
"num_lines": null
} |
from flask import Response
import base64
import zlib
# noinspection PyUnresolvedReferences
import sys
from openeye.oechem import *
from openeye.oedepict import *
############################
# Python 2/3 Compatibility #
############################
try:
from gzip import compress
except ImportError:
# Compress for Python 2.x
def compress(s):
# Imports just for this function
import StringIO
import gzip
# Write the compressed string
sio = StringIO.StringIO()
with gzip.GzipFile(fileobj=sio, mode='w') as gz:
gz.write(s)
gz.close()
return sio.getvalue()
# To support unicode as UTF-8 in Python 2 and 3
if sys.version_info < (3,):
def to_utf8(u):
return u.encode('utf-8')
else:
def to_utf8(u):
return u
########################################################################################################################
# #
# CONSTANT DICTIONARIES #
# #
# The following dictionaries are used to translate supported file formats, etc. Feel free to add more to these. They #
# are meant to only be used within the utility functions exposed by this module. #
# #
########################################################################################################################
# Dictionary of supported image MIME types
__mime_types = {
'svg': 'image/svg+xml',
'png': 'image/png',
'pdf': 'application/pdf',
'ps': 'application/postscript'
}
# Dictionary of OpenEye title locations
__title_locations = {
'top': OETitleLocation_Top,
'bottom': OETitleLocation_Bottom
}
# Substructure highlight styles
__highlight_styles = {
'default': OEHighlightStyle_Default,
'ballandstick': OEHighlightStyle_BallAndStick,
'stick': OEHighlightStyle_Stick,
'color': OEHighlightStyle_Color,
'cogwheel': OEHighlightStyle_Cogwheel
}
########################################################################################################################
# #
# Utility Functions #
# #
########################################################################################################################
def get_color_from_rgba(rgba):
"""
Get an OpenEye color from an RRGGBBAA string
:param rgba: The RRGGBBAA hex string
:return: An OEColor object corresponding to the RGBA color
"""
rgba = rgba.replace('#', '')
# Check if we have valid hex
try:
int(rgba, 16)
except ValueError:
raise ValueError("Invalid RGBA string: {0}".format(rgba))
return OEColor("#{0}".format(rgba))
def get_title_location(location):
"""
Returns an OETitleLocation or None if the title location is not recognized
:param location: The text title location (top | bottom)
:return: The corresponding OETitleLocation or None if location is not a valid OETitleLocation
"""
return __title_locations.get(location.lower())
def get_image_mime_type(ext):
"""
Returns an image MIME type from common image extensions
:param ext: The image extension
:return: The image MIME type or None if the image extension is not known
"""
return __mime_types.get(ext.replace('.', '').lower())
def get_highlight_style(style):
"""
Returns an OEHighlightStyle corresponding to a text style name
:param style: The text style name
:type style: str
:return: The OEHighlightStyle or None if the style name is not known
"""
return __highlight_styles.get(style.lower())
def render_error_image(width, height, message="Error depicting molecule"):
"""
Render an image with error text
:param width: The image width
:type width: int or float
:param height: The image height
:type height: int or float
:param message: The error text to put on the image (WARNING: does not wrap)
:type message: str
:return: An HTTP response with the error image
"""
image = OEImage(width, height)
font = OEFont(OEFontFamily_Helvetica, OEFontStyle_Default, 20, OEAlignment_Center, OERed)
image.DrawText(OE2DPoint(image.GetWidth()/2.0, image.GetHeight()/2.0), message, font, image.GetWidth())
# Render the image
img_content = OEWriteImageToString('png', image)
return Response(img_content, mimetype='image/png')
def compress_string(s):
"""
Gzip and then b64 encode a string
:param s: The string to encode
:type s: str
:return: The b64 encoded gzipped string
:rtype: str
"""
return base64.b64encode(compress(s.encode("utf-8"))).decode("utf-8")
def inflate_string(s):
"""
Inflate a gzipped and b64 encoded string
:param s: The string to inflate
:type s: str
:return: The inflated string
:rtype: str
"""
return zlib.decompress(base64.b64decode(s.encode('utf-8')), zlib.MAX_WBITS | 16).decode('utf-8')
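

# Illustrative sketch, not part of the original module: compress_string and
# inflate_string are inverses -- compress() emits a gzip container, and
# zlib.MAX_WBITS | 16 tells zlib to expect that gzip header.
def _roundtrip_demo(text=u'c1ccccc1'):
    """Assert that a string survives compress_string -> inflate_string."""
    packed = compress_string(text)  # gzip + base64; URL-safe after quoting
    assert inflate_string(packed) == text
    return packed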
def read_molecule_from_string(mol_string, extension, gz=False, reparse=False):
"""
Read a molecule from a molecule string
:param mol_string: The molecule represented as a string
:type mol_string: str
:param extension: The file extension indicating the file format of mol_string
:type extension: str
:param gz: Whether mol_string is a base64-encoded gzip
:type gz: bool
:param reparse: Whether we should reparse connectivity, bond orders, stereo, etc.,
:type reparse: bool
:return: The OEGraphMol representation of the molecule
:rtype: OEGraphMol
"""
mol = OEGraphMol()
# Create the molecule input stream
ifs = oemolistream()
# Get the molecule format
if extension.lower() == "smiles":
mol_format = OEFormat_SMI
else:
mol_format = OEGetFileType(to_utf8(extension))
if mol_format == OEFormat_UNDEFINED:
raise Exception("Invalid molecule format: " + extension)
ifs.SetFormat(mol_format)
# Open stream to the molecule string
if gz:
ok = ifs.openstring(inflate_string(mol_string))
else:
ok = ifs.openstring(mol_string)
# If opening the molecule string was not OK
if not ok:
raise Exception("Error opening molecule")
# If we opened the stream then read the molecule
ok = OEReadMolecule(ifs, mol)
# If reading the molecule was not OK
if not ok:
raise Exception("Invalid molecule")
# If we are reparsing the molecule
if reparse:
OEDetermineConnectivity(mol)
OEFindRingAtomsAndBonds(mol)
OEPerceiveBondOrders(mol)
OEAssignImplicitHydrogens(mol)
OEAssignFormalCharges(mol)
return mol
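

# Illustrative sketch, not part of the original module: parsing a SMILES
# string through the helper above and writing it back out canonically.
if __name__ == '__main__':
    benzene = read_molecule_from_string('c1ccccc1', 'smiles')
    print(OEMolToSmiles(benzene))  # canonical SMILES of the parsed molecule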
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/common/util.py",
"copies": "1",
"size": "8398",
"license": "apache-2.0",
"hash": 871569826599552900,
"line_mean": 34.8888888889,
"line_max": 120,
"alpha_frac": 0.5579899976,
"autogenerated": false,
"ratio": 4.399161864850707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5457151862450706,
"avg_score": null,
"num_lines": null
} |
from oemicroservices.api import app
# FIXME Known 2015.Feb error: threaded=True will cause the OEDocking call in interaction.py to core dump
if __name__ == '__main__':
app.run(debug=False, threaded=False)
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "server.py",
"copies": "1",
"size": "1062",
"license": "apache-2.0",
"hash": -2187679873858167300,
"line_mean": 39.8461538462,
"line_max": 104,
"alpha_frac": 0.7598870056,
"autogenerated": false,
"ratio": 3.962686567164179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5222573572764179,
"avg_score": null,
"num_lines": null
} |
from openeye.oechem import *
class OEHasResidueName(OEUnaryAtomPred):
"""
Predicate functor to get atoms with a specific residue name
"""
def __init__(self, resn):
"""
Default constructor
:param resn: The residue name
:type resn: str
:return:
"""
OEUnaryAtomPred.__init__(self)
self.resn = resn
def __call__(self, atom):
"""
Automatically called on each atom in an OEMol
:param atom: The atom the functor is evaluating
:type atom: OEAtomBase
:return: True if the functor evaluates to true
"""
res = OEAtomGetResidue(atom)
return res.GetName() == self.resn
def CreateCopy(self):
# __disown__ is required to allow C++ to take ownership of this
# object and its memory
return OEHasResidueName(self.resn).__disown__()
def generate_ligand_functor(chain=None, resi=None, resn=None):
"""
Generate the predicate functor to select the ligand atoms out of the
protein-ligand complex. This should be called with at least one
of the parameters not None.
:param chain: The chain ID of the ligand
:type chain: str
:param resi: The residue number of the ligand
"type resi: int
:param resn: The residue name of the ligand
:type resn: str
:return: A functor for selecting a ligand
:rtype: OEUnaryAtomPred
"""
functor = OEIsTrueAtom()
# If we got a chain ID
if chain is not None:
functor = OEAndAtom(functor, OEHasChainID(ord(chain[0])))
# If we got a residue number
if resi is not None:
functor = OEAndAtom(functor, OEHasResidueNumber(int(resi)))
# If we got a residue name
if resn is not None:
functor = OEAndAtom(functor, OEHasResidueName(resn))
return functor
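

# Illustrative sketch, not part of the original module: composing predicates
# to carve a ligand out of a complex. The input path and selection values are
# placeholders.
if __name__ == '__main__':
    complex_mol = OEGraphMol()
    ifs = oemolistream('complex.pdb')  # hypothetical PDB file
    OEReadMolecule(ifs, complex_mol)
    ligand = OEGraphMol()
    functor = generate_ligand_functor(chain='A', resn='LIG')
    OESubsetMol(ligand, complex_mol, functor, False, False)
    print('%d ligand atoms selected' % ligand.NumAtoms())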
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/common/functor.py",
"copies": "1",
"size": "2688",
"license": "apache-2.0",
"hash": 8019828669527212000,
"line_mean": 33.0253164557,
"line_max": 72,
"alpha_frac": 0.6752232143,
"autogenerated": false,
"ratio": 3.712707182320442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9887930396620441,
"avg_score": 0,
"num_lines": 79
} |
from setuptools import setup
setup(
name='OEMicroservices',
version='1.2',
packages=['oemicroservices', 'oemicroservices.test', 'oemicroservices.common', 'oemicroservices.resources',
'oemicroservices.resources.depict', 'oemicroservices.resources.convert'],
url='https://github.com/OpenEye-Contrib/OEMicroservices',
license='MIT',
author='Scott Arne Johnson',
author_email='scott.johnson6@merck.com',
description='Collection of useful microservices using the OpenEye toolkits',
test_suite='oemicroservices.test',
install_requires=['flask', 'flask-restful']
)
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "setup.py",
"copies": "1",
"size": "1465",
"license": "apache-2.0",
"hash": -3204714593086559000,
"line_mean": 39.6944444444,
"line_max": 111,
"alpha_frac": 0.7467576792,
"autogenerated": false,
"ratio": 3.785529715762274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5032287394962274,
"avg_score": null,
"num_lines": null
} |
from unittest import TestCase
import os
from openeye.oechem import *
from oemicroservices.common.functor import generate_ligand_functor
# Define the resource files relative to this test file because setup.py will run from the root package directory
# but some IDEs will run the tests from within the tests directory. We can be friendly to everybody.
PDB_FILE = os.path.join(os.path.dirname(__file__), 'assets/4s0v.pdb')
class TestInteractionDepictor(TestCase):
def test_generate_ligand_functor(self):
"""
Test generating a simple ligand functor
"""
# Read the molecule
mol = OEGraphMol()
ifs = oemolistream(PDB_FILE)
OEReadMolecule(ifs, mol)
# Generate the taxol functor
functor = generate_ligand_functor(resn='SUV')
self.assertEqual(55, OECount(mol, functor), 'Count residue atoms with functor')
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/test/test_misc.py",
"copies": "1",
"size": "1738",
"license": "apache-2.0",
"hash": -1692197566039932000,
"line_mean": 37.6222222222,
"line_max": 112,
"alpha_frac": 0.7359033372,
"autogenerated": false,
"ratio": 3.8451327433628317,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5081036080562832,
"avg_score": null,
"num_lines": null
} |
from unittest import TestCase
import os
try:
# Python 3.x
from urllib.parse import quote
except ImportError:
# Python 2.x
from urllib import quote
from oemicroservices.common.util import compress_string
from oemicroservices.api import app
# Define the resource files relative to this test file because setup.py will run from the root package directory
# but some IDEs will run the tests from within the tests directory. We can be friendly to everybody.
LIGAND_FILE = os.path.join(os.path.dirname(__file__), 'assets/suv.pdb')
# TODO Implement image comparison tests - rendering occurs differently on each platform, so must use similarity
class TestMoleculeDepictor(TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def test_get_smiles(self):
response = self.app.get('/v1/depict/structure/smiles?val=c1ccccc1&debug=true')
self.assertEqual("200 OK", response.status)
def test_get_b64_smiles(self):
# Compress and encode
url_compressed = quote(compress_string('c1ccccc1'))
response = self.app.get('/v1/depict/structure/smiles?val={0}&debug=true&gz=true'.format(url_compressed))
self.assertEqual("200 OK", response.status)
def test_get_b64_pdb(self):
with open(LIGAND_FILE, 'r') as f:
ligand = f.read()
# Compress and encode
url_compressed = quote(compress_string(ligand))
response = self.app.get('/v1/depict/structure/pdb?val={0}&debug=true&gz=true'.format(url_compressed))
self.assertEqual("200 OK", response.status)
def test_invalid_file_format(self):
response = self.app.get('/v1/depict/structure/invalid?val=c1ccccc1&debug=true')
self.assertEqual("400 BAD REQUEST", response.status)
self.assertEqual('{"error": "Invalid molecule format: invalid"}', response.data.decode('utf-8'))
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/test/test_moleculeDepictor.py",
"copies": "1",
"size": "2754",
"license": "apache-2.0",
"hash": 2083266917536613400,
"line_mean": 39.5,
"line_max": 112,
"alpha_frac": 0.7171387073,
"autogenerated": false,
"ratio": 3.6867469879518073,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9898150306691238,
"avg_score": 0.0011470777121139312,
"num_lines": 68
} |
import json
from flask.ext.restful import Resource, request
from flask import Response
from openeye.oechem import *
from openeye.oedepict import *
from openeye.oegrapheme import *
from openeye.oedocking import *
from oemicroservices.resources.depict.base import depictor_base_arg_parser
from oemicroservices.common.functor import generate_ligand_functor
from oemicroservices.common.util import (
render_error_image,
get_image_mime_type,
get_color_from_rgba,
get_title_location,
read_molecule_from_string)
########################################################################################################################
# #
# Molecule depictor argument parser #
# #
########################################################################################################################
# Extend the standard image_parser
interaction_arg_parser = depictor_base_arg_parser.copy()
# The image width
interaction_arg_parser.add_argument('width', type=int, default=800, location='args')
# The image height
interaction_arg_parser.add_argument('height', type=int, default=600, location='args')
# Include a legend with the image
interaction_arg_parser.add_argument('legend', type=bool, default=True, location='args')
# Parameters for POST: Find the ligand
interaction_arg_parser.add_argument('chain', type=str, location='args') # Ligand chain ID
interaction_arg_parser.add_argument('resi', type=int, location='args') # Ligand residue number
interaction_arg_parser.add_argument('resn', type=str, location='args') # Ligand residue name
########################################################################################################################
# #
# Utility Functions #
# #
########################################################################################################################
def _render_image(receptor, ligand, args):
"""
Render a receptor-ligand interaction image
:param receptor: The receptor
:type receptor OEMol
:param ligand: The bound ligand
:type ligand: OEMol
:param args: The parsed URL query string dictionary
:type args: dict
:return: A Flask Response with the rendered image
:rtype: Response
"""
# *********************************************************************
# * Parse Parameters *
# *********************************************************************
width = args['width'] # Image width
height = args['height'] # Image height
title = args['title'] # Image title
use_molecule_title = bool(args['keeptitle']) # Use the molecule title in the molecule file
bond_scaling = bool(args['scalebonds']) # Bond width scales with size
image_format = args['format'] # The output image format
image_mimetype = get_image_mime_type(image_format) # MIME type corresponding to the image format
title_location = get_title_location(args['titleloc']) # The OpenEye title location (if we have a title)
legend = bool(args['legend']) # Display a legend
background = get_color_from_rgba(args['background']) # Background color
# Make sure we got valid inputs
if not image_mimetype:
raise Exception("Invalid MIME type")
if not title_location:
title_location = OETitleLocation_Top
# *********************************************************************
# * Create the Image *
# *********************************************************************
image = OEImage(width, height)
# Compute the image frame sizes
    cwidth = width if not legend else 0.80 * width
    lwidth = width if not legend else 0.20 * width
    loffset = 0.0 if not legend else 0.80 * width
cframe = OEImageFrame(image, cwidth, height, OE2DPoint(0.0, 0.0))
lframe = OEImageFrame(image, lwidth, height, OE2DPoint(loffset, 0.0))
# Prepare the depiction
opts = OE2DActiveSiteDisplayOptions(cframe.GetWidth(), cframe.GetHeight())
# Additional visualization options
opts.SetBondWidthScaling(bond_scaling)
opts.SetBackgroundColor(background)
# Perceive interactions
asite = OEFragmentNetwork(receptor, ligand)
if not asite.IsValid():
raise Exception("The active site is not valid")
# Add optional title
if title:
asite.SetTitle(title)
opts.SetTitleLocation(title_location)
elif use_molecule_title:
asite.SetTitle(ligand.GetTitle())
opts.SetTitleLocation(title_location)
else:
asite.SetTitle("")
opts.SetTitleLocation(OETitleLocation_Hidden)
# Add interactions
OEAddDockingInteractions(asite)
OEPrepareActiveSiteDepiction(asite)
# Render the active site
adisp = OE2DActiveSiteDisplay(asite, opts)
OERenderActiveSite(cframe, adisp)
# Render the legend
    if legend:
lopts = OE2DActiveSiteLegendDisplayOptions(10, 1)
OEDrawActiveSiteLegend(lframe, adisp, lopts)
# Respond with the image
img_content = OEWriteImageToString(image_format, image)
return Response(img_content, mimetype=image_mimetype)
########################################################################################################################
# #
# InteractionDepictor #
# Depict receptor-ligand interactions of pre-split molecules #
# #
# Expects a POST: #
# #
# { #
# ligand: { #
# value: A string that contains the ligand structure #
# format: The file format of the ligand string (e.g. sdf, pdb, oeb, etc.) #
# gz: If the ligand string is gzipped and then b64 encoded #
# }, #
# receptor: { #
# value: A string that contains the receptor structure #
# format: The file format of the receptor string (e.g. sdf, pdb, oeb, egc.) #
# gz: If the receptor string is gzipped and then b64 encoded #
# } #
# } #
# #
########################################################################################################################
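
# Minimal sketch of a payload matching the schema above; the file contents
# are placeholders, and 'gz' is optional (it defaults to False when absent).
_EXAMPLE_INTERACTION_PAYLOAD = {
    'ligand': {'value': '<ligand file string>', 'format': 'pdb', 'gz': False},
    'receptor': {'value': '<receptor file string>', 'format': 'pdb', 'gz': False},
}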
class InteractionDepictor(Resource):
"""
Generate a receptor-ligand interaction map given a receptor and ligand in a JSON object
"""
def __init__(self):
# Call the superclass initializers
super(InteractionDepictor, self).__init__()
# noinspection PyMethodMayBeStatic
def __validate_schema(self, obj):
"""
Validate schema for JSON POST'ed to the resource
:param obj: The parsed JSON object
"""
# Check data
if not obj:
raise Exception("No POST data received")
if not isinstance(obj, dict):
raise Exception("Unexpected POST data received")
# Check ligand
if 'ligand' not in obj:
raise Exception("No ligand data provided in POST")
if 'value' not in obj['ligand']:
raise Exception("No value for ligand file provided in POST")
if 'format' not in obj['ligand']:
raise Exception("No format for ligand file provided in POST")
# Check receptor
if 'receptor' not in obj:
raise Exception("No receptor data provided in POST")
if 'value' not in obj['receptor']:
raise Exception("No value for receptor file provided in POST")
if 'format' not in obj['receptor']:
raise Exception("No format for receptor file provided in POST")
def post(self):
"""
Render JSON that has been POST'ed to this resource
:return: A Flask Response with the rendered image
:rtype: Response
"""
# Parse the query options
args = interaction_arg_parser.parse_args()
try:
            # We expect a JSON object in request.data with the protein and ligand data structures
payload = json.loads(request.data.decode("utf-8"))
self.__validate_schema(payload)
# Try to read the ligand from the payload
try:
ligand = read_molecule_from_string(
payload['ligand']['value'],
payload['ligand']['format'],
payload['ligand']['gz'] if 'gz' in payload['ligand'] else False,
args['reparse']
)
except Exception as ex:
message = "Error reading ligand"
if args['debug']:
message += ": {0}".format(str(ex))
raise Exception(message)
# Try to read the receptor from the payload
try:
receptor = read_molecule_from_string(
payload['receptor']['value'],
payload['receptor']['format'],
payload['receptor']['gz'] if 'gz' in payload['receptor'] else False,
args['reparse']
)
except Exception as ex:
message = "Error reading receptor"
if args['debug']:
message += ": {0}".format(str(ex))
raise Exception(message)
# Render the image
return _render_image(receptor, ligand, args)
# On error render a PNG with an error message
except Exception as ex:
if args['debug']:
return Response(json.dumps({"error": str(ex)}), status=400, mimetype='application/json')
else:
return render_error_image(args['width'], args['height'], str(ex))
########################################################################################################################
# #
# FindLigandInteractionDepictor #
# Finds the ligand within a complex and depicts the ligand-receptor interactions #
# #
# The POST is the raw ligand-receptor complex file string #
# #
########################################################################################################################
class FindLigandInteractionDepictor(Resource):
"""
Generate a receptor-ligand interaction map given a receptor and ligand in a JSON object
"""
def __init__(self):
# Call the superclass initializers
super(FindLigandInteractionDepictor, self).__init__()
# noinspection PyMethodMayBeStatic
def post(self, fmt):
"""
Render a raw receptor-ligand that has been POST'ed to this resource by first searching for the ligand
:return: A Flask Response with the rendered image
:rtype: Response
"""
# Parse the query options
args = interaction_arg_parser.parse_args()
try:
# Try to read the receptor-ligand complex
try:
mol = read_molecule_from_string(
request.data.decode("utf-8"),
fmt,
args['gz'],
args['reparse']
)
except Exception as ex:
message = "Error reading molecule file"
if args['debug']:
message += ": {0}".format(str(ex))
raise Exception(message)
# Generate the ligand selection functor
if args['chain'] or args['resi'] or args['resn']:
functor = generate_ligand_functor(args['chain'], args['resi'], args['resn'])
else:
raise Exception("No ligand selection options given")
# Split the ligand from the complex
ligand = OEGraphMol()
OESubsetMol(ligand, mol, functor, False, False)
# Check the ligand
if not ligand or ligand.NumAtoms() == 0:
raise Exception("No atoms matched ligand selection")
# Delete the ligand from the complex
for atom in mol.GetAtoms(functor):
mol.DeleteAtom(atom)
# Error check receptor
if not mol or mol.NumAtoms() == 0:
raise Exception("No atoms in receptor")
return _render_image(mol, ligand, args)
except Exception as ex:
if args['debug']:
return Response(json.dumps({"error": str(ex)}), status=400, mimetype='application/json')
else:
return render_error_image(args['width'], args['height'], str(ex))
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/resources/depict/interaction.py",
"copies": "1",
"size": "16191",
"license": "apache-2.0",
"hash": 8333972789464820000,
"line_mean": 47.915407855,
"line_max": 120,
"alpha_frac": 0.454882342,
"autogenerated": false,
"ratio": 5.167890201085222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6122772543085223,
"avg_score": null,
"num_lines": null
} |
import json
from flask.ext.restful import Resource, request
from flask import Response
from openeye.oechem import *
from openeye.oedepict import *
from oemicroservices.resources.depict.base import depictor_base_arg_parser
from oemicroservices.common.util import (
render_error_image,
get_image_mime_type,
get_color_from_rgba,
get_title_location,
get_highlight_style,
read_molecule_from_string)
########################################################################################################################
# #
# Molecule depictor argument parser #
# #
########################################################################################################################
# Extend the standard image_parser
depictor_arg_parser = depictor_base_arg_parser.copy()
# The image width
depictor_arg_parser.add_argument('width', type=int, default=400, location='args')
# The image height
depictor_arg_parser.add_argument('height', type=int, default=400, location='args')
# Substructure to highlight (multiple values allowed)
depictor_arg_parser.add_argument('highlight', type=str, action='append', location='args')
# Hex code for coloring the substructure
depictor_arg_parser.add_argument('highlightcolor', type=str, default='#7070FF', location='args')
# Style in which to render the highlighted substructure
depictor_arg_parser.add_argument('highlightstyle', type=str, default='default', location='args')
# Only for GET: the molecule string
depictor_arg_parser.add_argument('val', type=str, location='args')
########################################################################################################################
# #
# MoleculeDepictor #
# RESTful resource for depicting small molecules #
# #
########################################################################################################################
class MoleculeDepictor(Resource):
"""
Render a small molecule in 2D
"""
def __init__(self):
# Initialize superclass
super(MoleculeDepictor, self).__init__()
def get(self, fmt):
"""
Render an image with the molecule passed through the URL
:param fmt: The image format
:type fmt: str
:return: The rendered image
:rtype: Response
"""
# Parse the query options
args = depictor_arg_parser.parse_args()
try:
# Read the molecule
mol = read_molecule_from_string(args['val'], fmt, bool(args['gz']), bool(args['reparse']))
# Render the image
return self.__render_image(mol, args)
# On error render a PNG with an error message
except Exception as ex:
if args['debug']:
return Response(json.dumps({"error": str(ex)}), status=400, mimetype='application/json')
else:
return render_error_image(args['width'], args['height'], str(ex))
def post(self, fmt):
"""
Render an image with the molecule POST'ed to this resource
:param fmt: The molecule format
:type fmt: str
:return: The rendered image
:rtype: Response
"""
# Parse the query options
args = depictor_arg_parser.parse_args()
try:
# Read the molecule
mol = read_molecule_from_string(request.data.decode("utf-8"), fmt, bool(args['gz']), bool(args['reparse']))
# Render the image
return self.__render_image(mol, args)
# On error render a PNG with an error message
except Exception as ex:
if args['debug']:
return Response(json.dumps({"error": str(ex)}), status=400, mimetype='application/json')
else:
return render_error_image(args['width'], args['height'], str(ex))
# noinspection PyMethodMayBeStatic
def __render_image(self, mol, args):
"""
Render a small molecule image
:param mol: The molecule
:param args: The parsed URL query string dictionary
:return: A Flask Response with the rendered image
:rtype: Response
"""
# *********************************************************************
# * Parse Parameters *
# *********************************************************************
width = args['width'] # Image width
height = args['height'] # Image height
title = args['title'] # Image title
use_molecule_title = bool(args['keeptitle']) # Use the molecule title in the molecule file
bond_scaling = bool(args['scalebonds']) # Bond width scales with size
image_format = args['format'] # The output image format
image_mimetype = get_image_mime_type(image_format) # MIME type corresponding to the image format
highlight_style = get_highlight_style(args['highlightstyle']) # The substructure highlights style
title_location = get_title_location(args['titleloc']) # The title location (if we have a title)
highlight = args['highlight'] # SMARTS substructures to highlight
background = get_color_from_rgba(args['background']) # Background color
color = get_color_from_rgba(args['highlightcolor']) # Highlight color
# Make sure we got valid inputs
if not image_mimetype:
raise Exception("Invalid MIME type")
# Defaults for invalid inputs
if not highlight_style:
highlight_style = OEHighlightStyle_Default
if not title_location:
title_location = OETitleLocation_Top
# *********************************************************************
# * Create the Image *
# *********************************************************************
image = OEImage(width, height)
# Prepare the depiction
OEPrepareDepiction(mol, False, True)
opts = OE2DMolDisplayOptions(image.GetWidth(), image.GetHeight(), OEScale_AutoScale)
# If we provided a title
if title:
mol.SetTitle(title)
opts.SetTitleLocation(title_location)
# Else hide if we didn't provide a title and we're *not* using the molecule title
elif not use_molecule_title:
mol.SetTitle("")
opts.SetTitleLocation(OETitleLocation_Hidden)
# Other configuration options
opts.SetBondWidthScaling(bond_scaling)
opts.SetBackgroundColor(background)
# Prepare the display
disp = OE2DMolDisplay(mol, opts)
# Do any substructure matching
if highlight:
for querySmiles in highlight:
subs = OESubSearch(querySmiles)
for match in subs.Match(mol, True):
OEAddHighlighting(disp, color, highlight_style, match)
# Render the image
OERenderMolecule(image, disp)
# Return the image in the response
img_content = OEWriteImageToString(image_format, image)
return Response(img_content, mimetype=image_mimetype)
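

# Illustrative sketch, not part of the original module: exercising
# substructure highlighting through the Flask test client. The route matches
# the one used in this package's test suite; the query values are
# placeholders (note the '#' of the color is percent-encoded as %23).
if __name__ == '__main__':
    from oemicroservices.api import app
    client = app.test_client()
    resp = client.get('/v1/depict/structure/smiles'
                      '?val=c1ccccc1O&highlight=c1ccccc1'
                      '&highlightstyle=stick&highlightcolor=%23FF7070&debug=true')
    print(resp.status)  # expect "200 OK"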
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/resources/depict/molecule.py",
"copies": "1",
"size": "8982",
"license": "apache-2.0",
"hash": -4955658497356552000,
"line_mean": 45.78125,
"line_max": 120,
"alpha_frac": 0.5160320641,
"autogenerated": false,
"ratio": 4.965174129353234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5981206193453235,
"avg_score": null,
"num_lines": null
} |
import json
# noinspection PyUnresolvedReferences
import sys
from flask.ext.restful import Resource, request
from flask import Response
from openeye.oechem import *
from oemicroservices.common.util import compress_string, read_molecule_from_string
############################
# Python 2/3 Compatibility #
############################
# To support unicode as UTF-8 in Python 2 and 3
if sys.version_info < (3,):
def to_utf8(u):
return u.encode('utf-8')
else:
def to_utf8(u):
return u
########################################################################################################################
# #
# MoleculeConvert #
# Convert between molecule file formats #
# #
# Expects a POST: #
# #
# { #
# molecule: { #
# value: A string that contains the molecule file string (*Required) #
# input : { #
# format: The file format of the input molecule string (e.g. sdf, pdb, oeb, etc.) (*Required) #
# gz: If the input molecule string is gzip + b64 encoded #
# }, #
# output: { #
# format: The file format of the output molecule string (e.g. sdf, pdb, oeb, etc.) (*Required) #
# gz: If the output molecule string should be gzip + b64 encoded #
# } #
# } #
# } #
# #
# Returns the following: #
# #
# { #
# molecule: { #
# value: A string containing the output molecule file string #
# format: The output file format #
# gz: If the output molecule string is gzip + b64 encoded #
# } #
# } #
########################################################################################################################
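
# Minimal sketch of a request body for the schema above, asking for an SDF
# record to come back as SMILES; the molecule string is a placeholder.
_EXAMPLE_CONVERT_PAYLOAD = {
    'molecule': {
        'value': '<sdf file string>',
        'input': {'format': 'sdf', 'gz': False},
        'output': {'format': 'smiles', 'gz': False},
    }
}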
class MoleculeConvert(Resource):
"""
Convert between molecule file formats
"""
def __init__(self):
# Initialize superclass
super(MoleculeConvert, self).__init__()
# noinspection PyMethodMayBeStatic
def __validate_schema(self, obj):
"""
Validate schema for JSON POST'ed to the resource
:param obj: The parsed JSON object POST'ed to this resource
"""
if not obj:
raise Exception("No POST data received")
if not isinstance(obj, dict):
raise Exception("Unexpected POST data received")
if 'molecule' not in obj:
raise Exception("No molecule information provided")
if 'value' not in obj['molecule']:
raise Exception("No molecule file provided")
if 'input' not in obj['molecule']:
raise Exception("No input information provided")
if 'format' not in obj['molecule']['input']:
raise Exception("No input format provided")
if 'output' not in obj['molecule']:
raise Exception("No output information provided")
if 'format' not in obj['molecule']['output']:
raise Exception("No output format provided")
def post(self):
"""
Convert a molecule to another file format
:return: A Flask Response with the rendered image
:rtype: Response
"""
# Parse the query options
try:
            # We expect a JSON object in request.data with the protein and ligand data structures
payload = json.loads(request.data.decode("utf-8"))
# Checks to make sure we have everything we need in payload
self.__validate_schema(payload)
# Read the molecule
mol = read_molecule_from_string(
payload['molecule']['value'],
payload['molecule']['input']['format'],
payload['molecule']['input']['gz'] if 'gz' in payload['molecule']['input'] else False,
payload['molecule']['input']['reparse'] if 'reparse' in payload['molecule']['input'] else False
)
# Prepare the molecule for writing
ofs = oemolostream()
if payload['molecule']['output']['format'] == "smiles":
ofs_format = OEFormat_SMI
else:
ofs_format = OEGetFileType(to_utf8(payload['molecule']['output']['format']))
if ofs_format == OEFormat_UNDEFINED:
raise Exception("Unknown output file type: " + payload['molecule']['output']['format'])
ofs.SetFormat(ofs_format)
ofs.openstring()
OEWriteMolecule(ofs, mol)
# Get molecule output stream
if 'gz' in payload['molecule']['output'] and payload['molecule']['output']['gz']:
output = compress_string(ofs.GetString().decode('utf-8'))
else:
output = ofs.GetString().decode('utf-8')
return Response(json.dumps(
{
'molecule': {
'value': output,
'format': payload['molecule']['output']['format'],
'gz': payload['molecule']['output']['gz'] if 'gz' in payload['molecule']['output'] else False
}
}
), status=200, mimetype='application/json')
except Exception as ex:
return Response(json.dumps({"error": str(ex)}), status=400, mimetype='application/json')
| {
"repo_name": "OpenEye-Contrib/OEMicroservices",
"path": "oemicroservices/resources/convert/convert.py",
"copies": "1",
"size": "8652",
"license": "apache-2.0",
"hash": 6899156891277649000,
"line_mean": 53.7594936709,
"line_max": 120,
"alpha_frac": 0.3973647712,
"autogenerated": false,
"ratio": 6.016689847009736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6914054618209735,
"avg_score": null,
"num_lines": null
} |
# Apache License 2.0
import sys
import os
import glob
import verify
class VerifyAll:
def verify_file(self, file, log):
class ErrorCounter:
def __init__(self):
self.count = 0
def error_handler(self, label, msg):
self.count += 1
return False
error_counter = ErrorCounter()
urlctx = verify.UrlCtx('', file, sys.stdin)
ctx = verify.VerifyCtx(urlctx, verify.run, error_counter.error_handler)
if file == '-':
url = file
else:
url = 'file://' + file
ctx.run(urlctx, url, ctx, log)
if error_counter.count != 0:
print('Found errors in {}'.format(file))
return False
else:
return True
def verify(self):
successes = 0
logname = 'verify.log'
with open(logname, 'w') as log:
print('Writing log to {}'.format(logname))
for root, dirs, files in os.walk('.'):
for file in glob.iglob(os.path.join(root, '*.gh')):
log.write('\nVerifying {}\n'.format(file))
if self.verify_file(file, log):
successes += 1
log.write('\nverify_all done\n')
print('{} files successfully verified'.format(successes))
if __name__ == '__main__':
if len(sys.argv) != 1:
print("""
Usage: python verify_all.py
This will verify all .gh files in the current directory and subdirectories.
""")
sys.exit(1)
VerifyAll().verify()
| {
"repo_name": "kryptine/ghilbert",
"path": "verify_all.py",
"copies": "2",
"size": "1574",
"license": "apache-2.0",
"hash": 7963067282689203000,
"line_mean": 26.6140350877,
"line_max": 79,
"alpha_frac": 0.5235069886,
"autogenerated": false,
"ratio": 4.077720207253886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01958366192480891,
"num_lines": 57
} |
''' Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 Luigi Capoferri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import pybel as pb
import requests
import json
import urllib2
import re
import sys
from operator import itemgetter
def createdict(filein):
with open(filein) as db:
dtp=False
uid=False
mol=[]
names=[]
for lineraw in db:
line=lineraw.strip()
if line=='> <DTP names>':
dtp=True
continue
if dtp==True and line=='':
dtp=False
if dtp==True:
names.append(line.upper())
if line=='> <Standard InChIKey>':
uid=True
continue
if uid==True:
id=line.replace('InChIKey=', '')
uid=False
if line=="$$$$":
mol.append([id,names])
names=[]
return mol
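# Illustrative sketch of a hypothetical record, for orientation only:
# createdict() walks SD tags shaped like the block below -- a '> <DTP names>'
# section terminated by a blank line, a '> <Standard InChIKey>' line whose
# value may carry an 'InChIKey=' prefix, and '$$$$' closing each record.
_EXAMPLE_SD_TAGS = """> <DTP names>
ASPIRIN
ACETYLSALICYLIC ACID

> <Standard InChIKey>
InChIKey=BSYNRYMUTXBXSQ-UHFFFAOYSA-N

$$$$
"""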
def listmols(filein):
mols=[]
with open(filein) as inputfn:
cpdid=1
for line in inputfn:
line=line.rstrip()
if line[0]=="#":
header=line[1:].split(";")
print header
else:
mol={}
mol["cpdid"]=cpdid
for n,item in enumerate(line.split(";")):
mol[header[n]]=item
#filter references in the name of molecule
p = re.compile( '\s\[{0,1}[0-9]{1,4}[a-z]{0,1}\]{0,1},{0,1}')
mol['Substance']=p.sub('',mol['Substance'])
#filter double entries
try:
oldn=map(itemgetter('Substance'), mols).index(mol['Substance'])
# print "oldn ", oldn
mols[oldn]['Reaction']+=";"+mol['Reaction']
except ValueError:
# print mol
mols.append(mol)
cpdid+=1
#((re.sub(r'[^\x00-\x7F]+|\s','', line).strip()).lower())
return mols
def getUID(listname):
sdf=[]
notfound=[]
for drug in listname:
print drug['Substance']
found=False
for name in re.split(', | ',drug['Substance']): #drug['Substance'].split(', '):
if found==True:
break
else:
try:
id = urllib2.urlopen("http://cactus.nci.nih.gov/chemical/structure/%s/stdinchikey" % name.lower()).read().replace('InChIKey=', '')
struct = urllib2.urlopen("http://cactus.nci.nih.gov/chemical/structure/%s/sdf"%name.lower())
struct = struct.read()
# print "http://cactus.nci.nih.gov/chemical/structure/%s/stdinchikey" % drug
# id= [x for x,y in dictmols if drug in y][0]
found=True
idnci=name.lower()
except urllib2.HTTPError:
# print "InChIKey not found"
# print "http://cactus.nci.nih.gov/chemical/structure/%s/stdinchikey" % drug
continue
# print id
if found==True:
mol=pb.readstring("sdf", struct)
#print struct
# mol.make3D()
mol.data["InChIKey"]=id
mol.data["Retrieved"]=idnci
mol.data.update(drug)
datachembl=getinfochembl(id)
if datachembl:
mol.data.update(datachembl)
sdf.append(mol)
else:
notfound.append(drug)
print "not found on NCI"
return sdf,notfound
#def getdoi(reference):
def getinfochembl(id):
try:
data = json.loads(requests.get("http://www.ebi.ac.uk/chemblws/compounds/stdinchikey/%s.json" % id).content)
# print data["compound"]["smiles"]
mol={}
mol["smi"]=data["compound"]["smiles"]
# mol.draw(show=False,filename=None,usecoords=False,update=True)
mol["ChEMBLId"]=data["compound"]["chemblId"]
# print mol.data.keys()
data = json.loads(requests.get("http://www.ebi.ac.uk/chemblws/compounds/%s/bioactivities.json" % mol["ChEMBLId"]).content)
nref=1
for reference in data['bioactivities']:
# print reference['target_name']
if reference['target_chemblid']=="CHEMBL3356": # Cytochrome P450 1A2
# print reference
# if reference['reference']!="Unspecified":
# getdoi(reference['reference'])
mol['reference'+str(nref)]=reference['reference']
nref+=1
# for i in mol.data:
# print mol.data[i]
# mol.data['SOM']=''
return mol
except ValueError:
print "compound not found on ChEMBL"
def getassaychembl(idtarget,maxassay):
#try:
infoTarget=json.loads(requests.get("https://www.ebi.ac.uk/chemblws/targets/CHEMBL%s.json" % idtarget).content)
print ("Searching activity data for:")
print ('Target: %s (%s)'%(infoTarget['target']['description'],infoTarget['target']['chemblId']))
print ("Synonyms: %s"%infoTarget['target']['synonyms'])
print ("Total %s bioactivity for %s compounds"%(infoTarget['target']['compoundCount'],infoTarget['target']['bioactivityCount']))
data = json.loads(requests.get("https://www.ebi.ac.uk/chemblws/targets/CHEMBL%s/bioactivities.json" % idtarget).content)
assays=sorted([int(x['assay_chemblid'].replace('CHEMBL','')) for x in data['bioactivities']])
statsass=[]
for item in list(set(assays)):
statsass.append([item, len([x for x in assays if x==item])])
statsass.sort(key=lambda x: x[1], reverse=True)
print "%10s%10s %10s"%('n data','chemblID','Description')
for i in range(0,maxassay):
idass=statsass[i][0]
infoass=json.loads(requests.get("https://www.ebi.ac.uk/chemblws/assays/CHEMBL%d.json" % idass).content)
print "%10d%10d "%(statsass[i][1],statsass[i][0]),infoass["assay"]['assayDescription']
return
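# Equivalent sketch, not part of the original script: the assay frequency
# tally above can also be written with collections.Counter.
def _assay_counts(bioactivities):
    from collections import Counter
    ids = [int(x['assay_chemblid'].replace('CHEMBL', '')) for x in bioactivities]
    return Counter(ids).most_common()  # [(assay_id, n_activities), ...], descending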
def getmol(chemblid):
cpdinfo=json.loads(requests.get("https://www.ebi.ac.uk/chemblws/compounds/CHEMBL%d.json"%chemblid).content)
if 'stdInChiKey' in cpdinfo['compound']:
inchi=cpdinfo['compound']['stdInChiKey']
else:
return None
try:
struct = urllib2.urlopen("http://cactus.nci.nih.gov/chemical/structure/%s/sdf"%inchi)
except Exception,e:
print "%s NOT FOUND: %s"%(inchi,str(e))
return None
mol=pb.readstring("sdf", struct.read())
if 'preferredCompoundName' in cpdinfo['compound']:
mol.data['Name']=cpdinfo['compound']['preferredCompoundName']
print mol.data['Name']
else:
print "CHEMBL%d"%chemblid
return mol
def getcpdfromassaychembl(idtarget,idassay):
#try:
print "****** Receptor Information ********"
infoTarget=json.loads(requests.get("https://www.ebi.ac.uk/chemblws/targets/CHEMBL%s.json" % idtarget).content)
print ("Searching activity data for:")
print ('Target: %s (%s)'%(infoTarget['target']['description'],infoTarget['target']['chemblId']))
print ("Synonyms: %s"%infoTarget['target']['synonyms'])
print "****** Assay Information ********"
infoass=json.loads(requests.get("https://www.ebi.ac.uk/chemblws/assays/CHEMBL%d.json" % idassay).content)
for item in infoass['assay']:
print "%s: %s"%(item, infoass['assay'][item])
print "GETTING COMPOUNDS"
data = json.loads(requests.get("https://www.ebi.ac.uk/chemblws/targets/CHEMBL%s/bioactivities.json" % idtarget).content)
sdf=[]
for item in data['bioactivities']:
if item[u'assay_chemblid']=='CHEMBL%d'%idassay:
chemblid=int(item['ingredient_cmpd_chemblid'].replace('CHEMBL',''))
cpd=getmol(chemblid)
if cpd==None:
continue
for info in item:
cpd.data[str(info)]=str(item[info])
sdf.append(cpd)
print ("Found data for %d compounds"%(len(sdf)))
return sdf
parser = argparse.ArgumentParser(description='Collect molecule information from the ChEMBL server, starting from common names')
parser.add_argument('-t', required=True, dest='target', help='chembl ID target')
parser.add_argument('-a', required=False, dest='assay', help='chembl ID assay', default=None)
parser.add_argument('-na', required=False, dest='nassay', help='max number of assays to show', default=10, type=int)
args = parser.parse_args()
target=args.target
assay=args.assay
nassay=args.nassay
if assay==None:
getassaychembl(target,nassay)
sys.exit(0)
else:
assay=int(assay)
sdf=getcpdfromassaychembl(target,assay)
print "%d Molecules found"%len(sdf)
outSDfile = pb.Outputfile("sdf", "listcpd.sdf",overwrite=True)
for mol in sdf:
# if 'Name' in mol.data:
# print mol.data["Name"]
# else:
# print mol.data["ingredient_cmpd_chemblid"]
outSDfile.write(mol)
outSDfile.close()
| {
"repo_name": "ihateprog/Test",
"path": "getbiochembl.py",
"copies": "1",
"size": "20284",
"license": "apache-2.0",
"hash": -7155220196167023000,
"line_mean": 43.8761061947,
"line_max": 150,
"alpha_frac": 0.6580063104,
"autogenerated": false,
"ratio": 4.0406374501992035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5198643760599204,
"avg_score": null,
"num_lines": null
} |
#Apache OCW lib immports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
from os import path
import urllib
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
varName = 'pr'
gridLonStep=0.44
gridLatStep=0.44
#needed vars for the script
target_datasets =[]
tSeries =[]
results =[]
labels =[] # could just as easily be the names for each subregion
region_counter = 0
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
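# (dataset_id=10 and parameter_id=37 select the CRU3.1 precipitation record,
# per the database page referenced above)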
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Processing datasets so they are the same shape ... """
print("Processing datasets so they are the same shape")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')
print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
#find climatology monthly for obs and models
CRU31.values, CRU31.times = utils.calc_climatology_monthly(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member].values, target_datasets[member].times = utils.calc_climatology_monthly(target_datasets[member])
#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name="ENS"
#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
""" Step 4: Subregion stuff """
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5),
Bounds(0.0, 10.0, 29.0, 37.5),
Bounds(10.0, 20.0, 25.0, 32.5),
Bounds(20.0, 33.0, 25.0, 32.5),
Bounds(-19.3,-10.2,12.0, 20.0),
Bounds( 15.0, 30.0, 15.0, 25.0),
Bounds(-10.0, 10.0, 7.3, 15.0),
Bounds(-10.9, 10.0, 5.0, 7.3),
Bounds(33.9, 40.0, 6.9, 15.0),
Bounds(10.0, 25.0, 0.0, 10.0),
Bounds(10.0, 25.0,-10.0, 0.0),
Bounds(30.0, 40.0,-15.0, 0.0),
Bounds(33.0, 40.0, 25.0, 35.0)]
region_list=[["R"+str(i+1)] for i in xrange(13)]
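# For each subregion: subset the observations and every model (including the
# ensemble appended above), reduce each subset to an area-averaged time series,
# and draw one time-series plot per region.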
for regions in region_list:
subset_name = regions[0]+"_CRU31"
#labels.append(subset_name) #for legend, uncomment this line
subset = dsp.subset(list_of_regions[region_counter], CRU31, subset_name)
tSeries = utils.calc_time_series(subset)
results.append(tSeries)
tSeries=[]
for member, each_target_dataset in enumerate(target_datasets):
subset_name = regions[0]+"_"+target_datasets[member].name
#labels.append(subset_name) #for legend, uncomment this line
subset = dsp.subset(list_of_regions[region_counter],target_datasets[member],subset_name)
tSeries = utils.calc_time_series(subset)
results.append(tSeries)
tSeries=[]
plotter.draw_time_series(np.array(results), CRU31.times, labels, regions[0], ptitle=regions[0],fmt='png')
results =[]
tSeries =[]
labels =[]
region_counter+=1
| {
"repo_name": "MJJoyce/climate",
"path": "examples/time_series_with_regions.py",
"copies": "2",
"size": "5020",
"license": "apache-2.0",
"hash": -8047421824674275000,
"line_mean": 33.6206896552,
"line_max": 120,
"alpha_frac": 0.7217131474,
"autogenerated": false,
"ratio": 2.7193932827735643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9193832697141532,
"avg_score": 0.04945474660640648,
"num_lines": 145
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
from os import path
import urllib
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(lat_min=LAT_MIN, lat_max=LAT_MAX,
lon_min=LON_MIN, lon_max=LON_MAX, start=START, end=END)
varName = 'pr'
gridLonStep = 0.44
gridLatStep = 0.44
# needed vars for the script
target_datasets = []
tSeries = []
results = []
labels = []  # could just as easily be the names for each subregion
region_counter = 0
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(
10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Processing datasets so they are the same shape ... """
print("Processing datasets so they are the same shape")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(target_datasets[member], EVAL_BOUNDS)
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[
member])
target_datasets[member] = dsp.normalize_dataset_datetimes(
target_datasets[member], 'monthly')
print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(
target_datasets[member], new_lats, new_lons)
# find climatology monthly for obs and models
CRU31.values, CRU31.times = utils.calc_climatology_monthly(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member].values, target_datasets[
member].times = utils.calc_climatology_monthly(target_datasets[member])
# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"
# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
""" Step 4: Subregion stuff """
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5),
Bounds(0.0, 10.0, 29.0, 37.5),
Bounds(10.0, 20.0, 25.0, 32.5),
Bounds(20.0, 33.0, 25.0, 32.5),
Bounds(-19.3, -10.2, 12.0, 20.0),
Bounds(15.0, 30.0, 15.0, 25.0),
Bounds(-10.0, 10.0, 7.3, 15.0),
Bounds(-10.9, 10.0, 5.0, 7.3),
Bounds(33.9, 40.0, 6.9, 15.0),
Bounds(10.0, 25.0, 0.0, 10.0),
Bounds(10.0, 25.0, -10.0, 0.0),
Bounds(30.0, 40.0, -15.0, 0.0),
Bounds(33.0, 40.0, 25.0, 35.0)]
region_list = [["R" + str(i + 1)] for i in xrange(13)]
for regions in region_list:
subset_name = regions[0] + "_CRU31"
# labels.append(subset_name) #for legend, uncomment this line
subset = dsp.subset(CRU31, list_of_regions[region_counter], subset_name)
tSeries = utils.calc_time_series(subset)
results.append(tSeries)
tSeries = []
for member, each_target_dataset in enumerate(target_datasets):
subset_name = regions[0] + "_" + target_datasets[member].name
# labels.append(subset_name) #for legend, uncomment this line
subset = dsp.subset(target_datasets[member],
list_of_regions[region_counter],
subset_name)
tSeries = utils.calc_time_series(subset)
results.append(tSeries)
tSeries = []
plotter.draw_time_series(np.array(results), CRU31.times, labels, regions[
0], ptitle=regions[0], fmt='png')
results = []
tSeries = []
labels = []
region_counter += 1
| {
"repo_name": "huikyole/climate",
"path": "examples/time_series_with_regions.py",
"copies": "2",
"size": "5382",
"license": "apache-2.0",
"hash": -5687372764472458000,
"line_mean": 34.642384106,
"line_max": 80,
"alpha_frac": 0.6791155704,
"autogenerated": false,
"ratio": 2.9170731707317072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45961887411317076,
"avg_score": null,
"num_lines": null
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
from os import path
import urllib
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
varName = 'pr'
gridLonStep=0.44
gridLatStep=0.44
#needed vars for the script
target_datasets =[]
tSeries =[]
results =[]
labels =[] # could just as easily be the names for each subregion
region_counter = 0
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Processing datasets so they are the same shape ... """
print("Processing datasets so they are the same shape")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')
print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
#find climatology monthly for obs and models
CRU31.values, CRU31.times = utils.calc_climatology_monthly(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member].values, target_datasets[member].times = utils.calc_climatology_monthly(target_datasets[member])
#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name="ENS"
#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
""" Step 4: Subregion stuff """
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5),
Bounds(0.0, 10.0, 29.0, 37.5),
Bounds(10.0, 20.0, 25.0, 32.5),
Bounds(20.0, 33.0, 25.0, 32.5),
Bounds(-19.3,-10.2,12.0, 20.0),
Bounds( 15.0, 30.0, 15.0, 25.0),
Bounds(-10.0, 10.0, 7.3, 15.0),
Bounds(-10.9, 10.0, 5.0, 7.3),
Bounds(33.9, 40.0, 6.9, 15.0),
Bounds(10.0, 25.0, 0.0, 10.0),
Bounds(10.0, 25.0,-10.0, 0.0),
Bounds(30.0, 40.0,-15.0, 0.0),
Bounds(33.0, 40.0, 25.0, 35.0)]
region_list=[["R"+str(i+1)] for i in xrange(13)]
for regions in region_list:
subset_name = regions[0]+"_CRU31"
#labels.append(subset_name) #for legend, uncomment this line
subset = dsp.subset(list_of_regions[region_counter], CRU31, subset_name)
tSeries = utils.calc_time_series(subset)
results.append(tSeries)
tSeries=[]
for member, each_target_dataset in enumerate(target_datasets):
subset_name = regions[0]+"_"+target_datasets[member].name
#labels.append(subset_name) #for legend, uncomment this line
subset = dsp.subset(list_of_regions[region_counter],target_datasets[member],subset_name)
tSeries = utils.calc_time_series(subset)
results.append(tSeries)
tSeries=[]
plotter.draw_time_series(np.array(results), CRU31.times, labels, regions[0], ptitle=regions[0],fmt='png')
results =[]
tSeries =[]
labels =[]
region_counter+=1
| {
"repo_name": "riverma/climate",
"path": "examples/time_series_with_regions.py",
"copies": "2",
"size": "4892",
"license": "apache-2.0",
"hash": -8943550389226853000,
"line_mean": 33.695035461,
"line_max": 120,
"alpha_frac": 0.7205641864,
"autogenerated": false,
"ratio": 2.7027624309392264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4423326617339226,
"avg_score": null,
"num_lines": null
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import urllib
from os import path
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "pr_africa_taylor"
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(lat_min=LAT_MIN, lat_max=LAT_MAX,
lon_min=LON_MIN, lon_max=LON_MAX, start=START, end=END)
# variable that we are analyzing
varName = 'pr'
# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5
# some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
ref_datasets = []
# Download necessary NetCDF file if not present
if path.exists(FILE_1):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if path.exists(FILE_2):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if path.exists(FILE_3):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGM3"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(
10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Resample Datasets so they are the same shape """
print("Resampling datasets ...")
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
print("... temporal")
CRU31 = dsp.temporal_rebin(CRU31, temporal_resolution='monthly')
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[
member])
target_datasets[member] = dsp.temporal_rebin(
target_datasets[member], temporal_resolution='monthly')
target_datasets[member] = dsp.subset(target_datasets[member], EVAL_BOUNDS)
# Regrid
print("... regrid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(
target_datasets[member], new_lats, new_lons)
# Reduce each dataset to its multi-year temporal mean.
# (A related helper, calc_climatology_year(dataset), also exists in utils.py.)
CRU31.values = utils.calc_temporal_mean(CRU31)
# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"
# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member].values = utils.calc_temporal_mean(target_datasets[
member])
allNames = []
for target in target_datasets:
allNames.append(target.name)
# calculate the metrics
taylor_diagram = metrics.SpatialPatternTaylorDiagram()
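# SpatialPatternTaylorDiagram yields the pair of statistics a Taylor diagram is
# built from: the spatial standard-deviation ratio and the pattern correlation.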
# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# 1 or more target datasets for
# the evaluation
target_datasets,
# 1 or more metrics to use in
# the evaluation
[taylor_diagram]) # , mean_bias,spatial_std_dev_ratio, pattern_correlation])
RCMs_to_CRU_evaluation.run()
taylor_data = RCMs_to_CRU_evaluation.results[0]
plotter.draw_taylor_diagram(taylor_data,
allNames,
"CRU31",
fname=OUTPUT_PLOT,
fmt='png',
frameon=False)
| {
"repo_name": "jarifibrahim/climate",
"path": "examples/multi_model_taylor_diagram.py",
"copies": "2",
"size": "5106",
"license": "apache-2.0",
"hash": 9039999506548922000,
"line_mean": 33.7346938776,
"line_max": 124,
"alpha_frac": 0.6627497062,
"autogenerated": false,
"ratio": 3.2920696324951644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49548193386951644,
"avg_score": null,
"num_lines": null
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
from os import path
import urllib
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "pr_africa_taylor"
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
#variable that we are analyzing
varName = 'pr'
#regridding parameters
gridLonStep=0.5
gridLatStep=0.5
#some vars for this evaluation
target_datasets_ensemble=[]
target_datasets =[]
ref_datasets =[]
# Download necessary NetCDF file if not present
if path.exists(FILE_1):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if path.exists(FILE_2):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if path.exists(FILE_3):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGM3"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Resample Datasets so they are the same shape """
print("Resampling datasets ...")
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
print("... temporal")
CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.temporal_rebin(target_datasets[member], datetime.timedelta(days=30))
target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
#Regrid
print("... regrid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
#find the mean values
#way to get the mean. Note the function exists in util.py as def calc_climatology_year(dataset):
CRU31.values,_ = utils.calc_climatology_year(CRU31)
CRU31.values = np.expand_dims(CRU31.values, axis=0)
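# expand_dims restores a singleton time axis so the annual-mean fields keep the
# (time, lat, lon) layout the metrics expect.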
#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name="ENS"
#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member].values,_ = utils.calc_climatology_year(target_datasets[member])
target_datasets[member].values = np.expand_dims(target_datasets[member].values, axis=0)
allNames =[]
for target in target_datasets:
allNames.append(target.name)
#calculate the metrics
pattern_correlation = metrics.PatternCorrelation()
spatial_std_dev = metrics.StdDevRatio()
#create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# 1 or more target datasets for the evaluation
target_datasets,
# 1 or more metrics to use in the evaluation
[spatial_std_dev, pattern_correlation])#, mean_bias,spatial_std_dev_ratio, pattern_correlation])
RCMs_to_CRU_evaluation.run()
rcm_std_dev = [results[0] for results in RCMs_to_CRU_evaluation.results]
rcm_pat_cor = [results[1] for results in RCMs_to_CRU_evaluation.results]
taylor_data = np.array([rcm_std_dev, rcm_pat_cor]).transpose()
new_taylor_data = np.squeeze(np.array(taylor_data))
plotter.draw_taylor_diagram(new_taylor_data,
allNames,
"CRU31",
fname=OUTPUT_PLOT,
fmt='png',
frameon=False)
| {
"repo_name": "pwcberry/climate",
"path": "examples/multi_model_taylor_diagram.py",
"copies": "2",
"size": "5025",
"license": "apache-2.0",
"hash": 2719667160667418600,
"line_mean": 33.8958333333,
"line_max": 135,
"alpha_frac": 0.7064676617,
"autogenerated": false,
"ratio": 3.043609933373713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9552533723717882,
"avg_score": 0.039508774271166086,
"num_lines": 144
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
from os import path
import urllib
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "portrait_diagram"
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
#variable that we are analyzing
varName = 'pr'
#regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5
#some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
allNames = []
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Processing Datasets so they are the same shape """
print("Processing datasets ...")
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')
print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
#find the total annual mean. Note the function exists in util.py as def calc_climatology_year(dataset):
_,CRU31.values = utils.calc_climatology_year(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
_, target_datasets[member].values = utils.calc_climatology_year(target_datasets[member])
#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name="ENS"
#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
for target in target_datasets:
allNames.append(target.name)
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5),
Bounds(0.0, 10.0, 29.0, 37.5),
Bounds(10.0, 20.0, 25.0, 32.5),
Bounds(20.0, 33.0, 25.0, 32.5),
Bounds(-19.3,-10.2,12.0, 20.0),
Bounds( 15.0, 30.0, 15.0, 25.0),
Bounds(-10.0, 10.0, 7.3, 15.0),
Bounds(-10.9, 10.0, 5.0, 7.3),
Bounds(33.9, 40.0, 6.9, 15.0),
Bounds(10.0, 25.0, 0.0, 10.0),
Bounds(10.0, 25.0,-10.0, 0.0),
Bounds(30.0, 40.0,-15.0, 0.0),
Bounds(33.0, 40.0, 25.0, 35.00)]
region_list=["R"+str(i+1) for i in xrange(13)]
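# R1-R13 label the 13 subregion Bounds above and become the subregion axis
# labels of the portrait diagram drawn below.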
#metrics
pattern_correlation = metrics.PatternCorrelation()
#create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# 1 or more target datasets for the evaluation
target_datasets,
# 1 or more metrics to use in the evaluation
[pattern_correlation],
# list of subregion Bounds Objects
list_of_regions)
RCMs_to_CRU_evaluation.run()
new_patcor = np.squeeze(np.array(RCMs_to_CRU_evaluation.results), axis=1)
plotter.draw_portrait_diagram(np.transpose(new_patcor),allNames, region_list, fname=OUTPUT_PLOT, fmt='png', cmap='coolwarm_r')
| {
"repo_name": "MBoustani/climate",
"path": "examples/subregions_portrait_diagram.py",
"copies": "2",
"size": "5313",
"license": "apache-2.0",
"hash": -3772879987851073500,
"line_mean": 35.4154929577,
"line_max": 126,
"alpha_frac": 0.6802183324,
"autogenerated": false,
"ratio": 2.854916711445459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.942191208214376,
"avg_score": 0.022644592340340053,
"num_lines": 142
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
from os import path
import urllib
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "portrait_diagram"
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
# variable that we are analyzing
varName = 'pr'
# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5
# some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
allNames = []
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(
10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Processing Datasets so they are the same shape """
print("Processing datasets ...")
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(target_datasets[member], EVAL_BOUNDS)
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[
member])
target_datasets[member] = dsp.normalize_dataset_datetimes(
target_datasets[member], 'monthly')
print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(
target_datasets[member], new_lats, new_lons)
# find the total annual mean. Note the function exists in util.py as def
# calc_climatology_year(dataset):
_, CRU31.values = utils.calc_climatology_year(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
_, target_datasets[member].values = utils.calc_climatology_year(target_datasets[
member])
# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"
# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
for target in target_datasets:
allNames.append(target.name)
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5),
Bounds(0.0, 10.0, 29.0, 37.5),
Bounds(10.0, 20.0, 25.0, 32.5),
Bounds(20.0, 33.0, 25.0, 32.5),
Bounds(-19.3, -10.2, 12.0, 20.0),
Bounds(15.0, 30.0, 15.0, 25.0),
Bounds(-10.0, 10.0, 7.3, 15.0),
Bounds(-10.9, 10.0, 5.0, 7.3),
Bounds(33.9, 40.0, 6.9, 15.0),
Bounds(10.0, 25.0, 0.0, 10.0),
Bounds(10.0, 25.0, -10.0, 0.0),
Bounds(30.0, 40.0, -15.0, 0.0),
Bounds(33.0, 40.0, 25.0, 35.00)]
region_list = ["R" + str(i + 1) for i in xrange(13)]
# metrics
pattern_correlation = metrics.PatternCorrelation()
# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# 1 or more target datasets for
# the evaluation
target_datasets,
# 1 or more metrics to use in
# the evaluation
[pattern_correlation],
# list of subregion Bounds
# Objects
list_of_regions)
RCMs_to_CRU_evaluation.run()
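# Only one metric was requested, so squeeze away the singleton metric axis,
# leaving a 2-D (model x subregion) array of pattern correlations for plotting.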
new_patcor = np.squeeze(np.array(RCMs_to_CRU_evaluation.results), axis=1)
plotter.draw_portrait_diagram(np.transpose(
new_patcor), allNames, region_list, fname=OUTPUT_PLOT, fmt='png', cmap='coolwarm_r')
| {
"repo_name": "jarifibrahim/climate",
"path": "examples/subregions_portrait_diagram.py",
"copies": "2",
"size": "5713",
"license": "apache-2.0",
"hash": 3883286699853921300,
"line_mean": 36.3422818792,
"line_max": 93,
"alpha_frac": 0.6325923333,
"autogenerated": false,
"ratio": 3.0682062298603654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700798563160365,
"avg_score": null,
"num_lines": null
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
from os import path
import urllib
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "portrait_diagram"
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 01, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
#variable that we are analyzing
varName = 'pr'
#regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5
#some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
allNames = []
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
""" Step 3: Processing Datasets so they are the same shape """
print("Processing datasets ...")
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')
print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
#find the total annual mean. Note the function exists in util.py as def calc_climatology_year(dataset):
_,CRU31.values = utils.calc_climatology_year(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
_, target_datasets[member].values = utils.calc_climatology_year(target_datasets[member])
#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name="ENS"
#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
for target in target_datasets:
allNames.append(target.name)
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5),
Bounds(0.0, 10.0, 29.0, 37.5),
Bounds(10.0, 20.0, 25.0, 32.5),
Bounds(20.0, 33.0, 25.0, 32.5),
Bounds(-19.3,-10.2,12.0, 20.0),
Bounds( 15.0, 30.0, 15.0, 25.0),
Bounds(-10.0, 10.0, 7.3, 15.0),
Bounds(-10.9, 10.0, 5.0, 7.3),
Bounds(33.9, 40.0, 6.9, 15.0),
Bounds(10.0, 25.0, 0.0, 10.0),
Bounds(10.0, 25.0,-10.0, 0.0),
Bounds(30.0, 40.0,-15.0, 0.0),
Bounds(33.0, 40.0, 25.0, 35.00)]
region_list=["R"+str(i+1) for i in xrange(13)]
#metrics
pattern_correlation = metrics.PatternCorrelation()
#create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# 1 or more target datasets for the evaluation
target_datasets,
# 1 or more metrics to use in the evaluation
[pattern_correlation],
# list of subregion Bounds Objects
list_of_regions)
RCMs_to_CRU_evaluation.run()
new_patcor = np.squeeze(np.array(RCMs_to_CRU_evaluation.results), axis=1)
plotter.draw_portrait_diagram(new_patcor,allNames, region_list, fname=OUTPUT_PLOT, fmt='png', cmap='coolwarm_r')
| {
"repo_name": "pwcberry/climate",
"path": "examples/subregions_portrait_diagram.py",
"copies": "2",
"size": "5162",
"license": "apache-2.0",
"hash": -1179751274766546700,
"line_mean": 35.1366906475,
"line_max": 112,
"alpha_frac": 0.6778380473,
"autogenerated": false,
"ratio": 2.8393839383938393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4517221985693839,
"avg_score": null,
"num_lines": null
} |
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import datetime
import numpy as np
import numpy.ma as ma
OUTPUT_PLOT = "subregions"
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START_SUB = datetime.datetime(2000, 01, 1)
END_SUB = datetime.datetime(2007, 12, 31)
# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5
# Define the lat/lon grid used to draw the subregion map
print("... building the map grid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
list_of_regions = [
Bounds(-10.0, 0.0, 29.0, 36.5, START_SUB, END_SUB),
Bounds(0.0, 10.0, 29.0, 37.5, START_SUB, END_SUB),
Bounds(10.0, 20.0, 25.0, 32.5, START_SUB, END_SUB),
Bounds(20.0, 33.0, 25.0, 32.5, START_SUB, END_SUB),
Bounds(-19.3, -10.2, 12.0, 20.0, START_SUB, END_SUB),
Bounds(15.0, 30.0, 15.0, 25.0, START_SUB, END_SUB),
Bounds(-10.0, 10.0, 7.3, 15.0, START_SUB, END_SUB),
Bounds(-10.9, 10.0, 5.0, 7.3, START_SUB, END_SUB),
Bounds(33.9, 40.0, 6.9, 15.0, START_SUB, END_SUB),
Bounds(10.0, 25.0, 0.0, 10.0, START_SUB, END_SUB),
Bounds(10.0, 25.0, -10.0, 0.0, START_SUB, END_SUB),
Bounds(30.0, 40.0, -15.0, 0.0, START_SUB, END_SUB),
Bounds(33.0, 40.0, 25.0, 35.0, START_SUB, END_SUB)]
# for plotting the subregions
plotter.draw_subregions(list_of_regions, new_lats,
new_lons, OUTPUT_PLOT, fmt='png')
| {
"repo_name": "jarifibrahim/climate",
"path": "examples/subregions_rectangular_boundaries.py",
"copies": "2",
"size": "1712",
"license": "apache-2.0",
"hash": 8874012017345808000,
"line_mean": 31.568627451,
"line_max": 57,
"alpha_frac": 0.636682243,
"autogenerated": false,
"ratio": 2.371191135734072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4007873378734072,
"avg_score": null,
"num_lines": null
} |
# Apache OCW lib imports
import ocw.data_source.local as local
import ocw.plotter as plotter
import ocw.utils as utils
from ocw.evaluation import Evaluation
import ocw.metrics as metrics
# Python libraries
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib import rcParams
from matplotlib.patches import Polygon
import string
def Map_plot_bias_of_multiyear_climatology(obs_dataset, obs_name, model_datasets, model_names,
file_name, row, column):
    '''Draw maps of observed multi-year climatology and biases of models.'''
# calculate climatology of observation data
obs_clim = utils.calc_temporal_mean(obs_dataset)
# determine the metrics
map_of_bias = metrics.TemporalMeanBias()
# create the Evaluation object
bias_evaluation = Evaluation(obs_dataset, # Reference dataset for the evaluation
model_datasets, # list of target datasets for the evaluation
                                 [map_of_bias])
# run the evaluation (bias calculation)
bias_evaluation.run()
rcm_bias = bias_evaluation.results[0]
fig = plt.figure()
lat_min = obs_dataset.lats.min()
lat_max = obs_dataset.lats.max()
lon_min = obs_dataset.lons.min()
lon_max = obs_dataset.lons.max()
string_list = list(string.ascii_lowercase)
ax = fig.add_subplot(row,column,1)
m = Basemap(ax=ax, projection ='cyl', llcrnrlat = lat_min, urcrnrlat = lat_max,
llcrnrlon = lon_min, urcrnrlon = lon_max, resolution = 'l', fix_aspect=False)
lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)
x,y = m(lons, lats)
m.drawcoastlines(linewidth=1)
m.drawcountries(linewidth=1)
m.drawstates(linewidth=0.5, color='w')
max = m.contourf(x,y,obs_clim,levels = plotter._nice_intervals(obs_dataset.values, 10), extend='both',cmap='PuOr')
ax.annotate('(a) \n' + obs_name,xy=(lon_min, lat_min))
cax = fig.add_axes([0.02, 1.-float(1./row), 0.01, 1./row*0.6])
plt.colorbar(max, cax = cax)
clevs = plotter._nice_intervals(rcm_bias, 11)
for imodel in np.arange(len(model_datasets)):
ax = fig.add_subplot(row, column,2+imodel)
m = Basemap(ax=ax, projection ='cyl', llcrnrlat = lat_min, urcrnrlat = lat_max,
llcrnrlon = lon_min, urcrnrlon = lon_max, resolution = 'l', fix_aspect=False)
m.drawcoastlines(linewidth=1)
m.drawcountries(linewidth=1)
m.drawstates(linewidth=0.5, color='w')
max = m.contourf(x,y,rcm_bias[imodel,:],levels = clevs, extend='both', cmap='RdBu_r')
ax.annotate('('+string_list[imodel+1]+') \n '+model_names[imodel],xy=(lon_min, lat_min))
cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
plt.colorbar(max, cax = cax)
plt.subplots_adjust(hspace=0.01,wspace=0.05)
plt.show()
fig.savefig(file_name,dpi=600,bbox_inches='tight')
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(obs_dataset, obs_name, model_datasets, model_names,
file_name):
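    '''Draw a Taylor diagram comparing the spatial pattern of each model's
    multi-year mean climatology against the observations.'''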
# calculate climatological mean fields
obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
for dataset in model_datasets:
dataset.values = utils.calc_temporal_mean(dataset)
# Metrics (spatial standard deviation and pattern correlation)
# determine the metrics
taylor_diagram = metrics.SpatialPatternTaylorDiagram()
# create the Evaluation object
taylor_evaluation = Evaluation(obs_dataset, # Reference dataset for the evaluation
model_datasets, # list of target datasets for the evaluation
[taylor_diagram])
# run the evaluation (bias calculation)
taylor_evaluation.run()
taylor_data = taylor_evaluation.results[0]
plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name, pos='upper right',frameon=False)
def Time_series_subregion(obs_subregion_mean, obs_name, model_subregion_mean, model_names, seasonal_cycle,
file_name, row, column, x_tick=['']):
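    '''Plot, for every subregion, the area-averaged time series of the
    observation and each model; when seasonal_cycle is True the series are
    first averaged into a 12-month mean annual cycle.'''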
nmodel, nt, nregion = model_subregion_mean.shape
if seasonal_cycle:
obs_data = ma.mean(obs_subregion_mean.reshape([1,nt/12,12,nregion]), axis=1)
model_data = ma.mean(model_subregion_mean.reshape([nmodel,nt/12,12,nregion]), axis=1)
nt = 12
else:
obs_data = obs_subregion_mean
model_data = model_subregion_mean
x_axis = np.arange(nt)
x_tick_values = x_axis
fig = plt.figure()
rcParams['xtick.labelsize'] = 6
rcParams['ytick.labelsize'] = 6
for iregion in np.arange(nregion):
ax = fig.add_subplot(row, column, iregion+1)
        if iregion+1 > column*(row-1):
            x_tick_labels = x_tick
        else:
            x_tick_labels = ['']
ax.plot(x_axis, obs_data[0, :, iregion], color='r', lw=2, label=obs_name)
for imodel in np.arange(nmodel):
ax.plot(x_axis, model_data[imodel, :, iregion], lw=0.5, label = model_names[imodel])
ax.set_xlim([-0.5,nt-0.5])
ax.set_xticks(x_tick_values)
ax.set_xticklabels(x_tick_labels)
ax.set_title('Region %02d' % (iregion+1), fontsize=8)
ax.legend(bbox_to_anchor=(-0.2, row/2), loc='center' , prop={'size':7}, frameon=False)
fig.subplots_adjust(hspace=0.7, wspace=0.5)
plt.show()
fig.savefig(file_name, dpi=600, bbox_inches='tight')
def Portrait_diagram_subregion(obs_subregion_mean, obs_name, model_subregion_mean, model_names, seasonal_cycle,
file_name, normalize=True):
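    '''Draw one portrait diagram per statistic (bias, standard-deviation ratio,
    RMSE and correlation) of each model against the observation for every
    subregion; normalize=True expresses bias, ratio and RMSE as percentages of
    the observed variability.'''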
nmodel, nt, nregion = model_subregion_mean.shape
if seasonal_cycle:
obs_data = ma.mean(obs_subregion_mean.reshape([1,nt/12,12,nregion]), axis=1)
model_data = ma.mean(model_subregion_mean.reshape([nmodel,nt/12,12,nregion]), axis=1)
nt = 12
else:
obs_data = obs_subregion_mean
model_data = model_subregion_mean
subregion_metrics = ma.zeros([4, nregion, nmodel])
for imodel in np.arange(nmodel):
for iregion in np.arange(nregion):
# First metric: bias
subregion_metrics[0, iregion, imodel] = metrics.calc_bias(model_data[imodel, :, iregion], obs_data[0, :, iregion], average_over_time = True)
# Second metric: standard deviation
subregion_metrics[1, iregion, imodel] = metrics.calc_stddev_ratio(model_data[imodel, :, iregion], obs_data[0, :, iregion])
# Third metric: RMSE
subregion_metrics[2, iregion, imodel] = metrics.calc_rmse(model_data[imodel, :, iregion], obs_data[0, :, iregion])
# Fourth metric: correlation
subregion_metrics[3, iregion, imodel] = metrics.calc_correlation(model_data[imodel, :, iregion], obs_data[0, :, iregion])
if normalize:
for iregion in np.arange(nregion):
subregion_metrics[0, iregion, : ] = subregion_metrics[0, iregion, : ]/ma.std(obs_data[0, :, iregion])*100.
subregion_metrics[1, iregion, : ] = subregion_metrics[1, iregion, : ]*100.
subregion_metrics[2, iregion, : ] = subregion_metrics[2, iregion, : ]/ma.std(obs_data[0, :, iregion])*100.
region_names = ['R%02d' % i for i in np.arange(nregion)+1]
for imetric, metric in enumerate(['bias','std','RMSE','corr']):
plotter.draw_portrait_diagram(subregion_metrics[imetric, :, :], region_names, model_names, file_name+'_'+metric,
xlabel='model',ylabel='region')
def Map_plot_subregion(subregions, ref_dataset, directory):
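    '''Draw each subregion's bounding box and label on a map covering the
    reference dataset's domain and save the figure to the given directory.'''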
lons, lats = np.meshgrid(ref_dataset.lons, ref_dataset.lats)
fig = plt.figure()
ax = fig.add_subplot(111)
m = Basemap(ax=ax, projection='cyl',llcrnrlat = lats.min(), urcrnrlat = lats.max(),
llcrnrlon = lons.min(), urcrnrlon = lons.max(), resolution = 'l')
m.drawcoastlines(linewidth=0.75)
m.drawcountries(linewidth=0.75)
m.etopo()
x, y = m(lons, lats)
#subregion_array = ma.masked_equal(subregion_array, 0)
#max=m.contourf(x, y, subregion_array, alpha=0.7, cmap='Accent')
for subregion in subregions:
draw_screen_poly(subregion[1], m, 'w')
plt.annotate(subregion[0],xy=(0.5*(subregion[1][2]+subregion[1][3]), 0.5*(subregion[1][0]+subregion[1][1])), ha='center',va='center', fontsize=8)
plt.show()
fig.savefig(directory+'map_subregion', bbox_inches='tight')
def draw_screen_poly(boundary_array, m, linecolor='k'):
''' Draw a polygon on a map
:param boundary_array: [lat_north, lat_south, lon_east, lon_west]
:param m : Basemap object
'''
lats = [boundary_array[0], boundary_array[0], boundary_array[1], boundary_array[1]]
lons = [boundary_array[3], boundary_array[2], boundary_array[2], boundary_array[3]]
x, y = m( lons, lats )
xy = zip(x,y)
poly = Polygon( xy, facecolor='none',edgecolor=linecolor )
plt.gca().add_patch(poly)
| {
"repo_name": "pwcberry/climate",
"path": "examples/configuration_file_examples/metrics_and_plots.py",
"copies": "1",
"size": "9338",
"license": "apache-2.0",
"hash": -6898481135363852000,
"line_mean": 40.6392694064,
"line_max": 154,
"alpha_frac": 0.6157635468,
"autogenerated": false,
"ratio": 3.195756331279945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9210296848971464,
"avg_score": 0.02024460582169609,
"num_lines": 219
} |
"""ApacheParser is a member object of the ApacheConfigurator class."""
import copy
import fnmatch
import logging
import os
import re
import subprocess
import sys
import six
from certbot import errors
from certbot_apache import constants
logger = logging.getLogger(__name__)
class ApacheParser(object):
"""Class handles the fine details of parsing the Apache Configuration.
.. todo:: Make parsing general... remove sites-available etc...
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar set modules: All module names that are currently enabled.
:ivar dict loc: Location to place directives, root - configuration origin,
default - user config file, name - NameVirtualHost,
"""
arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
fnmatch_chars = set(["*", "?", "\\", "[", "]"])
def __init__(self, aug, root, vhostroot=None, version=(2, 4),
configurator=None):
# Note: Order is important here.
# Needed for calling save() with reverter functionality that resides in
# AugeasConfigurator superclass of ApacheConfigurator. This resolves
# issues with aug.load() after adding new files / defines to parse tree
self.configurator = configurator
# This uses the binary, so it can be done first.
# https://httpd.apache.org/docs/2.4/mod/core.html#define
# https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
# This only handles invocation parameters and Define directives!
self.parser_paths = {}
self.variables = {}
if version >= (2, 4):
self.update_runtime_variables()
self.aug = aug
# Find configuration root and make sure augeas can parse it.
self.root = os.path.abspath(root)
self.loc = {"root": self._find_config_root()}
self.parse_file(self.loc["root"])
# This problem has been fixed in Augeas 1.0
self.standardize_excl()
# Temporarily set modules to be empty, so that find_dirs can work
# https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
# This needs to come before locations are set.
self.modules = set()
self.init_modules()
# Set up rest of locations
self.loc.update(self._set_locations())
self.existing_paths = copy.deepcopy(self.parser_paths)
# Must also attempt to parse additional virtual host root
if vhostroot:
self.parse_file(os.path.abspath(vhostroot) + "/" +
constants.os_constant("vhost_files"))
# check to see if there were unparsed define statements
if version < (2, 4):
if self.find_dir("Define", exclude=False):
raise errors.PluginError("Error parsing runtime variables")
def add_include(self, main_config, inc_path):
"""Add Include for a new configuration file if one does not exist
:param str main_config: file path to main Apache config file
:param str inc_path: path of file to include
"""
if len(self.find_dir(case_i("Include"), inc_path)) == 0:
logger.debug("Adding Include %s to %s",
inc_path, get_aug_path(main_config))
self.add_dir(
get_aug_path(main_config),
"Include", inc_path)
# Add new path to parser paths
new_dir = os.path.dirname(inc_path)
new_file = os.path.basename(inc_path)
if new_dir in self.existing_paths.keys():
# Add to existing path
self.existing_paths[new_dir].append(new_file)
else:
# Create a new path
self.existing_paths[new_dir] = [new_file]
def init_modules(self):
"""Iterates on the configuration until no new modules are loaded.
        .. todo:: This should be attempted to be done with a binary to avoid
the iteration issue. Else... parse and enable mods at same time.
"""
# Since modules are being initiated... clear existing set.
self.modules = set()
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(self.modules) != prev_size:
prev_size = len(self.modules)
for match_name, match_filename in six.moves.zip(
iterator, iterator):
mod_name = self.get_arg(match_name)
mod_filename = self.get_arg(match_filename)
if mod_name and mod_filename:
self.modules.add(mod_name)
self.modules.add(os.path.basename(mod_filename)[:-2] + "c")
else:
logger.debug("Could not read LoadModule directive from " +
"Augeas path: {0}".format(match_name[6:]))
def update_runtime_variables(self):
""""
.. note:: Compile time variables (apache2ctl -V) are not used within
the dynamic configuration files. These should not be parsed or
interpreted.
.. todo:: Create separate compile time variables...
simply for arg_get()
"""
stdout = self._get_runtime_cfg()
variables = dict()
matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
try:
matches.remove("DUMP_RUN_CFG")
except ValueError:
return
for match in matches:
if match.count("=") > 1:
logger.error("Unexpected number of equal signs in "
"runtime config dump.")
raise errors.PluginError(
"Error parsing Apache runtime variables")
parts = match.partition("=")
variables[parts[0]] = parts[2]
self.variables = variables
def _get_runtime_cfg(self): # pylint: disable=no-self-use
"""Get runtime configuration info.
:returns: stdout from DUMP_RUN_CFG
"""
try:
proc = subprocess.Popen(
constants.os_constant("define_cmd"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error running command %s for runtime parameters!%s",
constants.os_constant("define_cmd"), os.linesep)
raise errors.MisconfigurationError(
"Error accessing loaded Apache parameters: %s" %
constants.os_constant("define_cmd"))
# Small errors that do not impede
if proc.returncode != 0:
logger.warning("Error in checking parameter list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
return stdout
def filter_args_num(self, matches, args): # pylint: disable=no-self-use
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param list matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain the given number of
arguments (the arg suffix is stripped off)
"""
filtered = []
if args == 1:
for i in range(len(matches)):
if matches[i].endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i in range(len(matches)):
if matches[i].endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" %
(args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
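# Worked example (illustrative Augeas paths): with
#     matches = [".../directive[1]/arg[1]", ".../directive[1]/arg[2]"]
# filter_args_num(matches, 2) returns [".../directive[1]"], because
# arg[2] is present and no arg[3] follows it.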
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive, e.g. ["443"]
:type args: list of str
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def _get_ifmod(self, aug_conf_path, mod):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if len(if_mods) == 0:
self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
# Strip off "arg" at end of first ifmod path
return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def find_dir(self, directive, arg=None, start=None, exclude=True):
"""Finds directive in the configuration.
Recursively searches through config files to find directives
Directives should be in the form of a case insensitive regex currently
.. todo:: arg should probably be a list
.. todo:: arg search currently only supports direct matching. It does
not handle the case of variables or quoted arguments. This should
be adapted to use a generic search for the directive and then do a
case-insensitive self.get_arg filter
Note: Augeas is inherently case sensitive while Apache is case
insensitive. Augeas 1.0 allows case insensitive regexes like
regexp(/Listen/, "i"); however, the version currently shipped with
Ubuntu (0.10) does not. Thus a custom case insensitive
transformation is applied by calling case_i() on everything to
maintain compatibility.
:param str directive: Directive to look for
:param arg: Specific value directive must have, None if all should
be considered
:type arg: str or None
:param str start: Beginning Augeas path to begin looking
:param bool exclude: Whether or not to exclude directives based on
variables and enabled modules
"""
# Cannot place member variable in the definition of the function so...
if not start:
start = get_aug_path(self.loc["root"])
# No regexp code
# if arg is None:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive + "']/arg")
# else:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive +
# "']/* [self::arg='" + arg + "']")
# includes = self.aug.match(start +
# "//* [self::directive='Include']/* [label()='arg']")
regex = "(%s)|(%s)|(%s)" % (case_i(directive),
case_i("Include"),
case_i("IncludeOptional"))
matches = self.aug.match(
"%s//*[self::directive=~regexp('%s')]" % (start, regex))
if exclude:
matches = self._exclude_dirs(matches)
if arg is None:
arg_suffix = "/arg"
else:
arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
ordered_matches = []
# TODO: Wildcards should be included in alphabetical order
# https://httpd.apache.org/docs/2.4/mod/core.html#include
for match in matches:
dir_ = self.aug.get(match).lower()
if dir_ == "include" or dir_ == "includeoptional":
ordered_matches.extend(self.find_dir(
directive, arg,
self._get_include_path(self.get_arg(match + "/arg")),
exclude))
# This check also matches when the searched directive is Include itself
if dir_ == directive.lower():
ordered_matches.extend(self.aug.match(match + arg_suffix))
return ordered_matches
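# Illustrative call (paths are examples only): parser.find_dir("Listen")
# returns Augeas paths to the arg nodes of every active Listen
# directive, recursing through Include/IncludeOptional files, e.g.
#     ["/files/etc/apache2/ports.conf/directive[1]/arg"]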
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does
# this, but we do need to strip quotes for all normal arguments.
# Note: a normal argument may be a quoted variable,
# so strip quotes now, not later
if not value:
return None
else:
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
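# Worked example (assumed state, for illustration): with
#     self.variables == {"SERVERNAME": "example.com"}
# an arg node whose raw value is '"${SERVERNAME}"' (quotes included)
# is returned as "example.com": the quotes are stripped first, then
# the ${...} reference is substituted from self.variables.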
def _exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
def _pass_filter(self, match, filter_):
"""Determine if directive passes a filter.
:param str match: Augeas path
:param tuple filter_: tuple of the form
("lowercase directive name", set of relevant parameters)
"""
match_l = match.lower()
last_match_idx = match_l.find(filter_[0])
while last_match_idx != -1:
# Check args
end_of_if = match_l.find("/", last_match_idx)
# This should be aug.get (vars are not used e.g. parser.aug_get)
expression = self.aug.get(match[:end_of_if] + "/arg")
if expression.startswith("!"):
# Strip off "!"
if expression[1:] in filter_[1]:
return False
else:
if expression not in filter_[1]:
return False
last_match_idx = match_l.find(filter_[0], end_of_if)
return True
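# Worked example (illustrative): for a match inside an
# <IfModule !mod_ssl.c> block, the arg expression is "!mod_ssl.c";
# if "mod_ssl.c" is in self.modules the negated guard fails and the
# match is filtered out (returns False). Without the "!", the match
# survives only if the module is loaded.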
def _get_include_path(self, arg):
"""Converts an Apache Include directive into Augeas path.
Converts an Apache Include directive argument into an Augeas
searchable path
.. todo:: convert to use os.path.join()
:param str arg: Argument of Include directive
:returns: Augeas path string
:rtype: str
"""
# Check to make sure only expected characters are used <- maybe remove
# validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
# matchObj = validChars.match(arg)
# if matchObj.group() != arg:
# logger.error("Error: Invalid regexp characters in %s", arg)
# return []
# Remove beginning and ending quotes
arg = arg.strip("'\"")
# Standardize the include argument based on server root
if not arg.startswith("/"):
# Normpath will condense ../
arg = os.path.normpath(os.path.join(self.root, arg))
else:
arg = os.path.normpath(arg)
# Attempts to add a transform to the file if one does not already exist
if os.path.isdir(arg):
self.parse_file(os.path.join(arg, "*"))
else:
self.parse_file(arg)
# Argument represents an fnmatch pattern (shell-style glob); convert it
# Split up the path and convert each into an Augeas accepted regex
# then reassemble
split_arg = arg.split("/")
for idx, split in enumerate(split_arg):
if any(char in ApacheParser.fnmatch_chars for char in split):
# Turn it into an Augeas regex
# TODO: Could this be an Augeas glob instead of a regex?
split_arg[idx] = ("* [label()=~regexp('%s')]" %
self.fnmatch_to_re(split))
# Reassemble the argument
# Note: This also normalizes the argument /serverroot/ -> /serverroot
arg = "/".join(split_arg)
return get_aug_path(arg)
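# Worked example (hypothetical server root "/etc/httpd"): the argument
#     conf.d/*.conf
# is rooted and normalized to /etc/httpd/conf.d/*.conf, and the glob
# component is rewritten, producing roughly
#     /files/etc/httpd/conf.d/* [label()=~regexp('.*\.conf')]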
def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, like globs
:returns: regex suitable for augeas
:rtype: str
"""
if sys.version_info < (3, 6):
# This strips off the final \Z(?ms)
return fnmatch.translate(clean_fn_match)[:-7]
else: # pragma: no cover
# Since Python 3.6, it returns a different pattern like (?s:.*\.load)\Z
return fnmatch.translate(clean_fn_match)[4:-3]
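# Worked example: fnmatch.translate("*.conf") yields ".*\.conf\Z(?ms)"
# on older Pythons and "(?s:.*\.conf)\Z" on 3.6+; both branches above
# reduce it to the bare ".*\.conf" that Augeas regexp() accepts.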
def parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if file_path is parsed by Augeas
If filepath isn't parsed, the file is added and Augeas is reloaded
:param str filepath: Apache config file path
"""
use_new, remove_old = self._check_path_actions(filepath)
# Ensure that we have the latest Augeas DOM state on disk before
# calling aug.load() which reloads the state from disk
if self.configurator:
self.configurator.ensure_augeas_state()
# Test if augeas included file for Httpd.lens
# Note: This works for augeas globs, ie. *.conf
if use_new:
inc_test = self.aug.match(
"/augeas/load/Httpd['%s' =~ glob(incl)]" % filepath)
if not inc_test:
# Load up files
# This doesn't seem to work on TravisCI
# self.aug.add_transform("Httpd.lns", [filepath])
if remove_old:
self._remove_httpd_transform(filepath)
self._add_httpd_transform(filepath)
self.aug.load()
def parsed_in_current(self, filep):
"""Checks if the file path is parsed by current Augeas parser config
ie. returns True if the file is found on a path that's found in live
Augeas configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.parser_paths)
def parsed_in_original(self, filep):
"""Checks if the file path is parsed by existing Apache config.
ie. returns True if the file is found on a path that matches Include or
IncludeOptional statement in the Apache configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.existing_paths)
def _parsed_by_parser_paths(self, filep, paths):
"""Helper function that searches through provided paths and returns
True if file path is found in the set"""
for directory in paths.keys():
for filename in paths[directory]:
if fnmatch.fnmatch(filep, os.path.join(directory, filename)):
return True
return False
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
use_new = "*" not in existing_matches
remove_old = new_file_match == "*"
except KeyError:
use_new = True
remove_old = False
return use_new, remove_old
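# Worked example (assumed parser state): with
#     self.parser_paths == {"/etc/apache2/sites-enabled": ["*"]}
# a new path ".../sites-enabled/site.conf" gives (False, False) since
# the existing "*" already covers it, while ".../sites-enabled/*" over
# an existing explicit file list gives (True, True).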
def _remove_httpd_transform(self, filepath):
"""Remove path from Augeas transform
:param str filepath: filepath to remove
"""
remove_basenames = self.parser_paths[os.path.dirname(filepath)]
remove_dirname = os.path.dirname(filepath)
for name in remove_basenames:
remove_path = remove_dirname + "/" + name
remove_inc = self.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % remove_path)
self.aug.remove(remove_inc[0])
self.parser_paths.pop(remove_dirname)
def _add_httpd_transform(self, incl):
"""Add a transform to Augeas.
This function will correctly add a transform to augeas
The existing augeas.add_transform in python doesn't seem to work for
Travis CI as it loads in libaugeas.so.0.10.0
:param str incl: filepath to include for transform
"""
last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
if last_include:
# Insert a new node immediately after the last incl
self.aug.insert(last_include[0], "incl", False)
self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
# On first use... must load lens and add file to incl
else:
# Augeas uses base 1 indexing... insert at beginning...
self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
self.aug.set("/augeas/load/Httpd/incl", incl)
# Add included path to paths dictionary
try:
self.parser_paths[os.path.dirname(incl)].append(
os.path.basename(incl))
except KeyError:
self.parser_paths[os.path.dirname(incl)] = [
os.path.basename(incl)]
def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
Note: Hack!
Standardize the excl arguments for the Httpd lens in Augeas
Servers sometimes give incorrect defaults
Note: This problem should be fixed in Augeas 1.0. Unfortunately,
Augeas 0.10 appears to be the most popular version currently.
"""
# attempt to protect against augeas error in 0.10.0 - ubuntu
# *.augsave -> /*.augsave upon augeas.load()
# Try to avoid bad httpd files
# There has to be a better way... but after a day and a half of testing
# I had no luck
# This is a hack... work around... submit to augeas if still not fixed
excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
"*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
"*~",
self.root + "/*.augsave",
self.root + "/*~",
self.root + "/*/*augsave",
self.root + "/*/*~",
self.root + "/*/*/*.augsave",
self.root + "/*/*/*~"]
for i, excluded in enumerate(excl, 1):
self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)
self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self.loc["root"]
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
"""Returns case insensitive regex.
Returns a sloppy, but necessary version of a case insensitive regex.
Any string should be able to be submitted and the string is
escaped and then made case insensitive.
May be replaced by a more proper /i once augeas 1.0 is widely
supported.
:param str string: string to make case i regex
"""
return "".join(["[" + c.upper() + c.lower() + "]"
if c.isalpha() else c for c in re.escape(string)])
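# Worked example: case_i("Listen") returns
#     "[Ll][Ii][Ss][Tt][Ee][Nn]"
# which matches any capitalization of "Listen" inside an Augeas
# regexp() expression.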
def get_aug_path(file_path):
"""Return augeas path for full filepath.
:param str file_path: Full filepath
"""
return "/files%s" % file_path
| {
"repo_name": "jsha/letsencrypt",
"path": "certbot-apache/certbot_apache/parser.py",
"copies": "1",
"size": "27616",
"license": "apache-2.0",
"hash": 263746932729016600,
"line_mean": 36.9862448418,
"line_max": 91,
"alpha_frac": 0.5740874855,
"autogenerated": false,
"ratio": 4.195047850524078,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5269135336024078,
"avg_score": null,
"num_lines": null
} |