| text (string, lengths 12 to 1.05M) | repo_name (string, lengths 5 to 86) | path (string, lengths 4 to 191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, lengths 1 to 23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
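Each row that follows pairs a source file (the `text` column) with its repository metadata. As a minimal sketch of how such rows might be consumed, assuming the dump has been exported locally as a JSON-lines file whose field names mirror the schema above (the file path is hypothetical, not a published loader):

```python
import json

# Hypothetical local export; field names follow the schema header above.
with open("python_keyword_corpus.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # Keep only rows tagged with a given keyword.
        if "Gaussian" in row["keyword"]:
            print(row["repo_name"], row["path"], row["size"], row["text_hash"])
```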
import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
('"', '&quot;'),
("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '&lt;&amp;')
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('<f', '<f'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&#;"),
("ͫ ;", "&#875 ;"),
("abc;", "&#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
| faun/django_test | tests/regressiontests/utils/html.py | Python | bsd-3-clause | 4,041 | ["ADF"] | 7e0dbd3a258eef037434c10613c26f858afd789d60e2bcae171c324d1242a138 |
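The `test_escape` table above maps the five HTML-special characters to their entity forms. A minimal standalone sketch of the same idea, using the standard-library `html` module as a stand-in for `django.utils.html` (note the stdlib escapes `'` to `&#x27;`, whereas the Django test above expects the decimal form `&#39;`):

```python
import html

# html.escape handles &, < and > by default; quote=True also escapes " and '.
cases = (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;"),
         ('"', "&quot;"), ("'", "&#x27;"))
for raw, expected in cases:
    assert html.escape(raw, quote=True) == expected
```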
import unittest
import sys
import re
import logging
sys.path.append('../PyRoute')
from TradeCodes import TradeCodes
from Star import Star
from Galaxy import Sector, Galaxy
class TestTradeCode(unittest.TestCase):
def setUp(self):
self.star1 = Star.parse_line_into_star(
"0103 Irkigkhan C9C4733-9 Fl { 0 } (E69+0) [4726] B - - 123 8 Im M2 V ",
Sector(' Core', ' 0, 0'), 'fixed', 'fixed')
self.star2 = Star.parse_line_into_star(
"0104 Shana Ma E551112-7 Lo Po { -3 } (301-3) [1113] B - - 913 9 Im K2 IV M7 V ",
Sector(' Core', ' 0, 0'), 'fixed', 'fixed')
self.logger = logging.getLogger("PyRoute")
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel("INFO")
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def testLo(self):
code = TradeCodes("Lo")
self.assertTrue(code.pcode is None)
self.assertEqual(u'Lo', str(code))
self.assertTrue(code.low)
self.assertFalse(code.high)
def testOrdering(self):
code = TradeCodes("Wa Ag Ni")
self.assertEqual('Wa', code.pcode)
self.assertEqual(u'Ag Ni Wa', str(code))
self.assertTrue(code.agricultural)
self.assertTrue(code.nonindustrial)
def testColony(self):
code = TradeCodes(u"Ph C:0404")
self.assertEqual([u'C:0404'], code.owned, code.owned)
self.assertEqual([u'C:Spin-0404'], code.colonies("Spinward Marches"), code.colonies("Spinward Marches"))
self.assertEqual([], code.owners('Spinward Marches'))
def testOwned(self):
code = TradeCodes(u"Ag O:1011")
self.assertEqual([u'O:1011'], code.owned, code.owned)
self.assertEqual([u'O:Dene-1011'], code.owners('Deneb'))
self.assertEqual([], code.colonies('Deneb'))
def testSophonts(self):
code = TradeCodes(u"(Wiki)")
self.assertEqual([u'WikiW'], code.homeworld, code.homeworld)
self.assertEqual([u'WikiW'], code.sophonts, code.sophonts)
def testSophontsPartial(self):
code = TradeCodes(u"(Wiki)4")
self.assertEqual([u'Wiki4'], code.homeworld, code.homeworld)
self.assertEqual([u'Wiki4'], code.sophonts)
def testWorldSophont(self):
code = TradeCodes("Ag Huma4")
self.assertFalse(code.homeworld)
self.assertEqual(['Huma4'], code.sophonts)
self.assertEqual(['Ag'], code.codeset)
def testWorldSophontsMultiple(self):
code = TradeCodes("Ag Wiki4 Huma2")
self.assertFalse(code.homeworld)
self.assertEqual(['Wiki4', 'Huma2'], code.sophonts)
self.assertEqual(['Ag'], code.codeset)
def testSophontCombined(self):
code = TradeCodes("Ri (Wiki) Huma4 Alph2 (Deneb)2")
self.assertTrue(len(code.homeworld) > 0)
self.assertEqual(['Huma4', 'Alph2', 'WikiW', 'Dene2'], code.sophonts, msg=code.sophonts)
self.assertEqual(['WikiW', 'Dene2'], code.homeworld, msg=code.homeworld)
self.assertEqual(['Ri'], code.codeset, code.codeset)
def testCodeCheck(self):
code = TradeCodes("Fl")
self.assertTrue(code.check_world_codes(self.star1))
def testCodeCheck2(self):
code = TradeCodes("Po Lo")
self.assertTrue(code.check_world_codes(self.star2))
self.assertTrue(code.poor)
self.assertTrue(code.low)
def testCodeCheckFails(self):
code = TradeCodes("Wa")
with self.assertLogs(self.logger, level='ERROR') as log:
self.assertFalse(code.check_world_codes(self.star1))
# assert that what we expected was logged
self.assertEqual(2, len(log.output))
self.assertEqual(
[
'ERROR:PyRoute.TradeCodes:Irkigkhan (Core 0103)-C9C4733-9 Calculated "Fl" not in trade codes [\'Wa\']',
'ERROR:PyRoute.TradeCodes:Irkigkhan (Core 0103)-C9C4733-9 Found invalid "Wa" in trade codes: [\'Wa\']'
],
log.output)
with self.assertLogs(self.logger, level='ERROR') as log:
self.assertFalse(code.check_world_codes(self.star2))
# assert that what we expected was logged
self.assertEqual(3, len(log.output))
self.assertEqual(
[
'ERROR:PyRoute.TradeCodes:Shana Ma (Core 0104)-E551112-7 Calculated "Po" not in trade codes [\'Wa\']',
'ERROR:PyRoute.TradeCodes:Shana Ma (Core 0104)-E551112-7 Found invalid "Wa" in trade codes: [\'Wa\']',
'ERROR:PyRoute.TradeCodes:Shana Ma (Core 0104) - Calculated "Lo" not in trade codes [\'Wa\']',
],
log.output
)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| makhidkarun/traveller_pyroute | Tests/testTradeCode.py | Python | mit | 5,025 | ["Galaxy"] | f18fd9e9cdd96aa9deff563c8ec8eac32ca60be7306876fbd9d435c62a6a684f |
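`testCodeCheckFails` above drives its assertions through `unittest.TestCase.assertLogs`, which captures records sent to a logger and exposes them as `LEVEL:logger_name:message` strings in `log.output`. A small self-contained sketch of that mechanism, independent of PyRoute:

```python
import logging
import unittest

class AssertLogsSketch(unittest.TestCase):
    def test_captures_error_records(self):
        logger = logging.getLogger("PyRoute")
        with self.assertLogs(logger, level="ERROR") as log:
            logger.error('Calculated "Fl" not in trade codes')
        # Captured records are rendered as LEVEL:logger_name:message.
        self.assertEqual(
            ['ERROR:PyRoute:Calculated "Fl" not in trade codes'],
            log.output)

if __name__ == "__main__":
    unittest.main()
```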
import collections
import numpy
import os
import pandas
import pybedtools
import pysam
from grocsvs import step
from grocsvs import structuralvariants
from grocsvs.stages import final_clustering
from grocsvs.stages import refine_grid_search_breakpoints
pandas.options.display.max_rows = 500
CHUNKSIZE = 10
def nchunks(events):
return numpy.ceil(len(events)/float(CHUNKSIZE)).astype(int)
class MergeGenotypesStep(step.StepChunk):
@staticmethod
def get_steps(options):
yield MergeGenotypesStep(options)
def __init__(self, options):
self.options = options
def __str__(self):
return ".".join([self.__class__.__name__])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
paths = {
"genotypes": os.path.join(directory, "genotypes.tsv")
}
return paths
def run(self):
events = load_events(self.options)
genotypes = []
for chunk in range(nchunks(events)):
inpath = GenotypingStep(self.options, chunk).outpaths(final=True)["genotypes"]
genotypes.append(pandas.read_table(inpath))
genotypes = pandas.concat(genotypes, ignore_index=True)
genotypes["chromx"] = genotypes["chromx"].astype("string")
genotypes["chromy"] = genotypes["chromy"].astype("string")
counts = genotypes.groupby("cluster").count()["total"]
for row in genotypes.itertuples():
print row.Index, row.cluster, counts[row.cluster]
genotypes.loc[row.Index, "cluster_size"] = int(counts[row.cluster])
genotypes["dist"] = (genotypes["x"] - genotypes["y"]).abs()
genotypes.loc[genotypes["chromx"]!=genotypes["chromy"], "dist"] = numpy.nan
genotypes.to_csv(self.outpaths(final=False)["genotypes"], sep="\t", index=False)
class GenotypingStep(step.StepChunk):
@staticmethod
def get_steps(options):
events = load_events(options)
for chunk in range(nchunks(events)):
yield GenotypingStep(options, chunk)
def __init__(self, options, chunk):
self.options = options
self.chunk = chunk
def __str__(self):
return ".".join([self.__class__.__name__, str(self.chunk)])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
file_name = "genotypes.{}.tsv".format(self.chunk)
paths = {
"genotypes": os.path.join(directory, file_name)
}
return paths
def run(self):
clustered_events = load_events(self.options, self.chunk)
breakpoints = clustered_events.loc[clustered_events["kind"]=="breakpoint"]
self.logger.log("Genotyping...")
genotypes = genotype_breakpoints(breakpoints, self.options)
columns = [c for c in self.get_columns() if c in genotypes]
genotypes = genotypes[columns]
self.logger.log("Detecting segmental duplications...")
try:
frag_length_filter(genotypes, self.options)
except:
self.logger.log("warning: could not accurately apply fragment length filter;"
" this can happen when there is not genome-wide data availabe")
segdup_detector(genotypes, self.options)
compare_to_blacklist(genotypes, self.options)
apply_filters(genotypes, self.options)
outpath = self.outpaths(final=False)["genotypes"]
genotypes.to_csv(outpath, sep="\t", index=False)
def get_columns(self):
columns = ["chromx", "x", "chromy", "y", "orientation", "cluster",
"kind", "assembled", "p", "shared", "total"]
for sample, dataset in self.options.iter_10xdatasets():
columns.extend(map(lambda x: "{}_{}".format(sample.name, x), ["p_resampling", "shared", "total", "x_hap0", "x_hap1", "y_hap0", "y_hap1"]))
return columns
def load_events(options, chunk=None):
final_clustering_step = final_clustering.FinalClusterSVsStep(options)
clustered_events_path = final_clustering_step.outpaths(final=True)["edges"]
clustered_events = pandas.read_table(clustered_events_path)
clustered_events["chromx"] = clustered_events["chromx"].astype("string")
clustered_events["chromy"] = clustered_events["chromy"].astype("string")
if chunk is not None:
clustered_events = clustered_events.iloc[chunk*CHUNKSIZE:((chunk+1)*CHUNKSIZE)]
# if self.options.debug:
# self.logger.log("LOADING ONLY 100 EVENTS....")
# clustered_events = clustered_events.iloc[:100]
return clustered_events
# def get_good_bc_counts_by_dataset(options):
# good_bc_counts_by_dataset = {}
# for sample, dataset in options.iter_10xdatasets():
# sample_info = options.sample_info(sample.name)
# dataset_info = sample_info[dataset.id]
# good_bc_counts_by_dataset[dataset.id] = dataset_info["good_bc_count"]
# return good_bc_counts_by_dataset
def genotype_breakpoints(breakpoints, options):
# TODO: gah
dist1 = -500
dist2 = 5000
genotypes = []
good_bc_counts_by_dataset, barcode_frequencies_by_dataset = \
refine_grid_search_breakpoints.get_barcode_info(options)
for i, breakpoint in enumerate(breakpoints.itertuples()):
if i % 10 == 0:
print i, "of", len(breakpoints)
genotype = refine_grid_search_breakpoints.quantify_breakpoint(
breakpoint.chromx, breakpoint.x,
breakpoint.chromy, breakpoint.y,
breakpoint.orientation,
options, good_bc_counts_by_dataset,
barcode_frequencies_by_dataset,
dist1, dist2,
with_phasing=True)
genotype = genotype.rename({"new_x":"x", "new_y":"y"})
genotype["kind"] = "breakpoint"
genotype["cluster"] = breakpoint.cluster
genotype["assembled"] = breakpoint.assembled
genotypes.append(genotype)
genotypes = pandas.DataFrame(genotypes)
return genotypes
# def genotype_facing(clustered_events, options):
# facing = clustered_events.loc[clustered_events["kind"]=="facing"]
# import graphing
# n1y = graphing.Node(facing["chromx"], facing["x"], facing["orientation"][0])
# n2x = graphing.Node(facing["chromy"], facing["y"], facing["orientation"][1])
def compare_to_blacklist(genotypes, options, distance=10000):
bad = collections.defaultdict(set)
for blacklist_path in options.blacklists:
blacklist_bed = ensure_file_is_bed(blacklist_path)
for which in "xy":
svs_bed = _convert_svs_to_bed(genotypes, which)
c = svs_bed.closest(blacklist_bed, d=True) \
.to_dataframe(names=["chrom","start","end","event_id",
"black_chrom","black_start","black_end","black_type",
"distance"])
close = c.loc[c["distance"].abs() < distance]
for row in close.itertuples():
bad[row.event_id].add(row.black_type)
genotypes["blacklist"] = ""
genotypes["quality"] = "PASS"
for event_id, reasons in bad.items():
if event_id not in genotypes.index: continue
genotypes.loc[event_id, "blacklist"] = ",".join(sorted(set(reasons)))
genotypes.loc[event_id, "quality"] = "FAIL"
def ensure_file_is_bed(path):
if path.endswith(".bedpe"):
table = pandas.read_table(path)
columns = ["chromx", "startx", "endx", "chromy", "starty", "endy", "name"]
i = 0
while len(columns) < len(table.columns):
columns.append("extra_{}".format(i))
table.columns = columns
bedx = pandas.DataFrame()
bedx["chrom"] = table["chromx"]
bedx["start"] = table["startx"]
bedx["end"] = table["endx"]
bedx["name"] = table["name"]
bedy = pandas.DataFrame()
bedy["chrom"] = table["chromy"]
bedy["start"] = table["starty"]
bedy["end"] = table["endy"]
bedy["name"] = table["name"]
bed = pandas.concat([bedx, bedy], ignore_index=True)
return pybedtools.BedTool.from_dataframe(bed).sort()
return pybedtools.BedTool(path).sort()
def _convert_svs_to_bed(table, col):
svs = pandas.DataFrame()
svs["chrom"] = table["chrom{}".format(col)]
svs["start"] = table[col]
svs["end"] = table[col] + 1
svs["cluster"] = table.index
svs_bed = pybedtools.BedTool.from_dataframe(svs).sort()
return svs_bed
## Segmental duplication detection
def segdup_detector(genotypes, options):
nearby_snv_counts = []
for i, event in genotypes.iterrows():
cur_nearby_snvs_x = count_nearby_snvs(options, event["chromx"], event["x"])
cur_nearby_snvs_y = count_nearby_snvs(options, event["chromy"], event["y"])
nearby_snv_counts.append(cur_nearby_snvs_x+cur_nearby_snvs_y)
genotypes["nearby_snvs"] = nearby_snv_counts
def count_nearby_snvs(options, chrom, pos):
nearby_snvs = 0
start = max(0, pos - 500)
end = pos + 500
for sample, dataset in options.iter_10xdatasets():
ref_counts = []
non_ref_counts = []
cur_ref_counts, cur_non_ref_counts = count_ref_reads(
dataset.bam, options.reference, chrom, start, end)
# ref_counts.append(cur_ref_counts)
# non_ref_counts.append(cur_non_ref_counts)
# ref_counts = numpy.sum(ref_counts, axis=0)
# non_ref_counts = numpy.sum(non_ref_counts, axis=0)
cur_nearby_snvs = (cur_non_ref_counts/(cur_non_ref_counts+cur_ref_counts).astype(float) > 0.20).sum()
nearby_snvs = max(nearby_snvs, cur_nearby_snvs)
return nearby_snvs
def count_ref_reads(bampath, reference, chrom, start, end):
ref_counts = numpy.zeros(end-start)
non_ref_counts = numpy.zeros(end-start)
bam = pysam.AlignmentFile(bampath)
# stepper = "all" skips dupe, unmapped, secondary, and qcfail reads
start = max(0, start)
for col in bam.pileup(chrom, start, end, truncate=True, stepper="all"):
refnuc = reference.fasta[chrom][col.reference_pos].upper()
nuc_counts = collections.Counter()
for read in col.pileups:
if read.query_position is None:
# nuc_counts["indel"] += 1
pass
else:
nuc_counts[read.alignment.query_sequence[read.query_position]] += 1
ref_counts[col.reference_pos-start] = nuc_counts[refnuc]
non_ref_counts[col.reference_pos-start] = sum(nuc_counts.values()) - nuc_counts[refnuc]
return ref_counts, non_ref_counts
def count_nearby_Ns(options, genotypes):
nearby_Ns = []
for i, event in genotypes.iterrows():
chromx, startx, endx = event["chromx"], event["x"]-5000, event["x"]+5000
chromy, starty, endy = event["chromy"], event["y"]-5000, event["y"]+5000
startx = max(0, startx)
starty = max(0, starty)
seqx = options.reference.fasta[chromx][startx:endx].upper()
seqy = options.reference.fasta[chromy][starty:endy].upper()
nearby_Ns.append(max(seqx.count("N"), seqy.count("N")))
return numpy.array(nearby_Ns)
def apply_filters(genotypes, options):
# TODO: should probably more intelligently combine these filters to produce
# some sort of quality score
# TODO: should learn this from the data (otherwise may not work with
# non-human data)
genotypes.loc[genotypes["nearby_snvs"] >= 15, "quality"] = "FAIL"
nearby_Ns = count_nearby_Ns(options, genotypes)
bad_nearby_N = (nearby_Ns > 50)
genotypes.loc[bad_nearby_N, "quality"] = "FAIL"
bad_nearby_N_vector = (",N=" + pandas.Series(nearby_Ns[bad_nearby_N].astype(str))).values
genotypes.loc[bad_nearby_N, "blacklist"] += bad_nearby_N_vector
print genotypes.loc[bad_nearby_N, "blacklist"]
def frag_length_filter(genotypes, options):
dist1 = -500
dist2 = 5000
flgs = {}
for sample, dataset in options.iter_10xdatasets():
sample_info = options.sample_info(sample.name)
dataset_info = sample_info[dataset.id]
flgs[sample.name] = FragLengthGenotyper(dataset_info["frag_length_distributions"])
# for n, cluster in genotypes.groupby("cluster"):
for event in genotypes.itertuples():
frag_length_filter = False
for sample, dataset in options.iter_10xdatasets():
fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new(
options, sample, dataset,
event.chromx, event.x, event.chromy, event.y, event.orientation,
dist1, dist2)
if len(merged) < 5:
continue
lengths = calc_frag_lengths(event.x, event.y, event.orientation, merged)
frag_length_filter |= flgs[sample.name].genotype(lengths)
genotypes.loc[event.Index, "{}_lengths_50".format(sample.name)] = numpy.percentile(lengths, 50)
genotypes.loc[event.Index, "{}_lengths_90".format(sample.name)] = numpy.percentile(lengths, 90)
genotypes.loc[event.Index, "frag_length_passes"] = frag_length_filter
print genotypes
def calc_frag_lengths(x, y, orientation, merged_frags):
"""
given a merged fragments table, calculate the fragment lengths assuming the
fragments span the provided x and y breakpoints; adds frag lengths inplace
"""
if orientation[0] == "+":
partlen_x = x - merged_frags["start_pos_x"]
else:
partlen_x = merged_frags["end_pos_x"] - x
if orientation[1] == "+":
partlen_y = y - merged_frags["start_pos_y"]
else:
partlen_y = merged_frags["end_pos_y"] - y
merged_len = partlen_x + partlen_y
return merged_len
class FragLengthGenotyper(object):
def __init__(self, distributions):
self._set_distributions(distributions)
self.quantiles = {}
for quantile in [0.2, 0.5, 0.8]:
self.quantiles[quantile] = [numpy.percentile(d, quantile*100)
for d in self.distributions]
self.cutoffs = {}
for quantile in sorted(self.quantiles):
self.cutoffs[quantile] = numpy.percentile(
self.quantiles[quantile], [2.5, 97.5])
self.median_median = numpy.median(self.quantiles[0.5])
def _set_distributions(self, distributions):
"""
do some filtering on the fragment length distributions to remove
outliers
"""
medians = [numpy.median(d) for d in distributions]
sizes = [len(d) for d in distributions]
size_cutoffs = numpy.percentile(sizes, [2.5, 97.5])
distributions = [d for d in distributions
if (size_cutoffs[0]<len(d)<size_cutoffs[1])]
median_cutoffs = numpy.percentile(medians, [2.5, 97.5])
distributions = [d for d in distributions
if (median_cutoffs[0]<numpy.median(d)<median_cutoffs[1])]
self.distributions = distributions
def genotype(self, frag_lengths, lenient=False):
geno = True
# which = {}
# exceptions = []
for quantile in sorted(self.quantiles):
curquantile = numpy.percentile(frag_lengths, quantile*100)
curcutoffs = self.cutoffs[quantile].copy()
if quantile != 0.5:
curcutoffs *= [0.95, 1.05]
if lenient:
curcutoffs *= [0.95, 1.05]
print quantile, curquantile, curcutoffs
if not (curcutoffs[0]<curquantile<curcutoffs[1]):
geno = False
# which[quantile] = "F"
# exceptions.append((curquantile, quantile))
# else:
# which[quantile] = "T"
return geno#, which, exceptions
def genotype_after_shifting(self, frag_lengths, lenient=False):
offset = self.get_offset_at_medians(frag_lengths)
shifted = frag_lengths + offset
return self.genotype(shifted, lenient=lenient)
def get_offset_at_medians(self, frag_lengths):
median = numpy.median(frag_lengths)
offset = self.median_median - median
return offset
| grocsvs/grocsvs | src/grocsvs/stages/genotyping.py | Python | mit | 16,435 | ["pysam"] | 1f530caa9772f49ab93f2db354575c37ca389d1ba4014706f4401b938aea18a3 |
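`calc_frag_lengths` above adds the two partial fragment lengths on either side of a breakpoint, with the orientation character deciding whether each fragment is measured from its start or its end coordinate. A small illustration of that arithmetic on plain dictionaries (made-up coordinates, not grocsvs data structures):

```python
def partial_length(breakpoint_pos, start, end, orientation):
    # "+" measures from the fragment start up to the breakpoint;
    # "-" measures from the breakpoint to the fragment end.
    return breakpoint_pos - start if orientation == "+" else end - breakpoint_pos

frag = {"start_pos_x": 1000, "end_pos_x": 1400,
        "start_pos_y": 5000, "end_pos_y": 5600}
x, y = 1350, 5100
for orientation in ("++", "+-", "-+", "--"):
    length = (partial_length(x, frag["start_pos_x"], frag["end_pos_x"], orientation[0])
              + partial_length(y, frag["start_pos_y"], frag["end_pos_y"], orientation[1]))
    print(orientation, length)
```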
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-ai
------------
Tests for `django-ai` models module.
"""
import random
import numpy as np
from bayespy.nodes import Gaussian
from django.test import (TestCase, )
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
# from django.urls import reverse
from django.contrib.auth.models import User
from django_ai.bayesian_networks import models
from django_ai.bayesian_networks.bayespy_constants import (
DIST_GAUSSIAN_ARD, DIST_GAMMA, DIST_GAUSSIAN, DET_ADD,
DIST_DIRICHLET, DIST_WISHART, DIST_CATEGORICAL, DIST_MIXTURE, )
from django_ai.bayesian_networks.utils import (
parse_node_args, mahalanobis_distance, )
from tests.test_models import models as test_models
class TestBN(TestCase):
def setUp(self):
# Set the seeds
random.seed(123456)
np.random.seed(123456)
# Set up the user
self.user, _ = User.objects.get_or_create(
username='testadmin', email='testadmin@example.com',
is_superuser=True
)
self.user.set_password("12345")
self.user.save()
self.client.login(username='testadmin', password='12345')
# BN 1
self.bn1, _ = models.BayesianNetwork.objects.get_or_create(
name="BN for tests - 1")
self.mu, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn1,
name="mu",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN_ARD,
distribution_params="0, 1e-6",
graph_interval="-10, 20"
)
self.tau, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn1,
name="tau",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAMMA,
distribution_params="1e-6, 1e-6",
graph_interval="1e-6, 0.1"
)
self.ui_avg1, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn1,
name="userinfo.avg1",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=True,
distribution=DIST_GAUSSIAN_ARD,
distribution_params="mu, tau",
)
self.ui_avg1_col, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.ui_avg1,
ref_model=ContentType.objects.get(model="userinfo",
app_label="test_models"),
ref_column="avg1",
)
self.e1, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn1,
description="mu -> userinfo.avg1",
parent=self.mu,
child=self.ui_avg1
)
self.e2, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn1,
description="tau -> userinfo.avg1",
parent=self.tau,
child=self.ui_avg1
)
# BN 2
self.bn2, _ = models.BayesianNetwork.objects.get_or_create(
name="BN for tests - 2")
self.x1, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn2,
name="x1",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN,
distribution_params="[0, 0], [[1, 0], [0,1]]",
)
self.x2, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn2,
name="x2",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN,
distribution_params="[1, 1], [[1, 0], [0,1]]",
)
self.z, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn2,
name="z",
node_type=models.BayesianNetworkNode.NODE_TYPE_DETERMINISTIC,
is_observable=False,
deterministic=DET_ADD,
deterministic_params="x1, x2",
)
self.bn2e1, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn2,
description="x1 -> z",
parent=self.x1,
child=self.z
)
self.bn2e2, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn2,
description="x2 -> z",
parent=self.x2,
child=self.z
)
# BN 3 (Clustering)
self.bn3, _ = models.BayesianNetwork.objects.get_or_create(
name="Clustering (testing)",
network_type=models.BayesianNetwork.BN_TYPE_CLUSTERING,
engine_meta_iterations=10,
results_storage="dmf:test_models.userinfo.cluster_1",
counter_threshold=2,
threshold_actions=":recalculate",
)
self.alpha, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="alpha",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_DIRICHLET,
distribution_params="numpy.full(10, 1e-05)",
)
self.Z, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="Z",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_CATEGORICAL,
distribution_params="alpha, plates=(:dl_Y, ), :ifr",
)
self.mu_c, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="mu",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN,
distribution_params=("numpy.zeros(2), [[1e-5,0], [0, 1e-5]], "
"plates=(10, )"),
)
self.Lambda, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="Lambda",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_WISHART,
distribution_params="2, [[1e-5,0], [0, 1e-5]], plates=(10, )",
)
self.Y, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="Y",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=True,
distribution=DIST_MIXTURE,
distribution_params=("Z, @bayespy.nodes.Gaussian(), "
"mu, Lambda, :noplates"),
)
#
self.Y_col_avg_logged, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.Y,
ref_model=ContentType.objects.get(
model="userinfo", app_label="test_models"),
ref_column="avg_time_pages"
)
self.Y_col_avg_pages_a, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.Y,
ref_model=ContentType.objects.get(
model="userinfo", app_label="test_models"),
ref_column="avg_time_pages_a"
)
#
self.alpha_to_Z, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="alpha -> Z",
parent=self.alpha,
child=self.Z
)
self.Z_to_Y, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="Z -> Y",
parent=self.Z,
child=self.Y
)
self.mu_to_Y, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="mu -> Y",
parent=self.mu_c,
child=self.Y
)
self.Lambda_to_Y, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="Lambda -> Y",
parent=self.Lambda,
child=self.Y
)
def test_bn_inference(self):
self.bn1.perform_inference(recalculate=True)
Q = self.bn1.engine_object
mu = Q['mu'].get_moments()[0]
tau = Q['tau'].get_moments()[0]
# For avoiding rounding and float differences
self.assertEqual(str(mu)[:5], '9.809')
self.assertEqual(str(tau)[:5], '0.039')
def test_bn_cached_eo(self):
self.bn1.get_engine_object()
expected_output = self.bn1.engine_object
actual_output = self.bn1.get_engine_object()
self.assertEqual(expected_output, actual_output)
def test_ww_bn_reset_inference(self):
"""
Django parallel test running has issues; the 'ww' in the test name
makes it run at the end, where no problems arise.
"""
self.setUp()
expected_clean_metadata = {
"clusters_labels": {},
"prev_clusters_labels": {},
"clusters_means": {},
"prev_clusters_means": {},
"clusters_sizes": {},
"prev_clusters_sizes": {},
"columns": [],
}
# Avoid unnecessary calculation
self.bn3.engine_meta_iterations = 1
#
self.bn3.perform_inference()
self.assertTrue(self.bn3.engine_object is not None)
self.assertTrue(self.bn3.engine_object_timestamp is not None)
self.assertTrue(self.bn3.metadata != expected_clean_metadata)
results = test_models.UserInfo.objects.values_list(
"cluster_1", flat=True)
self.assertTrue(any(list(results)))
self.bn3.reset_inference()
self.assertTrue(self.bn3.engine_object is None)
self.assertTrue(self.bn3.engine_object_timestamp is None)
self.assertTrue(self.bn3.metadata == expected_clean_metadata)
results = test_models.UserInfo.objects.values_list(
"cluster_1", flat=True)
self.assertTrue(not any(list(results)))
def test_bn_deterministic_nodes(self):
# Initialize the EO
self.bn2.get_engine_object(reconstruct=True, save=True)
self.z.refresh_from_db()
z_eo = self.z.get_engine_object()
expected_moments = [np.array([1., 1.]),
np.array([[3., 1.], [1., 3.]])]
moments = z_eo.get_moments()
self.assertTrue(all(expected_moments[0] == moments[0]))
self.assertTrue(all(expected_moments[1][0] == moments[1][0]))
self.assertTrue(all(expected_moments[1][1] == moments[1][1]))
def test_bn_validation(self):
# Test invalid syntax
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf-examples.models.blabla"
self.bn3.full_clean()
# Test invalid engine
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:examples.models.blabla"
self.bn3.full_clean()
# Test 'dfm' invalid path
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:examples.models"
self.bn3.full_clean()
# Test 'dfm' invalid model
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:tests.non-existant-model"
self.bn3.full_clean()
# Test 'dfm' invalid field
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:tests.UserInfo.n-e-field"
self.bn3.full_clean()
# Test 'dfm' correct content
self.bn3.results_storage = "dmf:test_models.UserInfo.cluster_1"
self.assertEqual(self.bn3.full_clean(), None)
def test_bn_node_validation(self):
# Test First Step: fields corresponds to Node type
with self.assertRaises(ValidationError):
self.mu.deterministic_params = "a, b"
self.mu.full_clean()
self.setUp()
with self.assertRaises(ValidationError):
self.mu.node_type = \
models.BayesianNetworkNode.NODE_TYPE_DETERMINISTIC
self.mu.full_clean()
# Test Second Step: Validations on Stochastic Types
# Stochastic Nodes must have a Distribution
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution = None
self.mu.full_clean()
# Stochastic Nodes must have a Distribution Params
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution_params = None
self.mu.full_clean()
# Test Third Step: Validations on Deterministic Types
# Deterministic Nodes must have a function
self.setUp()
with self.assertRaises(ValidationError):
self.z.deterministic = None
self.z.full_clean()
# Deterministic Nodes must have function parameters
self.setUp()
with self.assertRaises(ValidationError):
self.z.deterministic_params = None
self.z.full_clean()
# Test Fourth Step: Arg parsing
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution_params = '#my_param1, %my_param2'
self.mu.full_clean()
# Test Final Step: BayesPy initialization
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution_params = "1, 2, 3, 4, 5"
self.mu.full_clean()
def test_node_column_validation(self):
# Node Columns must reference a model
self.setUp()
with self.assertRaises(ValidationError):
self.ui_avg1_col.ref_model = None
self.ui_avg1_col.full_clean()
# Node Columns must be linked to a field or a callable of a model
self.setUp()
with self.assertRaises(ValidationError):
self.ui_avg1_col.ref_column = None
self.ui_avg1_col.full_clean()
# Node Columns must be linked to an existing fields of a model
self.setUp()
with self.assertRaises(ValidationError):
self.ui_avg1_col.ref_column = "non-existant-field"
self.ui_avg1_col.full_clean()
def test_bn_get_nodes_names(self):
expected_output = ['mu', 'tau', 'userinfo.avg1']
actual_output = list(self.bn1.get_nodes_names())
self.assertEqual(expected_output, actual_output)
def test_node_get_data(self):
# Test no columns assigned
self.setUp()
with self.assertRaises(ValueError):
self.ui_avg1.data_columns.all().delete()
self.ui_avg1.get_data()
# Test not-matching column lengths
self.setUp()
smaller_data_column, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.ui_avg1,
ref_model=ContentType.objects.get(
model="userinfo2", app_label="test_models"),
ref_column="avg2"
)
with self.assertRaises(ValidationError):
self.ui_avg1.data_columns.add(smaller_data_column)
self.ui_avg1.get_data()
smaller_data_column.delete()
# Test correct functioning
self.setUp()
expected_output = list(test_models.UserInfo.objects.values_list(
"avg1", flat=True))
actual_output = list(self.ui_avg1.get_data())
self.assertEqual(expected_output, actual_output)
def test_node_get_params_type(self):
self.assertEqual(self.mu.get_params_type(), "distribution")
self.assertEqual(self.z.get_params_type(), "deterministic")
def test_node_reset_engine_object(self):
self.bn1.perform_inference(recalculate=True)
self.ui_avg1 = self.bn1.nodes.last()
self.assertTrue(self.ui_avg1.engine_object is not None)
self.assertTrue(self.ui_avg1.engine_object_timestamp is not None)
self.ui_avg1.reset_engine_object()
self.assertTrue(self.ui_avg1.engine_object is None)
self.assertTrue(self.ui_avg1.engine_object_timestamp is None)
def test_node_get_engine_inferred_object(self):
self.bn1.perform_inference(recalculate=True)
expected_output = self.bn1.engine_object['userinfo.avg1']
actual_output = self.ui_avg1.get_engine_inferred_object()
self.assertEqual(expected_output, actual_output)
def test_node_resolve_eos_in_params(self):
self.z.deterministic_params = "x1, x2, x3"
with self.assertRaises(ValueError):
self.z.get_engine_object()
self.z.deterministic_params = "x1, x2, kp=x3"
with self.assertRaises(ValueError):
self.z.get_engine_object()
def test_bn_meta_iterations(self):
self.setUp()
self.bn1.engine_meta_iterations = 5
self.bn1.perform_inference(recalculate=True)
# There must be a dict of size 5
self.assertTrue(len(self.bn1._eo_meta_iterations) == 5)
# containing the same likelihood as there isn't random initialization
for iteration in self.bn1._eo_meta_iterations:
self.assertEqual(
str(self.bn1._eo_meta_iterations[iteration]["L"])[:7],
"-630.42"
)
def test_bn_engine_iterations(self):
self.setUp()
self.bn1.engine_iterations = 1
self.bn1.perform_inference(recalculate=True)
# There must be a dict of size 1, as engine_meta_iterations defaults to
# 1
self.assertTrue(len(self.bn1._eo_meta_iterations) == 1)
# containing the likelihood of the second iteration
self.assertTrue(
str(self.bn1._eo_meta_iterations[0]["eo"].L[0]) != "nan"
)
self.assertEqual(
str(self.bn1._eo_meta_iterations[0]["eo"].L[1]),
"nan"
)
def test_bn_update_eos_struct(self):
bn1_eos_struct = {n.name: {"dm": n, "eo": None}
for n in self.bn1.nodes.all()}
node = self.bn1.nodes.get(name="userinfo.avg1")
models.BayesianNetwork.update_eos_struct(bn1_eos_struct, node)
self.assertTrue('Gamma' in
str(bn1_eos_struct['tau']['eo'].__class__))
self.assertTrue('GaussianARD' in
str(bn1_eos_struct['mu']['eo'].__class__))
def test_node_children(self):
expected_output = [self.ui_avg1]
actual_output = list(self.mu.children())
self.assertEqual(expected_output, actual_output)
def test_bn_whole_clustering(self):
self.setUp()
# Test metadata initialization
expected_initial_metadata = {
"clusters_labels": {},
"prev_clusters_labels": {},
"clusters_means": {},
"prev_clusters_means": {},
"clusters_sizes": {},
"prev_clusters_sizes": {},
"columns": [],
}
self.assertEqual(self.bn3.metadata, expected_initial_metadata)
# Test inference and clustering methods through metadata
self.bn3.perform_inference(recalculate=True)
expected_metadata = {
'prev_clusters_labels': {},
'prev_clusters_means': {},
'clusters_means': {
'A': np.array([0., 0.]),
'B': np.array([16., 16.]),
'C': np.array([20., 20.]),
'D': np.array([20., 20.]),
'E': np.array([25., 25.]),
},
'clusters_labels': {'4': 'E', '1': 'A', '5': 'A', '3': 'A',
'2': 'B', '8': 'A', '7': 'A', '0': 'C',
'6': 'D', '9': 'A'},
'clusters_sizes': {'A': 0, 'B': 50, 'C': 51, 'D': 49, 'E': 50},
'columns': ['avg_time_pages', 'avg_time_pages_a']
}
output_metadata = self.bn3.metadata
self.assertEqual(
output_metadata["prev_clusters_labels"],
expected_metadata["prev_clusters_labels"]
)
self.assertEqual(
output_metadata["prev_clusters_means"],
expected_metadata["prev_clusters_means"]
)
# Test BN.metadata_update_clusters_sizes()
self.assertEqual(
output_metadata["clusters_sizes"],
expected_metadata["clusters_sizes"]
)
# Test BN.assign_clusters_labels()
for cluster in expected_metadata["clusters_means"]:
o_cm = output_metadata["clusters_means"][cluster]
e_cm = expected_metadata["clusters_means"][cluster]
# Check that the cluster means are 'reasonably close' to
# the original ones
self.assertTrue(np.linalg.norm(e_cm - o_cm) ** 2 < 1)
del(output_metadata["clusters_means"][cluster])
self.assertEqual(
output_metadata["clusters_means"],
{}
)
self.assertEqual(
output_metadata["clusters_labels"],
expected_metadata["clusters_labels"],
)
# Test BN.columns_names_to_metadata()
self.assertEqual(
output_metadata["columns"],
expected_metadata["columns"]
)
# Test Results Storage
# BN.get_results()
results = self.bn3.get_results()
# Test results are OK (omitting the rest to avoid pasting a
# list of size 200)
self.assertEqual(results[150:], ["B" for x in range(50)])
# Edge case
self.assertFalse(self.bn1.get_results())
# BN.store_results()
self.bn3.store_results()
stored_results = test_models.UserInfo.objects.all().values_list(
'cluster_1', flat=True)
# Test results are stored OK
self.assertEqual(list(results), list(stored_results))
# -> Test BN.threshold_actions validations
self.bn3.threshold_actions = ":recalculate :not-allowed-action"
with self.assertRaises(ValidationError):
self.bn3.full_clean()
# -> Test BN.counter, BN.counter_threshold and BN.threshold_actions
# Test Triggering an inference
self.threshold_actions = ":recalculate"
prev_timestamp = self.bn3.engine_object_timestamp
self.bn3.counter = 2
self.bn3.save()
# Test the inference has been run by the timestamp
self.assertTrue(self.bn3.engine_object_timestamp > prev_timestamp)
# Test the counter was reset
self.assertEqual(self.bn3.counter, 0)
# Test BN.assign_cluster()
self.assertEqual(
self.bn3.assign_cluster([10, 10]),
"B"
)
self.bn3.reset_inference()
self.assertFalse(
self.bn3.assign_cluster([10, 10])
)
self.assertFalse(
self.bn1.assign_cluster([10, 10])
)
def test_node_args_parsing(self):
# Test "general" parsing
args_string = ('True, :ifr, numpy.ones(2), [[1,2], [3,4]], '
'type=rect, sizes=[3, 4,], coords = ([1,2],[3,4]), '
'func=numpy.zeros(2), plates=:no')
expected_output = {
'args': [
True,
':ifr',
np.array([1., 1.]),
[[1, 2], [3, 4]]
],
'kwargs': {
'type': 'rect',
'sizes': [3, 4],
'coords': ([1, 2], [3, 4]),
'func': np.array([0., 0.]),
'plates': ':no',
}
}
output = parse_node_args(args_string)
# "np.array == np.array" does not return a single bool in NumPy,
# then the comparison "output == expected_output" does not work
# with Django tests. I think I also hit a bug, because for some
# reason, the comparison function that unittest uses for nested
# lists is the array comparison of NumPy and not the standard list
# comparison of Python.
# Test Positional Args
positions_tested = []
for position, arg in enumerate(output["args"]):
# For nested lists, don't know why but it keeps using the
# NumPy array comparison despite not being of its class
if isinstance(arg, np.ndarray) or isinstance(arg, list):
comp = (expected_output["args"][position] ==
output["args"][position])
if not isinstance(comp, bool):
comp = all(comp)
self.assertEqual(comp, True)
else:
self.assertEqual(
expected_output["args"][position],
output["args"][position]
)
positions_tested.insert(0, position)
# Remove the tested elements from output
for pt in positions_tested:
del(output['args'][pt])
# Test Keyword Args
for kw in expected_output['kwargs'].keys():
if (isinstance(expected_output['kwargs'][kw], np.ndarray) or
isinstance(expected_output['kwargs'][kw], list)):
comp = (expected_output['kwargs'][kw] == output["kwargs"][kw])
if not isinstance(comp, bool):
comp = all(comp)
self.assertEqual(comp, True)
else:
self.assertEqual(
expected_output['kwargs'][kw],
output["kwargs"][kw]
)
# Remove the tested element from output
del(output['kwargs'][kw])
# Check there is nothing left in the output
self.assertEqual(output, {"args": [], "kwargs": {}})
# Test not allowed functions
with self.assertRaises(ValueError):
parse_node_args("shutil.rmtree('/')")
with self.assertRaises(ValueError):
parse_node_args("eval('<malicious_code>')")
# Test referencing to a function
args_string = ('@bayespy.nodes.Gaussian()')
expected_output = {
'args': [Gaussian],
'kwargs': {}
}
output = parse_node_args(args_string)
self.assertEqual(output, expected_output)
# Test invalid function invocation
with self.settings(DJANGO_AI_WHITELISTED_MODULES=["numpy", ]):
# Reimport the function with the new settings
from django_ai.bayesian_networks.utils import \
parse_node_args as pnn
with self.assertRaises(ValueError):
pnn("numpy.ones(k)")
# Test flat output in args parsing
expected_output = [np.array([1., 1.])]
output = parse_node_args("numpy.ones(2)", flat=True)
self.assertTrue(all(output[0] == expected_output[0]))
output.pop(0)
self.assertEqual(output, [])
def test_utils_misc(self):
# Test Mahalanobis Distance
self.assertEqual(
mahalanobis_distance([0, 1], [1, 0], [[2, 0], [0, 2]]), 1.0
)
def tearDown(self):
self.bn1.image.delete()
self.mu.image.delete()
self.tau.image.delete()
test_models.UserInfo.objects.all().update(cluster_1=None)
| math-a3k/django-ai | tests/apps/bayesian_networks/test_bns.py | Python | lgpl-3.0 | 27,510 | ["Gaussian"] | 40089dcebb25093840cac258eee24aa8b4a375f3baf3540a6e64e37e77e2b064 |
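The comment block inside `test_node_args_parsing` above concerns NumPy's elementwise equality: comparing an array against another array (or a plain list) returns a boolean array rather than a single `bool`, which is why the test reduces comparisons with `all()`. A short sketch of the behaviour being worked around:

```python
import numpy as np

a = np.array([1.0, 1.0])
b = [1.0, 1.0]

comparison = a == b       # elementwise: array([ True,  True])
print(type(comparison))   # <class 'numpy.ndarray'>, not bool
print(all(comparison))    # True once reduced explicitly
# bool(comparison) would raise: "The truth value of an array ... is ambiguous"
```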
# BEGIN_COPYRIGHT
#
# Copyright (C) 2013-2014 CRS4.
#
# This file is part of vispa.
#
# vispa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# vispa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# vispa. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
"""
TIGET workflow -- BLAST step.
"""
from pydoop.pipes import runTask, Factory
from mapper import Mapper
from reducer import Reducer
def run_task():
return runTask(Factory(Mapper, Reducer))
| crs4/vispa | bl/tiget/mr/blast/__init__.py | Python | gpl-3.0 | 920 | ["BLAST"] | aeeb63c9e54c0bf65cc5b825b3afc621fb3c841ca3af5c2750e33b33f1b97a73 |
# $HeadURL$
__RCSID__ = "$Id$"
import socket
import select
import os
try:
import hashlib as md5
except:
import md5
import GSI
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.Core.Utilities import List, Network
from DIRAC.Core.DISET.private.Transports.SSL.SocketInfo import SocketInfo
from DIRAC.Core.DISET.private.Transports.SSL.SessionManager import gSessionManager
from DIRAC.Core.DISET.private.Transports.SSL.FakeSocket import FakeSocket
from DIRAC.Core.DISET.private.Transports.SSL.ThreadSafeSSLObject import ThreadSafeSSLObject
if GSI.__version__ < "0.5.0":
raise Exception( "Required GSI version >= 0.5.0" )
class SocketInfoFactory:
def generateClientInfo( self, destinationHostname, kwargs ):
infoDict = { 'clientMode' : True,
'hostname' : destinationHostname,
'timeout' : 600,
'enableSessions' : True }
for key in kwargs.keys():
infoDict[ key ] = kwargs[ key ]
try:
return S_OK( SocketInfo( infoDict ) )
except Exception, e:
return S_ERROR( "Error while creating SSL context: %s" % str( e ) )
def generateServerInfo( self, kwargs ):
infoDict = { 'clientMode' : False, 'timeout' : 30 }
for key in kwargs.keys():
infoDict[ key ] = kwargs[ key ]
try:
return S_OK( SocketInfo( infoDict ) )
except Exception, e:
return S_ERROR( str( e ) )
def __socketConnect( self, hostAddress, timeout, retries = 2 ):
addrs = socket.getaddrinfo(hostAddress[0], hostAddress[1], 0, socket.SOCK_STREAM)
errs = []
for a in [a for a in addrs if a[1] == socket.AF_INET ]:
res = self.__sockConnect( a[0], a[1], timeout, retries )
if res[ 'OK' ]:
return res
else:
errs.append( res[ 'Message' ] )
for a in [a for a in addrs if a[1] == socket.AF_INET6 ]:
res = self.__sockConnect( a[0], a[1], timeout, retries )
if res[ 'OK' ]:
return res
else:
errs.append( res[ 'Message' ] )
return S_ERROR( ", ".join( errs ) )
def __sockConnect( self, hostAddress, sockType, timeout, retries ):
osSocket = socket.socket( sockType, socket.SOCK_STREAM )
#osSocket.setblocking( 0 )
if timeout:
osSocket.settimeout( 5 )
try:
osSocket.connect( hostAddress )
except socket.error , e:
if e.args[0] == "timed out":
osSocket.close()
if retries:
return self.__sockConnect( hostAddress, sockType, timeout, retries - 1 )
else:
return S_ERROR( "Can't connect: %s" % str( e ) )
if e.args[0] not in ( 114, 115 ):
return S_ERROR( "Can't connect: %s" % str( e ) )
#Connect in progress
oL = select.select( [], [ osSocket ], [], timeout )[1]
if len( oL ) == 0:
osSocket.close()
return S_ERROR( "Connection timeout" )
errno = osSocket.getsockopt( socket.SOL_SOCKET, socket.SO_ERROR )
if errno != 0:
return S_ERROR( "Can't connect: %s" % str( ( errno, os.strerror( errno ) ) ) )
return S_OK( osSocket )
def __connect( self, socketInfo, hostAddress ):
#Connect baby!
result = self.__socketConnect( hostAddress, socketInfo.infoDict[ 'timeout' ] )
if not result[ 'OK' ]:
return result
osSocket = result[ 'Value' ]
#SSL MAGIC
sslSocket = GSI.SSL.Connection( socketInfo.getSSLContext(), osSocket )
#Generate sessionId
sessionHash = md5.md5()
sessionHash.update( str( hostAddress ) )
sessionHash.update( "|%s" % str( socketInfo.getLocalCredentialsLocation() ) )
for key in ( 'proxyLocation', 'proxyString' ):
if key in socketInfo.infoDict:
sessionHash.update( "|%s" % str( socketInfo.infoDict[ key ] ) )
if 'proxyChain' in socketInfo.infoDict:
sessionHash.update( "|%s" % socketInfo.infoDict[ 'proxyChain' ].dumpAllToString()[ 'Value' ] )
sessionId = sessionHash.hexdigest()
socketInfo.sslContext.set_session_id( str( hash( sessionId ) ) )
socketInfo.setSSLSocket( sslSocket )
if gSessionManager.isValid( sessionId ):
sslSocket.set_session( gSessionManager.get( sessionId ) )
#Set the real timeout
if socketInfo.infoDict[ 'timeout' ]:
sslSocket.settimeout( socketInfo.infoDict[ 'timeout' ] )
#Connected!
return S_OK( sslSocket )
def getSocket( self, hostAddress, **kwargs ):
hostName = hostAddress[0]
retVal = self.generateClientInfo( hostName, kwargs )
if not retVal[ 'OK' ]:
return retVal
socketInfo = retVal[ 'Value' ]
retVal = Network.getIPsForHostName( hostName )
if not retVal[ 'OK' ]:
return S_ERROR( "Could not resolve %s: %s" % ( hostName, retVal[ 'Message' ] ) )
ipList = List.randomize( retVal[ 'Value' ] )
for i in range( 3 ):
connected = False
errorsList = []
for ip in ipList :
ipAddress = ( ip, hostAddress[1] )
retVal = self.__connect( socketInfo, ipAddress )
if retVal[ 'OK' ]:
sslSocket = retVal[ 'Value' ]
connected = True
break
errorsList.append( "%s: %s" % ( ipAddress, retVal[ 'Message' ] ) )
if not connected:
return S_ERROR( "Could not connect to %s: %s" % ( hostAddress, "," .join( [ e for e in errorsList ] ) ) )
retVal = socketInfo.doClientHandshake()
if retVal[ 'OK' ]:
#Everything went ok. Don't need to retry
break
#Did the auth or the connection fail?
if not retVal['OK']:
return retVal
if 'enableSessions' in kwargs and kwargs[ 'enableSessions' ]:
sessionId = hash( hostAddress )
gSessionManager.set( sessionId, sslSocket.get_session() )
return S_OK( socketInfo )
def getListeningSocket( self, hostAddress, listeningQueueSize = 5, reuseAddress = True, **kwargs ):
osSocket = socket.socket( socket.AF_INET6, socket.SOCK_STREAM )
if reuseAddress:
osSocket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
retVal = self.generateServerInfo( kwargs )
if not retVal[ 'OK' ]:
return retVal
socketInfo = retVal[ 'Value' ]
sslSocket = GSI.SSL.Connection( socketInfo.getSSLContext(), osSocket )
sslSocket.bind( hostAddress )
sslSocket.listen( listeningQueueSize )
socketInfo.setSSLSocket( sslSocket )
return S_OK( socketInfo )
def renewServerContext( self, origSocketInfo ):
retVal = self.generateServerInfo( origSocketInfo.infoDict )
if not retVal[ 'OK' ]:
return retVal
socketInfo = retVal[ 'Value' ]
osSocket = origSocketInfo.getSSLSocket().get_socket()
sslSocket = GSI.SSL.Connection( socketInfo.getSSLContext(), osSocket )
socketInfo.setSSLSocket( sslSocket )
return S_OK( socketInfo )
gSocketInfoFactory = SocketInfoFactory()
| rajanandakumar/DIRAC | Core/DISET/private/Transports/SSL/SocketInfoFactory.py | Python | gpl-3.0 | 6,721 | ["DIRAC"] | 5c7683957ac2d8b583c6d75c33b73e49f5a50da9e26d8cd1b533c91e7ac5f364 |
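`__sockConnect` above retries a timed-out connect and, when the OS reports a connect in progress (errno 114/115, i.e. EALREADY/EINPROGRESS), waits for writability with `select` before checking `SO_ERROR`. A minimal standard-library sketch of that pattern for plain TCP, without the GSI/SSL layer used by DIRAC:

```python
import errno
import os
import select
import socket

def connect_with_timeout(address, timeout=5.0):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)
    err = sock.connect_ex(address)
    if err in (errno.EINPROGRESS, errno.EALREADY):
        # Wait until the socket is writable or the timeout expires.
        _, writable, _ = select.select([], [sock], [], timeout)
        if not writable:
            sock.close()
            raise TimeoutError("connection timeout")
        err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    if err != 0:
        sock.close()
        raise OSError(err, os.strerror(err))
    return sock
```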
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This example demonstrates the usage of the vtkNamedColors class.
"""
from __future__ import print_function
import vtk
def main():
nc = vtk.vtkNamedColors()
# We can print out the variables.
# The color name and RGBA values are displayed.
print(nc)
# Here we just print out the colors and any
# synonyms.
PrintColors(nc)
PrintSynonyms(nc)
"""
Create a cone, contour it using the banded contour filter and
color it with the primary additive and subtractive colors.
"""
# Create a cone
coneSource = vtk.vtkConeSource()
coneSource.SetCenter(0.0, 0.0, 0.0)
coneSource.SetRadius(5.0)
coneSource.SetHeight(10)
coneSource.SetDirection(0, 1, 0)
coneSource.SetResolution(6)
coneSource.Update()
bounds = [1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
coneSource.GetOutput().GetBounds(bounds)
elevation = vtk.vtkElevationFilter()
elevation.SetInputConnection(coneSource.GetOutputPort())
elevation.SetLowPoint(0, bounds[2], 0)
elevation.SetHighPoint(0, bounds[3], 0)
bcf = vtk.vtkBandedPolyDataContourFilter()
bcf.SetInputConnection(elevation.GetOutputPort())
bcf.SetScalarModeToValue()
bcf.GenerateContourEdgesOn()
bcf.GenerateValues(7, elevation.GetScalarRange())
# Test setting and getting a color here.
# We are also modifying alpha.
# Convert to a list so that
# SetColor(name,rgba) works.
rgba = list(nc.GetColor4d("Red"))
rgba[3] = 0.5
nc.SetColor("My Red", rgba)
# Does "My Red" match anything?
match = FindSynonyms(nc, "My Red")
print("Matching colors to My Red:", ', '.join(match))
# Build a simple lookup table of
# primary additive and subtractive colors.
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(7)
lut.SetTableValue(0, nc.GetColor4d("My Red"))
# Let's make the dark green one partially transparent.
rgba = nc.GetColor4d("Lime")
rgba[3] = 0.3
lut.SetTableValue(1, rgba)
lut.SetTableValue(2, nc.GetColor4d("Blue"))
lut.SetTableValue(3, nc.GetColor4d("Cyan"))
lut.SetTableValue(4, nc.GetColor4d("Magenta"))
lut.SetTableValue(5, nc.GetColor4d("Yellow"))
lut.SetTableValue(6, nc.GetColor4d("White"))
lut.SetTableRange(elevation.GetScalarRange())
lut.Build()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(bcf.GetOutputPort())
mapper.SetLookupTable(lut)
mapper.SetScalarModeToUseCellData()
contourLineMapper = vtk.vtkPolyDataMapper()
contourLineMapper.SetInputData(bcf.GetContourEdgesOutput())
contourLineMapper.SetScalarRange(elevation.GetScalarRange())
contourLineMapper.SetResolveCoincidentTopologyToPolygonOffset()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
contourLineActor = vtk.vtkActor()
actor.SetMapper(mapper)
contourLineActor.SetMapper(contourLineMapper)
contourLineActor.GetProperty().SetColor(
nc.GetColor3d("black"))
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.AddActor(contourLineActor)
renderer.SetBackground2(nc.GetColor3d('RoyalBlue'))
renderer.SetBackground(nc.GetColor3d('MistyRose'))
renderer.GradientBackgroundOn()
renderWindow.SetSize(600, 600)
renderWindow.Render()
renderWindow.SetWindowName('NamedColors')
renderWindow.Render()
renderWindow.Render()
renderWindowInteractor.Start()
def FindSynonyms(nc, colorName):
"""
Find any synonyms for a specified color.
:param: nc: The vtkNamedColor class
:param: colorName: the name of the color to find the synonyms for.
:return: The synonyms for colorName.
"""
availableColors = nc.GetColorNames()
# Make a list
availableColors = availableColors.split('\n')
synonyms = []
# We use lower case for comparison and
# just the red, green, and blue components
# of the color.
myColor = nc.GetColor3ub(colorName)
for color in availableColors:
rgb = nc.GetColor3ub(color)
if list(myColor) == list(rgb):
synonyms.append(color)
return synonyms
def PrintColors(nc):
colorNames = nc.GetColorNames().split('\n')
print("There are", len(colorNames), "colors:")
max_str_len = len(max(colorNames, key=len))
n = 0
s = ''
for p in colorNames:
n += 1
if n % 5 == 0:
s += '{:s}\n'.format(p)
else:
s += '{:s} '.format(p.ljust(max_str_len))
s = s.strip() + '\n'
print(s)
def PrintSynonyms(nc):
syn = nc.GetSynonyms().split('\n\n')
print("There are", len(syn), "synonyms:")
synonyms = []
for ele in syn:
synonyms.append(ele.split('\n'))
max_str_len = 0
for p in synonyms:
max_len = len(max(p, key=len))
if max_len > max_str_len:
max_str_len = max_len
s = ''
for p in synonyms:
n = 0
for q in p:
n += 1
if n < len(p):
s += '{:s} '.format(q.ljust(max_str_len))
else:
s += '{:s}\n'.format(q)
s = s.strip() + '\n'
print(s)
if __name__ == "__main__":
main()
| lorensen/VTKExamples | src/Python/Visualization/NamedColors.py | Python | apache-2.0 | 5,382 | ["VTK"] | 4fc7fabc387f420c618097ac68391041d685f3fa175eaf858900f907338816a9 |
# psf.py
import numpy as np
from skimage.util import random_noise
from astropy.io import fits
def add_gaussnoise(img, noise_sigma):
"""
add iid gaussian noise on image
Params
------
img
noise_sigma
Return
------
img
"""
if noise_sigma == 0:
result = img
else:
result = img+random_noise(np.zeros(img.shape), mode='gaussian',var=noise_sigma**2,clip=False)
return result
def bin_img(img, binsize=2):
"""
bin the image by taking the mean average of (binsize * binsize) blocks. The resulting image is binsize times smaller than the original in each dimension. If the img size is not divisible by the binsize, the remainder is cut off and ignored.
Params
------
img
binsize = 2 (int)
Return
------
img
"""
nx, ny = img.shape
# sanity check
for n in [nx, ny]:
if n%binsize != 0:
print("[simtools] image size not divisible by binsize. The remainder is ignored. ")
# raise Exception("[simtools] image size is not divisible by binsize")
nx_b, ny_b = nx // binsize, ny // binsize
img_b = np.zeros([nx_b, ny_b])
for i in range(nx_b):
for j in range(ny_b):
img_b[i, j] = np.average(img[i*binsize:(i+1)*binsize, j*binsize:(j+1)*binsize])
return img_b
| aileisun/bubbleimg | bubbleimg/imgsim/simtools.py | Python | mit | 1,205 | ["Gaussian"] | bbb98c14961bb5679ddbaf5b1cfab574ec545c00623c4b0f93f32d3851cce373 |
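`bin_img` above averages each `binsize * binsize` block with an explicit double loop. When the block size divides the image evenly (or after trimming the remainder, as the docstring describes), the same result can be computed with the usual reshape-and-mean NumPy idiom; a small sketch, not part of the module above:

```python
import numpy as np

def bin_img_vectorised(img, binsize=2):
    # Trim any remainder, then average over each binsize x binsize block.
    nx = (img.shape[0] // binsize) * binsize
    ny = (img.shape[1] // binsize) * binsize
    trimmed = img[:nx, :ny]
    return trimmed.reshape(nx // binsize, binsize,
                           ny // binsize, binsize).mean(axis=(1, 3))

img = np.arange(16, dtype=float).reshape(4, 4)
print(bin_img_vectorised(img))  # 2x2 array of block means
```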
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements input and output processing from Gaussian.
"""
__author__ = 'Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import re
import numpy as np
import warnings
from pymatgen.core.operations import SymmOp
from pymatgen.core import Element, Molecule, Composition
from monty.io import zopen
from pymatgen.util.coord_utils import get_angle
import pymatgen.core.physical_constants as cst
def read_route_line(route):
"""
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
return
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
"""
scrf_patt = re.compile("^([sS][cC][rR][fF])\s*=\s*(.+)")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif "#" in tok:
# does not store # in route to avoid error in input
dieze_tag = tok
continue
else:
d = tok.split("=")
v = None if len(d) == 1 else d[1]
route_params[d[0]] = v
return functional, basis_set, route_params, dieze_tag
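# Illustrative sketch (not in the original source) of how read_route_line splits
# a typical route line; the example route string itself is an assumption chosen
# for demonstration.
#
#   >>> read_route_line("#P B3LYP/6-31G(d) Opt SCF=Tight")
#   would be expected to return
#   ("B3LYP", "6-31G(d)", {"Opt": None, "SCF": "Tight"}, "#P")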
class GaussianInput(object):
"""
An object representing a Gaussian input file.
Args:
mol: Input molecule. If molecule is a single string, it is used as a
direct input to the geometry section of the Gaussian input
file.
charge: Charge of the molecule. If None, charge on molecule is used.
            Defaults to None. This allows the charge of the input file to be
            set independently of the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
#Commonly used regex patterns
zmat_patt = re.compile("^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
xyz_patt = re.compile("^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+"
"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(self, mol, charge=None, spin_multiplicity=None, title=None,
functional="HF", basis_set="6-31G(d)", route_parameters=None,
input_parameters=None, link0_parameters=None, dieze_tag="#P",
gen_basis=None):
self._mol = mol
self.charge = charge if charge is not None else mol.charge
nelectrons = - self.charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self.charge, spin_multiplicity))
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.title = title if title else self._mol.composition.formula
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#P"
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile("^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1)] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
# is detected, it is assumed for the remaining of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput.xyz_patt.match(l):
m = GaussianInput.xyz_patt.match(l)
species.append(m.group(1))
toks = re.split("[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput.zmat_patt.match(l):
zmode = True
toks = re.split("[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis,
angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(
coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(
coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
e.g, ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub("\d", "", sp_str)
return sp.capitalize()
species = [parse_species(sp) for sp in species]
return Molecule(species, coords)
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
            contents: String representing a Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile("^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1)] = m.group(2)
route_patt = re.compile("^#[sSpPnN]*.*")
route = None
for i, l in enumerate(lines):
if route_patt.match(l):
route = l
route_index = i
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = ' '.join(title)
ind += 1
toks = re.split("[\s,]", lines[route_index + ind])
charge = int(toks[0])
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput.parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
title=title, functional=functional,
basis_set=basis_set, route_parameters=route_paras,
input_parameters=input_paras,link0_parameters=link0_dict,
dieze_tag=dieze_tag)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
def _find_nn_pos_before_site(self, siteindex):
"""
Returns index of nearest neighbor atoms.
"""
alldist = [(self._mol.get_distance(siteindex, i), i)
for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
for i, site in enumerate(self._mol):
if i == 0:
output.append("{}".format(site.specie))
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append("{} {} B{}".format(self._mol[i].specie,
nn[0] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append("{} {} B{} {} A{}".format(self._mol[i].specie,
nn[0] + 1, i,
nn[1] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append("{} {} B{} {} A{} {} D{}"
.format(self._mol[i].specie, nn[0] + 1, i,
nn[1] + 1, i, nn[2] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
outputvar.append("D{}={:.6f}".format(i, dih))
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
Return the cartesian coordinates of the molecule
"""
outs = []
to_s = lambda x: "%0.6f" % x
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string, " ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
        Option: when cart_coords is set to True, return the cartesian
        coordinates instead of the z-matrix
"""
def para_dict_to_string(para, joiner=" "):
para_str = ["{}={}".format(k, v) if v else k
for k, v in sorted(para.items())]
return joiner.join(para_str)
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
output.append("{diez} {func}/{bset} {route}"
.format(diez=self.dieze_tag, func=self.functional,
bset=self.basis_set,
route=para_dict_to_string(self.route_parameters))
)
output.append("")
output.append(self.title)
output.append("")
output.append("{} {}".format(self.charge, self.spin_multiplicity))
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
else:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append("{:s}\n".format(self.gen_basis))
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
def write_file(self, filename,cart_coords=False):
"""
Write the input string into a file
Option: see __str__ method
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag}
@classmethod
def from_dict(cls, d):
return GaussianInput(mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
link0_parameters=d["link0_parameters"])
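# Illustrative sketch (not part of the original file): building an input for a
# water molecule. The coordinates and route parameters are assumptions chosen
# only for demonstration.
#
#   mol = Molecule(["O", "H", "H"],
#                  [[0.000, 0.000, 0.000],
#                   [0.757, 0.586, 0.000],
#                   [-0.757, 0.586, 0.000]])
#   gau = GaussianInput(mol, functional="B3LYP", basis_set="6-31G(d)",
#                       route_parameters={"Opt": ""})
#   gau.write_file("water.com", cart_coords=True)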
class GaussianOutput(object):
"""
Parser for Gaussian output files.
Args:
filename: Filename of Gaussian output file.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
The frequencies and normal modes.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: stationary_type
    If it is a relaxation run, indicates whether it is a minimum ("Minimum")
    or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_mult
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
self.filename = filename
self._parse(filename)
@property
def final_energy(self):
return self.energies[-1]
@property
def final_structure(self):
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(" \(Enter \S+l101\.exe\)")
route_patt = re.compile(" #[pPnNtT]*.*")
link0_patt = re.compile("^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile("Charge\s+=\s*([-\\d]+)\s+"
"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile("([0-9]+)\s+basis functions")
pcm_patt = re.compile("Polarizable Continuum Model")
stat_type_patt = re.compile("imaginary frequencies")
scf_patt = re.compile("E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile("EUMP2\s*=\s*(.*)")
oniom_patt = re.compile("ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile("(Normal|Error) termination")
error_patt = re.compile(
"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(
"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(
'^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)')
end_mulliken_patt = re.compile(
'(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)')
std_orientation_patt = re.compile("Standard orientation")
end_patt = re.compile("--+")
orbital_patt = re.compile("Alpha\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile("(Zero-point|Thermal) correction(.*)="
"\s+([\d\.-]+)")
forces_on_patt = re.compile(
"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile("Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(
"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(
"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
freq_patt = re.compile("Frequencies\s--\s+(.*)")
normal_mode_patt = re.compile(
"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.structures = []
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
coord_txt = []
read_coord = 0
read_mulliken = False
orbitals_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line):
params = read_route_line(line)
self.functional = params[0]
self.basis_set = params[1]
self.route = params[2]
self.dieze_tag = params[3]
parse_stage = 1
elif parse_stage == 1:
if charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_mult = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "FREQ" in self.route and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
if not end_patt.search(line):
coord_txt.append(line)
else:
read_coord = (read_coord + 1) % 4
if not read_coord:
sp = []
coords = []
for l in coord_txt[2:]:
toks = l.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(i) for i in toks[3:6]])
self.structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v) for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
elif parse_freq:
m = freq_patt.search(line)
if m:
values = [float(_v) for _v in m.groups()[0].split()]
for value in values:
frequencies.append([value, []])
elif normal_mode_patt.search(line):
values = [float(_v) for _v in line.split()[2:]]
n = int(len(values) / 3)
for i in range(0, len(values), 3):
j = -n + int(i / 3)
frequencies[j][1].extend(values[i:i+3])
elif line.find("-------------------") != -1:
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization "
"error",
"Convergence failure": "SCF convergence error"
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif (not num_basis_found) and \
num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "FREQ" in self.route and "OPT" in self.route and \
stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D",
"E")))
elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
coord_txt = []
read_coord = 1
elif orbital_patt.search(line):
orbitals_txt.append(line)
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
                                entry = {int(m.group(1)): [m.group(2), float(m.group(3))]}
                                mulliken_charges.update(entry)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
if not terminated:
#raise IOError("Bad Gaussian output file.")
warnings.warn("\n" + self.filename + \
": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile("(Dispersion|Cavitation|Repulsion) energy"
"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile("with all non electrostatic terms\s+\S+\s+"
"=\s+(\S*)")
parameter_patt = re.compile("(Eps|Numeral density|RSolv|Eps"
"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm['{} energy'.format(m.group(1))] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm['Total energy'] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure)}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_mult
vin = {"route": self.route, "functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections
}
d['output'] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
Read a potential energy surface from a gaussian scan calculation.
Returns:
A dict: {"energies": [ values ],
"coords": {"d1": [ values ], "A2", [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
""" return a list of float from a list of string """
return [float(v) for v in l]
scan_patt = re.compile("^\sSummary of the potential surface scan:")
optscan_patt = re.compile("^\sSummary of Optimized Potential Surface Scan")
float_patt = re.compile("\s*([+-]?\d+\.\d+)")
# data dict return
data = {"energies": list(), "coords": dict()}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while not re.search("(^\s+(\d+)|^\s-+)", line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if re.search("^\s-+", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: list() for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search("^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i+1])
line = f.readline()
else:
line = f.readline()
return data
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
            coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy / eV")
e_min = min(d["energies"])
y = [(e - e_min) * cst.HARTREE_TO_ELECTRON_VOLT for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf", img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
        Read the excitation energies after a TD-DFT calculation.
Returns:
            A list: A list of tuples, one for each transition, such as
                    [(energy (eV), lambda (nm), oscillator strength), ... ]
"""
float_patt = re.compile("\s*([+-]?\d+\.\d+)")
transitions = list()
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search("^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search("^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
        Get a matplotlib plot of the UV-visible spectrum. Transitions are
        plotted as vertical lines and as a sum of normal functions of width
        sigma. The broadening is applied in energy and the spectrum is plotted
        as a function of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "spectra": values}
                where values are lists of abscissa (energies, lambda) and
the sum of gaussian functions (spectra).
A matplotlib plot.
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
from matplotlib.mlab import normpdf
plt = get_publication_quality_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min([val[0] for val in transitions]) - 5.0 * sigma
maxval = max([val[0] for val in transitions]) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.e9 for val in eneval] # in nm
# sum of gaussian functions
spectre = np.zeros(npts)
for trans in transitions:
spectre += trans[2] * normpdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "spectra": spectre}
# plot transitions as vlines
plt.vlines([val[1] for val in transitions], \
0., \
[val[2] for val in transitions], \
color="blue", \
label="transitions",
linewidth=2)
plt.xlabel("$\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf",
sigma=0.05, step=0.01):
"""
        Save the matplotlib plot of the spectrum to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
def to_input(self, filename, mol=None, charge=None,
spin_multiplicity=None, title=None, functional=None,
basis_set=None, route_parameters=None, input_parameters=None,
link0_parameters=None, dieze_tag=None, cart_coords=False):
"""
        Write a new input file using, by default, the last geometry read from
        the output file and the same calculation parameters. Arguments are the
        same as for the GaussianInput class.
        Returns:
            gauinp (GaussianInput) : the gaussian input object
"""
if not mol:
mol = self.final_structure
if not charge:
charge = self.charge
if not spin_multiplicity:
spin_multiplicity = self.spin_mult
if not title:
title = "restart "
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
gauinp = GaussianInput(mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag)
gauinp.write_file(filename, cart_coords=cart_coords)
return gauinp
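# Illustrative sketch (not part of the original file) of a typical parse/restart
# round trip; the file names are placeholders.
#
#   gout = GaussianOutput("mol.log")
#   if gout.properly_terminated:
#       print(gout.final_energy, len(gout.structures))
#       gout.to_input("restart.com", cart_coords=True)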
|
migueldiascosta/pymatgen
|
pymatgen/io/gaussian.py
|
Python
|
mit
| 42,348
|
[
"Gaussian",
"pymatgen"
] |
06dc104e55879021cd2ca80a887266478929ab2d58d4df50431e0bc4a4a2cd94
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Attempts to catch Python based import errors and provides possible solutions."""
# NumPy import
try:
import numpy as np
except:
msg = """
NumPy is a runtime requirement for Psi4. Please install NumPy to proceed.
NumPy installation with a package manager can be accomplished by the following lines:
- conda install numpy
- sudo yum install numpy
- sudo apt-get install python-numpy
- brew install numpy
"""
raise ImportError(msg)
# Import plugin add-ons here for now
try:
import csx4psi
except ImportError:
pass
try:
from . import pasture
except ImportError:
pass
|
CDSherrill/psi4
|
psi4/driver/dependency_check.py
|
Python
|
lgpl-3.0
| 1,556
|
[
"Psi4"
] |
ee4991967ca95c91bfbb73dd8cf9d22d9416307555316aa212bae432d852f4bc
|
import networkx as nx
from random import *
import datetime
# Import the Django stuff
import sys
from django.core.management import setup_environ
sys.path.append(('/'.join(__file__.split('/')[:-2])) + '/server')
import settings
setup_environ(settings)
import wiki.models
class Page:
def __init__(self, name, URL):
self.name = name
self.url = URL
self.p_landing = 0.0
self.data = []
def __str__(self):
return self.url
class User:
def __init__(self, visit_rate, visit_time_unit, last_visit_time):
self.visits = []
self.visit_rate = visit_rate
self.visit_time_unit = visit_time_unit
self.last_visit_time = last_visit_time
self.next_visit_time = None
def perform_next_visit(self, site):
visit = Visit(self.last_visit_time, self.next_visit_time, site.generate_click_trail())
self.visits.append(visit)
self.last_visit_time = self.next_visit_time
self.plan_next_visit()
return visit
def plan_next_visit(self):
        # Sample the inter-visit time from an exponential distribution with rate visit_rate
time_delta = expovariate(self.visit_rate)
#time_delta = 1/float(self.visit_rate)
params = {self.visit_time_unit : time_delta}
if (self.next_visit_time == None):
self.next_visit_time = self.last_visit_time + datetime.timedelta(**params)
else:
self.next_visit_time += datetime.timedelta(**params)
def visits_to_json(self, baseurl):
visits = ",".join([visit.to_json(baseurl) for visit in self.visits])
return "[%s]" % (visits)
class Visit:
def __init__(self, last_time, this_time, click_trail):
self.last_time = last_time
self.this_time = this_time
self.click_trail = click_trail
def __str__(self):
return str(self.last_time) + ", " + str(self.this_time) + ", " + str(self.click_trail)
def to_json(self, baseurl):
return '{"time":"%s", "clicktrail":%s}' % (self.this_time, self.click_trail.to_json(baseurl))
class ClickTrail:
def __init__(self, path):
self.path = path
def __str__(self):
return str([page.name for page in self.path])
def to_json(self, baseurl):
comma_list = ",".join([('"%s%s"' % (baseurl, page.url)) for page in self.path])
return '[%s]' % (comma_list)
class Site:
def __init__(self, base_url):
self.landing_page = None
self.graph = None
self.base_url = base_url
def create_graph(self, pages, articles_per_page, prob_of_next, prob_of_leaving):
self.graph = nx.DiGraph()
# The main pages
main_pages = []
article_pages = []
END = Page("END", "END")
urlroot = self.base_url
prob_of_article = (1.0 - prob_of_next - prob_of_leaving) / articles_per_page
for i in range(pages):
page = Page("Page " + str(i), urlroot)
if i == 0:
page.p_landing = 1.0
self.graph.add_node(page)
main_pages.append(page)
for j in range(articles_per_page):
                # articles_per_page articles per page, numbered sequentially
                num = i * articles_per_page + j + 1
article = Page("Link " + str(num), urlroot)
self.graph.add_node(article)
self.graph.add_edge(page, article, weight=prob_of_article)
article_pages.append(article)
# Set ending probabilities
for a in article_pages:
self.graph.add_edge(a, END, weight=1.0)
for p in main_pages:
self.graph.add_edge(p, END, weight=prob_of_leaving)
# Set transition prob from main page to main page
for i in range(len(main_pages) - 1):
p1 = main_pages[i]
p2 = main_pages[i+1]
self.graph.add_edge(p1, p2, weight=prob_of_next)
def sample_start_page(self):
r = random()
        total = 0.0
        for page in self.graph:
            total += page.p_landing
            if total > r:
                return page
return None
def sample_click_trail(self, landing_page):
click_trail = []
cur_page = landing_page
while cur_page.name != "END":
click_trail.append(cur_page)
r = random()
            total = 0.0
            for neighbor in self.graph[cur_page]:
                link = self.graph[cur_page][neighbor]
                total += link['weight']
                if total > r:
                    cur_page = neighbor
                    break
return ClickTrail(click_trail)
def generate_click_trail(self):
start = self.sample_start_page()
trail = self.sample_click_trail(start)
return trail
class Blog(Site):
def __init__(self, base_url):
Site.__init__(self, base_url)
self.create_graph(10, 10, 0.3, 0.4)
class OnePageBlog(Site):
def __init__(self, base_url):
Site.__init__(self, base_url)
self.create_graph(10, 10, 0.0, 1.0)
class Wiki(Site):
def __init__(self, base_url, p_leave):
Site.__init__(self, base_url)
self.p_leave = p_leave
self.build_site()
def build_site(self):
pages = wiki.models.Page.objects.all().only('id', 'access_probability')
self.graph = nx.DiGraph()
page_cache = {}
END = Page("END", "END")
self.graph.add_node(END)
# Add the pages
for page in pages:
page_node = Page(page.title, self.base_url + '?pageid=' + str(page.id))
page_node.p_landing = page.access_probability
page_cache[page.id] = page_node
self.graph.add_node(page_node)
        # Add the edges
for page in pages:
page_node = page_cache[page.id]
linksum = 0
for other in page.outlinks.all().only('id', 'access_probability'):
linksum += other.access_probability
for other in page.outlinks.all().only('id', 'access_probability'):
other_node = page_cache[other.id]
self.graph.add_edge(page_node, other_node, weight=((other.access_probability / float(linksum)) * (1.0 - self.p_leave)))
self.graph.add_edge(page_node, END, weight=self.p_leave)
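# Minimal simulation sketch (not part of the original script). It uses only the
# Blog site model, which does not touch the wiki database models, but it still
# assumes the Django environment imported at the top of this script is
# available; all parameter values below are illustrative.
if __name__ == "__main__":
    site = Blog("http://example.com/")
    user = User(visit_rate=2.0, visit_time_unit="hours",
                last_visit_time=datetime.datetime(2010, 1, 1))
    user.plan_next_visit()
    for _ in range(3):
        user.perform_next_visit(site)
    print(user.visits_to_json(site.base_url))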
|
eob/synckit-research
|
perf/gen_model.py
|
Python
|
bsd-3-clause
| 6,388
|
[
"VisIt"
] |
09f4ebf410f36fabe379444fdbfa184272718d898d5266334f770e5c52dce9f9
|
import ast
from collections import defaultdict
from contextlib import ExitStack, contextmanager
from functools import singledispatch
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from breakfast.position import Position
from breakfast.source import Source
from tests import make_source
QualifiedName = Tuple[str, ...]
class Node:
def __init__(self, parent: Optional["Node"]):
self.parent = parent
self.children: Dict[str, "Node"] = defaultdict(lambda: Node(parent=self))
self.occurrences: Set[Position] = set()
self.is_class = False
def add_occurrence(self, occurrence: Any):
self.occurrences.add(occurrence)
def __getitem__(self, name: str) -> "Node":
return self.children[name]
def __contains__(self, name: str) -> bool:
return name in self.children
def alias(self, other: "Node") -> None:
for name, value in other.children.items():
if name not in self.children:
self.children[name] = value
else:
self.children[name].alias(value)
other.children = self.children
self.occurrences |= other.occurrences
other.occurrences = self.occurrences
def flatten(
self,
prefix: Tuple[str, ...] = tuple(),
seen: Optional[Set[Position]] = None,
) -> Dict[Tuple[str, ...], List[Tuple[int, int]]]:
if not seen:
seen = set()
result = {}
next_values = []
for key, value in self.children.items():
new_prefix = prefix + (key,)
if value.occurrences:
occurrence = next(iter(value.occurrences))
if occurrence in seen:
continue
positions = [(o.row, o.column) for o in value.occurrences]
result[new_prefix] = positions
seen |= value.occurrences
next_values.append((new_prefix, value))
for new_prefix, value in next_values:
result.update(value.flatten(prefix=new_prefix, seen=seen))
return result
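# Brief sketch (not part of the original tests) of how the occurrence tree is
# meant to be used; the names and positions below are made up for illustration
# and would be Position instances in practice.
#
#   root = Node(parent=None)
#   root["module"]["."]["fun"].add_occurrence(position_of_def)
#   root["module"]["."]["fun"].add_occurrence(position_of_call)
#   root.flatten()  # -> {("module", ".", "fun"): [(row, col), (row, col)]}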
class State:
def __init__(self, position: Position):
self.position = position
self.root = Node(parent=None)
self.current_node = self.root
self.current_path: QualifiedName = tuple()
self.lookup_scopes = [self.root]
self.found: Optional[Node] = None
@contextmanager
def scope(self, name: str, lookup_scope: bool = False, is_class: bool = False):
previous_node = self.current_node
self.current_node = self.current_node[name]
self.current_node.is_class = is_class
if lookup_scope:
self.lookup_scopes.append(self.current_node)
self.current_path += (name,)
yield
self.current_node = previous_node
self.current_path = self.current_path[:-1]
if lookup_scope:
self.lookup_scopes.pop()
def add_occurrence(self, *, position: Optional[Position] = None) -> None:
if position:
self.current_node.occurrences.add(position)
if position == self.position:
self.found = self.current_node
print(
f"{self.current_path}: {[(o.row,o.column) for o in self.current_node.occurrences]}"
)
def alias(self, path: QualifiedName) -> None:
other_node = self.current_node
for name in path:
if name == "..":
if other_node.parent:
other_node = other_node.parent
else:
other_node = other_node[name]
self.current_node.alias(other_node)
def node_position(
node: ast.AST, source: Source, row_offset=0, column_offset=0
) -> Position:
return source.position(
row=(node.lineno - 1) + row_offset, column=node.col_offset + column_offset
)
def generic_visit(node: ast.AST, source: Source, state: State) -> None:
"""Called if no explicit visitor function exists for a node.
Adapted from NodeVisitor in:
https://github.com/python/cpython/blob/master/Lib/ast.py
"""
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
visit(item, source, state)
elif isinstance(value, ast.AST):
visit(value, source, state)
@singledispatch
def visit(node: ast.AST, source: Source, state: State) -> None:
generic_visit(node, source, state)
@visit.register
def visit_module(node: ast.Module, source: Source, state: State) -> None:
with state.scope(source.module_name):
with state.scope(".", lookup_scope=True):
generic_visit(node, source, state)
@visit.register
def visit_name(node: ast.Name, source: Source, state: State) -> None:
position = node_position(node, source)
if isinstance(node.ctx, ast.Store):
with state.scope(node.id):
state.add_occurrence(position=position)
else:
if node.id not in state.current_node:
for scope in state.lookup_scopes[::-1]:
if node.id in scope or scope is state.root:
scope[node.id].alias(state.current_node[node.id])
break
with state.scope(node.id):
state.add_occurrence(position=node_position(node, source))
@singledispatch
def names_for(node: ast.AST) -> QualifiedName: # pylint: disable= unused-argument
return ()
@names_for.register
def names_for_name(node: ast.Name) -> QualifiedName:
return (node.id,)
@names_for.register
def names_for_attribute(node: ast.Attribute) -> QualifiedName:
return names_for(node.value) + (node.attr,)
@names_for.register
def names_for_call(node: ast.Call) -> QualifiedName:
return names_for(node.func) + ("()",)
def get_names(value: ast.AST) -> List[QualifiedName]:
if isinstance(value, ast.Tuple):
return [names_for(v) for v in value.elts]
return [names_for(value)]
@visit.register
def visit_assign(node: ast.Assign, source: Source, state: State) -> None:
for node_target in node.targets:
visit(node_target, source, state)
visit(node.value, source, state)
target_names = get_names(node.targets[0])
value_names = get_names(node.value)
for target, value in zip(target_names, value_names):
if target and value:
path: QualifiedName = ("..",)
with ExitStack() as stack:
for name in target:
stack.enter_context(state.scope(name))
stack.enter_context(state.scope("."))
path += ("..",)
state.alias(path + value + (".",))
def is_static_method(node: ast.FunctionDef) -> bool:
return any(
n.id == "staticmethod" for n in node.decorator_list if isinstance(n, ast.Name)
)
@visit.register
def visit_function_definition(
node: ast.FunctionDef, source: Source, state: State
) -> None:
is_method = state.lookup_scopes[-1] and state.lookup_scopes[-1].is_class
position = node_position(node, source, column_offset=len("def "))
with state.scope(node.name):
state.add_occurrence(position=position)
with state.scope("()"):
for i, arg in enumerate(node.args.args):
position = node_position(arg, source)
with state.scope(arg.arg):
state.add_occurrence(position=position)
if i == 0 and is_method and not is_static_method(node):
with state.scope("."):
state.alias(("..", "..", "..", ".."))
generic_visit(node, source, state)
@visit.register
def visit_class(node: ast.ClassDef, source: Source, state: State) -> None:
position = node_position(node, source, column_offset=len("class "))
for base in node.bases:
visit(base, source, state)
with state.scope(node.name, lookup_scope=True, is_class=True):
state.add_occurrence(position=position)
with state.scope("()"):
with state.scope("."):
state.alias(("..", "..", "."))
for base in node.bases:
state.alias(("..", "..", "..") + names_from(base) + ("()", "."))
for statement in node.body:
visit(statement, source, state)
@visit.register
def visit_call(node: ast.Call, source: Source, state: State) -> None:
call_position = node_position(node, source)
for arg in node.args:
visit(arg, source, state)
visit(node.func, source, state)
names = names_from(node.func)
with ExitStack() as stack:
if names:
stack.enter_context(state.scope(names[0]))
for name in names[1:]:
stack.enter_context(state.scope(name))
stack.enter_context(state.scope("()"))
for keyword in node.keywords:
if not keyword.arg:
continue
position = source.find_after(keyword.arg, call_position)
with state.scope(keyword.arg):
state.add_occurrence(position=position)
@singledispatch
def names_from(node: ast.AST) -> QualifiedName: # pylint: disable=unused-argument
return ()
@names_from.register
def name_names(node: ast.Name) -> QualifiedName:
return (node.id,)
@names_from.register
def attribute_names(node: ast.Attribute) -> QualifiedName:
return names_from(node.value) + (".", node.attr)
@names_from.register
def call_names(node: ast.Call) -> QualifiedName:
names = names_from(node.func)
return names
@visit.register
def visit_attribute(node: ast.Attribute, source: Source, state: State) -> None:
visit(node.value, source, state)
position = node_position(node, source)
names = names_from(node.value)
with ExitStack() as stack:
for name in names:
position = source.find_after(name, position)
stack.enter_context(state.scope(name))
stack.enter_context(state.scope("."))
position = source.find_after(node.attr, position)
stack.enter_context(state.scope(node.attr))
state.add_occurrence(position=position)
def visit_comp(
node: Union[ast.DictComp, ast.ListComp, ast.SetComp, ast.GeneratorExp],
source: Source,
state: State,
*sub_nodes,
) -> None:
position = node_position(node, source)
name = f"{type(node)}-{position.row},{position.column}"
with state.scope(name):
for generator in node.generators:
visit(generator.target, source, state)
visit(generator.iter, source, state)
for if_node in generator.ifs:
visit(if_node, source, state)
for sub_node in sub_nodes:
visit(sub_node, source, state)
@visit.register
def visit_dict_comp(node: ast.DictComp, source: Source, state: State) -> None:
visit_comp(node, source, state, node.key, node.value)
@visit.register
def visit_list_comp(node: ast.ListComp, source: Source, state: State) -> None:
visit_comp(node, source, state, node.elt)
@visit.register
def visit_set_comp(node: ast.SetComp, source: Source, state: State) -> None:
visit_comp(node, source, state, node.elt)
@visit.register
def visit_generator_exp(node: ast.GeneratorExp, source: Source, state: State) -> None:
visit_comp(node, source, state, node.elt)
def all_occurrence_positions(
position: Position,
) -> Iterable[Position]:
source = position.source
state = State(position)
visit(source.get_ast(), source=source, state=state)
if state.found:
return sorted(state.found.occurrences)
return []
def test_distinguishes_local_variables_from_global():
source = make_source(
"""
def fun():
old = 12
old2 = 13
result = old + old2
del old
return result
old = 20
"""
)
position = source.position(row=2, column=4)
assert all_occurrence_positions(position) == [
source.position(row=2, column=4),
source.position(row=4, column=13),
source.position(row=5, column=8),
]
def test_finds_non_local_variable():
source = make_source(
"""
old = 12
def fun():
result = old + 1
return result
old = 20
"""
)
position = source.position(1, 0)
assert all_occurrence_positions(position) == [
Position(source, 1, 0),
Position(source, 4, 13),
Position(source, 7, 0),
]
def test_does_not_rename_random_attributes():
source = make_source(
"""
import os
path = os.path.dirname(__file__)
"""
)
position = source.position(row=3, column=0)
assert all_occurrence_positions(position) == [source.position(row=3, column=0)]
def test_finds_parameter():
source = make_source(
"""
def fun(old=1):
print(old)
old = 8
fun(old=old)
"""
)
assert all_occurrence_positions(source.position(1, 8)) == [
source.position(1, 8),
source.position(2, 10),
source.position(5, 4),
]
def test_finds_function():
source = make_source(
"""
def fun_old():
return 'result'
result = fun_old()
"""
)
assert [source.position(1, 4), source.position(3, 9)] == all_occurrence_positions(
source.position(1, 4)
)
def test_finds_class():
source = make_source(
"""
class OldClass:
pass
instance = OldClass()
"""
)
assert [source.position(1, 6), source.position(4, 11)] == all_occurrence_positions(
source.position(1, 6)
)
def test_finds_method_name():
source = make_source(
"""
class A:
def old(self):
pass
unbound = A.old
"""
)
position = source.position(row=3, column=8)
assert all_occurrence_positions(position) == [
source.position(row=3, column=8),
source.position(row=6, column=12),
]
def test_finds_passed_argument():
source = make_source(
"""
old = 2
def fun(arg, arg2):
return arg + arg2
fun(1, old)
"""
)
assert [source.position(1, 0), source.position(4, 7)] == all_occurrence_positions(
source.position(1, 0)
)
def test_finds_parameter_with_unusual_indentation():
source = make_source(
"""
def fun(arg, arg2):
return arg + arg2
fun(
arg=\\
1,
arg2=2)
"""
)
assert [
source.position(1, 8),
source.position(2, 11),
source.position(4, 4),
] == all_occurrence_positions(source.position(1, 8))
def test_does_not_find_method_of_unrelated_class():
source = make_source(
"""
class ClassThatShouldHaveMethodRenamed:
def old(self, arg):
pass
def foo(self):
self.old('whatever')
class UnrelatedClass:
def old(self, arg):
pass
def foo(self):
self.old('whatever')
a = ClassThatShouldHaveMethodRenamed()
a.old()
b = UnrelatedClass()
b.old()
"""
)
occurrences = all_occurrence_positions(source.position(3, 8))
assert [
source.position(3, 8),
source.position(7, 13),
source.position(20, 2),
] == occurrences
def test_finds_definition_from_call():
source = make_source(
"""
def old():
pass
def bar():
old()
"""
)
assert [source.position(1, 4), source.position(5, 4)] == all_occurrence_positions(
source.position(1, 4)
)
def test_finds_attribute_assignments():
source = make_source(
"""
class ClassName:
def __init__(self, property):
self.property = property
def get_property(self):
return self.property
"""
)
occurrences = all_occurrence_positions(source.position(4, 13))
assert [source.position(4, 13), source.position(7, 20)] == occurrences
def test_finds_dict_comprehension_variables():
source = make_source(
"""
old = 1
foo = {old: None for old in range(100) if old % 3}
old = 2
"""
)
position = source.position(row=2, column=21)
assert all_occurrence_positions(position) == [
source.position(row=2, column=7),
source.position(row=2, column=21),
source.position(row=2, column=42),
]
def test_finds_list_comprehension_variables():
source = make_source(
"""
old = 100
foo = [
old for old in range(100) if old % 3]
old = 200
"""
)
position = source.position(row=3, column=12)
assert all_occurrence_positions(position) == [
source.position(row=3, column=4),
source.position(row=3, column=12),
source.position(row=3, column=33),
]
def test_finds_set_comprehension_variables() -> None:
source = make_source(
"""
old = 100
foo = {old for old in range(100) if old % 3}
"""
)
position = source.position(row=2, column=15)
assert all_occurrence_positions(position) == [
source.position(row=2, column=7),
source.position(row=2, column=15),
source.position(row=2, column=36),
]
def test_finds_generator_comprehension_variables() -> None:
source = make_source(
"""
old = 100
foo = (old for old in range(100) if old % 3)
"""
)
position = source.position(row=2, column=15)
assert all_occurrence_positions(position) == [
source.position(row=2, column=7),
source.position(row=2, column=15),
source.position(row=2, column=36),
]
def test_finds_loop_variables():
source = make_source(
"""
old = None
for i, old in enumerate(['foo']):
print(i)
print(old)
print(old)
"""
)
position = source.position(row=1, column=0)
assert all_occurrence_positions(position) == [
source.position(row=1, column=0),
source.position(row=2, column=7),
source.position(row=4, column=10),
source.position(row=5, column=6),
]
def test_finds_tuple_unpack():
source = make_source(
"""
foo, old = 1, 2
print(old)
"""
)
position = source.position(row=1, column=5)
assert all_occurrence_positions(position) == [
source.position(1, 5),
source.position(2, 6),
]
def test_finds_superclasses():
source = make_source(
"""
class A:
def old(self):
pass
class B(A):
pass
b = B()
c = b
c.old()
"""
)
position = source.position(row=3, column=8)
assert all_occurrence_positions(position) == [
source.position(row=3, column=8),
source.position(row=11, column=2),
]
def test_recognizes_multiple_assignments():
source = make_source(
"""
class A:
def old(self):
pass
class B:
def old(self):
pass
foo, bar = A(), B()
foo.old()
bar.old()
"""
)
position = source.position(row=2, column=8)
assert all_occurrence_positions(position) == [
source.position(2, 8),
source.position(10, 4),
]
def test_finds_enclosing_scope_variable_from_comprehension():
source = make_source(
"""
old = 3
res = [foo for foo in range(100) if foo % old]
"""
)
position = source.position(row=1, column=0)
assert all_occurrence_positions(position) == [
source.position(1, 0),
source.position(2, 42),
]
def test_finds_static_method():
source = make_source(
"""
class A:
@staticmethod
def old(arg):
pass
a = A()
b = a.old('foo')
"""
)
position = source.position(row=4, column=8)
assert all_occurrence_positions(position) == [
source.position(4, 8),
source.position(8, 6),
]
def test_finds_method_after_call():
source = make_source(
"""
class A:
def old(arg):
pass
b = A().old('foo')
"""
)
position = source.position(row=3, column=8)
assert all_occurrence_positions(position) == [
source.position(3, 8),
source.position(6, 8),
]
def test_finds_argument():
source = make_source(
"""
class A:
def foo(self, arg):
print(arg)
def bar(self):
arg = "1"
self.foo(arg=arg)
"""
)
position = source.position(row=3, column=18)
assert all_occurrence_positions(position) == [
source.position(3, 18),
source.position(4, 14),
source.position(8, 17),
]
|
thisfred/breakfast
|
tests/test_attempt_12.py
|
Python
|
bsd-2-clause
| 21,250
|
[
"VisIt"
] |
689bb2c04344a8c9b67d999ac56c6d1679bba2638eb7d980d53f6ced4d168878
|
# Copyright (c) 2010 - 2014, AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import os
import os.path
import re
import string
from datetime import datetime
import SCons.Builder
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file
_re_package = re.compile(r'^\s*package\s+([A-Za-z\.]+)', re.M)
_re_public = re.compile(r'^public', re.M)
def parse_javadoc_file(fn):
"""
Return the package name of Java files with a public class. As
configured here, javadoc only generates documentation for public
classes.
"""
contents = open(fn, 'r').read()
pkg = _re_package.search(contents).group(1)
if _re_public.search(contents):
return pkg
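# Illustrative note (not in the original tool): given a source file containing,
# say,
#   package org.alljoyn.example;
#   public class Foo { ... }
# parse_javadoc_file() returns "org.alljoyn.example"; for a package-private
# class it returns None, so the emitter below skips the file.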
def javadoc_emitter(source, target, env):
"""
Look in the source directory for all .java and .html files. Only
those .java files with public classes will be listed as a source
dependency.
The target is a single file: DOCS/index.html (always generated by
javadoc). I could not figure out how to get SCons to use a
directory as a target, therefore the use of the pseudo-builder
JavaDoc (and also to handle clean correctly).
"""
slist = []
for entry in source:
def visit(sl, dirname, names):
d = env.Dir(dirname)
for fn in names:
if os.path.splitext(fn)[1] in ['.java']:
f = d.File(fn)
f.attributes.javadoc_src = source
f.attributes.javadoc_sourcepath = entry.abspath
pkg = parse_javadoc_file(str(f))
if pkg:
f.attributes.javadoc_pkg = pkg
slist.append(f)
elif os.path.splitext(fn)[1] in ['.html']:
f = d.File(fn)
f.attributes.javadoc_src = source
f.attributes.javadoc_sourcepath = entry.abspath
if os.path.basename(str(f)) == 'overview.html':
f.attributes.javadoc_overview = '"' + env.File(str(f)).abspath + '"'
slist.append(f)
os.path.walk(entry.abspath, visit, slist)
slist = env.Flatten(slist)
tlist = [target[0].File('index.html')]
return tlist, slist
def javadoc_generator(source, target, env, for_signature):
javadoc_classpath = '-classpath \"%s\"' % (env['JAVACLASSPATH'])
javadoc_windowtitle = '-windowtitle \"%s\"' % (env['PROJECT_LONG_NAME'])
javadoc_doctitle = '-doctitle \"%s<br/><h3>%s</h3>\"' % (env['PROJECT_LONG_NAME'], env['PROJECT_NUMBER'])
javadoc_header = '-header \"<b>%s</b>\"' % (env['PROJECT_SHORT_NAME'])
try:
copyright = env['PROJECT_COPYRIGHT']
except KeyError:
copyright = "Copyright © 2010-2014 AllSeen Alliance.<br/><p>AllJoyn is a trademark of Qualcomm Innovation Center, Inc. AllJoyn is used here with permission to identify unmodified materials originating in the AllJoyn project.<br/>The AllJoyn open source project is hosted by the AllSeen Alliance.</p><b>THIS DOCUMENT AND ALL INFORMATION CONTAIN HEREIN ARE PROVIDED ON AN \"AS-IS\" BASIS WITHOUT WARRANTY OF ANY KIND</b>.<br/><b>MAY CONTAIN U.S. AND INTERNATIONAL EXPORT CONTROLLED INFORMATION</b>"
javadoc_bottom = '-bottom \"' + "<small>%s %s ($(%s$))<br/>%s<br/></small>" % (env['PROJECT_LONG_NAME'], env['PROJECT_NUMBER'], datetime.now().strftime('%a %b %d %H:%M:%S %Y'), copyright) + '\"'
javadoc_overview = ''
for s in source:
try:
javadoc_overview = '-overview ' + s.attributes.javadoc_overview
except AttributeError:
pass
javadoc_sourcepath = []
for s in source:
try:
javadoc_sourcepath.append(s.attributes.javadoc_sourcepath)
except AttributeError:
pass
javadoc_sourcepath = os.pathsep.join(set(javadoc_sourcepath))
javadoc_packages = []
for s in source:
try:
javadoc_packages.append(s.attributes.javadoc_pkg)
except AttributeError:
pass
javadoc_packages = ' '.join(set(javadoc_packages))
com = 'javadoc %s -use %s %s -quiet -public -noqualifier all %s %s %s -sourcepath "%s" -d ${TARGET.dir} %s' % (javadoc_classpath, javadoc_windowtitle, javadoc_doctitle, javadoc_header, javadoc_bottom, javadoc_overview, javadoc_sourcepath, javadoc_packages)
return com
def JavaDoc(env, target, source, *args, **kw):
"""
JavaDoc('docs', 'src') will call javadoc on all public .java files
under the 'src' directory. Package and private .java files are
ignored.
"""
apply(env.JavaDocBuilder, (target, source) + args, kw)
env.Clean(target, target)
return [env.Dir(target)]
def generate(env):
fs = SCons.Node.FS.get_default_fs()
javadoc_builder = SCons.Builder.Builder(
generator = javadoc_generator,
emitter = javadoc_emitter,
target_factory = fs.Dir,
source_factory = fs.Dir)
env.Append(BUILDERS = { 'JavaDocBuilder': javadoc_builder })
env.AddMethod(JavaDoc, 'JavaDoc')
def exists(env):
"""
Make sure javadoc exists.
"""
return env.Detect("javadoc")
|
cybertanyellow/alljoyn-core
|
build_core/tools/scons/javadoc.py
|
Python
|
isc
| 5,715
|
[
"VisIt"
] |
2308ee9a4ac3514c644731ba28ba825a5c86fe8737e521bf5e8f4d1b9e1e6be7
|
# Taking inspiration from https://realpython.com/fastapi-python-web-apis/
# run with
# uvicorn basic:app --reload
# Then visit http://127.0.0.1:8000/docs and http://127.0.0.1:8000/redoc
from fastapi import FastAPI
app = FastAPI()
@app.get("/") # $ routeSetup="/"
async def root(): # $ requestHandler
return {"message": "Hello World"} # $ HttpResponse
@app.get("/non-async") # $ routeSetup="/non-async"
def non_async(): # $ requestHandler
return {"message": "non-async"} # $ HttpResponse
@app.get(path="/kw-arg") # $ routeSetup="/kw-arg"
def kw_arg(): # $ requestHandler
return {"message": "kw arg"} # $ HttpResponse
@app.get("/foo/{foo_id}") # $ routeSetup="/foo/{foo_id}"
async def get_foo(foo_id: int): # $ requestHandler routedParameter=foo_id
# FastAPI does data validation (with `pydantic` PyPI package) under the hood based
# on the type annotation we did for `foo_id`, so it will auto-reject anything that's
# not an int.
return {"foo_id": foo_id} # $ HttpResponse
# this will work as query param, so `/bar?bar_id=123`
@app.get("/bar") # $ routeSetup="/bar"
async def get_bar(bar_id: int = 42): # $ requestHandler routedParameter=bar_id
return {"bar_id": bar_id} # $ HttpResponse
# The big deal is that FastAPI works so well together with pydantic, so you can do stuff like this
from typing import Optional
from pydantic import BaseModel
class Item(BaseModel):
name: str
price: float
is_offer: Optional[bool] = None
@app.post("/items/") # $ routeSetup="/items/"
async def create_item(item: Item): # $ requestHandler routedParameter=item
# Note: calling `item` a routed parameter is slightly untrue, since it doesn't come
# from the URL itself, but from the body of the POST request
return item # $ HttpResponse
# this also works fine
@app.post("/2items") # $ routeSetup="/2items"
async def create_item2(item1: Item, item2: Item): # $ requestHandler routedParameter=item1 routedParameter=item2
return (item1, item2) # $ HttpResponse
@app.api_route("/baz/{baz_id}", methods=["GET"]) # $ routeSetup="/baz/{baz_id}"
async def get_baz(baz_id: int): # $ requestHandler routedParameter=baz_id
return {"baz_id2": baz_id} # $ HttpResponse
# Docs:
# see https://fastapi.tiangolo.com/tutorial/path-params/
# Things we should look at supporting:
# - https://fastapi.tiangolo.com/tutorial/dependencies/
# - https://fastapi.tiangolo.com/tutorial/background-tasks/
# - https://fastapi.tiangolo.com/tutorial/middleware/
# - https://fastapi.tiangolo.com/tutorial/encoder/
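# Illustrative sketch (assumption, not part of the original test annotations):
# the routes above can be exercised with FastAPI's TestClient, e.g.
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   assert client.get("/foo/7").json() == {"foo_id": 7}
#   assert client.get("/bar").json() == {"bar_id": 42}      # default query param
#   assert client.get("/foo/abc").status_code == 422        # rejected by validation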
|
github/codeql
|
python/ql/test/library-tests/frameworks/fastapi/basic.py
|
Python
|
mit
| 2,541
|
[
"VisIt"
] |
d3bb314341fb01cd23e3522f631bd4b244e8bad24b71fe71649eec22ec664022
|
'''This module contains models with a few variations on estimating locations
with fitted Gaussian mixture models (GMMs).'''
# Copyright (c) Los Alamos National Security, LLC, and others.
from collections import Counter, OrderedDict
import operator
import math
from pprint import pprint
import sys
import time
from django.contrib.gis import geos
import matplotlib.pyplot as plt # for testing
import numpy as np
import osgeo.gdal as ogdal
from sklearn.datasets.samples_generator import make_blobs
import sklearn.mixture
import multicore
import testable
import tweet
import u
from . import base
from . import optimize
from . import pipeline
from . import srs
l = u.l
# This is a reference to the class parameters dictionary; it is set in
# Model.parms_init(), and the defaults are immediately below. Yes, it's an
# awkward interaction between class and module. We do it this way because
# joblib can only parallelize functions, not methods.
#
# FIXME: This approach probably depends on only one instance of one class from
# this module being instantiated per process.
model_parms = None
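# Illustrative sketch (assumption, not original code): the doctests below all
# follow this pattern -- initialize the class parameters once (which also
# points model_parms at them) before calling any of the fitting helpers:
#
#   Token.parms_init({'component_ct_min': 1, 'component_ct_max': 20})
#   token, gmm = gmm_fit_tokenpoints('foo', mp)  # mp: an assumed geos.MultiPoint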
# Default model parameters. It's a function rather than a variable because we
# need to refer to functions which are not yet defined.
def MODEL_PARMS_DEFAULT():
return { 'best_point_f': best_point_weighted_avg,
'component_ct_max': 20,
'component_ct_min': 1,
'component_sz_min': 3,
'covariance_type': 'full',
'gmm_fit_f': gmm_fit_log_heuristic,
'min_covar': 0.001,
'opt_feature_id': 1,
'opt_feature_misc': 0,
'opt_reg': 1,
'opt_init': '',
'weight_f': wt_inv_feature,
'weight_feature': 'covar_sumprod',
'weight_min': 0.001,
'wt_inv_error_exponent': 4.0,
'wt_inv_min_tweets': 3,
'wt_inv_sample_ct': 100 }
def gmm_fit_tokenpoints(token, points):
'''Given a (token, points) pair, return a (token, GMM) fitted to the points
      using the configured strategy. This interface is provided for
compatibility with iterators that yield such pairs.'''
assert (points.geom_type == 'MultiPoint')
gmm = model_parms['gmm_fit_f'](token, points)
# FIXME: This is extremely chatty, so I'm leaving it commented out. Perhaps
# we need another level of logging below DEBUG.
#l.debug('fit %d gaussians to %d points for token <%s>'
# % (gmm.n_components, len(points), token))
# While gmm contains token, we return a tuple because many callers want to
# cache a mapping from token to GMM.
return (token, gmm)
# Options for parameter gmm_fit_f:
def gmm_fit_ternary(points):
return do_gmm_fit_ternary(points, model_parms['component_ct_min'],
model_parms['component_ct_max'])
def gmm_fit_exhaustive(points):
''' Exhaustive search for n_components that minimizes BIC score.
#>>> Token.parms_init({'component_ct_min':1, 'component_ct_max':20})
#>>> g = gmm_fit_exhaustive([[1],[1.1],[1.12],[1.09], [6],[6.2],[6.1]])
#>>> g.n_components
#2'''
lowest_bic = np.infty
bic = []
n_components_range = list(range(model_parms['component_ct_min'],
min(model_parms['component_ct_max'],
len(points))))
for n_components in n_components_range:
gmm = fit_gmm(points, n_components)
bic.append(gmm.bic(np.array(points)))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
return best_gmm
def gmm_fit_fixed(points):
''' n_components = component_ct_min
#>>> Token.parms_init({'component_ct_min':2, 'component_ct_max':2})
#>>> gmm_fit_fixed([[0],[1],[2],[3],[4]]).n_components
#2
#>>> gmm_fit_fixed([[0],[1]]).n_components
#1'''
n = min(len(points)//2, model_parms['component_ct_min'])
return fit_gmm(points, n)
def gmm_fit_log_heuristic(token, mp):
'''n_components = log(k)/2 for k points
Respects component_ct_max/min.
>>> Token.parms_init({'component_ct_min':1, 'component_ct_max':20})
>>> ps = [geos.Point(xy) for xy in zip(range(32), range(32))]
>>> mp16 = geos.MultiPoint(ps[:16], srid=4326)
>>> mp32 = geos.MultiPoint(ps, srid=4326)
>>> gmm_fit_log_heuristic('foo', mp16).n_components
2
>>> gmm_fit_log_heuristic('bar', mp32).n_components # doctest: +SKIP
# see issue #100
3'''
n = (int(round(math.log(len(mp), 2)/2)))
n = min(n, model_parms['component_ct_max'])
n = max(n, model_parms['component_ct_min'])
return Geo_GMM.from_fit(mp, n, token)
def gmm_fit_sqrt_heuristic(points):
''' n_components = sqrt(k//2) for k points
Respects component_ct_max/min.
#>>> Token.parms_init({'component_ct_min':1, 'component_ct_max':20})
#>>> gmm_fit_sqrt_heuristic([[i] for i in range(0,10)]).n_components
#2
#>>> gmm_fit_sqrt_heuristic([[i] for i in range(0,50)]).n_components
#5'''
n = (int(round(math.sqrt(len(points)/2))))
n = min(n, model_parms['component_ct_max'])
n = max(n, model_parms['component_ct_min'])
return fit_gmm(points, n)
def score_to_prob(s):
return 1.0 / (1.0 + np.exp(-s))
def best_gmm(scores, gmms):
return gmms[min([i for i in range(0, len(scores))],
key=lambda s: scores[s])]
def ternary_search(data, min_i, min_score, min_gmm, max_i, max_score, max_gmm):
   '''Search for the optimal number of gaussians using ternary search.
      Assumes the BIC score is unimodal in the number of components.'''
left_third = (2 * min_i + max_i) // 3
right_third = (min_i + 2 * max_i) // 3
left_third_gmm = fit_gmm(data, left_third)
right_third_gmm = fit_gmm(data, right_third)
left_third_score = left_third_gmm.bic(data)
right_third_score = right_third_gmm.bic(data)
if max_i - min_i <= 3:
return best_gmm([min_score, left_third_score,
right_third_score, max_score],
[min_gmm, left_third_gmm,
right_third_gmm, max_gmm])
if left_third_score > right_third_score:
return ternary_search(data, left_third, left_third_score, left_third_gmm,
max_i, max_score, max_gmm)
else:
return ternary_search(data, min_i, min_score, min_gmm,
right_third, right_third_score, right_third_gmm)
def do_gmm_fit_ternary(data, n_min, n_max):
'''Ternary search for GMM with optimal number of components from n_min to
n_max (both inclusive). n_max is clamped at len(data) - 1.
NOTE: We considered mixture.DPGMM (which obviates need to pick N), but
this module was not working at time of writing:
http://sourceforge.net/mailarchive/message.php?msg_id=29984164'''
n_components_range = list(range(n_min, min(n_max, len(data)-1) + 1))
min_gmm = fit_gmm(data, n_components_range[0])
max_gmm = fit_gmm(data, n_components_range[-1])
npdata = np.array(data)
return ternary_search(npdata, n_components_range[0], min_gmm.bic(npdata),
min_gmm, n_components_range[-1],
max_gmm.bic(npdata), max_gmm)
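# Illustrative worked step (not original code): starting from the component
# range [1, 12], ternary_search() probes left_third = (2*1+12)//3 = 4 and
# right_third = (1+2*12)//3 = 8 components, recurses into the part of the
# range on the side of the lower-BIC probe, and stops once the range is 3
# wide or less, returning the best of the four candidates examined there.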
# Options for parameter best_point_f
def best_point_f(model):
return model_parms['best_point_f'](model)
def best_point_means(model):
return model.means_[max([i for i in range(0, len(model.means_))],
key=lambda w: model.score([model.means_[i]])[0])]
def best_point_weighted_avg(model):
'''Return the mean of the means in the model, weighted by component
weights. For example:
>>> Token.parms_init({'component_sz_min':1})
>>> mp = geos.MultiPoint([geos.Point(xy)
... for xy
... in [[1,1],[1.1,1],[.9,1.1], [5,5],[4.5,5]]])
>>> g = Geo_GMM.from_fit(mp, 2, 'foo')
>>> best_point_weighted_avg(g)
array([ 2.5 , 2.62])'''
return np.average(model.means_, axis=0, weights=model.weights_)
def cae(token, points, token_gmms):
m = token_gmms[token]
m.prepare(0.95)
av = np.average(points, axis=0)
avg_p = geos.Point(*av, srid=points[0].srid)
w = 1/(1 + m.cae(avg_p))
return w
def relevant_gmms(tokens, token_gmms):
return [ token_gmms[t] for t in tokens if t in token_gmms ]
# Options for parameter weight_f. This is a function which takes a token:Model
# mapping, an iterator of tweets, and token:MultiPoint mapping and returns a
# token:weight dictionary.
def cae_opt(tms, tweets, tokenpoints):
'''Optimize token_weights to minimize CAE over all training tweets'''
l.debug('preparing token models')
t_start = time.time()
for g in list(tms.values()):
g.populate_samples(100)
l.debug('done preparing in %s' % (u.fmt_seconds(time.time() - t_start)))
gmms_list = []
errors_list = []
l.debug('computing CAE for all tweets')
t_start = time.time()
for tw in tweets:
r_gmms = relevant_gmms(tw.tokens, tms)
if (len(r_gmms) == 0):
continue
errors = [max(0.00001, g.cae(tw.geom)) for g in r_gmms]
gmms_list.append(r_gmms)
errors_list.append(errors)
l.debug('done computing CAE in %s' % (u.fmt_seconds(time.time() - t_start)))
return optimize.Weight(gmms_list, errors_list,
regularizer=model_parms['opt_reg'],
identity_feature=model_parms['opt_feature_id'],
misc_feature=model_parms['opt_feature_misc'],
init_by_feature=model_parms['opt_init']
).optimize()
# FIXME: DRY (cae_opt)
def sae_opt(tms, tweets, tokenpoints):
'''Optimize token_weights to minimize SAE over all training tweets'''
l.debug('preparing token models')
t_start = time.time()
# FIXME: multicore?
for g in list(tms.values()):
g.populate_best_point()
l.debug('done preparing in %s' % (u.fmt_seconds(time.time() - t_start)))
gmms_list = []
errors_list = []
l.debug('computing MSAE for all tweets')
t_start = time.time()
for tw in tweets:
r_gmms = relevant_gmms(tw.tokens, tms)
if (len(r_gmms) == 0):
continue
errors = [g.sae(tw.geom) for g in r_gmms]
gmms_list.append(r_gmms)
errors_list.append(errors)
l.debug('done computing SAE in %s' % (u.fmt_seconds(time.time() - t_start)))
return optimize.Weight(gmms_list, errors_list,
regularizer=model_parms['opt_reg'],
identity_feature=model_parms['opt_feature_id'],
misc_feature=model_parms['opt_feature_misc'],
init_by_feature=model_parms['opt_init']).optimize()
def wt_inv_error_sae(tms, tweets, tokenpoints):
return wt_inv_error(tms, tweets, tokenpoints, 'sae')
def wt_inv_error_cae(tms, tweets, tokenpoints):
return wt_inv_error(tms, tweets, tokenpoints, 'cae')
def wt_inv_error(tms, tweets, tokenpts, errattr):
'''Weight of token T is |1/E^x|, where E is the mean error between T and
each tweet in tweets having that token, using measure errattr ('sae' or
'cae'), and x is model parm wt_inv_error_exponent. The number of samples
used in computing CAE is model parm wt_inv_sample_ct. If the number of
tweets with the token is less than model parm wt_inv_min_tweets, the
weight is 0.'''
l.debug('computing inverse errors')
t1 = time.time()
# We work in chunks to keep memory use down. The chunk size is currently
# not configurable, though we could make it so if needed.
models = list(tms.values())
weights = dict()
x = model_parms['wt_inv_error_exponent']
for chunk in u.groupn(models, 20000):
weights.update((tok, min(1, abs(1/(1+err**x))))
for (tok, err)
in multicore.do(model_error, (errattr, tokenpts), chunk))
l.debug('inverse error chunk completed')
dur = time.time() - t1
l.debug('computed inverse errors in %s (%.2gs per token)'
% (u.fmt_seconds(dur), dur / len(models)))
return weights
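# Illustrative worked number (not original code): with the default
# wt_inv_error_exponent x = 4, a token whose mean error E is 2 gets weight
# min(1, |1/(1 + 2**4)|) = 1/17 ~ 0.059, while a token with E near 0 keeps a
# weight near 1; tokens appearing in fewer than wt_inv_min_tweets tweets get
# an infinite error from model_error() and hence a weight of essentially 0.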
def model_error(errattr, tokenpts, g):
'''Return the error (using measure errattr) of model g on token points
looked up in tokenpts. If there are fewer than model parm
wt_inv_min_tweets for the token, return positive infinity.'''
   # FIXME: awkward to return (token, error) tuple? just return error and let
# caller zip() it up?
assert (len(g.explanation) == 1)
token = next(iter(g.explanation.keys()))
points = tokenpts[token]
assert (points.geom_type == 'MultiPoint')
if (len(points) < model_parms['wt_inv_min_tweets']):
return np.inf
assert (not g.prepared)
# This if/else is kind of awkward. populate_samples() is pretty
# heavyweight, so we certainly shouldn't do that unless we have to. But
# still, I'm uncomfortable here...
if (errattr == 'sae'):
g.populate_best_point()
elif (errattr == 'cae'):
g.populate_samples(model_parms['wt_inv_sample_ct'])
else:
assert False, 'unreachable'
err = np.mean([getattr(g, errattr)(pt) for pt in points])
g.unprepare()
return (token, err)
def scale(token_weights):
'''Scale weights to be positive, if needed, by exp(x - max(token_weights))
>>> pprint(scale({'a':-1,'b':1}))
{'a': 0.135..., 'b': 1.0}
>>> pprint(scale({'a':10,'b':5}))
{'a': 10, 'b': 5}'''
if any(v < 0 for v in token_weights.values()):
max_v = max(token_weights.values())
return {t:math.exp(v - max_v)
for (t,v) in token_weights.items()}
else:
return token_weights
def inverse(token_weights):
'''Make small values big and big values small. All will be positive in the
end, in range (1,+Infty).
>>> pprint(inverse({'a':-1,'b':1}))
{'a': 3.0, 'b': 1.0}
>>> pprint(inverse({'a':-100,'b':100}))
{'a': 201.0, 'b': 1.0}'''
if any(v < 0 for v in token_weights.values()):
max_v = max(token_weights.values())
return {t:max_v + 1. - v
for (t,v) in token_weights.items()}
else:
return token_weights
def wt_neg_feature(tms, tweets, tokenpoints):
'''Weight by max(weights) + 1 - w, where w is a weight_feature. This means
that small weights become large, large weights become small, and all
weights are positive in range (1,+Infty).'''
m = { t: m.features()[model_parms['weight_feature']]
for (t, m) in tms.items() }
m = inverse(m)
return m
def wt_inv_feature(tms, tweets, tokenpoints):
   '''Weight by the inverse of the feature named by parameter
      weight_feature. If negative numbers exist, shift all values to be
positive.'''
m = { t: 1 / (1 + m.features()[model_parms['weight_feature']])
for (t, m) in tms.items() }
return scale(m)
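# Illustrative worked number (not original code): with the default
# weight_feature 'covar_sumprod', a token whose GMM has covar_sumprod = 3 gets
# raw weight 1/(1+3) = 0.25; since all raw weights are positive, scale()
# passes them through unchanged.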
class Geo_GMM(base.Location_Estimate, sklearn.mixture.GMM):
'''This is a GMM with a geographic interpretation, which also serves as a
location estimate (hence the multiple inheritance). Adds the following
attributes:
samples .......... List of (Point, log probability, component index)
tuples sampled from the model, ordered by
descending log probability. WARNING: These samples
are *not* guaranteed to be in-bounds (i.e., valid
locations on the globe).
samples_inbound .. geos.MultiPoint of above which are in-bounds.'''
# FIXME: Lame to use tuples for the samples list. Better to use objects?
def __init__(self, *args, **kwargs):
self.samples = None
self.samples_inbound = None
u.call_kw(base.Location_Estimate.__init__, self, **kwargs)
u.call_kw(sklearn.mixture.GMM.__init__, self, **kwargs)
@property
def explanation(self):
return self.tokens
@classmethod
def combine(class_, gmms, weights, coverage):
'''Combine Geo_GMMs using gmm_combine_f. gmms is an iterable of Geo_GMMs
(each with exactly one token of weight 1), while weights is a (token,
weight) mapping that must be a superset of the tokens in gmms.
GMMs with weights close to zero are omitted; at least one must
remain. All component SRIDs must be the same, as must all covariance
      types. The result is a prepared Geo_GMM with all the
      Location_Estimate juiciness.
For example:
>>> Token.parms_init({'component_sz_min':1})
>>> mp = geos.MultiPoint(geos.Point(1,2), geos.Point(3,4), srid=4326)
>>> m1 = Geo_GMM.from_fit(mp, 1, 'foo')
>>> m2 = Geo_GMM.from_fit(mp, 2, 'bar')
>>> m3 = Geo_GMM.from_fit(mp, 1, 'baz')
>>> combined = Geo_GMM.combine([m1, m2, m3],
... { 'foo':2, 'bar':3, 'baz':1e-6 }, 0.95)
>>> combined.weights_
array([ 0.4, 0.3, 0.3])
>>> pprint(combined.explanation)
{'bar': 0.6, 'foo': 0.4}
>>> combined.n_points
4
>>> [combined.sample(5) for i in range(100)] and None
>>> combined.srid
4326
>>> combined.pred_region.geom_type
'MultiPolygon'
>>> combined.pred_coverage
0.95
>>> print(Geo_GMM.combine([m1, m2, m3],
... { 'foo':0, 'bar':0, 'baz':0 }, 0.95))
None
'''
# sanity checks
assert (len(gmms) >= 1)
srid = gmms[0].srid
covariance_type = gmms[0].covariance_type
assert (srid is not None)
def weight(g):
return weights[next(iter(g.tokens.keys()))]
for g in gmms:
assert (g.srid == srid)
assert (g.covariance_type == covariance_type)
assert (len(g.tokens) > 0)
assert (weight(g) >= 0)
# following aren't fundamental, just not yet supported
assert (len(g.tokens) == 1)
assert (next(iter(g.tokens.values())) == 1.0)
# remove GMMs that don't have enough weight
max_weight = max([weight(g) for g in gmms])
min_weight = max_weight * model_parms['weight_min']
gmms = [g for g in gmms if weight(g) > min_weight]
# all weights are 0. cannot locate.
if (max_weight == 0):
return None
assert (len(gmms) >= 1)
# renormalize weights
relevant_weights = { t: weights[t]
for t in sum([list(g.tokens.keys()) for g in gmms], []) }
total_weight = sum(relevant_weights.values())
weights = { t: w / total_weight
for (t, w) in relevant_weights.items() }
# build a skeleton GMM
n_components = sum([g.n_components for g in gmms])
new = class_(n_components=n_components, covariance_type=covariance_type)
# populate the new GMM
new.srid = srid
new.means_ = np.concatenate([g.means_ for g in gmms])
new.covars_ = np.concatenate([g.covars_ for g in gmms])
new.weights_ = np.concatenate([g.weights_ * weight(g) for g in gmms])
new.converged_ = True
new.tokens = weights
new.n_points = sum([g.n_points for g in gmms])
# prepare
new.prepare(coverage)
return new
@classmethod
def filter_small_components(class_, m, data):
'''Remove components with fewer than component_sz_min points. If none
remain, re-fit with one component.
>>> Token.parms_init({'component_sz_min':2})
>>> x,y = make_blobs(n_samples=100, centers=[[10,10], [20,20]],
... n_features=2, random_state=100)
>>> x = np.vstack((x, [100,100])) # outlier
>>> mp = geos.MultiPoint([geos.Point(tuple(xy)) for xy in x])
>>> m = Geo_GMM.from_fit(mp, 3, 'foo')
>>> m.n_components
2
>>> mp = geos.MultiPoint([geos.Point((10,10)), geos.Point((20,20))])
>>> m = Geo_GMM.from_fit(mp, 2, 'foo')
>>> m.n_components
1'''
cts = Counter(m.predict(data))
tokeep = [idx for (idx,ct) in list(cts.items())
if ct >= model_parms['component_sz_min']]
if len(tokeep) == 0:
m.n_components = 1
m.fit(data)
else:
m.means_ = m.means_[tokeep]
m.covars_ = m.covars_[tokeep]
m.weights_ = m.weights_[tokeep]
m.weights_ = m.weights_ / sum(m.weights_)
m.n_components = len(tokeep)
return m
@classmethod
def from_fit(class_, mp, n_components, tokens=tuple()):
'''Given a MultiPoint, return a new Geo_GMM fitted to those points. If
given, tokens is an iterable of tokens or a single token string.'''
new = class_(n_components=n_components,
covariance_type=model_parms['covariance_type'],
min_covar=model_parms['min_covar'],
random_state=u.rand_np, n_iter=1000)
data = np.array(mp, dtype=np.float) # mp.coords is slow
new.fit(data)
new = Geo_GMM.filter_small_components(new, data)
new.srid = mp.srid
if (isinstance(tokens, str)):
tokens = [tokens]
new.tokens = { t:1 for t in tokens }
new.n_points = mp.num_geom
new.aic_cache = new.aic(data)
new.bic_cache = new.bic(data)
# use average of X and Y variance as the variance
new.var_cache = np.mean((data[:,0].var(), data[:,1].var()))
return new
def cae(self, pt):
return np.mean(srs.geodesic_distance_mp(pt, self.samples_inbound_mp))
def contour(self, pt):
score = self.score_pt(pt)
idx = sum(score < i[1] for i in self.samples)
return (idx / len(self.samples))
def coverst_p_real(self, pt):
return self.score_pt(pt) > self.pred_region_threshold
def likelihood_polygon(self,pg):
'''>>> Token.parms_init({'component_sz_min':1})
>>> mp = geos.MultiPoint(geos.Point(1,1), geos.Point(10,10), srid=4326)
>>> m = Geo_GMM.from_fit(mp, 2, 'foo')
>>> c = Geo_GMM.combine([m], {'foo':1 }, 0.95)
>>> c.likelihood_polygon(geos.Polygon.from_bbox((0.9,0.9,1.1,1.1)))
0.503
>>> c.likelihood_polygon(geos.Polygon.from_bbox((0.95,0.95,1.05,1.05)))
0.385'''
# returns proportion of samples contained in pg
return sum(pg.contains(p[0]) for p in self.samples) / len(self.samples)
def likelihood_polygons(self, polygons, threshold=0.001):
'''Return (index, probability) tuples for the likelihood of each
polygon, trimmed by threshold.
>>> Token.parms_init({'component_sz_min':1})
>>> mp = geos.MultiPoint(geos.Point(1,1), geos.Point(10,10), srid=4326)
>>> m = Geo_GMM.from_fit(mp, 2, 'foo')
>>> combined = Geo_GMM.combine([m], {'foo':1 }, 0.95)
>>> big = geos.Polygon.from_bbox((0.9,0.9,1.1,1.1))
>>> small = geos.Polygon.from_bbox((0.95,0.95,1.05,1.05))
>>> combined.likelihood_polygons([big, small])
[(0, 0.503), (1, 0.387)]'''
scores = [(i, self.likelihood_polygon(p))
for (i,p) in enumerate(polygons)]
return [(i, s) for (i,s) in scores if s >= threshold]
def dump_geoimage(self, basename, width_px):
# FIXME: This method is a mess and needs to be cleaned & split into
# several other methods.
#
# The GDAL documentation for Python is pretty poor, so this is cobbled
# together from a bunch of Googling. Notable sources:
#
# https://gist.github.com/205115
# http://www.gdal.org/gdal_tutorial.html
# http://trac.osgeo.org/gdal/wiki/PythonGotcha
# http://www.gdal.org/frmt_gtiff.html
# Find the bounds and image dimensions in this estimate's SRS, aiming
# for square pixels (which of course may not be square in other SRS).
def t(xy):
return srs.transform(geos.Point(xy, srid=srs.SRID_WGS84), self.srid)
xmin = t((base.GEOIMG_LONMIN, 0)).x
xmax = t((base.GEOIMG_LONMAX, 0)).x
ymin = t((0, base.GEOIMG_LATMIN)).y
ymax = t((0, base.GEOIMG_LATMAX)).y
height_px = int(width_px * (xmax - xmin) / (ymax - ymin))
# Evaluate the model across the world. (FIXME: This could be sped up
# with some smarter choices of bounds.) (FIXME: should we have
# endpoint=False?)
xs = np.linspace(xmin, xmax, num=width_px)
ys = np.linspace(ymin, ymax, num=height_px)
xys = np.dstack(np.meshgrid(xs, ys)).reshape((width_px * height_px, 2))
# FIXME: Token GMMs have a bad self.score, introduced by optimize.py;
# see issue #32. This works around the problem in unpickled objects that
# can't be fixed by simply updating the code; it patches the live
# objects to restore the method. It should be removed when no longer
# needed.
l.warning('workaround code for private issue #32 active')
import numpy
if (isinstance(self.score, numpy.float64)):
l.debug('workaround code for private issue #32 triggered')
from types import MethodType
self.score = MethodType(self.__class__.score, self, self.__class__)
probs = score_to_prob(self.score(xys))
probs = probs.reshape((height_px, width_px))
# FIXME: There is a bug in libgdal
# (http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=695060) which
# prevents it from correctly interpreting files that have distance units
# other than meters. Thus, if we are using one of our SRS with km or Mm
# units, use the following kludge to convert the result to the
# meter-based equivalent.
srid_export = self.srid
(base_srid, r) = divmod(self.srid, 10)
if (base_srid >= 10000):
srid_export = base_srid
[xmin, xmax, ymin, ymax] = [x*10**r for x in [xmin, xmax, ymin, ymax]]
# Write the results as a GeoTIFF. First transformation is to boost the
# low values to make them more visible in the plot. b>0 is "bendiness".
l.debug("max probability before bending = %g" % (probs.max()))
b = 4.0
probs = (b * probs + probs) / (b * probs + 1)
l.debug("max probability after bending = %g" % (probs.max()))
# We scale the probability range to [0,255] so that we can use the Byte
# type and JPEG compression (which saves approximately 30x).
probs = (255/probs.max() * probs).clip(0, 255).astype(np.uint8)
driver = ogdal.GetDriverByName('GTiff')
out = driver.Create(basename + '.tif',
width_px, height_px, 1, ogdal.GDT_Byte,
['COMPRESS=JPEG', 'JPEG_QUALITY=95',
'PHOTOMETRIC=MINISWHITE'])
# arbitrary key, value metadata; doesn't appear in QGIS
#out.SetMetadataItem('foo', 'bar')
# Affine transform from image space to projected space. I don't quite
# understand what is going on here; the resulting image has upper left
# and lower left corners reversed according to gdalinfo (and same for
# right). However, it displays fine in QGIS. An alternative is to offer
# ymax and invert the pixel size (making it negative), which gives
# corners that seem right but then the image is upside down.
# http://gdal.org/classGDALDataset.html#af9593cc241e7d140f5f3c4798a43a668
out.SetGeoTransform([xmin, (xmax - xmin) / width_px, 0,
ymin, 0, (ymax - ymin) / height_px])
out.SetProjection(srs.SRS[srid_export].wkt)
out.GetRasterBand(1).WriteArray(probs)
# In order to correctly display in QGIS, you need to compute the exact
      # statistics. A bug (http://hub.qgis.org/issues/6496) prevents QGIS from
      # doing this itself; also, if we use this call, the statistics are
      # embedded in the file and no auxiliary .xml file is created.
out.GetRasterBand(1).GetStatistics(0,1)
def features(self, identity=True, misc=True):
'''Return an OrderedDict of some features that might be worth judging
the quality of this object on. If identity is True, include a
uniquely named feature with value 1; if misc is True, include
everything else. For example:
>>> Token.parms_init({'component_sz_min':1})
>>> points = [geos.Point(xy) for xy in ((1,2), (3,4), (6,5), (9,7))]
>>> mp = geos.MultiPoint(points, srid=4326)
>>> g = Geo_GMM.from_fit(mp, 2, 'tx foo')
>>> for (k, v) in g.features().items(): print('%s: %s' % (k, v))
Geo_GMM/...: 1
tx: 1
one: 1
n_components: 2
n_points: 4
aic: 22.36...
bic: 15.61...
variance: 6.21...
variance_comp: 3.10...
variance_pt: 1.55...
covar_sumsum: 10.25...
covar_sumprod: 6.07...
covar_sumsum_comp: 5.12...
covar_sumsum_pt: 2.56...
covar_sumprod_comp: 3.03...
covar_sumprod_pt: 1.51...
Warning: the identity feature is only valid for the lifetime of this
object. In particular, if you pickle and rebuild this object, you
will get a different identity feature.
'''
assert (identity or misc)
od = OrderedDict()
if (identity):
od['%s/%d' % (self.__class__.__name__, id(self))] = 1
if (misc):
od.update({ t[:2]:1 for t in self.tokens.keys() }) # tweet field
od['one'] = 1
od['n_components'] = self.n_components
od['n_points'] = self.n_points
try:
od['aic'] = self.aic_cache
od['bic'] = self.bic_cache
od['variance'] = self.var_cache
od['variance_comp'] = od['variance'] / self.n_components
od['variance_pt'] = od['variance'] / self.n_points
except AttributeError:
pass
od['covar_sumsum'] = self.covars_.sum()
od['covar_sumprod'] = sum([cv.prod() for cv in self.covars_])
od['covar_sumsum_comp'] = od['covar_sumsum'] / self.n_components
od['covar_sumsum_pt'] = od['covar_sumsum'] / self.n_points
od['covar_sumprod_comp'] = od['covar_sumprod'] / self.n_components
od['covar_sumprod_pt'] = od['covar_sumprod'] / self.n_points
return od
def populate_best_point(self):
self.best_point = geos.Point(tuple(best_point_f(self)), srid=self.srid)
def populate_pred_region_real(self, trim=True):
# what's the contour value?
threshold_idx = int(round(self.pred_coverage * len(self.samples)))
self.pred_region_threshold = self.samples[threshold_idx][1]
bests = self.samples[:threshold_idx]
# compute contours
regions = []
for i in range(self.n_components):
points = [j for j in bests if j[2]==i]
if (len(points) < 3):
# can't make a polygon with less than 3 vertices, skip
continue
points = geos.MultiPoint([i[0] for i in points], srid=self.srid)
regions.append(points.convex_hull)
# create a multipolygon and clean up
assert (len(regions) > 0)
pr = geos.MultiPolygon(regions, srid=self.srid).cascaded_union
if (trim):
pr = srs.trim(pr)
if (pr.geom_type == 'Polygon'):
# cascaded_union can collapse a MultiPolygon into a single Polygon
pr = geos.MultiPolygon([pr], srid=self.srid)
assert (pr.geom_type == 'MultiPolygon')
self.pred_region = pr
def prepare_real(self):
self.populate_samples(model_parms['mc_sample_ct'])
def populate_samples(self, sample_ct):
sraw = [geos.Point(tuple(i), srid=self.srid)
for i in self.sample(sample_ct, u.rand_np)]
evals = self.score_samples([i.coords for i in sraw])
logprobs = evals[0]
component_is = [np.argmax(i) for i in evals[1]]
self.samples = list(zip(sraw, logprobs, component_is))
self.samples.sort(reverse=True, key=operator.itemgetter(1))
mp = geos.MultiPoint([i[0] for i in self.samples], srid=self.srid)
self.samples_inbound_mp = srs.trim(mp)
def score_pt(self, pt):
return self.score((pt.coords,))[0]
def unprepare(self):
self.samples = u.Deleted_To_Save_Memory()
self.samples_inbound_mp = u.Deleted_To_Save_Memory()
base.Location_Estimate.unprepare(self)
class Model(base.Model):
parms_default = u.copyupdate(base.Model.parms_default,
MODEL_PARMS_DEFAULT())
@classmethod
def parms_init(class_, parms, **kwargs):
super(Model, class_).parms_init(parms, **kwargs)
# See above for more on this kludge.
global model_parms
model_parms = class_.parms
class Message(Model):
''' Gaussian Mixture Model created for every message based on
location of tokens in that message.'''
def build(self):
'Just store map of all token/location pairs'
self.warn_if_parallel()
self.token_iterator = feature_select(self.tokens)
self.tokens = {token:points for (token,points) in self.token_iterator}
def locate(self, tokens, confidence):
tweet_points = []
for token in tokens:
if token in self.tokens:
tweet_points.extend(self.tokens[token])
if len(tweet_points) == 0:
return None
else:
model = gmm_fit_tokenpoints('all',geos.MultiPoint(tweet_points))[1]
return Location_Estimate(model, confidence, self.srid)
class Token(Model):
'''Gaussian Mixture Model created for every token. Locate method
combines models according to gmm_combine_f(). For example:
>>> Token.parms_init({})
>>> mp = geos.MultiPoint(geos.Point(30, 60), geos.Point(40,70),
... srid=u.WGS84_SRID)
>>> m = Token([('foo', mp)], u.WGS84_SRID)
>>> m.build()
>>> m.locate(['foo'], 0.95).best_point.coords
(34.999..., 64.999...)'''
def build(self):
self.token_gmms = dict(multicore.do(gmm_fit_tokenpoints,
(), list(self.tokens.items())))
self.token_weights = model_parms['weight_f'](self.token_gmms,
self.tweets, self.tokens)
def locate(self, tokens, confidence):
r_gmms = relevant_gmms(tokens, self.token_gmms)
if (len(r_gmms) == 0):
return None
else:
return Geo_GMM.combine(r_gmms, self.token_weights, confidence)
def token_summary(self, token):
od = Model.token_summary(self, token)
od.update(self.token_gmms[token].features(identity=False, misc=True))
return od
class All_Tweets(Model):
'''Single Gaussian Mixture Model created for all tokens.
>>> All_Tweets.parms_init({})
>>> mp1 = geos.MultiPoint(geos.Point(30, 60), srid=u.WGS84_SRID)
>>> mp2 = geos.MultiPoint(geos.Point(40,70), srid=u.WGS84_SRID)
>>> m = All_Tweets([('foo', mp1), ('bar', mp2)], u.WGS84_SRID)
>>> m.build()
>>> m.locate(['foo'], 0.95).best_point.coords
(34.999..., 64.999...)
>>> m.locate(['bar'], 0.95).best_point.coords
(34.999..., 64.999...)'''
def build(self):
self.warn_if_parallel()
allpoints = geos.MultiPoint([pts for sublist in self.tokens.values()
for pts in sublist],
srid=self.srid)
l.debug('fitting All_Tweets to %d points...' % len(allpoints))
self.global_model = gmm_fit_tokenpoints('_all_tweets_', allpoints)[1]
def locate(self, tokens, confidence):
# FIXME: wasteful to repeatedly prepare the same model?
self.global_model.prepare(confidence)
return self.global_model
# FIXME: figure out how to pass model_parms for each submodel here
class Message_All_Pipeline(pipeline.Model):
def __init__(self, token_iterator):
assert False, 'unimplemented'
pipeline.Model.__init__(self, [Message(token_iterator),
All_Tweets(token_iterator)])
class Token_All_Pipeline(pipeline.Model):
def __init__(self, token_iterator):
assert False, 'unimplemented'
pipeline.Model.__init__(self, [Token(token_iterator),
All_Tweets(token_iterator)])
### Tests ###
# Test-Depends: geo manual
# Test passes as of sklearn.13-git
# Fails under sklearn 0.16.1
testable.register('''
# Test that fitting respects consistent random state.
>>> def test_r():
... r = np.random.mtrand.RandomState(1234)
... m = sklearn.mixture.GMM(n_components=2, random_state=r)
... m.fit([1, 1.1, 2, 2.2])
... return m.sample(10, r)
>>> all((test_r().tolist() == test_r().tolist() for i in range(100)))
True
''')
def test_interactive():
import cProfile
#prof = cProfile.Profile()
#prof.enable()
u.logging_init('inter', verbose_=True)
test_error_metrics()
test_interactive_real()
#prof.disable()
#prof.dump_stats('profile.out')
def test_interactive_real():
sample_ct = 1000
test_ct = 1
# first, try one fit and plot it
if (True):
r = test_fitting(0.95, 1, sample_ct)
plt.axhline(y=90)
plt.scatter(*list(zip(*r['all_xys'])), s=5, color='b', marker='.')
plt.scatter(*list(zip(*r['g'].means_)), s=40, color='r', marker='s')
plt.scatter(*list(zip(*[s[0].coords for s in r['g'].samples])),
s=5, color='g', marker='.')
for polygon in r['g'].pred_region:
(xs, ys) = list(zip(*polygon[0].coords))
plt.fill(xs, ys, 'k', lw=2, fill=False, edgecolor='r')
plt.show()
return
# next, try a bunch of fits and report how well calibrated they are
all_ = dict()
for coverage in (0.50, 0.90, 0.95):
l.info('COVERAGE = %g' % (coverage))
all_[coverage] = list()
for seed in range(test_ct):
l.info('SEED = %d' % (seed))
all_[coverage].append(test_fitting(coverage, seed, sample_ct))
l.info('RESULTS')
for (coverage, results) in all_.items():
l.info('coverage = %g' % (coverage))
l.info(' mean observed coverage (covers) = %g'
% (np.mean([r['coverage_obs'] for r in results])))
l.info(' MCE (covers) = %g'
% (np.mean([r['coverage_error'] for r in results])))
l.info(' mean fudge = %g'
% (np.mean([r['coverage_fudge'] for r in results])))
l.info(' mean observed coverage (coverst) = %g'
% (np.mean([r['coveraget_obs'] for r in results])))
l.info(' MCE (coverst) = %g'
% (np.mean([r['coveraget_error'] for r in results])))
l.info(' mean fudge (coverst) = %g'
% (np.mean([r['coveraget_fudge'] for r in results])))
l.info(' mean contour = %g'
% (np.mean([r['contour'] for r in results])))
l.info(' mean MSAE = %g km'
% (np.mean([r['msae'] for r in results])))
l.info(' mean MCAE = %g km'
% (np.mean([r['mcae'] for r in results])))
l.info(' MPRA = %g km^2'
% (np.mean([r['pra'] for r in results])))
def test_fitting(coverage, seed, sample_ct):
result = {}
rs = np.random.RandomState(seed)
Model.parms_init({ 'mc_sample_ct': sample_ct }) # FIXME: kludge ugly here
# Create and fit a GMM. We fit random points centered on Alert, Nunavut (83
# degrees north) as well as Los Alamos in order to test clamping for sampled
# points that are too far north. The two places are roughly 5,447 km apart.
ct = sample_ct
alert_xys = list(zip(-62.33 + rs.normal(scale=4.0, size=ct*1.5),
82.50 + rs.normal(scale=8.0, size=ct*1.5)))
   # make sure we are indeed sloshing over the northern boundary of the world
assert (len([xy for xy in alert_xys if xy[1] >= 90]) > 8)
la_xys = list(zip(-106.30 + rs.normal(scale=3.0, size=ct),
35.89 + rs.normal(scale=2.0, size=ct)))
all_xys = alert_xys + la_xys
inbounds_xys = [xy for xy in all_xys if xy[1] < 90]
l.info('true points in bounds = %d/%d = %g'
% (len(inbounds_xys), len(all_xys), len(inbounds_xys)/len(all_xys)))
result['all_xys'] = all_xys
result['inbounds_xys'] = inbounds_xys
all_mp = geos.MultiPoint([geos.Point(xy) for xy in all_xys],
srid=srs.SRID_WGS84)
t1 = time.time()
g = Geo_GMM.from_fit(all_mp, 2)
result['g'] = g
l.info('fitted %d components in %gs' % (len(g.weights_), time.time() - t1))
t1 = time.time()
g.prepare(coverage)
l.info("prepare()'d %d points in %gs" % (len(g.samples), time.time() - t1))
l.info('component weights: %s' % ([g.weights_],))
l.info('component assignments: %s'
% ([len([i for i in g.samples if i[2]==0]),
len([i for i in g.samples if i[2]==1])],))
# coverage
covers_ct = sum(g.covers_p(geos.Point(xy, srid=srs.SRID_WGS84))
for xy in inbounds_xys)
result['coverage_req'] = coverage
result['coverage_obs'] = covers_ct / len(inbounds_xys)
result['coverage_error'] = result['coverage_obs'] - coverage
result['coverage_fudge'] = coverage / result['coverage_obs']
l.info('observed coverage (in-bounds) = %d/%d = %g'
% (covers_ct, len(inbounds_xys), result['coverage_obs']))
t1 = time.time()
result['contour'] = sum(g.contour(geos.Point(xy, srid=srs.SRID_WGS84))
for xy in inbounds_xys) / len(inbounds_xys)
l.info('computed contour() in %gs per point'
% ((time.time() - t1) / len(inbounds_xys)))
covers_ct = sum(g.coverst_p(geos.Point(xy, srid=srs.SRID_WGS84))
for xy in inbounds_xys)
result['coveraget_obs'] = covers_ct / len(inbounds_xys)
result['coveraget_error'] = result['coveraget_obs'] - coverage
result['coveraget_fudge'] = coverage / result['coveraget_obs']
l.info('observed coverage (in-bounds, coverst) = %d/%d = %g'
% (covers_ct, len(inbounds_xys), result['coveraget_obs']))
# absolute error for a random true point
inb_sample = inbounds_xys[:1]
t1 = time.time()
sae = [g.sae(geos.Point(p, srid=srs.SRID_WGS84)) for p in inb_sample]
l.info('computed SAE in %gs per point'
% ((time.time() - t1) / len(inb_sample)))
result['msae'] = np.mean(sae)
t1 = time.time()
cae = [g.cae(geos.Point(p, srid=srs.SRID_WGS84)) for p in inb_sample]
l.info('computed CAE in %gs per point'
% ((time.time() - t1) / len(inb_sample)))
result['mcae'] = np.mean(cae)
# area of confidence region
result['pra'] = g.pred_area
return result
def sample_gaussian(rs,offset=0.):
''' Return the (mean,covar) of a random 2d gaussian by sampling two scalars
for the mean from the standard normal distribution, shifting by offset. The
covariance matrix is fixed to [1,0],[0,1], which is positive semidefinite,
as required.'''
return (offset + rs.standard_normal(2),
[[1.,0.],[0.,1.]])
def sample_points(rs, components, ct):
''' Sample ct points from a random gaussian mixture model with the
specified number of components. An equal number of samples are drawn from
each component.'''
sz = ct // components
samples = np.array([]).reshape(0,2)
mean = 0
for i in range(0,components):
(mean,covar) = sample_gaussian(rs, mean)
samples = np.append(samples, rs.multivariate_normal(mean, covar, sz), 0)
mean += 5
return geos.MultiPoint([geos.Point(*xy) for xy in samples],
srid=srs.SRID_WGS84)
def results_dict(coverages, fns):
'initialize a 2d dict of results'
results = dict.fromkeys(fns)
for k in results.keys():
results[k] = dict.fromkeys(coverages,0)
return results
def test_error_metrics():
''' Generate n random gaussians and fit GMMs. Report metrics at
various coverage levels and inspect for sanity'''
rs = np.random.RandomState(1)
gaussian_sample_ct = 100
sample_ct = 100
coverages = [0.5, 0.90, 0.95]
max_n_components = 4
test_ct = 50 # only test on some points
fns = ['covers_p','coverst_p','contour','sae','cae']
Model.parms_init({ 'mc_sample_ct': sample_ct })
l.info('testing error metrics...')
for n_components in range(1,max_n_components):
results = results_dict(coverages, fns)
for i in range(0,gaussian_sample_ct):
points = sample_points(rs, n_components, sample_ct)
g = Geo_GMM.from_fit(points, n_components)
for coverage in coverages:
g.prepare(coverage)
for fn in fns:
results[fn][coverage] += np.mean(
[getattr(g, fn)(p) for p in points[0:test_ct]])
l.info('#components=%d' % n_components)
for coverage in coverages:
l.info('\tcoverage=%g' % coverage)
for f in fns:
l.info('\t\tmean %s=%g' % (f,
results[f][coverage] /
gaussian_sample_ct))
|
casmlab/quac
|
lib/geo/gmm.py
|
Python
|
apache-2.0
| 44,885
|
[
"Gaussian"
] |
9888cfbc046818eeeeb45b3cef8ca6377be13e25139b655477097341f16fa66d
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 14:39 2015
@author: Drew Leonard
"""
import numpy as np
from scipy.io.idl import readsav as read
from os.path import expanduser
def gaussian(x, mean=0.0, std=1.0, amp=1.0):
"""Simple function to return a Gaussian distribution"""
if isinstance(x, list):
x = np.array(x)
power = -((x - mean) ** 2.0) / (2.0 * (std ** 2.0))
f = amp * np.exp(power)
if amp == 1:
f = f / max(f)
return f
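# Illustrative values (assumption, not part of the original module): with the
# defaults (mean=0, std=1, amp=1) the output is normalised to a peak of 1,
# e.g. gaussian([0.0, 1.0, 2.0]) is approximately array([1.0, 0.607, 0.135]).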
def load_temp_responses(n_wlens=6, corrections=True):
resp = np.zeros((n_wlens, 301))
tresp = read(expanduser('~/CoronaTemps/aia_tresp'))
resp[0, 80:181] = tresp['resp94']
resp[1, 80:181] = tresp['resp131']
resp[2, 80:181] = tresp['resp171']
resp[3, 80:181] = tresp['resp193']
resp[4, 80:181] = tresp['resp211']
resp[5, 80:181] = tresp['resp335']
if n_wlens > 6:
resp[6, 80:181] = tresp['resp304']
if corrections:
# Add empirical correction factor for 9.4nm response function below log(T)=6.3
# (see Aschwanden et al 2011)
        resp[0, 0:126] = resp[0, 0:126]*6.7
return resp
|
drewleonard42/CoronaTemps
|
utils.py
|
Python
|
bsd-2-clause
| 1,130
|
[
"Gaussian"
] |
a396c6b1aaecd6e887df95e599ac2e802bcc0ae16934059794d7cfeccf0d82e3
|
""" Module for DM Halo calculations
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
from pkg_resources import resource_filename
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from scipy.special import hyp2f1
from scipy.interpolate import interp1d
from astropy.coordinates import SkyCoord
from astropy import units
from astropy import constants
from astropy.table import Table
from frb.defs import frb_cosmo as cosmo
from IPython import embed
# Helpers used further down in this module: warnings in halo_incidence() and
# build_grid(), z_at_value in build_grid(), fsolve and isiterable in
# halomass_from_stellarmass().
import warnings
from scipy.optimize import fsolve
from astropy.cosmology import z_at_value
from astropy.utils import isiterable
# Speed up calculations
m_p = constants.m_p.cgs.value # g
def init_hmf():
"""
Initialize the Aemulus Halo Mass Function
WARNING: This uses the original version which codes Tinker+2008
We may refactor to use the more accurate, new version
    Returns:
        hmf_emulator.hmf_emulator: the emulator with the cosmology set
"""
# Hidden here to avoid it becoming a dependency
import hmf_emulator
# Setup HMF
# https://github.com/astropy/astropy/blob/master/astropy/cosmology/parameters.py
#sigma8 = 0.8159
ns = 0.9667
Neff = 3.046
#cosmo_dict = {"om":cosmo.Om0,"ob":cosmo.Ob0,"ol":1.-cosmo.Om0,"ok":0.0,
# "h":cosmo.h,"s8":sigma8,"ns":ns,"w0":-1.0,"Neff":Neff} # "wa":0.0 is assumed internally
cosmo_dict = {"omega_cdm":(cosmo.Om0-cosmo.Ob0)*cosmo.h**2,
"omega_b":cosmo.Ob0*cosmo.h**2,"ok":0.0,
"ln10As": 3.098, # THIS REPLACES sigma8
"H0":cosmo.H0.to('km/(s*Mpc)').value,
"n_s":ns,"w0":-1.0,"N_eff":Neff} # "wa":0.0 is assumed internally
hmfe = hmf_emulator.hmf_emulator()
hmfe.set_cosmology(cosmo_dict)
# Return
return hmfe
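# Illustrative sketch (assumption, not original code): the emulator returned
# here exposes the calls used below, e.g.
#
#   emu = init_hmf()
#   dndM = emu.dndM(np.logspace(11, 15, 50), 0.3)   # as in frac_in_halos()
#   n = emu.n_in_bins((1e12, 1e16), 0.3)            # as in halo_incidence()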
# Storing for use
try:
import hmf_emulator
except:
pass
else:
hmfe = init_hmf()
def frac_in_halos(zvals, Mlow, Mhigh, rmax=1.):
"""
Calculate the fraction of matter in collapsed halos
over a mass range and at a given redshift
Note that the fraction of DM associated with these halos
will be scaled down by an additional factor of f_diffuse
Requires Aemulus HMF to be installed
Args:
zvals: ndarray
Mlow: float
In h^-1 units already so this will be applied for the halo mass function
Mhigh: float
In h^-1 units already
rmax: float
Extent of the halo in units of rvir
WARNING: This calculation assumes a single concentration for all halos
Returns:
ratios: ndarray
rho_halo / rho_m
"""
M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
lM = np.log(M)
ratios = []
for z in zvals:
# Setup
#dndlM = np.array([hmfe.dndlnM(Mi, a)[0] for Mi in M])
dndlM = M*hmfe.dndM(M, z)
M_spl = IUS(lM, M * dndlM)
# Integrate
rho_tot = M_spl.integral(np.log(Mlow*cosmo.h), np.log(Mhigh*cosmo.h)) * units.M_sun / units.Mpc ** 3
# Cosmology
rho_M = cosmo.critical_density(z) * cosmo.Om(z)/(1+z)**3 # Tinker calculations are all mass
ratio = (rho_tot*cosmo.h**2 / rho_M).decompose()
#
ratios.append(ratio)
ratios = np.array(ratios)
    # Boost halos if they extend beyond rvir (homologous in mass, but constant concentration is an approx)
if rmax != 1.:
#from pyigm.cgm.models import ModifiedNFW
c = 7.7
nfw = ModifiedNFW(c=c)
M_ratio = nfw.fy_dm(rmax * nfw.c) / nfw.fy_dm(nfw.c)
ratios *= M_ratio
# Return
return np.array(ratios)
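# Illustrative sketch (assumption, not original code): fraction of matter
# collapsed into 1e12-1e16 Msun halos at a few redshifts (requires the
# Aemulus emulator so that the module-level hmfe exists):
#
#   zvals = np.array([0.1, 0.5, 1.0])
#   ratios = frac_in_halos(zvals, Mlow=1e12, Mhigh=1e16)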
def halo_incidence(Mlow, zFRB, radius=None, hmfe=None, Mhigh=1e16, nsample=20,
cumul=False):
"""
Calculate the (approximate) average number of intersections to halos of a
given minimum mass to a given zFRB.
Requires Aemulus HMF to be installed
Args:
Mlow: float
Mass of minimum halo in Solar masses
The code deals with h^-1 factors so that you do not
The minimum value is 2e10
zFRB: float
Redshift of the FRB
radius: Quantity, optional
The calculation will specify this radius as rvir derived from
Mlow unless this is specified. And this rvir *will* vary with redshift
hmfe (hmf.hmf_emulator, optional): Halo mass function emulator from Aeumulus
Mhigh: float, optional
Mass of maximum halo in Solar masses
        nsample: int, optional
Number of samplings in redshift
20 should be enough
cumul: bool, optional
Return the cumulative quantities instead
Returns:
If cumul is False
Navg: float
Number of average intersections
elif cumul is True
zeval: ndarray
Ncumul: ndarray
"""
# Mlow limit
if Mlow < 2e10:
warnings.warn("Calculations are limited to Mlow > 2e10")
return
# HMF
if hmfe is None:
hmfe = init_hmf()
#
zs = np.linspace(0., zFRB, nsample)
# Mean density
ns = []
for iz in zs:
ns.append(hmfe.n_in_bins((Mlow * cosmo.h, Mhigh * cosmo.h), iz) * cosmo.h**3) # * units.Mpc**-3
# Interpolate
ns = units.Quantity(ns*units.Mpc**-3)
# Radii
if radius is None:
rhoc = cosmo.critical_density(zs)
#https://arxiv.org/pdf/1312.4629.pdf eq5
q = cosmo.Ode0/(cosmo.Ode0+cosmo.Om0*(1+zs)**3)
rhovir = (18*np.pi**2-82*q-39*q**2)*rhoc
r200 = (((3*Mlow*constants.M_sun.cgs) / (4*np.pi*rhovir))**(1/3)).to('kpc')
else:
r200 = np.ones_like(zs) * radius
# Ap
Ap = np.pi * r200**2
# l(X)
loX = ((constants.c/cosmo.H0) * ns * Ap).decompose().value
# dX
X = cosmo.absorption_distance(zs)
dX = X - np.roll(X,1)
dX[0] = 0.
# Finish
if cumul:
Navg = np.cumsum(loX * dX)
return zs, Navg
else:
Navg = np.sum(loX * dX)
return Navg
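# Illustrative sketch (assumption, not original code): average number of
# intersections with >1e12 Msun halos out to a z=0.5 FRB, plus the cumulative
# version on a redshift grid:
#
#   Navg = halo_incidence(1e12, 0.5)
#   zeval, Ncumul = halo_incidence(1e12, 0.5, cumul=True)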
def build_grid(z_FRB=1., ntrial=10, seed=12345, Mlow=1e10, r_max=2., outfile=None, dz_box=0.1,
dz_grid=0.01, f_hot=0.75, verbose=True):
"""
Generate a universe of dark matter halos with DM measurements
Mainly an internal function for generating useful output grids.
Requires the Aemulus Halo Mass function
Args:
z_FRB: float, optional
ntrial: int, optional
seed: int, optional
Mlow: float, optional
h^-1 mass
r_max: float, optional
Extent of the halo in units of rvir
outfile: str, optional
            If provided, the DM grid (.npy) and halo table (.fits) are written here
dz_box: float, optional
Size of the slice of the universe for each sub-calculation
dz_grid: float, optional
redshift spacing in the DM grid
f_hot: float
Fraction of the cosmic fraction of matter in diffuse gas (for DM)
Returns:
DM_grid: ndarray (ntrial, nz)
halo_tbl: Table
Table of all the halos intersected
"""
Mhigh = 1e16 # Msun
# mNFW
y0 = 2.
alpha = 2.
warnings.warn("Ought to do concentration properly someday!")
cgm = ModifiedNFW(alpha=alpha, y0=y0, f_hot=f_hot)
icm = ICM()
# Random numbers
rstate = np.random.RandomState(seed)
# Init HMF
hmfe = init_hmf()
# Boxes
nbox = int(z_FRB / dz_box)
nz = int(z_FRB / dz_grid)
dX = int(np.sqrt(ntrial))+1
#
npad = 6 # Mpc
base_l = 2*dX + npad
print('L_base = {} cMpc'.format(base_l))
warnings.warn("Worry about being big enough given cMpc vs pMpc")
DM_grid = np.zeros((ntrial,nz))
# Spline distance to z
D_max = cosmo.comoving_distance(z_FRB)
D_val = np.linspace(1e-3,D_max.value,200) # IS THIS FINE ENOUGH?
z_val = np.array([z_at_value(cosmo.comoving_distance, iz) for iz in D_val*units.Mpc])
D_to_z = IUS(D_val, z_val)
# Save halo info
#halos = [[] for i in range(ntrial)]
halo_i, M_i, R_i, DM_i, z_i = [], [], [], [], []
# Loop me
prev_zbox = 0.
#for ss in range(nbox):
#for ss in [0]:
for ss in [5]:
zbox = ss*dz_box + dz_box/2.
print('zbox = {}'.format(zbox))
a = 1./(1.0 + zbox) # Scale factor
# Mass function
M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
lM = np.log(M)
dndlM = np.array([hmf.dndlM(Mi, a) for Mi in M])
n_spl = IUS(lM, dndlM)
cum_n = np.array([n_spl.integral(np.log(Mlow*cosmo.h), ilM) for ilM in lM])
ncum_n = cum_n/cum_n[-1]
# As z increases, we have numerical issues at the high mass end (they are too rare)
try:
mhalo_spl = IUS(ncum_n, lM)
except ValueError:
# Kludge me
print("REDUCING Mhigh by 2x")
Mhigh /= 2.
M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
lM = np.log(M)
dndlM = np.array([hmf.dndlM(Mi, a) for Mi in M])
n_spl = IUS(lM, dndlM)
cum_n = np.array([n_spl.integral(np.log(Mlow*cosmo.h), ilM) for ilM in lM])
ncum_n = cum_n/cum_n[-1]
#
mhalo_spl = IUS(ncum_n, lM)
# Volume -- Box with base l = 2Mpc
D_zn = cosmo.comoving_distance(zbox + dz_box/2.) # Full box
D_zp = cosmo.comoving_distance(ss*dz_box) # Previous
D_z = D_zn - D_zp
V = D_z * (base_l*units.Mpc)**2
# Average N_halo
avg_n = hmf.n_bin(Mlow*cosmo.h, Mhigh*cosmo.h, a) * cosmo.h**3 * units.Mpc**-3
avg_N = (V * avg_n).value
# Assume Gaussian stats for number of halos
N_halo = int(np.round(avg_N + np.sqrt(avg_N)*rstate.randn(1)))
# Random masses
randM = rstate.random_sample(N_halo)
rM = np.exp(mhalo_spl(randM)) / cosmo.h
# r200
r200 = (((3*rM*units.M_sun.cgs) / (4*np.pi*200*cosmo.critical_density(zbox)))**(1/3)).to('kpc')
# Random locations (X,Y,Z)
X_c = rstate.random_sample(N_halo)*base_l # Mpc
Y_c = rstate.random_sample(N_halo)*base_l # Mpc
Z_c = (rstate.random_sample(N_halo)*D_z.to('Mpc') + D_zp).value
# Check mass fraction
if verbose:
Mtot = np.log10(np.sum(rM))
M_m = (cosmo.critical_density(zbox)*cosmo.Om(zbox) * V/(1+zbox)**3).to('M_sun')
#print("N_halo: {} avg_N: {}".format(N_halo, avg_N))
print("z: {} Mhalo/M_m = {}".format(zbox, 10**Mtot/M_m.value))
print(frac_in_halos([zbox], Mlow, Mhigh))
# Redshifts
z_ran = D_to_z(Z_c)
# Loop on trials
all_DMs = []
all_nhalo = []
all_r200 = []
for itrial in range(ntrial):
# X,Y trial
X_trial = npad//2 + (2*itrial%dX) # Step by 2Mpc
Y_trial = npad//2 + 2*itrial // dX
# Impact parameters
try:
R_com = np.sqrt((X_c-X_trial)**2 + (Y_c-Y_trial)**2) # Mpc
except:
pdb.set_trace()
R_phys = R_com * 1000. / (1+z_ran) * units.kpc
# Cut
intersect = R_phys < r_max*r200
print("We hit {} halos".format(np.sum(intersect)))
all_nhalo.append(np.sum(intersect))
if not np.any(intersect):
all_DMs.append(0.)
continue
# Loop -- FIND A WAY TO SPEED THIS UP!
DMs = []
for iobj in np.where(intersect)[0]:
# Init
if rM[iobj] > 1e14: # Use ICM model
model = icm
else:
model = cgm
model.log_Mhalo=np.log10(rM[iobj])
model.M_halo = 10.**model.log_Mhalo * constants.M_sun.cgs
model.z = zbox # To be consistent with above; should be close enough
model.setup_param(cosmo=cosmo)
# DM
DM = model.Ne_Rperp(R_phys[iobj], rmax=r_max, add_units=False)/(1+model.z)
DMs.append(DM)
# Save halo info
halo_i.append(itrial)
M_i.append(model.M_halo.value)
R_i.append(R_phys[iobj].value)
DM_i.append(DM)
z_i.append(z_ran[iobj])
all_r200.append(cgm.r200.value)
# Save em
iz = (z_ran[intersect]/dz_grid).astype(int)
DM_grid[itrial,iz] += DMs
all_DMs.append(np.sum(DMs))
#print(DMs, np.log10(rM[intersect]), R_phys[intersect])
if (itrial % 100) == 0:
pdb.set_trace()
# Table the halos
halo_tbl = Table()
halo_tbl['trial'] = halo_i
halo_tbl['M'] = M_i
halo_tbl['R'] = R_i
halo_tbl['DM'] = DM_i
halo_tbl['z'] = z_i
# Write
if outfile is not None:
print("Writing to {}".format(outfile))
np.save(outfile, DM_grid, allow_pickle=False)
halo_tbl.write(outfile+'.fits', overwrite=True)
return DM_grid, halo_tbl
def rad3d2(xyz):
""" Calculate radius to x,y,z inputted
Assumes the origin is 0,0,0
Parameters
----------
xyz : Tuple or ndarray
Returns
-------
    rad3d2 : float or ndarray, the squared radius (x**2 + y**2 + z**2)
"""
return xyz[0]**2 + xyz[1]**2 + xyz[-1]**2
def stellarmass_from_halomass(log_Mhalo,z=0, params=None):
""" Stellar mass from Halo Mass from Moster+2013
https://doi.org/10.1093/mnras/sts261
Args:
log_Mhalo (float): log_10 halo mass
in solar mass units.
z (float, optional): halo redshift.
Assumed to be 0 by default.
Returns:
log_mstar (float): log_10 galaxy stellar mass
in solar mass units.
"""
# Define model parameters from Table 1
# of the paper if not supplied
if params is None:
N10 = 0.0351
N11 = -0.0247
beta10 = 1.376
beta11 = -0.826
gamma10 = 0.608
gamma11 = 0.329
M10 = 11.59
M11 = 1.195
else:
N10,N11,beta10,beta11,gamma10,gamma11,M10,M11 = params
# Get redshift dependent parameters
# from equations 11-14.
z_factor = z/(1+z)
N = N10 + N11*z_factor
beta = beta10 + beta11*z_factor
gamma = gamma10 + gamma11*z_factor
logM1 = M10 + M11*z_factor
M1 = 10**logM1
M_halo = 10**log_Mhalo
# Simple
log_mstar = log_Mhalo + np.log10(2*N) - np.log10((M_halo/M1)**-beta+(M_halo/M1)**gamma)
# Done
return log_mstar
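# Illustrative worked number (not original code): with the default Table 1
# parameters at z=0, a 1e12 Msun halo has M_halo/M1 = 10**0.41, so
# log_mstar = 12 + log10(2*0.0351)
#             - log10((10**0.41)**-1.376 + (10**0.41)**0.608) ~ 10.5,
# i.e. a stellar mass of roughly 3e10 Msun.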
def halomass_from_stellarmass(log_mstar,z=0, randomize=False):
""" Halo mass from Stellar mass (Moster+2013).
Inverts the function `stellarmass_from_halomass`
numerically.
Args:
log_mstar (float or numpy.ndarray): log_10 stellar mass
in solar mass units.
z (float, optional): galaxy redshift
Returns:
log_Mhalo (float): log_10 halo mass
in solar mass units.
"""
try:
log_mstar*z
except ValueError:
raise TypeError("log_mstar and z can't be broadcast together for root finding. Use numpy arrays of same length or scalar values.")
if not randomize:
f = lambda x: stellarmass_from_halomass(x, z = z)-log_mstar
else:
np.random.seed()
N10 = np.random.normal(0.0351, 0.0058)
N11 = np.random.normal(-0.0247, 0.0069)
beta10 = np.random.normal(1.376, 0.153)
beta11 = np.random.normal(-0.826, 0.225)
gamma10 = np.random.normal(0.608, 0.059)
gamma11 = np.random.normal(0.329, 0.173)
M10 = np.random.normal(11.59, 0.236)
M11 = np.random.normal(1.195, 0.353)
params = [N10,N11,beta10,beta11,gamma10,gamma11,M10,M11]
f = lambda x: stellarmass_from_halomass(x, z = z, params = params)-log_mstar
guess = 2+log_mstar
if isiterable(log_mstar):
return fsolve(f, guess)
else:
return fsolve(f, guess)[0]
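# Illustrative sketch (assumption, not original code): inverting the relation
# recovers the halo mass, e.g.
#
#   log_mstar = stellarmass_from_halomass(12.0)   # ~10.5 at z=0
#   halomass_from_stellarmass(log_mstar)          # ~12.0
#
# With randomize=True the Moster parameters are drawn from their quoted
# uncertainties, so repeated calls scatter around that value.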
class ModifiedNFW(object):
""" Generate a modified NFW model, e.g. Mathews & Prochaska 2017
for the hot, virialized gas.
Parameters:
log_Mhalo: float, optional
log10 of the Halo mass (solar masses)
c: float, optional
concentration of the halo
f_hot: float, optional
Fraction of the baryons in this hot phase
Will likely use this for all diffuse gas
alpha: float, optional
Parameter to modify NFW profile power-law
y0: float, optional
Parameter to modify NFW profile position.
z: float, optional
Redshift of the halo
cosmo: astropy cosmology, optional
Cosmology of the universe.
Attributes:
H0: Quantity; Hubble constant
fb: float; Cosmic fraction of baryons (stars+dust+gas) in the entire halo
Default to 0.16
r200: Quantity
Virial radius
rho0: Quantity
Density normalization
M_b: Quantity
            Mass in baryons of the halo
"""
def __init__(self, log_Mhalo=12.2, c=7.67, f_hot=0.75, alpha=0.,
y0=1., z=0., cosmo=cosmo, **kwargs):
# Init
# Param
self.log_Mhalo = log_Mhalo
self.M_halo = 10.**self.log_Mhalo * constants.M_sun.cgs
self.c = c
self.alpha = alpha
self.y0 = y0
self.z = z
self.f_hot = f_hot
self.zero_inner_ne = 0. # kpc
self.cosmo = cosmo
# Init more
self.setup_param(cosmo=self.cosmo)
def setup_param(self,cosmo):
""" Setup key parameters of the model
"""
# Cosmology
if cosmo is None:
self.rhoc = 9.2e-30 * units.g / units.cm**3
self.fb = 0.16 # Baryon fraction
self.H0 = 70. *units.km/units.s/ units.Mpc
else:
self.rhoc = self.cosmo.critical_density(self.z)
self.fb = cosmo.Ob0/cosmo.Om0
self.H0 = cosmo.H0
# Dark Matter
self.q = self.cosmo.Ode0/(self.cosmo.Ode0+self.cosmo.Om0*(1+self.z)**3)
#r200 = (((3*Mlow*constants.M_sun.cgs) / (4*np.pi*200*rhoc))**(1/3)).to('kpc')
self.rhovir = (18*np.pi**2-82*self.q-39*self.q**2)*self.rhoc
self.r200 = (((3*self.M_halo) / (4*np.pi*self.rhovir))**(1/3)).to('kpc')
self.rho0 = self.rhovir/3 * self.c**3 / self.fy_dm(self.c) # Central density
# Baryons
self.M_b = self.M_halo * self.fb
self.rho0_b = (self.M_b / (4*np.pi) * (self.c/self.r200)**3 / self.fy_b(self.c)).cgs
# Misc
        self.mu = 1.33  # Mean mass per hydrogen atom (in proton masses), accounting for Helium
def fy_dm(self, y):
""" Enclosed mass function for the Dark Matter NFW
Assumes the NFW profile
Parameters
----------
y : float or ndarray
y = c(r/r200)
Returns
-------
f_y : float or ndarray
"""
f_y = np.log(1+y) - y/(1+y)
#
return f_y
def fy_b(self, y):
""" Enclosed mass function for the baryons
Parameters
y: float or ndarray
Returns
-------
f_y: float or ndarray
Enclosed mass
"""
f_y = (y/(self.y0 + y))**(1+self.alpha) * (
self.y0**(-self.alpha) * (self.y0 + y)**(1+self.alpha) * hyp2f1(
1+self.alpha, 1+self.alpha, 2+self.alpha, -1*y/self.y0)
- self.y0) / (1+self.alpha) / self.y0
return f_y
def ne(self, xyz):
""" Calculate n_e from n_H with a correction for Helium
Assume 25% mass is Helium and both electrons have been stripped
Parameters
----------
xyz : ndarray (3, npoints)
Coordinate(s) in kpc
Returns
-------
n_e : float or ndarray
electron density in cm**-3
"""
ne = self.nH(xyz) * 1.1667
if self.zero_inner_ne > 0.:
rad = np.sum(xyz**2, axis=0)
inner = rad < self.zero_inner_ne**2
if np.any(inner):
if len(xyz.shape) == 1:
ne = 0.
else:
ne[inner] = 0.
# Return
return ne
def nH(self, xyz):
""" Calculate the Hydrogen number density
Includes a correction for Helium
Parameters
----------
xyz : ndarray
Coordinate(s) in kpc
Returns
-------
nH : float or ndarray
Density in cm**-3
"""
nH = (self.rho_b(xyz) / self.mu / m_p).cgs.value
# Return
return nH
def rho_b(self, xyz):
""" Mass density in baryons in the halo; modified
Parameters
----------
xyz : ndarray
Position (assumes kpc)
Returns
-------
rho : Quantity
            Density in g cm**-3
"""
radius = np.sqrt(rad3d2(xyz))
y = self.c * (radius/self.r200.to('kpc').value)
rho = self.rho0_b * self.f_hot / y**(1-self.alpha) / (self.y0+y)**(2+self.alpha)
# Return
return rho
def Ne_Rperp(self, Rperp, step_size=0.1*units.kpc, rmax=1., add_units=True, cumul=False):
""" Calculate N_e at an input impact parameter Rperp
Just a simple sum in steps of step_size
Parameters
----------
Rperp : Quantity
Impact parameter, typically in kpc
step_size : Quantity, optional
Step size used for numerical integration (sum)
rmax : float, optional
Maximum radius for integration in units of r200
add_units : bool, optional
Speed up calculations by avoiding units
        cumul: bool, optional
            If True, return the cumulative Ne profile along the sightline
Returns
-------
if cumul:
zval: ndarray (kpc)
z-values where z=0 is the midplane
Ne_cumul: ndarray
Cumulative Ne values (pc cm**-3)
else:
Ne: Quantity
Column density of total electrons
"""
dz = step_size.to('kpc').value
# Cut at rmax*rvir
if Rperp > rmax*self.r200:
if add_units:
                return 0. * units.pc / units.cm**3
else:
return 0.
# Generate a sightline to rvir
zmax = np.sqrt((rmax*self.r200) ** 2 - Rperp ** 2).to('kpc')
zval = np.arange(-zmax.value, zmax.value+dz, dz) # kpc
# Set xyz
xyz = np.zeros((3,zval.size))
xyz[0, :] = Rperp.to('kpc').value
xyz[2, :] = zval
# Integrate
ne = self.ne(xyz) # cm**-3
if cumul:
Ne_cumul = np.cumsum(ne) * dz * 1000 # pc cm**-3
return zval, Ne_cumul
Ne = np.sum(ne) * dz * 1000 # pc cm**-3
# Return
if add_units:
return Ne * units.pc / units.cm**3
else:
return Ne
def RM_Rperp(self, Rperp, Bparallel, step_size=0.1*units.kpc, rmax=1.,
add_units=True, cumul=False, zmax=None):
""" Calculate RM at an input impact parameter Rperp
Just a simple sum in steps of step_size
Assumes a constant Magnetic field
Parameters
----------
Rperp : Quantity
Impact parameter, typically in kpc
Bparallel (Quantity):
Magnetic field
step_size : Quantity, optional
Step size used for numerical integration (sum)
rmax : float, optional
Maximum radius for integration in units of r200
add_units : bool, optional
Speed up calculations by avoiding units
        cumul: bool, optional
            If True, return the cumulative RM profile along the sightline
zmax: float, optional
Maximum distance along the sightline to integrate.
Default is rmax*rvir
Returns
-------
if cumul:
zval: ndarray (kpc)
z-values where z=0 is the midplane
          RM_cumul: ndarray
             Cumulative RM values (rad m**-2)
        else:
          RM: Quantity
             Rotation measure through the halo (rad m**-2)
"""
dz = step_size.to('kpc').value
# Cut at rmax*rvir
if Rperp > rmax*self.r200:
if add_units:
                return 0. * units.rad / units.m**2
else:
return 0.
# Generate a sightline to rvir
if zmax is None:
zmax = np.sqrt((rmax*self.r200) ** 2 - Rperp ** 2).to('kpc')
zval = np.arange(-zmax.value, zmax.value+dz, dz) # kpc
# Set xyz
xyz = np.zeros((3,zval.size))
xyz[0, :] = Rperp.to('kpc').value
xyz[2, :] = zval
# Integrate
ne = self.ne(xyz) # cm**-3
# Using Akahori & Ryu 2011
RM = 8.12e5 * Bparallel.to('microGauss').value * \
np.sum(ne) * dz / 1000 # rad m**-2
if cumul:
            RM_cumul = 8.12e5 * Bparallel.to('microGauss').value * np.cumsum(
                ne) * dz / 1000 # rad m**-2
return zval, RM_cumul
# Return
if add_units:
return RM * units.rad / units.m**2
else:
return RM
def mass_r(self, r, step_size=0.1*units.kpc):
""" Calculate baryonic halo mass (not total) to a given radius
Just a simple sum in steps of step_size
Parameters
----------
r : Quantity
Radius, typically in kpc
step_size : Quantity, optional
Step size used for numerical integration (sum)
Returns
-------
Mr: Quantity
Enclosed baryonic mass within r
Msun units
"""
dr = step_size.to('kpc').value
# Generate a sightline to rvir
rval = np.arange(0., r.to('kpc').value+dr, dr) # kpc
# Set xyz
xyz = np.zeros((3,rval.size))
xyz[2, :] = rval
# Integrate
nH = self.nH(xyz) # cm**-3
Mr_number = 4*np.pi*np.sum(nH*rval**2) * dr * self.mu * m_p # g kpc**3/cm**3
Mr = Mr_number * units.g * (units.kpc**3)/(units.cm**3)#
# Return
return Mr.to('M_sun')
def __repr__(self):
txt = '<{:s}: {:s} {:s}, logM={:f}, r200={:g}'.format(
self.__class__.__name__,
self.coord.icrs.ra.to_string(unit=units.hour,sep=':',pad=True),
self.coord.icrs.dec.to_string(sep=':',pad=True,alwayssign=True),
np.log10(self.M_halo.to('Msun').value),
self.r200)
# Finish
txt = txt + '>'
return (txt)
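# Illustrative sketch (not part of the original module): minimal use of
# ModifiedNFW. The halo mass, impact parameter and field strength are
# arbitrary example values.
def _example_modified_nfw():
    halo = ModifiedNFW(log_Mhalo=12.2, f_hot=0.75, z=0.)
    Ne = halo.Ne_Rperp(50 * units.kpc)                   # electron column (pc cm**-3)
    RM = halo.RM_Rperp(50 * units.kpc, 1.e-6 * units.G)  # RM for a 1 microGauss parallel field
    return Ne, RM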
class MB04(ModifiedNFW):
"""
Halo based on the Maller & Bullock (2004) model of
virialized halo gas.
Parameters:
Rc: Quantity
cooling radius
"""
def __init__(self, Rc=167*units.kpc, log_Mhalo=12.2, c=7.67, f_hot=0.75, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot, **kwargs)
# Setup
self.Rs = self.r200/self.c
self.Rc = Rc
self.Cc = (self.Rc/self.Rs).decompose().value
self.rhoV = 1. * constants.m_p/units.cm**3 # Will be renormalized
# For development
self.debug=False
# Normalize
self.norm_rhoV()
def norm_rhoV(self):
"""
Normalize the density constant from MB04
Returns:
"""
# Set rhoV to match expected baryon mass
r = np.linspace(1., self.r200.to('kpc').value, 1000) # kpc
# Set xyz
xyz = np.zeros((3,r.size))
xyz[2, :] = r
#
dr = r[1] - r[0]
Mass_unnorm = 4 * np.pi * np.sum(r**2 * self.rho_b(xyz)) * dr * units.kpc**3 # g * kpc**3 / cm**3
# Ratio
rtio = (Mass_unnorm/self.M_b).decompose().value
self.rhoV = self.rhoV.cgs/rtio
#
print("rhoV normalized to {} to give M_b={}".format((self.rhoV/constants.m_p).cgs,
self.M_b.to('Msun')))
def rho_b(self, xyz):
"""
Baryonic density profile
Args:
xyz: ndarray
Position array assumed in kpc
Returns:
"""
radius = np.sqrt(rad3d2(xyz))
x = radius/self.Rs.to('kpc').value
#
rho = self.rhoV * (1+ (3.7/x)*np.log(1+x) - (3.7/self.Cc) * np.log(1+self.Cc))**(3/2)
if self.debug:
pdb.set_trace()
#
return rho
class YF17(ModifiedNFW):
"""
Y. Faerman et al (2017) model of the Milky Way
    For the un-normalized density profile, we combine the warm and hot
    hydrogen components of their model and renormalize to the expected
    baryonic halo mass.
"""
def __init__(self, log_Mhalo=12.18, c=7.67, f_hot=0.75, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot, **kwargs)
# Read
#faerman_file = resource_filename('pyigm', '/data/CGM/Models/Faerman_2017_ApJ_835_52-density-full.txt')
faerman_file = resource_filename('frb', '/data/Halos/Faerman_2017_ApJ_835_52-density-full.txt')
self.yf17 = Table.read(faerman_file, format='ascii.cds')
self.yf17['nH'] = self.yf17['nHhot'] + self.yf17['nHwarm']
# For development
self.debug=False
# Setup
self.rhoN = constants.m_p/units.cm**3
self.setup_yfdensity()
def setup_yfdensity(self):
"""
Normalize the density profile from the input mass
Returns:
Initializes self.rhoN, the density normalization
"""
# Setup Interpolation
self.yf17_interp = interp1d(self.yf17['Radius'], self.yf17['nH'], kind='cubic', bounds_error=False, fill_value=0.)
# Set rhoN to match expected baryon mass
r = np.linspace(1., self.r200.to('kpc').value, 1000) # kpc
# Set xyz
xyz = np.zeros((3,r.size))
xyz[2, :] = r
#
dr = r[1] - r[0]
Mass_unnorm = 4 * np.pi * np.sum(r**2 * self.rho_b(xyz)) * dr * units.kpc**3 # g * kpc**3 / cm**3
# Ratio
rtio = (Mass_unnorm/self.M_b).decompose().value
self.rhoN = self.rhoN.cgs/rtio
#
print("rhoN normalized to {} to give M_b={}".format((self.rhoN/constants.m_p).cgs,
self.M_b.to('Msun')))
def rho_b(self, xyz):
"""
Calculate the baryonic density
Args:
xyz: ndarray
Coordinates in kpc
Returns:
rho: Quantity array
Baryonic mass density (g/cm**3)
"""
radius = np.sqrt(rad3d2(xyz))
#
rho = self.rhoN * self.yf17_interp(radius)
if self.debug:
pdb.set_trace()
#
return rho
class MB15(ModifiedNFW):
"""
Encodes the Galactic halo profile from
Miller & Bregman 2015, ApJ, 800, 14
https://ui.adsabs.harvard.edu/abs/2015ApJ...800...14M/abstract
The default normalization and beta values are taken from their Table 2, last row.
The models presented there do not appear to vary too much.
"""
def __init__(self, log_Mhalo=12.18, c=7.67, f_hot=0.75, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot, **kwargs)
# Best parameters
self.beta = 0.45
self.n0_rc3b = 0.79e-2 # Last entry of Table 2; Crazy units
def nH(self, xyz):
"""
Calculate the number density of Hydrogen
Args:
xyz: ndarray
Coordinates in kpc
Returns:
ndarray: Number density with units of 1/cm**3
"""
radius = np.sqrt(rad3d2(xyz))
# Equation 2 of Miller & Bregman 2015
nH = self.n0_rc3b / radius**(3*self.beta)
#
return nH # / units.cm**3
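# Illustrative sketch (not part of the original module): the MB15 profile is a
# single power law in radius (Eq. 2 of Miller & Bregman 2015), so the density
# can be evaluated directly; the 100 kpc position is an arbitrary example.
def _example_mb15_density():
    halo = MB15()
    xyz = np.array([100., 0., 0.])  # kpc
    return halo.nH(xyz)             # cm**-3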
class MilkyWay(ModifiedNFW):
""" Fiducial model for the Galaxy
Halo mass follows latest constraints
Density profile is similar to Maller & Bullock 2004
"""
def __init__(self, log_Mhalo=12.18, c=7.67, f_hot=0.75, alpha=2, y0=2, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot,
alpha=alpha, y0=y0, **kwargs)
class M31(ModifiedNFW):
"""
Preferred model for M31
Taking mass from van der Marel 2012
"""
def __init__(self, log_Mhalo=12.18, c=7.67, f_hot=0.75, alpha=2, y0=2, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot,
alpha=alpha, y0=y0, **kwargs)
# Position from Sun
        self.distance = 752 * units.kpc # (Riess, A.G., Fliri, J., & Valls-Gabaud, D. 2012, ApJ, 745, 156)
self.coord = SkyCoord('J004244.3+411609', unit=(units.hourangle, units.deg),
distance=self.distance)
def DM_from_Galactic(self, scoord, **kwargs):
"""
Calculate DM through M31's halo from the Sun
given a direction
Args:
scoord: SkyCoord
Coordinates of the sightline
**kwargs:
Passed to Ne_Rperp
Returns:
DM: Quantity
Dispersion measure through M31's halo
"""
# Setup the geometry
a=1
c=0
x0, y0 = self.distance.to('kpc').value, 0. # kpc
        # Separation
sep = self.coord.separation(scoord)
# More geometry
atan = np.arctan(sep.radian)
b = -1 * a / atan
        # Restrict to within 90 deg (everything beyond is 0 anyhow)
if sep > 90.*units.deg:
return 0 * units.pc / units.cm**3
# Rperp
Rperp = np.abs(a*x0 + b*y0 + c) / np.sqrt(a**2 + b**2) # kpc
# DM
DM = self.Ne_Rperp(Rperp*units.kpc, **kwargs).to('pc/cm**3')
return DM
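# Illustrative sketch (not part of the original module): DM through M31's halo
# toward an arbitrary example sightline a few degrees from M31's center.
def _example_m31_dm():
    m31 = M31()
    sightline = SkyCoord(ra=12.0 * units.deg, dec=42.0 * units.deg)
    return m31.DM_from_Galactic(sightline)  # pc cm**-3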
class LMC(ModifiedNFW):
"""
Preferred model for LMC
Taking data from D'Onghia & Fox ARAA 2016
Mass updated according to
https://ui.adsabs.harvard.edu/abs/2019MNRAS.487.2685E/abstract
"""
def __init__(self, log_Mhalo=np.log10(1.e11), c=12.1, f_hot=0.75, alpha=2, y0=2, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot,
alpha=alpha, y0=y0, **kwargs)
# Position from Sun
self.distance = 50 * units.kpc
self.coord = SkyCoord('J052334.6-694522', unit=(units.hourangle, units.deg),
distance=self.distance)
class SMC(ModifiedNFW):
"""
Preferred model for SMC
Taking data from D'Onghia & Fox ARAA 2016
"""
def __init__(self, log_Mhalo=np.log10(2.4e9), c=15.0, f_hot=0.75, alpha=2, y0=2, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot,
alpha=alpha, y0=y0, **kwargs)
# Position from Sun
self.distance = 61 * units.kpc
self.coord = SkyCoord('J005238.0-724801', unit=(units.hourangle, units.deg),
distance=self.distance)
class M33(ModifiedNFW):
"""
    Preferred model for M33
Taking data from Corbelli 2006
"""
def __init__(self, log_Mhalo=np.log10(5e11), c=8.36, f_hot=0.75, alpha=2, y0=2, **kwargs):
# Init ModifiedNFW
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot,
alpha=alpha, y0=y0, **kwargs)
# Position from Sun
self.distance = 840 * units.kpc
self.coord = SkyCoord(ra=23.4621*units.deg, dec=30.6600*units.deg, distance=self.distance)
class ICM(ModifiedNFW):
"""
Intracluster medium (ICM) model following the analysis
    of Vikhlinin et al. 2006
We scale the model to the profile fitted to A907
"""
def __init__(self, log_Mhalo=np.log10(5e14), c=5, f_hot=0.70, **kwargs):
ModifiedNFW.__init__(self, log_Mhalo=log_Mhalo, c=c, f_hot=f_hot, **kwargs)
def setup_param(self, cosmo=None):
super(ICM, self).setup_param(cosmo=cosmo)
# Scale the profile by r200
self.scale_profile()
def scale_profile(self):
        # Using the Vikhlinin et al. 2006 values for A907
self.a907_r200 = 1820 * units.kpc # Derived in the method below and hard-coded here
self.a907_c200 = 5.28
# A907 values
self.a907_n0 = 6.252e-3 #/ u.cm**3
self.a907_rc = 136.9 * (self.r200/self.a907_r200).decompose() #* u.kpc
self.a907_rs = 1887.1 * (self.r200/self.a907_r200).decompose() #* u.kpc
self.a907_alpha = 1.556
self.a907_beta = 0.594
self.a907_epsilon = 4.998
self.a907_n02 = 0.
# Scale/set
self.rc = self.a907_rc * (self.r200/self.a907_r200).decompose() #* u.kpc
self.rs = self.a907_rs * (self.r200/self.a907_r200).decompose() #* u.kpc
self.alpha = self.a907_alpha
self.beta = self.a907_beta
self.epsilon = self.a907_epsilon
self.n02 = self.a907_n02
self.n0 = 6.252e-3 #/ u.cm**3 (temporary)
# Fixed
self.gamma = 3
# Now the hot gas mass for the central density
Mb_M200 = self.mass_r(self.r200)
self.n0 *= (self.M_b*self.f_hot/Mb_M200).decompose()
def a907_nfw(self):
"""
        Code to regenerate the r200 and c200 values for A907
Now hard-coded
"""
self.a907_c500 = 3.5
self.a907_M500 = 5e14 * units.Msun
self.a907_r500 = (((3*self.a907_M500) / (4*np.pi*500*self.rhoc))**(1/3)).to('kpc')
self.a907_Rs = self.a907_r500 / self.a907_c500 # Do not confuse with rs
# Code to re-calculate these
fy_500 = self.fy_dm(self.a907_r500 / self.a907_Rs)
yval = np.linspace(3.5, 10, 100)
rval = self.a907_Rs * yval
Mval = self.a907_M500 * self.fy_dm(yval) / fy_500
avg_rho = Mval / (4 * np.pi * rval ** 3 / 3.)
scaled_rho = (avg_rho / (200 * self.rhoc)).decompose()
srt = np.argsort(scaled_rho)
f_Mr = IUS(scaled_rho[srt], rval[srt])
self.a907_r200 = float(f_Mr(1.))*units.kpc
self.a907_c200 = (self.a907_r200 / self.a907_Rs).decompose()
self.a907_M200 = self.a907_M500 * self.fy_dm(self.a907_r200/self.a907_Rs) / fy_500
def ne(self, xyz):
"""
Parameters
----------
xyz : ndarray
Coordinate(s) in kpc
Returns
-------
n_e : float or ndarray
electron density in cm**-3
"""
radius = np.sqrt(rad3d2(xyz))
npne = np.zeros_like(radius)
# Zero out inner 10kpc
ok_r = radius > 10.
# This ignores the n02 term
npne[ok_r] = self.n0**2 * (radius[ok_r]/self.rc)**(-self.alpha) / (
(1+(radius[ok_r]/self.rc)**2)**(3*self.beta - self.alpha/2.)) * (1 /
(1+(radius[ok_r]/self.rs)**self.gamma)**(self.epsilon/self.gamma))
if self.n02 > 0:
pdb.set_trace() # Not coded yet
ne = np.sqrt(npne * 1.1667)
# Return
return ne
def nH(self, xyz):
"""
Scale by He
Args:
xyz:
Returns:
"""
return self.ne(xyz) / 1.1667
class Virgo(ICM):
"""
Parameterization of Virgo following the Planck Collaboration
paper: A&A 596 A101 (2016)
"""
def __init__(self, log_Mhalo=np.log10(1.2e14*(cosmo.Om0/cosmo.Ob0)), **kwargs):
ICM.__init__(self, log_Mhalo=log_Mhalo, **kwargs)
# Position from Sun
self.distance = 18 * units.Mpc
self.coord = SkyCoord('J123049+122328', # Using M87
unit=(units.hourangle, units.deg),
distance=self.distance)
def setup_param(self, cosmo=None):
""" Setup key parameters of the model
"""
self.r200 = 1.2 * units.Mpc
def ne(self, xyz):
radius = np.sqrt(rad3d2(xyz))
# Equation 8
ne = 8.5e-5 / (radius/1e3)**1.2
# Return
return ne
def rad3d2(xyz):
""" Calculate radius squared to x,y,z inputted
Assumes the origin is 0,0,0
Parameters
----------
xyz : Tuple or ndarray
Returns
-------
rad3d : float or ndarray
"""
return xyz[0]**2 + xyz[1]**2 + xyz[-1]**2
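# Illustrative sketch (not part of the original module): rad3d2 returns the
# squared radius, so take a square root to get the 3D distance.
def _example_rad3d2():
    xyz = np.array([3., 4., 12.])   # kpc
    return np.sqrt(rad3d2(xyz))     # 13.0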
|
FRBs/FRB
|
frb/halos/models.py
|
Python
|
bsd-3-clause
| 40,171
|
[
"Galaxy",
"Gaussian",
"TINKER"
] |
9bb51b82991041d6d789b940ce4f62ce9dc2e08b0d60efed997e5bcad807f97c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
Module for implementing a CTRL file object class for the Stuttgart
LMTO-ASA code. It will primarily be used to generate a pymatgen
Structure object in the pymatgen.electronic_structure.cohp.py module.
"""
import re
import numpy as np
from monty.io import zopen
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ry_to_eV, bohr_to_angstrom
from pymatgen.electronic_structure.core import Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.num import round_to_sigfigs
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Marco Esters"
__email__ = "esters@uoregon.edu"
__date__ = "Nov 30, 2017"
class LMTOCtrl:
"""
Class for parsing CTRL files from the Stuttgart LMTO-ASA code.
Currently, only HEADER, VERS and the structure can be used.
"""
def __init__(self, structure, header=None, version="LMASA-47"):
"""
Args:
structure: The structure as a pymatgen Structure object.
            header: The header for the CTRL file. Defaults to None.
version: The LMTO version that is used for the VERS category.
Defaults to the newest version (4.7).
"""
self.structure = structure
self.header = header
self.version = version
def __eq__(self, other):
return self.get_string() == other.get_string()
def __repr__(self):
"""
Representation of the CTRL file is as a string.
"""
return self.get_string()
def __str__(self):
"""
String representation of the CTRL file.
"""
return self.get_string()
def get_string(self, sigfigs=8):
"""
Generates the string representation of the CTRL file. This is
        the minimal CTRL file necessary to execute lmhart.run.
"""
ctrl_dict = self.as_dict()
lines = [] if "HEADER" not in ctrl_dict else ["HEADER".ljust(10) + self.header]
if "VERS" in ctrl_dict:
lines.append("VERS".ljust(10) + self.version)
lines.append("STRUC".ljust(10) + "ALAT=" + str(round(ctrl_dict["ALAT"], sigfigs)))
for l, latt in enumerate(ctrl_dict["PLAT"]):
if l == 0:
line = "PLAT=".rjust(15)
else:
line = " ".ljust(15)
line += " ".join([str(round(v, sigfigs)) for v in latt])
lines.append(line)
for cat in ["CLASS", "SITE"]:
for a, atoms in enumerate(ctrl_dict[cat]):
if a == 0:
line = [cat.ljust(9)]
else:
line = [" ".ljust(9)]
for token, val in sorted(atoms.items()):
if token == "POS":
line.append("POS=" + " ".join([str(round(p, sigfigs)) for p in val]))
else:
line.append(token + "=" + str(val))
line = " ".join(line)
lines.append(line)
return "\n".join(lines) + "\n"
def as_dict(self):
"""
Returns the CTRL as a dictionary. "SITE" and "CLASS" are of
the form {'CATEGORY': {'TOKEN': value}}, the rest is of the
form 'TOKEN'/'CATEGORY': value. It gets the conventional standard
structure because primitive cells use the conventional
a-lattice parameter as the scaling factor and not the a-lattice
parameter of the primitive cell.
"""
ctrl_dict = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
if self.header is not None:
ctrl_dict["HEADER"] = self.header
if self.version is not None:
ctrl_dict["VERS"] = self.version
sga = SpacegroupAnalyzer(self.structure)
alat = sga.get_conventional_standard_structure().lattice.a
plat = self.structure.lattice.matrix / alat
"""
        The following finds the classes (atoms that are not symmetry
        equivalent) and creates labels. Note that LMTO only attaches
        numbers starting with the second atom of the same species,
        e.g. "Bi", "Bi1", "Bi2", etc.
"""
eq_atoms = sga.get_symmetry_dataset()["equivalent_atoms"]
ineq_sites_index = list(set(eq_atoms))
sites = []
classes = []
num_atoms = {}
for s, site in enumerate(self.structure.sites):
atom = site.specie
label_index = ineq_sites_index.index(eq_atoms[s])
if atom.symbol in num_atoms:
if label_index + 1 > sum(num_atoms.values()):
num_atoms[atom.symbol] += 1
atom_label = atom.symbol + str(num_atoms[atom.symbol] - 1)
classes.append({"ATOM": atom_label, "Z": atom.Z})
else:
num_atoms[atom.symbol] = 1
classes.append({"ATOM": atom.symbol, "Z": atom.Z})
sites.append({"ATOM": classes[label_index]["ATOM"], "POS": site.coords / alat})
ctrl_dict.update(
{
"ALAT": alat / bohr_to_angstrom,
"PLAT": plat,
"CLASS": classes,
"SITE": sites,
}
)
return ctrl_dict
def write_file(self, filename="CTRL", **kwargs):
"""
Writes a CTRL file with structure, HEADER, and VERS that can be
used as input for lmhart.run.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
@classmethod
def from_file(cls, filename="CTRL", **kwargs):
"""
Creates a CTRL file object from an existing file.
Args:
filename: The name of the CTRL file. Defaults to 'CTRL'.
Returns:
An LMTOCtrl object.
"""
with zopen(filename, "rt") as f:
contents = f.read()
return LMTOCtrl.from_string(contents, **kwargs)
@classmethod
def from_string(cls, data, sigfigs=8):
"""
Creates a CTRL file object from a string. This will mostly be
used to read an LMTOCtrl object from a CTRL file. Empty spheres
are ignored.
Args:
data: String representation of the CTRL file.
Returns:
An LMTOCtrl object.
"""
lines = data.split("\n")[:-1]
struc_lines = {
"HEADER": [],
"VERS": [],
"SYMGRP": [],
"STRUC": [],
"CLASS": [],
"SITE": [],
}
for line in lines:
if line != "" and not line.isspace():
if not line[0].isspace():
cat = line.split()[0]
if cat in struc_lines:
struc_lines[cat].append(line)
else:
pass
for cat in struc_lines:
struc_lines[cat] = " ".join(struc_lines[cat]).replace("= ", "=")
structure_tokens = {"ALAT": None, "PLAT": [], "CLASS": [], "SITE": []}
for cat in ["STRUC", "CLASS", "SITE"]:
fields = struc_lines[cat].split("=") # pylint: disable=E1101
for f, field in enumerate(fields):
token = field.split()[-1]
if token == "ALAT":
alat = round(float(fields[f + 1].split()[0]), sigfigs)
structure_tokens["ALAT"] = alat
elif token == "ATOM":
atom = fields[f + 1].split()[0]
if not bool(re.match("E[0-9]*$", atom)):
if cat == "CLASS":
structure_tokens["CLASS"].append(atom)
else:
structure_tokens["SITE"].append({"ATOM": atom})
else:
pass
elif token in ["PLAT", "POS"]:
try:
arr = np.array([round(float(i), sigfigs) for i in fields[f + 1].split()])
except ValueError:
arr = np.array([round(float(i), sigfigs) for i in fields[f + 1].split()[:-1]])
if token == "PLAT":
structure_tokens["PLAT"] = arr.reshape([3, 3])
elif not bool(re.match("E[0-9]*$", atom)):
structure_tokens["SITE"][-1]["POS"] = arr
else:
pass
else:
pass
try:
spcgrp_index = struc_lines["SYMGRP"].index("SPCGRP")
spcgrp = struc_lines["SYMGRP"][spcgrp_index : spcgrp_index + 12]
structure_tokens["SPCGRP"] = spcgrp.split("=")[1].split()[0]
except ValueError:
pass
for token in ["HEADER", "VERS"]:
try:
value = re.split(token + r"\s*", struc_lines[token])[1]
structure_tokens[token] = value.strip()
except IndexError:
pass
return LMTOCtrl.from_dict(structure_tokens)
@classmethod
def from_dict(cls, d):
"""
Creates a CTRL file object from a dictionary. The dictionary
        must contain the items "ALAT", "PLAT" and "SITE".
Valid dictionary items are:
ALAT: the a-lattice parameter
PLAT: (3x3) array for the lattice vectors
SITE: list of dictionaries: {'ATOM': class label,
'POS': (3x1) array of fractional
coordinates}
CLASS (optional): list of unique atom labels as str
SPCGRP (optional): space group symbol (str) or number (int)
HEADER (optional): HEADER text as a str
VERS (optional): LMTO version as a str
Args:
d: The CTRL file as a dictionary.
Returns:
An LMTOCtrl object.
"""
for cat in ["HEADER", "VERS"]:
if cat not in d:
d[cat] = None
alat = d["ALAT"] * bohr_to_angstrom
plat = d["PLAT"] * alat
species = []
positions = []
for site in d["SITE"]:
species.append(re.split("[0-9*]", site["ATOM"])[0])
positions.append(site["POS"] * alat)
# Only check if the structure is to be generated from the space
# group if the number of sites is the same as the number of classes.
# If lattice and the spacegroup don't match, assume it's primitive.
if "CLASS" in d and "SPCGRP" in d and len(d["SITE"]) == len(d["CLASS"]):
try:
structure = Structure.from_spacegroup(d["SPCGRP"], plat, species, positions, coords_are_cartesian=True)
except ValueError:
structure = Structure(
plat,
species,
positions,
coords_are_cartesian=True,
to_unit_cell=True,
)
else:
structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True)
return cls(structure, header=d["HEADER"], version=d["VERS"])
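# Illustrative sketch (not part of the original module): a typical round trip
# with LMTOCtrl; the file names are placeholders.
def _example_ctrl_roundtrip():
    ctrl = LMTOCtrl.from_file("CTRL")       # parse an existing CTRL file
    ctrl.write_file(filename="CTRL_copy")   # write the minimal CTRL back out
    return ctrl.structure                   # pymatgen Structure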
class LMTOCopl:
"""
Class for reading COPL files, which contain COHP data.
.. attribute: cohp_data
Dict that contains the COHP data of the form:
{bond: {"COHP": {Spin.up: cohps, Spin.down:cohps},
"ICOHP": {Spin.up: icohps, Spin.down: icohps},
"length": bond length}
.. attribute: efermi
The Fermi energy in Ry or eV.
.. attribute: energies
Sequence of energies in Ry or eV.
.. attribute: is_spin_polarized
Boolean to indicate if the calculation is spin polarized.
"""
def __init__(self, filename="COPL", to_eV=False):
"""
Args:
filename: filename of the COPL file. Defaults to "COPL".
to_eV: LMTO-ASA gives energies in Ry. To convert energies into
eV, set to True. Defaults to False for energies in Ry.
"""
# COPL files have an extra trailing blank line
with zopen(filename, "rt") as f:
contents = f.read().split("\n")[:-1]
# The parameters line is the second line in a COPL file. It
# contains all parameters that are needed to map the file.
parameters = contents[1].split()
num_bonds = int(parameters[0])
if int(parameters[1]) == 2:
spins = [Spin.up, Spin.down]
self.is_spin_polarized = True
else:
spins = [Spin.up]
self.is_spin_polarized = False
# The COHP data start in row num_bonds + 3
data = np.array([np.array(row.split(), dtype=float) for row in contents[num_bonds + 2 :]]).transpose()
if to_eV:
# LMTO energies have 5 sig figs
self.energies = np.array(
[round_to_sigfigs(energy, 5) for energy in data[0] * Ry_to_eV],
dtype=float,
)
self.efermi = round_to_sigfigs(float(parameters[-1]) * Ry_to_eV, 5)
else:
self.energies = data[0]
self.efermi = float(parameters[-1])
cohp_data = {}
for bond in range(num_bonds):
label, length, sites = self._get_bond_data(contents[2 + bond])
cohp = {spin: data[2 * (bond + s * num_bonds) + 1] for s, spin in enumerate(spins)}
if to_eV:
icohp = {
spin: np.array([round_to_sigfigs(i, 5) for i in data[2 * (bond + s * num_bonds) + 2] * Ry_to_eV])
for s, spin in enumerate(spins)
}
else:
icohp = {spin: data[2 * (bond + s * num_bonds) + 2] for s, spin in enumerate(spins)}
# This takes care of duplicate labels
if label in cohp_data:
i = 1
lab = "%s-%d" % (label, i)
while lab in cohp_data:
i += 1
lab = "%s-%d" % (label, i)
label = lab
cohp_data[label] = {
"COHP": cohp,
"ICOHP": icohp,
"length": length,
"sites": sites,
}
self.cohp_data = cohp_data
@staticmethod
def _get_bond_data(line):
"""
Subroutine to extract bond label, site indices, and length from
a COPL header line. The site indices are zero-based, so they
can be easily used with a Structure object.
Example header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.
Args:
line: line in the COHPCAR header describing the bond.
Returns:
The bond label, the bond length and a tuple of the site
indices.
"""
line = line.split()
length = float(line[2])
# Replacing "/" with "-" makes splitting easier
sites = line[0].replace("/", "-").split("-")
site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])
species = tuple(re.split(r"\d+", spec)[0] for spec in sites[0:3:2])
label = "%s%d-%s%d" % (
species[0],
site_indices[0] + 1,
species[1],
site_indices[1] + 1,
)
return label, length, site_indices
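# Illustrative sketch (not part of the original module): reading a COPL file
# and extracting the spin-up COHP curve of one bond; the file name is a
# placeholder.
def _example_read_copl():
    copl = LMTOCopl("COPL", to_eV=True)
    label = sorted(copl.cohp_data)[0]
    cohp_up = copl.cohp_data[label]["COHP"][Spin.up]
    return copl.energies, cohp_up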
|
gmatteo/pymatgen
|
pymatgen/io/lmto.py
|
Python
|
mit
| 15,543
|
[
"pymatgen"
] |
07db2421da5b8b6cc0189b682a16ebfa24e621cef03e71bdd6b53d98c6ac922d
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
__tests__ = 'stoqlib/domain/views.py'
import datetime
from decimal import Decimal
from kiwi.datatypes import converter
from stoqlib.database.expr import Date
from stoqlib.database.runtime import get_current_branch
from stoqlib.database.viewable import Viewable
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.payment import Payment, PaymentChangeHistory
from stoqlib.domain.payment.views import (BasePaymentView, InPaymentView,
OutPaymentView, CardPaymentView,
InCheckPaymentView,
PaymentChangeHistoryView)
from stoqlib.domain.product import (ProductSupplierInfo, ProductStockItem,
Storable, Product, StockTransactionHistory)
from stoqlib.domain.purchase import PurchaseOrder, QuoteGroup
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.domain.views import AccountView
from stoqlib.domain.views import ProductBrandByBranchView
from stoqlib.domain.views import ProductComponentView
from stoqlib.domain.views import ProductFullStockView
from stoqlib.domain.views import ProductFullStockItemView
from stoqlib.domain.views import ProductFullStockItemSupplierView
from stoqlib.domain.views import QuotationView
from stoqlib.domain.views import SellableCategoryView
from stoqlib.domain.views import SellableFullStockView
from stoqlib.domain.views import SoldItemView
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.introspection import get_all_classes
def _get_all_views():
for klass in get_all_classes('stoqlib/domain'):
try:
            # Exclude Viewable, since we just want to test its subclasses
if not issubclass(klass, Viewable) or klass is Viewable:
continue
# This is a base viewable for other classes that should not be
# tested.
if klass.__name__ == 'BaseTransferView':
continue
except TypeError:
continue
yield klass
class TestViewsGeneric(DomainTest):
"""Generic tests for views"""
def _test_view(self, view):
from stoqlib.domain.person import Branch
if view.__name__ == 'ProductWithStockBranchView':
# This viewable must be queried with a branch
branch = self.store.find(Branch).any()
results_list = self.store.find(view, branch_id=branch.id)
elif view.__name__ in ['SellableFullStockView',
'ProductBrandByBranchView']:
# This viewable must be queried with a branch
branch = self.store.find(Branch).any()
results_list = view.find_by_branch(self.store, branch)
elif view.__name__ == 'ProductBranchStockView':
# This viewable must be queried with a storable
storable = self.store.find(Storable).any()
results_list = self.store.find(view, storable_id=storable.id)
else:
# This viewable must show everything
results_list = self.store.find(view)
# See if there are no duplicates
ids_set = set()
for result in results_list:
self.assertNotIn(result.id, ids_set)
ids_set.add(result.id)
for view_ in _get_all_views():
name = 'test' + view_.__name__
func = lambda s, v=view_: TestViewsGeneric._test_view(s, v)
func.__name__ = name
setattr(TestViewsGeneric, name, func)
del func
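# Note (illustrative, not part of the original tests): the loop above attaches
# one generated test method per Viewable subclass to TestViewsGeneric at import
# time. A stripped-down version of the same pattern, with hypothetical names,
# looks like:
#
#     for view_class in discovered_views():
#         def _test(self, v=view_class):          # bind v at definition time
#             self._check_no_duplicates(v)
#         _test.__name__ = 'test' + view_class.__name__
#         setattr(SomeTestCase, _test.__name__, _test)
#         del _test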
class TestBasePaymentView(DomainTest):
def test_post_search_callback(self):
self.create_payment()
sresults = BasePaymentView.find_pending(self.store)
results = BasePaymentView.post_search_callback(sresults=sresults)
self.assertEquals(results[0], ('count', 'sum'))
self.assertIsNotNone(results[1])
def test_can_cancel_payment(self):
payment = self.create_payment(payment_type=Payment.TYPE_IN)
payment.status = Payment.STATUS_PENDING
payment_view = self.store.find(InPaymentView, id=payment.id).one()
self.assertTrue(payment_view.can_cancel_payment())
payment.status = Payment.STATUS_PREVIEW
payment_view = self.store.find(InPaymentView, id=payment.id).one()
self.assertFalse(payment_view.can_cancel_payment())
sale = self.create_sale()
payment = self.add_payments(sale)[0]
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertFalse(view.can_cancel_payment())
purchase = self.create_purchase_order()
payment = self.add_payments(purchase)[0]
view = self.store.find(OutPaymentView, id=payment.id).one()
self.assertFalse(view.can_cancel_payment())
def test_is_late(self):
payment = self.create_payment(payment_type=Payment.TYPE_IN)
payment.due_date = localtoday() + datetime.timedelta(-4)
payment.status = Payment.STATUS_PREVIEW
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertTrue(view.is_late())
payment.status = Payment.STATUS_PAID
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertFalse(view.is_late())
def test_get_days_late(self):
payment = self.create_payment(payment_type=Payment.TYPE_IN)
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertFalse(view.get_days_late())
payment.due_date = localtoday() + datetime.timedelta(-4)
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertEquals(view.get_days_late(), 4)
payment.due_date = localtoday() + datetime.timedelta(+4)
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertFalse(view.get_days_late())
def test_is_paid(self):
payment = self.create_payment(payment_type=Payment.TYPE_IN)
payment.status = Payment.STATUS_PENDING
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertFalse(view.is_paid())
payment.status = Payment.STATUS_PAID
view = self.store.find(InPaymentView, id=payment.id).one()
self.assertTrue(view.is_paid())
def test_find_pending(self):
due_date = localtoday() + datetime.timedelta(-4), localtoday()
for i in range(5):
if i % 2 == 0:
payment = self.create_payment(payment_type=Payment.TYPE_IN)
payment.status = Payment.STATUS_PENDING
payment.due_date = localtoday() + datetime.timedelta(-2)
else:
payment = self.create_payment(payment_type=Payment.TYPE_IN)
payment.status = Payment.STATUS_PENDING
payment.due_date = Date(localtoday())
result = InPaymentView.find_pending(store=self.store, due_date=due_date)
self.assertEquals(result.count(), 5)
result = InPaymentView.find_pending(store=self.store,
due_date=Date(localtoday()))
self.assertEquals(result.count(), 2)
result = InPaymentView.find_pending(store=self.store,
due_date=None)
self.assertEquals(result.count(), 7)
class TestInPaymentView(DomainTest):
def test_renegotiation(self):
payment = self.create_payment(payment_type=Payment.TYPE_IN)
self.create_payment_renegotiation(group=payment.group)
result = self.store.find(InPaymentView, id=payment.id).one()
self.assertEquals(result.renegotiation.client.person.name, u'Client')
def test_renegotiated(self):
payment = self.create_payment(payment_type=Payment.TYPE_IN)
payment.set_pending()
payment.group.renegotiation = self.create_payment_renegotiation(
group=payment.group)
payment.group.renegotiation.set_renegotiated()
result = self.store.find(InPaymentView, id=payment.id).one()
self.assertEquals(result.renegotiated.client.person.name, u'Client')
def test_get_parent(self):
sale = self.create_sale()
payment = self.add_payments(sale)
result = self.store.find(InPaymentView, id=payment[0].id).one()
self.assertIs(result.get_parent(), sale)
class TestCardPaymentView(DomainTest):
def test_status_str(self):
payment = self.create_card_payment(payment_type=Payment.TYPE_IN)
result = self.store.find(CardPaymentView, id=payment.id).one()
self.assertEquals(result.status_str, u'Preview')
def test_renegotiation(self):
payment = self.create_card_payment(payment_type=Payment.TYPE_IN)
self.create_payment_renegotiation(group=payment.group)
result = self.store.find(CardPaymentView, id=payment.id).one()
self.assertEquals(result.renegotiation.client.person.name, u'Client')
class Test_BillandCheckPaymentView(DomainTest):
def test_status_str(self):
method = self.store.find(PaymentMethod, method_name=u'check').one()
p = self.create_payment(payment_type=Payment.TYPE_IN,
method=method)
view = self.store.find(InCheckPaymentView, id=p.id).one()
self.assertEquals(view.status_str, u'Preview')
def test_method_description(self):
method = self.store.find(PaymentMethod, method_name=u'check').one()
p = self.create_payment(payment_type=Payment.TYPE_IN,
method=method)
view = self.store.find(InCheckPaymentView, id=p.id).one()
self.assertEquals(view.method_description, u'Check')
class TestPaymentChangeHistoryView(DomainTest):
def test_changed_field(self):
payment = self.create_payment()
history = PaymentChangeHistory(payment=payment,
change_reason=u'Teste test test')
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertIsNotNone(view.changed_field)
history.last_due_date = Date(localtoday())
history.last_status = Payment.STATUS_PENDING
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertEquals(view.changed_field, u'Due Date')
history.last_due_date = None
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertEquals(view.changed_field, u'Status')
def test_from_value(self):
payment = self.create_payment()
history = PaymentChangeHistory(payment=payment,
change_reason=u'Teste test test')
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertIsNotNone(view.from_value)
history.last_due_date = Date(localtoday())
due_date = converter.as_string(datetime.date, history.last_due_date)
history.last_status = Payment.STATUS_PENDING
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertEquals(view.from_value, due_date)
history.last_due_date = None
status = Payment.statuses[history.last_status]
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertEquals(view.from_value, status)
def test_to_value(self):
payment = self.create_payment()
history = PaymentChangeHistory(payment=payment,
change_reason=u'Teste test test')
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertIsNotNone(view.to_value)
history.new_due_date = Date(localtoday())
due_date = converter.as_string(datetime.date, history.new_due_date)
history.new_status = Payment.STATUS_CONFIRMED
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertEquals(view.to_value, due_date)
history.new_due_date = None
status = Payment.statuses[history.new_status]
view = self.store.find(PaymentChangeHistoryView, id=history.id).one()
self.assertEquals(view.to_value, status)
class TestProductFullStockView(DomainTest):
def test_select_by_branch(self):
branch = self.create_branch()
p1 = self.create_product(branch=branch, stock=1)
results = ProductFullStockView.find_by_branch(self.store, branch)
self.failUnless(list(results))
# FIXME: Storm does not support count() with group_by
# self.assertEquals(results.count(), 1)
# The results should have 11 items. 10 for the products that already
# exists, and 1 more for the one we created
self.assertEquals(len(list(results)), 11)
results = ProductFullStockView.find_by_branch(self.store, branch)
results = results.find(ProductFullStockView.product_id == p1.id)
self.failUnless(list(results))
self.assertEquals(len(list(results)), 1)
def test_post_search_callback(self):
self.clean_domain([StockTransactionHistory, ProductSupplierInfo, ProductStockItem,
Storable, Product])
branch = self.create_branch()
for i in range(20):
self.create_product(branch=branch, stock=5)
for i in range(10):
self.create_product(branch=branch, stock=10)
# Get just the products we created here
sresults = self.store.find(ProductFullStockView)
postresults = ProductFullStockView.post_search_callback(sresults)
self.assertEqual(postresults[0], ('count', 'sum'))
self.assertEqual(
# Total stock = (10 * 10) + (20 * 5) = 200
self.store.execute(postresults[1]).get_one(), (30, 200))
sresults = sresults.find(ProductFullStockView.stock > 5)
postresults = ProductFullStockView.post_search_callback(sresults)
self.assertEqual(postresults[0], ('count', 'sum'))
self.assertEqual(
# Total stock = (10 * 10) = 100
self.store.execute(postresults[1]).get_one(), (10, 100))
def test_unit_description(self):
p1 = self.create_product()
p1.sellable.unit = self.create_sellable_unit()
p1.sellable.unit.description = u"kg"
p2 = self.create_product()
results = ProductFullStockView.find_by_branch(self.store, None)
results = results.find(ProductFullStockView.product_id == p1.id)
self.failUnless(list(results))
product_view = results[0]
self.assertEquals(product_view.unit_description, u"kg")
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p2.id)
self.failUnless(list(results))
product_view = results[0]
self.assertEquals(product_view.unit_description, u"un")
def test_get_product_and_category_description(self):
p1 = self.create_product()
p1.sellable.category = self.create_sellable_category()
p1.sellable.category.description = u"category"
p2 = self.create_product()
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p1.id)
self.failUnless(list(results))
pv = results[0]
desc = pv.get_product_and_category_description()
self.assertEquals(desc, u"[category] Description")
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p2.id)
self.failUnless(list(results))
pv = results[0]
desc = pv.get_product_and_category_description()
self.assertEquals(desc, u"Description")
def test_stock_cost(self):
branch = self.create_branch()
p1 = self.create_product(branch=branch, stock=1)
p2 = self.create_product()
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p1.id)
self.failUnless(list(results))
pv = results[0]
self.assertEquals(pv.stock_cost, 10)
branch = get_current_branch(self.store)
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p2.id)
self.failUnless(list(results))
pv = results[0]
self.assertEquals(pv.stock_cost, 0)
def test_price(self):
p1 = self.create_product()
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p1.id)
self.failUnless(list(results))
pv = results[0]
self.assertEquals(pv.price, 10)
# Set a sale price
sellable = p1.sellable
sellable.on_sale_price = Decimal('5.55')
        # And an interval that includes today
yesterday = localtoday() - datetime.timedelta(days=1)
tomorrow = localtoday() + datetime.timedelta(days=1)
sellable.on_sale_start_date = yesterday
sellable.on_sale_end_date = tomorrow
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p1.id)
self.assertEquals(results[0].price, Decimal('5.55'))
# Testing with a sale price set, but in the past
date1 = localtoday() - datetime.timedelta(days=10)
date2 = localtoday() - datetime.timedelta(days=5)
sellable.on_sale_start_date = date1
sellable.on_sale_end_date = date2
results = ProductFullStockView.find_by_branch(self.store, None).find(
ProductFullStockView.product_id == p1.id)
self.assertEquals(results[0].price, 10)
def test_with_unblocked_sellables_query(self):
# This is used in the purchase wizard and breaks storm
from stoqlib.domain.sellable import Sellable
p1 = self.create_product()
supplier = self.create_supplier()
# Product should appear when querying without a supplier
query = Sellable.get_unblocked_sellables_query(self.store)
results = self.store.find(ProductFullStockView, query)
self.assertTrue(p1.id in [p.product_id for p in results])
# But should not appear when querying with a supplier
# When querying using the supplier, we should use the
# ProductFullStockSupplierView instead.
query = Sellable.get_unblocked_sellables_query(self.store,
supplier=supplier)
results = self.store.find(ProductFullStockItemSupplierView, query)
self.assertFalse(p1.id in [p.id for p in results])
# Now relate the two
ProductSupplierInfo(store=self.store,
supplier=supplier,
product=p1,
is_main_supplier=True)
# And it should appear now
query = Sellable.get_unblocked_sellables_query(self.store,
supplier=supplier)
results = self.store.find(ProductFullStockItemSupplierView, query)
self.assertTrue(p1.id in [s.product_id for s in results])
# But should not appear for a different supplier
other_supplier = self.create_supplier()
query = Sellable.get_unblocked_sellables_query(self.store,
supplier=other_supplier)
results = self.store.find(ProductFullStockItemSupplierView, query)
self.assertFalse(p1.id in [s.product_id for s in results])
class TestProductComponentView(DomainTest):
def test_sellable(self):
pc1 = self.create_product_component()
pc1.product.is_composed = True
results = self.store.find(ProductComponentView)
self.failUnless(list(results))
pcv = results[0]
self.assertEquals(pcv.sellable, pc1.product.sellable)
class TestSellableFullStockView(DomainTest):
def test_select_by_branch(self):
branch = self.create_branch()
p1 = self.create_product(branch=branch, stock=1)
p2 = self.create_product()
results = SellableFullStockView.find_by_branch(self.store, branch).find(
SellableFullStockView.product_id == p1.id)
self.failUnless(list(results))
results = SellableFullStockView.find_by_branch(self.store, branch).find(
SellableFullStockView.product_id == p2.id,)
self.failUnless(list(results))
# FIXME: Storm does not support count() with group_by
# self.assertEquals(results.count(), 1)
self.assertEquals(len(list(results)), 1)
def test_sellable(self):
branch = self.create_branch()
p1 = self.create_product(branch=branch, stock=1)
results = SellableFullStockView.find_by_branch(self.store, branch).find(
SellableFullStockView.product_id == p1.id)
self.failUnless(list(results))
self.assertEquals(results[0].sellable, p1.sellable)
def test_price(self):
branch = self.create_branch()
p1 = self.create_product(branch=branch, stock=1, price=Decimal('10.15'))
results = SellableFullStockView.find_by_branch(self.store, branch).find(
SellableFullStockView.product_id == p1.id)
self.failUnless(list(results))
self.assertEquals(results[0].price, Decimal('10.15'))
# Set a sale price
sellable = p1.sellable
sellable.on_sale_price = Decimal('5.55')
        # And an interval that includes today
yesterday = localtoday() - datetime.timedelta(days=1)
tomorrow = localtoday() + datetime.timedelta(days=1)
sellable.on_sale_start_date = yesterday
sellable.on_sale_end_date = tomorrow
results = SellableFullStockView.find_by_branch(self.store, branch).find(
SellableFullStockView.product_id == p1.id)
self.assertEquals(results[0].price, Decimal('5.55'))
# Testing with a sale price set, but in the past
date1 = localtoday() - datetime.timedelta(days=10)
date2 = localtoday() - datetime.timedelta(days=5)
sellable.on_sale_start_date = date1
sellable.on_sale_end_date = date2
results = SellableFullStockView.find_by_branch(self.store, branch).find(
SellableFullStockView.product_id == p1.id)
self.assertEquals(results[0].price, Decimal('10.15'))
class TestSellableCategoryView(DomainTest):
def test_category(self):
category = self.create_sellable_category()
results = self.store.find(SellableCategoryView, id=category.id)
self.failUnless(list(results))
self.assertEquals(results[0].category, category)
def test_get_commission(self):
category = self.create_sellable_category()
results = self.store.find(SellableCategoryView, id=category.id)
self.failUnless(list(results))
self.assertEquals(results[0].get_commission(), None)
base_category = self.create_sellable_category()
self.create_commission_source(category=base_category)
category.category = base_category
results = self.store.find(SellableCategoryView, id=category.id)
self.assertEquals(results[0].get_commission(), 10)
self.create_commission_source(category=category)
results = self.store.find(SellableCategoryView, id=category.id)
self.assertEquals(results[0].get_commission(), 10)
def test_get_installments_commission(self):
category = self.create_sellable_category()
results = self.store.find(SellableCategoryView, id=category.id)
self.failUnless(list(results))
self.assertEquals(results[0].get_installments_commission(), None)
base_category = self.create_sellable_category()
category.category = base_category
self.create_commission_source(category=base_category)
results = self.store.find(SellableCategoryView, id=category.id)
self.assertEquals(results[0].get_installments_commission(), 1)
self.create_commission_source(category=category)
results = self.store.find(SellableCategoryView, id=category.id)
self.assertEquals(results[0].get_installments_commission(), 1)
def test_get_suggested_markup(self):
parent_category = self.create_sellable_category()
parent_category.suggested_markup = 100
category = self.create_sellable_category()
category.suggested_markup = 200
view = self.store.find(SellableCategoryView, id=category.id).one()
self.assertEquals(view.get_suggested_markup(), 200)
category.category = parent_category
category.suggested_markup = None
view = self.store.find(SellableCategoryView, id=category.id).one()
self.assertEquals(view.get_suggested_markup(), 100)
class TestQuotationView(DomainTest):
def test_group_quotation_purchase(self):
order = self.create_purchase_order()
quote = QuoteGroup(store=self.store, branch=order.branch)
order.status = PurchaseOrder.ORDER_QUOTING
quote.add_item(order)
self.assertEqual(order.status, PurchaseOrder.ORDER_QUOTING)
quotations = quote.get_items()
self.assertEqual(quotations.count(), 1)
self.assertFalse(quotations[0].is_closed())
quotations[0].close()
results = self.store.find(QuotationView, id=quotations[0].id)
self.failUnless(list(results))
self.assertEquals(results.count(), 1)
self.assertEquals(results[0].group, quote)
self.assertEquals(results[0].quotation, quotations[0])
self.assertEquals(results[0].purchase, order)
class TestSoldItemView(DomainTest):
def test_average_cost(self):
sale = self.create_sale()
sellable = self.add_product(sale)
sale.order()
self.add_payments(sale, method_type=u'money')
sale.confirm()
view = self.store.find(SoldItemView, id=sellable.id).one()
self.assertEquals(view.average_cost, 0)
class TestAccountView(DomainTest):
def test_account(self):
account = self.create_account()
results = self.store.find(AccountView, id=account.id)
self.failUnless(list(results))
self.assertEquals(results[0].account, account)
def test_parent_account(self):
account = self.create_account()
account.parent = self.create_account()
results = self.store.find(AccountView, id=account.id)
self.failUnless(list(results))
self.assertEquals(results[0].parent_account, account.parent)
def test_matches(self):
account = self.create_account()
account.parent = self.create_account()
results = self.store.find(AccountView, id=account.id)
self.failUnless(list(results))
self.failUnless(results[0].matches(account.id))
self.failUnless(results[0].matches(account.parent.id))
def test_get_combined_value(self):
a1 = self.create_account()
a2 = self.create_account()
results = self.store.find(AccountView, id=a1.id)
self.failUnless(list(results))
self.assertEquals(results[0].get_combined_value(), 0)
t1 = self.create_account_transaction(a1, 1)
t1.source_account = a1
t1.account = a2
self.store.flush()
t2 = self.create_account_transaction(a1, 9)
t2.source_account = a1
t2.account = a2
self.store.flush()
results = self.store.find(AccountView, id=a1.id)
self.failUnless(list(results))
self.assertEquals(results.count(), 1)
# The negative sum of t1 and t2
self.assertEquals(results[0].get_combined_value(), -10)
t3 = self.create_account_transaction(a2, 10)
t3.source_account = a2
t3.account = a1
self.store.flush()
t4 = self.create_account_transaction(a2, 90)
t4.source_account = a2
t4.account = a1
self.store.flush()
results = self.store.find(AccountView, id=a1.id)
self.failUnless(list(results))
self.assertEquals(results.count(), 1)
# The negative sum of t1 and t2 plus the sum of t3 and t4
self.assertEquals(results[0].get_combined_value(), 90)
def test_repr(self):
a1 = self.create_account()
results = self.store.find(AccountView, id=a1.id)
self.failUnless(list(results))
self.assertEquals(repr(results[0]), u'<AccountView Test Account>')
class TestProductFullStockItemView(DomainTest):
def test_select(self):
product = self.store.find(Product)[0]
order = self.create_purchase_order()
order.add_item(product.sellable, 1)
order.status = PurchaseOrder.ORDER_CONFIRMED
order2 = self.create_purchase_order()
order2.add_item(product.sellable, 4)
order2.status = PurchaseOrder.ORDER_CONFIRMED
# This viewable should return only one item for each existing product,
        # even if there is more than one purchase order for the product.
results = self.store.find(ProductFullStockItemView)
ids = [r.id for r in results]
self.assertEquals(ids.count(product.sellable.id), 1)
class TestProductBrandByBranchView(DomainTest):
def test_find_by_category(self):
# Creating product 1
branch = self.create_branch(name=u"branch1")
p1 = self.create_product(branch=branch, stock=5)
p1.sellable.category = self.create_sellable_category(description=u"Category")
p1.brand = u"Black Mesa"
# Creating product 2
branch2 = self.create_branch(name=u"branch2")
p2 = self.create_product(branch=branch2, stock=1)
p2.sellable.category = p1.sellable.category
category = p1.sellable.category
p2.brand = u"Black Mesa"
# Search with a specific category
results = ProductBrandByBranchView.find_by_category(self.store,
category).find()
results.order_by(ProductBrandByBranchView.id)
# Checking the quantity for each product
self.assertEqual(results[0].quantity, 5)
self.assertEqual(results[1].quantity, 1)
# Checking total products
total_products = 0
for i in results:
total_products += i.quantity
self.assertEqual(total_products, 6)
        # Without a category, the total quantity shouldn't be 0
results2 = ProductBrandByBranchView.find_by_category(self.store,
None).find()
total_products = 0
for i in results2:
total_products += i.quantity
self.assertNotEqual(total_products, 0)
|
tiagocardosos/stoq
|
stoqlib/domain/test/test_views.py
|
Python
|
gpl-2.0
| 31,597
|
[
"VisIt"
] |
ce47b73f3412e56731c6ecc105871642bc329522290a35781f8c2e30e77d84e7
|
#!/usr/bin/env python
################################################################################
# site-alerter.py
# Constructs an XML list of GCNs for a given site and sends alerts
#
# Created by Brian Baughman on 3/4/13.
# Copyright 2013 Brian Baughman. All rights reserved.
################################################################################
################################################################################
# Load needed modules
################################################################################
try:
import sys, re, time, logging, traceback
from os import environ, path, _exit, makedirs, stat, access, R_OK, W_OK
except:
print 'Failed to load base modules'
sys.exit(-1)
try:
try:
import xml.etree.ElementTree as ET
except:
try:
import elementtree.ElementTree as ET
except:
print 'Failed to load ElementTree'
_exit(-1)
import smtplib
from email import MIMEText
from gcn_dbinterface import GetGCNConfig,GetConfig, MakeEntry, baseentry,\
gcninfo
from bitly import shorten
from timeConv import tjd2dttm, secInday
from coordConv import *
from datetime import datetime
from numpy import deg2rad, rad2deg
# Home directory
homedir = environ['HOME']
# stop if something looks wrong
except:
print 'Failed to load modules'
_exit(-1)
##############################################################################
# Generic Settings
##############################################################################
# Update date format
updfmt = '%Y-%m-%dT%H:%M:%S'
# Subject of email format
sbjctfmt = 'GCN at %s at %.f in FOV'
# Content of email format
txtfmt = '%s trigger %s was in the FOV at %.2f degrees from Zenith. Info at %s and %s'
# Define some regexes
hrefre = re.compile("<[^>]*href=([^\ \"'>]+)>")
################################################################################
# Define DB structure
################################################################################
class alertinfo(baseentry):
def __init__(self):
self.id = "null"
# Read directly
self.trigid = "unset"
self.trig_tjd = 0
self.trig_sod = 0.
# self.trig_date = "unset"
self.updated_date = "unset"
self.sent = 0
self.setDBType()
################################################################################
# Useful functions
################################################################################
def easy_exit(eval,dbcfgs):
'''
  Clean up any open database connections, then exit with the given status.
'''
if dbcfgs != None:
nfailed = 0
for dbcfg in dbcfgs:
try:
# Close DB connections
dbcfg.curs.close()
dbcfg.dbconn.commit()
except:
nfailed += 1
_exit(eval)
def dircheck(dir):
'''
  Checks whether the given directory exists; if not, attempts to create it.
'''
try:
stat(dir)
except:
makedirs(dir)
try:
stat(dir)
return True
except:
return False
def gettxt(cinfo,curzen,sitetag,sitelink):
'''
  Generate formatted text for an email alert
'''
txt = txtfmt%(cinfo.inst.capitalize(),\
cinfo.trigid,\
curzen,\
shorten(cinfo.link),\
sitelink)
return txt
def email(sender,recipient,subject,text):
msg = MIMEText.MIMEText(text)
# sender == the sender's email address
# recipient == the recipient's email address
msg['Subject'] = subject
msg['From'] = sender
if hasattr(recipient,'__iter__'):
msg['To'] = ','.join(recipient)
else:
msg['To'] = recipient
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP(gcnsmtp)
s.sendmail(sender, msg['To'].split(','), msg.as_string())
s.quit()
if __name__ == "__main__":
# GCN database
try:
gcndbfname = environ['GCNDB']
except:
gcndbfname = '%s/gcns.db'%homedir
try:
gcndbname = environ['GCNDBNAME']
except:
gcndbname = "gcns"
# Alerts database
try:
salertdbfname = environ['SALERTDB']
except:
salertdbfname = '%s/alerts.db'%homedir
try:
salertdbname = environ['SALERTDBNAME']
except:
salertdbname = "alerts"
# Alerts config
try:
salertcfg = environ['SALERTCFG']
except:
salertcfg = None
# Log file name
try:
salertlog = environ['SALERTLOG']
except:
salertlog = '%s/logs/site-alerter.log'%homedir
# Site Setup
try:
sitetag = environ['GCNSITE']
except:
sitetag = 'HAWC'
try:
obslat = deg2rad(float(environ['GCNSITELAT']))
except:
obslat = deg2rad(+19.0304954539937)
try:
    obslon = deg2rad(float(environ['GCNSITELONG']))
except:
obslon = deg2rad(-97.2698484177274)
try:
obshorizon = float(environ['GCNSITEHORIZON'])
except:
obshorizon = float(45.000)
##############################################################################
# LOG FILE CONFIGURATION
##############################################################################
# Log file name
logging.basicConfig(filename=salertlog,\
format='%(asctime)s %(levelname)s: %(message)s',\
filemode='a', level=logging.DEBUG)
################################################################################
# Get GCN Database
################################################################################
try:
dbcfg = GetGCNConfig(gcndbfname,gcndbname)
except:
logging.error('Could not read %s'%gcndbname)
easy_exit(-1,None)
if dbcfg == None:
logging.info('GCN DB failed to initialize.')
easy_exit(-1,None)
##############################################################################
# Get Alerts Database
##############################################################################
try:
alertdbcfg = GetConfig(salertdbfname,salertdbname,alertinfo)
except:
logging.error('Could not read %s'%salertdbname)
easy_exit(-1,[dbcfg])
if alertdbcfg == None:
logging.info('Alert DB failed to initialize.')
easy_exit(-1,[dbcfg])
##############################################################################
# Alerts config
##############################################################################
# Sender of emails
sender = None
# Reciepents of email
recipients = None
# Read configuration file
try:
salertcfgif = open(salertcfg,'r')
cfglines = salertcfgif.readlines()
salertcfgif.close()
if len(cfglines) >= 2:
sender = cfglines[0].strip()
recipients = cfglines[1].strip().split(',')
if len(recipients) <=0 :
sender = None
recipients = None
except:
if salertcfg != None:
logging.error('Cannot read sender/recipients from: %s\n'%(salertcfg))
easy_exit(-3,[dbcfg,alertdbcfg])
else:
logging.debug('No alerts will be sent')
##############################################################################
# Environment Settings
##############################################################################
# Get web base
try:
gcnhttp = environ['GCNHTTP']
except:
logging.error('GCNHTTP not set!')
easy_exit(-2,[dbcfg,alertdbcfg])
# GCNSMTP
try:
gcnsmtp = environ['GCNSMTP']
except:
logging.error( 'GCNSMTP not set!')
easy_exit(-2,[dbcfg,alertdbcfg])
# Get web base
try:
gcnweb = environ['GCNWEB']
except:
gcnweb = '%s/public_html'%homedir
try:
sitelink = environ['GCNSITELINK']
except:
sitelink = gcnhttp
# Number of recent GCNs to output
try:
nrecent = int(environ['NOUTGCNS'])
except:
nrecent = 100
################################################################################
# Meat
################################################################################
# Grab recents
trig_tjd = 0
trig_sod = 1
id = 2
trigid = 3
updated_date = 4
recentstr = "SELECT DISTINCT trig_tjd,trig_sod,id,trigid,updated_date"
recentstr += " FROM %s ORDER BY trig_tjd DESC, trig_sod DESC LIMIT %i ;"%(dbcfg.dbname,\
nrecent)
dbcfg.curs.execute(recentstr)
recent = dbcfg.curs.fetchall()
# XML header
root = ET.Element("xml")
root.attrib['version'] = "1.0"
gcns = ET.SubElement(root, "gcns")
a_id = alertdbcfg.dbstruct['id']['index']
a_sent = alertdbcfg.dbstruct['sent']['index']
a_updated_date = alertdbcfg.dbstruct['updated_date']['index']
for row in recent:
# Check if this entry has been updated
upd = False
sentflg = 0
qstr = "SELECT * FROM %s WHERE trig_tjd=%s AND trigid='%s';"
qstr = qstr%(alertdbcfg.dbname,row[trig_tjd],row[trigid])
alertdbcfg.curs.execute(qstr)
camtchs = alertdbcfg.curs.fetchall()
if len(camtchs) == 0:
'''
Add new entry
'''
nAlert = alertinfo()
nAlert.trigid = row[trigid]
nAlert.trig_tjd = row[trig_tjd]
nAlert.trig_sod = row[trig_sod]
nAlert.updated_date = row[updated_date]
nAlert.sent = 0
carr = [nAlert.__getattribute__(cattr) for cattr in alertdbcfg.dbstruct.keys() ]
cintstr = alertdbcfg.inststr%tuple(carr)
alertdbcfg.curs.execute(cintstr)
alertdbcfg.dbconn.commit()
alertdbcfg.curs.execute(qstr)
camtchs = alertdbcfg.curs.fetchall()
upd = True
elif len(camtchs) > 1:
'''
This should never happen so assume it is an error and skip
'''
logging.info('Found multiple entries for %s'%row[trigid])
continue
rEUD = datetime.strptime(str(row[updated_date]),updfmt)
for m in camtchs:
mEUD = datetime.strptime(str(m[a_updated_date]),updfmt)
if rEUD > mEUD:
upd = True
sentflg += m[a_sent]
# Calculate position at site
qstr = "SELECT * FROM %s WHERE id=%s;"%(dbcfg.dbname,row[id])
dbcfg.curs.execute(qstr)
cmtchs = dbcfg.curs.fetchall()
curinfo = MakeEntry(cmtchs[0],gcninfo,dbcfg.dbstruct)
evtTime = tjd2dttm(curinfo.trig_tjd + curinfo.trig_sod/secInday)
evtRA = deg2rad(float(curinfo.ra))
evtDec = deg2rad(float(curinfo.dec))
evtAlt,evtAz = eq2horz(obslat,obslon,evtTime,evtRA,evtDec)
evtdZenith = 90. - rad2deg(evtAlt)
if upd:
logging.debug("Updated %s"%(curinfo.trigid))
ustr = "UPDATE %s SET updated_date='%s' WHERE id='%s';"
ustr = ustr%(alertdbcfg.dbname, row[updated_date], camtchs[0][a_id])
try:
alertdbcfg.curs.execute(ustr)
alertdbcfg.dbconn.commit()
except:
logging.error( 'Failed to update Alert DB:\n%s'%traceback.format_exc())
if evtdZenith < obshorizon and sentflg == 0:
sbjct = sbjctfmt%(evtTime.strftime("%Y-%m-%d %H:%M:%S"),evtdZenith)
txt = gettxt(curinfo,evtdZenith,sitetag,sitelink)
ustr = "UPDATE %s SET sent=1 WHERE id='%s';"%(alertdbcfg.dbname,\
camtchs[0][a_id])
try:
alertdbcfg.curs.execute(ustr)
alertdbcfg.dbconn.commit()
email(sender,recipients,sbjct,txt)
logging.info( 'Sent: %s'%(sbjct))
except:
logging.error( 'Failed to send notification or update Alert DB:\n%s'%traceback.format_exc())
continue
#Save to XML
curgcn = ET.SubElement(gcns, "gcn")
for cattr in dbcfg.dbstruct.keys():
cursubelm = ET.SubElement(curgcn,cattr)
cursubelm.text = str(curinfo.__getattribute__(cattr))
cursubelm = ET.SubElement(curgcn,'trig_date')
utt = evtTime.utctimetuple()
cursubelm.text = "%i-%02i-%02i %02i:%02i:%02i"%(utt.tm_year,utt.tm_mon,\
utt.tm_mday,\
utt.tm_hour,utt.tm_min,\
utt.tm_sec)
# Save XML
logging.info( 'Updating XML')
xmlfname = '%s/gcns.xml'%gcnweb
fout = open(xmlfname,'w')
if fout.closed:
logging.error( 'Failed to open output XML file: %s'%(xmlfname))
easy_exit(-6,[dbcfg,alertdbcfg])
try:
root.write(fout,pretty_print=True)
fout.close()
except:
try:
outtxt = ET.tostring(root)
fout.write(outtxt)
fout.close()
except:
fout.close()
logging.error( 'Failed to open output XML file: %s'%(xmlfname))
easy_exit(-6,[dbcfg,alertdbcfg])
# Close DB connections
dbcfg.curs.close()
dbcfg.dbconn.commit()
# Remove lock
easy_exit(0,[dbcfg,alertdbcfg])
|
bbaugh/gcn-parser
|
site-alerter.py
|
Python
|
gpl-2.0
| 12,421
|
[
"Brian"
] |
e908715ec22f772f173f6722958c735b99d54d6b19d90173b4429a6345dfcf00
|
from lettuce import step, world
# Browse from page to page
@step(r'(?:visit|access|open) the url "([^"]*)"')
def go_to_the_url(step, url):
world.response = world.browser.visit(url)
@step(r'go back(?: a page)?')
def go_back(step):
world.browser.back()
@step(r'go forward(?: a page)?')
def go_forward(step):
world.browser.forward()
@step(r'(?:reload|refresh)(?: the page)?')
def reload(step):
world.browser.reload()
|
salad/salad
|
salad/steps/browser/navigation.py
|
Python
|
bsd-3-clause
| 438
|
[
"VisIt"
] |
104f01b04cf410b3ac5a61730184c6b2e731eead6ca48e891766226edaa4fc3d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
LAYMAN CONFIGURATION
File: config.py
Handles Basic layman configuration
Copyright:
(c) 2005 - 2009 Gunnar Wrobel
(c) 2009 Sebastian Pipping
(c) 2010 - 2011 Brian Dolbec
Distributed under the terms of the GNU General Public License v2
Author(s):
Gunnar Wrobel <wrobel@gentoo.org>
Sebastian Pipping <sebastian@pipping.org>
Brian Dolbec <brian.dolbec@gmail.com>
"""
'''Defines the configuration options.'''
__version__ = "0.2"
import sys
import os
import ConfigParser
from layman.output import Message
from layman.utils import path
def read_layman_config(config=None, defaults=None, output=None):
"""reads the config file defined in defaults['config']
and updates the config
@param config: ConfigParser.ConfigParser(self.defaults) instance
@param defaults: dict
@modifies config['MAIN']['overlays']
"""
read_files = config.read(defaults['config'])
if read_files == [] and output is not None:
output.warn("Warning: not able to parse config file: %s"
% defaults['config'])
if config.get('MAIN', 'overlay_defs'):
try:
filelist = os.listdir(config.get('MAIN', 'overlay_defs'))
except OSError:
return
filelist = [f for f in filelist if f.endswith('.xml')]
overlays = set(config.get('MAIN', 'overlays').split('\n'))
for _file in filelist:
_path = os.path.join(config.get('MAIN', 'overlay_defs'), _file)
if os.path.isfile(_path):
overlays.update(["file://" + _path])
config.set('MAIN', 'overlays', '\n'.join(overlays))
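# Illustrative usage sketch (hedged): read_layman_config() expects a
# ConfigParser seeded with a defaults dict (the paths below are placeholders,
# not the real defaults) and an existing 'MAIN' section, mirroring what
# BareConfig.read_config() does further down:
#
#   defaults = {'config': '/etc/layman/layman.cfg',
#               'overlays': '',
#               'overlay_defs': '/etc/layman/overlays'}
#   cfg = ConfigParser.ConfigParser(defaults)
#   cfg.add_section('MAIN')
#   read_layman_config(cfg, defaults, Message())
#   # cfg.get('MAIN', 'overlays') now also lists any file:// overlay
#   # definitions found under overlay_defs.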
# establish the eprefix, initially set so eprefixify can
# set it on install
EPREFIX = "@GENTOO_PORTAGE_EPREFIX@"
# check and set it if it wasn't
if "GENTOO_PORTAGE_EPREFIX" in EPREFIX:
EPREFIX = ''
class BareConfig(object):
'''Handles the configuration only.'''
def __init__(self, output=None, stdout=None, stdin=None, stderr=None,
config=None, read_configfile=False, quiet=False, quietness=4,
verbose=False, nocolor=False, width=0, root=None
):
'''
Creates a bare config with defaults and a few output options.
>>> a = BareConfig()
>>> a['overlays']
'http://www.gentoo.org/proj/en/overlays/repositories.xml'
>>> sorted(a.keys())
['bzr_addopts', 'bzr_command', 'bzr_postsync', 'bzr_syncopts', 'cache', 'config', 'configdir', 'custom_news_func', 'custom_news_pkg', 'cvs_addopts', 'cvs_command', 'cvs_postsync', 'cvs_syncopts', 'darcs_addopts', 'darcs_command', 'darcs_postsync', 'darcs_syncopts', 'g-common_command', 'g-common_generateopts', 'g-common_postsync', 'g-common_syncopts', 'git_addopts', 'git_command', 'git_email', 'git_postsync', 'git_syncopts', 'git_user', 'installed', 'local_list', 'make_conf', 'mercurial_addopts', 'mercurial_command', 'mercurial_postsync', 'mercurial_syncopts', 'news_reporter', 'nocheck', 'nocolor', 'output', 'overlay_defs', 'overlays', 'proxy', 'quiet', 'quietness', 'rsync_command', 'rsync_postsync', 'rsync_syncopts', 'stderr', 'stdin', 'stdout', 'storage', 'svn_addopts', 'svn_command', 'svn_postsync', 'svn_syncopts', 't/f_options', 'tar_command', 'tar_postsync', 'umask', 'verbose', 'width']
>>> a.get_option('nocheck')
True
'''
if root is None:
self.root = ''
else:
self.root = root
self._defaults = {
'configdir': path([self.root, EPREFIX,'/etc/layman']),
'config' : '%(configdir)s/layman.cfg',
'storage' : path([self.root, EPREFIX,'/var/lib/layman']),
'cache' : '%(storage)s/cache',
'local_list': '%(storage)s/overlays.xml',
'installed': '%(storage)s/installed.xml',
'make_conf' : '%(storage)s/make.conf',
'nocheck' : 'yes',
'proxy' : '',
'umask' : '0022',
'news_reporter': 'portage',
'custom_news_pkg': '',
'gpg_detached_lists': '',
'gpg_signed_lists': '',
'overlays' :
'http://www.gentoo.org/proj/en/overlays/repositories.xml',
'overlay_defs': '%(configdir)s/overlays',
'bzr_command': path([self.root, EPREFIX,'/usr/bin/bzr']),
'cvs_command': path([self.root, EPREFIX,'/usr/bin/cvs']),
'darcs_command': path([self.root, EPREFIX,'/usr/bin/darcs']),
'git_command': path([self.root, EPREFIX,'/usr/bin/git']),
'g-common_command': path([self.root, EPREFIX,'/usr/bin/g-common']),
'mercurial_command': path([self.root, EPREFIX,'/usr/bin/hg']),
'rsync_command': path([self.root, EPREFIX,'/usr/bin/rsync']),
'svn_command': path([self.root, EPREFIX,'/usr/bin/svn']),
'tar_command': path([self.root, EPREFIX,'/bin/tar']),
't/f_options': ['nocheck'],
'bzr_addopts' : '',
'bzr_syncopts' : '',
'cvs_addopts' : '',
'cvs_syncopts' : '',
'darcs_addopts' : '',
'darcs_syncopts' : '',
'git_addopts' : '',
'git_syncopts' : '',
'mercurial_addopts' : '',
'mercurial_syncopts' : '',
'rsync_syncopts' : '',
'svn_addopts' : '',
'svn_syncopts' : '',
'g-common_generateopts' : '',
'g-common_syncopts' : '',
'bzr_postsync' : '',
'cvs_postsync' : '',
'darcs_postsync' : '',
'git_postsync' : '',
'mercurial_postsync' : '',
'rsync_postsync' : '',
'svn_postsync' : '',
'tar_postsync' : '',
'g-common_postsync' : '',
'git_user': 'layman',
'git_email': 'layman@localhost',
}
self._options = {
'config': config if config else self._defaults['config'],
'stdout': stdout if stdout else sys.stdout,
'stdin': stdin if stdin else sys.stdin,
'stderr': stderr if stderr else sys.stderr,
'output': output if output else Message(),
'quietness': quietness,
'nocolor': nocolor,
'width': width,
'verbose': verbose,
'quiet': quiet,
'custom_news_func': None,
}
self._set_quietness(quietness)
self.config = None
if read_configfile:
defaults = self.get_defaults()
if "%(configdir)s" in defaults['config']:
# fix the config path
defaults['config'] = defaults['config'] \
% {'configdir': defaults['configdir']}
self.read_config(defaults)
def read_config(self, defaults):
self.config = ConfigParser.ConfigParser(defaults)
self.config.add_section('MAIN')
read_layman_config(self.config, defaults, self._options['output'])
def keys(self):
'''Special handler for the configuration keys.
'''
self._options['output'].debug(
'Retrieving %s options' % self.__class__.__name__, 9)
keys = [i for i in self._options]
self._options['output'].debug(
'Retrieving %s defaults' % self.__class__.__name__, 9)
keys += [i for i in self._defaults
if not i in keys]
self._options['output'].debug(
'Retrieving %s done...' % self.__class__.__name__, 9)
return keys
def get_defaults(self):
"""returns our defaults dictionary"""
_defaults = self._defaults.copy()
_defaults['config'] = self._options['config']
return _defaults
def get_option(self, key):
"""returns the current option's value"""
return self._get_(key)
def set_option(self, option, value):
"""Sets an option to the value """
self._options[option] = value
# handle quietness
if option == 'quiet':
if self._options['quiet']:
self._set_quietness(1)
self._options['quietness'] = 1
else:
self._set_quietness(4)
if option == 'quietness':
self._set_quietness(value)
def _set_quietness(self, value):
self._options['output'].set_info_level(value)
self._options['output'].set_warn_level(value)
def __getitem__(self, key):
return self._get_(key)
def _get_(self, key):
self._options['output'].debug(
'Retrieving %s option: %s' % (self.__class__.__name__, key), 9)
if key == 'overlays':
overlays = ''
if (key in self._options
and not self._options[key] is None):
overlays = '\n'.join(self._options[key])
if self.config and self.config.has_option('MAIN', 'overlays'):
overlays += '\n' + self.config.get('MAIN', 'overlays')
if overlays:
return overlays
if (key in self._options
and not self._options[key] is None):
return self._options[key]
if self.config and self.config.has_option('MAIN', key):
if key in self._defaults['t/f_options']:
return self.t_f_check(self.config.get('MAIN', key))
return self.config.get('MAIN', key)
self._options['output'].debug('Retrieving BareConfig default', 9)
if key in self._defaults['t/f_options']:
return self.t_f_check(self._defaults[key])
if key in self._defaults:
if '%(storage)s' in self._defaults[key]:
return self._defaults[key] %{'storage': self._defaults['storage']}
return self._defaults[key]
return None
@staticmethod
def t_f_check(option):
"""evaluates the option and returns
True or False
"""
return option.lower() in ['yes', 'true', 'y', 't']
class OptionConfig(BareConfig):
"""This subclasses BareCongig adding functions to make overriding
or resetting defaults and/or setting options much easier
by using dictionaries.
"""
def __init__(self, options=None, defaults=None, root=None):
"""
@param options: dictionary of {'option': value, ...}
@rtype OptionConfig class instance.
>>> options = {"overlays": ["http://www.gentoo-overlays.org/repositories.xml"]}
>>> new_defaults = {"configdir": "/etc/test-dir"}
>>> a = OptionConfig(options=options, defaults=new_defaults)
>>> a['overlays']
'http://www.gentoo-overlays.org/repositories.xml'
>>> a["configdir"]
'/etc/test-dir'
>>> sorted(a.keys())
['bzr_addopts', 'bzr_command', 'bzr_postsync', 'bzr_syncopts', 'cache', 'config', 'configdir', 'custom_news_func', 'custom_news_pkg', 'cvs_addopts', 'cvs_command', 'cvs_postsync', 'cvs_syncopts', 'darcs_addopts', 'darcs_command', 'darcs_postsync', 'darcs_syncopts', 'g-common_command', 'g-common_generateopts', 'g-common_postsync', 'g-common_syncopts', 'git_addopts', 'git_command', 'git_email', 'git_postsync', 'git_syncopts', 'git_user', 'installed', 'local_list', 'make_conf', 'mercurial_addopts', 'mercurial_command', 'mercurial_postsync', 'mercurial_syncopts', 'news_reporter', 'nocheck', 'nocolor', 'output', 'overlay_defs', 'overlays', 'proxy', 'quiet', 'quietness', 'rsync_command', 'rsync_postsync', 'rsync_syncopts', 'stderr', 'stdin', 'stdout', 'storage', 'svn_addopts', 'svn_command', 'svn_postsync', 'svn_syncopts', 't/f_options', 'tar_command', 'tar_postsync', 'umask', 'verbose', 'width']
"""
BareConfig.__init__(self, root=root)
self.update_defaults(defaults)
self.update(options)
return
def update(self, options):
"""update the options with new values passed in via options
@param options
"""
if options is not None:
keys = sorted(options)
if 'quiet' in keys:
self.set_option('quiet', options['quiet'])
options.pop('quiet')
            if 'quietness' in keys and not self._options['quiet']:
self._set_quietness(options['quietness'])
options.pop('quietness')
self._options.update(options)
return
def update_defaults(self, new_defaults):
"""update the options with new values passed in via options
@param options
"""
if new_defaults is not None:
self._defaults.update(new_defaults)
return
#===============================================================================
#
# Testing
#
#-------------------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules[__name__])
|
jmesmon/layman
|
layman/config.py
|
Python
|
gpl-2.0
| 13,518
|
[
"Brian"
] |
ee56f86c3764c033c87012b6f0de9fb692609c09c660a4f9c36be6c25107be92
|
import vtk
import math
import numpy as np
sigmoid_grad = -3.0
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-sigmoid_grad * x))
inner_par_rad = 1/2.0
atp_base = 0.35
sigmoind_domain_min = -2.0
sigmoind_domain_max = 2.0
sigmoid_domain = sigmoind_domain_max - sigmoind_domain_min
scaling_to_sigmoid_domain = sigmoid_domain / (1.0 - inner_par_rad)
ecReader = vtk.vtkXMLPolyDataReader()
ecReader.SetFileName('quadMeshFullECc2000.vtp')
ecReader.Update()
polydata = ecReader.GetOutput()
# Create new ATP map array.
newMapArray = vtk.vtkDoubleArray()
newMapArray.SetName('initialATP')
newMapArray.SetNumberOfComponents(1)
newMapArray.SetNumberOfTuples(polydata.GetNumberOfCells())
# Set background value in the ATP array.
for cid in range(0, polydata.GetNumberOfCells()):
newMapArray.SetValue(cid, atp_base)
# Get cell centres.
centresFilter = vtk.vtkCellCenters()
centresFilter.SetInput(polydata)
centresFilter.Update()
centres = centresFilter.GetOutput()
sphere_rad = 1500
centre = (5000, 6500, 0)
max_atp = 0.68
# Do HI ATP by running over all cell centres in polydata and processing only the ones that fall in the specified radius.
for cid in range (0, centres.GetNumberOfPoints()):
# Distance from the point to sphere centre.
dist2 = vtk.vtkMath.Distance2BetweenPoints(centres.GetPoint(cid), centre)
# If the point falls into the sphere radius.
if dist2 < sphere_rad**2:
# Parametric distance from point to sphere centre.
par_dist = math.sqrt(dist2) / sphere_rad
if par_dist <= inner_par_rad:
# The centre is atp_max.
atp_val = max_atp
else:
# Discard the inner radius from our parametric distance.
x0 = par_dist - inner_par_rad
            # Map distance to sigmoid domain min to max range.
x1 = x0 * scaling_to_sigmoid_domain
x2 = x1 + sigmoind_domain_min
            # Calculate the sigmoid value for this distance.
s = sigmoid(x2)
s1 = s
# Scale to atp range.
atp_range = max_atp - atp_base
atp_val = atp_base + s1 * atp_range
newMapArray.SetValue(cid, atp_val)
sphere_rad = 1500
centre = (3000, 3022.5, 0)
max_atp = 0.25
# Do LO ATP 1 by running over all cell centres in polydata and processing only the ones that fall in the specified radius.
for cid in range (0, centres.GetNumberOfPoints()):
# Distance from the point to sphere centre.
dist2 = vtk.vtkMath.Distance2BetweenPoints(centres.GetPoint(cid), centre)
# If the point falls into the sphere radius.
if dist2 < sphere_rad**2:
# Parametric distance from point to sphere centre.
par_dist = math.sqrt(dist2) / sphere_rad
if par_dist <= inner_par_rad:
# The centre is atp_max.
atp_val = max_atp
else:
# Discard the inner radius from our parametric distance.
x0 = par_dist - inner_par_rad
            # Map distance to sigmoid domain min to max range.
x1 = x0 * scaling_to_sigmoid_domain
x2 = x1 + sigmoind_domain_min
            # Calculate the sigmoid value for this distance.
s = sigmoid(x2)
s1 = s
# Scale to atp range.
atp_range = max_atp - atp_base
atp_val = atp_base + s1 * atp_range
newMapArray.SetValue(cid, atp_val)
sphere_rad = 1500
centre = (7000, 3022.5, 0)
max_atp = 0.3
# Do LO ATP 2 by running over all cell centres in polydata and processing only the ones that fall in the specified radius.
for cid in range (0, centres.GetNumberOfPoints()):
# Distance from the point to sphere centre.
dist2 = vtk.vtkMath.Distance2BetweenPoints(centres.GetPoint(cid), centre)
# If the point falls into the sphere radius.
if dist2 < sphere_rad**2:
# Parametric distance from point to sphere centre.
par_dist = math.sqrt(dist2) / sphere_rad
if par_dist <= inner_par_rad:
# The centre is atp_max.
atp_val = max_atp
else:
# Discard the inner radius from our parametric distance.
x0 = par_dist - inner_par_rad
            # Map distance to sigmoid domain min to max range.
x1 = x0 * scaling_to_sigmoid_domain
x2 = x1 + sigmoind_domain_min
            # Calculate the sigmoid value for this distance.
s = sigmoid(x2)
s1 = s
# Scale to atp range.
atp_range = max_atp - atp_base
atp_val = atp_base + s1 * atp_range
newMapArray.SetValue(cid, atp_val)
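# The three blocks above are identical except for sphere_rad, centre and
# max_atp. A hedged refactor sketch of the shared logic is given below for
# clarity; it is illustrative only and intentionally not called, so the
# script's behaviour is unchanged.
def apply_atp_sphere(map_array, cell_centres, sphere_rad, centre, max_atp):
    # Apply a sigmoid fall-off of ATP around 'centre', out to 'sphere_rad'.
    for cid in range(0, cell_centres.GetNumberOfPoints()):
        # Squared distance from the cell centre to the sphere centre.
        dist2 = vtk.vtkMath.Distance2BetweenPoints(cell_centres.GetPoint(cid), centre)
        if dist2 >= sphere_rad ** 2:
            continue
        # Parametric distance from point to sphere centre.
        par_dist = math.sqrt(dist2) / sphere_rad
        if par_dist <= inner_par_rad:
            atp_val = max_atp
        else:
            # Map the remaining distance onto the sigmoid domain and scale to the ATP range.
            x = (par_dist - inner_par_rad) * scaling_to_sigmoid_domain + sigmoind_domain_min
            atp_val = atp_base + sigmoid(x) * (max_atp - atp_base)
        map_array.SetValue(cid, atp_val)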
# Set new map.
polydata.GetCellData().AddArray(newMapArray)
polydata.GetCellData().SetActiveScalars('initialATP')
# Setup actor and mapper
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Setup render window, renderer, and interactor
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderWindow.Render()
renderWindowInteractor.Start()
mapWriter = vtk.vtkXMLPolyDataWriter()
mapWriter.SetFileName('quadMeshFullATPc2000.vtp')
mapWriter.SetInput(polydata)
mapWriter.Update()
|
BlueFern/DBiharMesher
|
meshes/c2000f/Generate2000ATPMeshSynCFD.py
|
Python
|
gpl-2.0
| 5,498
|
[
"VTK"
] |
58e902f0aaf830f40270b4f51da5519de2b1d32c9707648bfd7b2d7480add898
|
#!/usr/bin/python
import tweetproc
import os
import io
from datetime import *
from tweetproc.sitesdata import *
'''
caseradius=100000
caseradius=50000
campusradius=15000
campusradius=25000
campussites=[
["Tuscarawas",40.467441, -81.407072, campusradius],
["Salem", 40.864983, -80.835811, campusradius],
["Trumbull", 41.279297, -80.838332, campusradius],
["Ashtabula", 41.889173, -80.831944, campusradius],
["ELiverpool",40.617236, -80.576320, campusradius],
["Geauga", 41.509121, -81.150659, campusradius],
["Stark", 40.866924, -81.436568, campusradius],
["Kent", 41.149326, -81.341411, campusradius]
]
texassites=[
["THPHDallas", 32.881525, -96.762410, campusradius], # Texas Health Presbyterian Hospital Dallas, 8200 Walnut Hill Lane, Dallas, TX 75231 (duplicate)
["THPHAllen", 33.116341, -96.673193, campusradius], # Texas Health Presbyterian Hospital Allen 1105 Central Expressway Allen, TX 75013
["THPHDenton", 33.217850, -97.166504, campusradius], # Texas Health Presbyterian Hospital Denton 3000 North I-35 Denton, TX 76201
["THPHKaufman", 32.591426, -96.318062, campusradius], # Texas Health Presbyterian Hospital Kaufman 850 Ed Hall Drive Kaufman, TX 75142
["THPHPlano", 33.044414, -96.835895, campusradius], # Texas Health Presbyterian Hospital Plano 6200 West Parker Road Plano, TX 75093
["THPHFlowerMound", 33.045827, -97.067547, campusradius], # Texas Health Presbyterian Hospital Flower Mound 4400 Long Prairie Road Flower Mound, TX 75028
["THPHRockwall", 32.884425, -96.466031, campusradius], # Texas Health Presbyterian Hospital Rockwall 3150 Horizon Road Rockwall, TX 75032
]
casesites=[
#Sitename #Latitude #Longitude #Radius (meters)
["Ohio", 41.071312, -81.400874, caseradius], # Stonegate Trail, Tallmadge, OH (Development where Amber Vinson stayed while in Ohio)
["Texas", 32.881525, -96.762410, caseradius], # Texas Health Presbyterian Hospital Dallas, 8200 Walnut Hill Lane, Dallas, TX 75231
["NewYork", 40.738766, -73.975368, caseradius] # Bellevue Hospital Center, 462 1st Avenue, New York, NY 10016
]
controlsites=[
["Maryland", 39.003409, -77.104474, caseradius], # NIH Clinical Center, National Institutes of Health, 10 Center Drive, Bethesda, MD 20814
["Georgia", 33.792749, -84.321321, caseradius], # Emory University Hospital, 1364 Clifton Road Northeast, Atlanta, GA 30322
["Kansas", 37.6907, -97.3427, caseradius],
["LA", 34.0194, -118.4108, caseradius],
["SanDiego", 32.8153, -117.1350, caseradius],
["Colorado", 38.8673, -104.7607, caseradius],
["Boston", 42.3320, -71.0202, caseradius]
]
# WIDE OPEN DATES 2014-2015
wotime1=datetime(2014,1,1)
wotime2=datetime(2015,1,1)
# Survey period
sptime1=datetime(2014,10,28)
sptime2=datetime(2014,11,15)
# Thomas Duncan
tdtime1=datetime(2014,9,30)
tdtime2=datetime(2014,10,01)
# Nina Pham
nptime1=datetime(2014,10,12)
nptime2=datetime(2014,10,13)
# Amber Vinson
avtime1=datetime(2014,10,15)
avtime2=datetime(2014,10,16)
# Craig Spencer
cstime1=datetime(2014,10,23)
cstime2=datetime(2014,10,24)
'''
# Global variable used for processing
siteiter=-1
# Custom process for this script
def process(inputdir,outfilename,tictime1,tictime2):
# Empty dictionary to store the tweet counts for each user
userlist={}
# Process all the files in the input directory
filelist=tweetproc.jsonindir(inputdir)
for file in filelist:
usercounts=tweetproc.tic(os.path.join(inputdir,file),tictime1,tictime2)
# Loop over all of the users in the usercounts dictionary returned by the tic method
# If the user is already in userlist, then add the count, otherwise set the count
for user in usercounts:
if user in userlist:
userlist[user]+=usercounts[user]
else:
userlist[user]=usercounts[user]
print "Processing",outfilename
with io.open(outfilename,'w',encoding="utf-8",errors='ignore') as outfile:
# Loop over all users in the userlist and save the values to the CSV file
for user, value in sorted(userlist.items()):
userstr=user+","+str(value)+"\n"
outfile.write(unicode(userstr))
# Loop over all users in the userlist and classify the user based on the number of tweets
# The count goes from 1-5.
counts=[0,0,0,0,0,0]
for user, value in sorted(userlist.items()):
if value >= 5:
counts[5]+=1
else:
counts[value]+=1
sum=len(userlist)
# For convenience calculate the ratio of users in each category
# Write out the percentage of users with 1, 2, 3, 4, 5+ tweets
for i in xrange(0,len(counts)):
if sum>0:
percent=float(counts[i]*100)/float(sum)
else:
percent=0.0
outfile.write(unicode(str(i)+","+str(counts[i])+","+str(round(percent,2))+"\n"))
# Then write out the sum
outfile.write(unicode("sum,"+str(sum)+"\n"))
# Run TIC for multiple sites (lists defined above)
def runtic(sites,tictime1,tictime2):
# Loop over each site in the sites list
for i in xrange(len(sites)):
siteiter=i
radius=sites[siteiter][3]
print "Processing",sites[siteiter][0],"radius",radius
# Calculate which input directory to process based on the site information (name and radius)
inputdir="data/geoebola-sites-"+sites[siteiter][0]+"-"+str(radius)
# Generate a CSV output file to save TIC results
outfilename="csv/out.tic-sites-"+sites[siteiter][0]+"-"+str(radius)+"-"+str(tictime1.date())+"-"+str(tictime2.date())+".csv"
process(inputdir,outfilename,tictime1,tictime2)
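# Illustrative example (hypothetical site data): for a site entry such as
# ["Kent", 41.149326, -81.341411, 25000] and a window of 2014-10-15 to
# 2014-10-16, runtic() would read JSON files from
#   data/geoebola-sites-Kent-25000
# and write the per-user counts plus the 1..5+ histogram to
#   csv/out.tic-sites-Kent-25000-2014-10-15-2014-10-16.csv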
# Calculate TIC for KSU campuses when Amber Vinson was diagnosed and during the survey period
runtic(tweetproc.campussites,avtime1,avtime2)
'''
runtic(tweetproc.campussites,sptime1,sptime2)
'''
'''
# Calculate TIC for US-Diagnosed Ebola case locations for each Ebola case
#runtic(casesites,tdtime1,tdtime2)
#runtic(casesites,nptime1,nptime2)
#runtic(casesites,avtime1,avtime2)
#runtic(casesites,cstime1,cstime2)
'''
#runtic(campussites,tdtime1,tdtime2)
#runtic(texassites,tdtime1,tdtime2)
#runtic(texassites,nptime1,nptime2)
#runtic(texassites,avtime1,avtime2)
#runtic(texassites,cstime1,cstime2)
|
HPCGISLab/TwitterMethods
|
workflow-sites-to-tic.py
|
Python
|
bsd-3-clause
| 6,285
|
[
"Amber"
] |
9cddce6e32fa273b0978688e36cee3f70aab63205339c2bf15e3f5ae93151a9d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import z2pack
# Edit the paths to your Quantum Espresso and Wannier90 here
qedir = '/home/greschd/software/espresso-5.4.0/bin/'
wandir = '/home/greschd/software/wannier90-1.2'
# Commands to run pw, pw2wannier90, wannier90
mpirun = 'mpirun -np 4 '
pwcmd = mpirun + qedir + '/pw.x '
pw2wancmd = mpirun + qedir + '/pw2wannier90.x '
wancmd = wandir + '/wannier90.x'
z2cmd = (
wancmd + ' bi -pp;' + pwcmd + '< bi.nscf.in >& pw.log;' + pw2wancmd +
'< bi.pw2wan.in >& pw2wan.log;'
)
# creating the results folder, running the SCF calculation if needed
if not os.path.exists('./plots'):
os.mkdir('./plots')
if not os.path.exists('./results'):
os.mkdir('./results')
if not os.path.exists('./scf'):
os.makedirs('./scf')
print("Running the scf calculation")
shutil.copyfile('input/bi.scf.in', 'scf/bi.scf.in')
out = subprocess.call(
pwcmd + ' < bi.scf.in > scf.out', shell=True, cwd='./scf'
)
if out != 0:
raise RuntimeError(
'Error in SCF call. Inspect scf folder for details, and delete it to re-run the SCF calculation.'
)
# Copying the lattice parameters from bi.save/data-file.xml into bi.win
cell = ET.parse('scf/bi.save/data-file.xml'
).find('CELL').find('DIRECT_LATTICE_VECTORS')
unit = cell[0].attrib['UNITS']
lattice = '\n '.join([line.text.strip('\n ') for line in cell[1:]])
with open('input/tpl_bi.win', 'r') as f:
tpl_bi_win = f.read()
with open('input/bi.win', 'w') as f:
f.write(tpl_bi_win.format(unit=unit, lattice=lattice))
# Creating the System. Note that the SCF charge file does not need to be
# copied, but instead can be referenced in the .files file.
# The k-points input is appended to the .in file
input_files = [
'input/' + name for name in ["bi.nscf.in", "bi.pw2wan.in", "bi.win"]
]
system = z2pack.fp.System(
input_files=input_files,
kpt_fct=[z2pack.fp.kpoint.qe, z2pack.fp.kpoint.wannier90],
kpt_path=["bi.nscf.in", "bi.win"],
command=z2cmd,
executable='/bin/bash',
mmn_path='bi.mmn'
)
# Run the WCC calculations
result_0 = z2pack.surface.run(
system=system,
surface=lambda s, t: [0, s / 2, t],
save_file='./results/res_0.json',
load=True
)
result_1 = z2pack.surface.run(
system=system,
surface=lambda s, t: [0.5, s / 2, t],
save_file='./results/res_1.json',
load=True
)
# Combining the two plots
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(9, 5))
z2pack.plot.wcc(result_0, axis=ax[0])
z2pack.plot.wcc(result_1, axis=ax[1])
plt.savefig('plots/plot.pdf', bbox_inches='tight')
print(
'Z2 topological invariant at kx = 0: {0}'.format(
z2pack.invariant.z2(result_0)
)
)
print(
'Z2 topological invariant at kx = 0.5: {0}'.format(
z2pack.invariant.z2(result_1)
)
)
|
Z2PackDev/Z2Pack
|
examples/fp/espresso/5/Bi/run.py
|
Python
|
gpl-3.0
| 2,945
|
[
"ESPResSo",
"Quantum ESPRESSO",
"Wannier90"
] |
374a67da1ea6216b6b35dd312b5777fe373d0b01db1b0976a0e29b8fbda16f5c
|
#!/usr/bin/env python
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
setup(
name="genderator",
version="0.1",
description="Get gender from first name. Uses gender.c file.",
author="Brian Muller",
author_email="bamuller@gmail.com",
license="GPLv3",
url="http://github.com/bmuller/genderator",
packages=["genderator"],
package_data={'genderator': ['data/nam_dict.txt']}
)
|
bmuller/genderator
|
setup.py
|
Python
|
gpl-3.0
| 482
|
[
"Brian"
] |
e55b0a6defbb6206ae7773d9ca7a11b34902a8f8b5f3c574e56778b20e673e25
|
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
#
# Copyright (C) 2001,2003 greg Landrum and Rational Discovery LLC
#
""" unit tests for the QuantTree implementation """
from __future__ import print_function
import unittest
import io
from rdkit import RDConfig
from rdkit.ML.DecTree import BuildQuantTree
from rdkit.ML.DecTree.QuantTree import QuantTreeNode
from rdkit.ML.Data import MLData
from rdkit.six.moves import cPickle, xrange
from rdkit.six import cmp
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: ' % self.shortDescription(), end='')
self.qTree1Name = RDConfig.RDCodeDir + '/ML/DecTree/test_data/QuantTree1.pkl'
self.qTree2Name = RDConfig.RDCodeDir + '/ML/DecTree/test_data/QuantTree2.pkl'
def _setupTree1(self):
examples1 = [['p1', 0, 1, 0.1, 0], ['p2', 0, 0, 0.1, 1], ['p3', 0, 0, 1.1, 2],
['p4', 0, 1, 1.1, 2], ['p5', 1, 0, 0.1, 2], ['p6', 1, 0, 1.1, 2],
['p7', 1, 1, 0.1, 2], ['p8', 1, 1, 1.1, 0]]
attrs = range(1, len(examples1[0]) - 1)
nPossibleVals = [0, 2, 2, 0, 3]
boundsPerVar = [0, 0, 0, 1, 0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar)
self.examples1 = examples1
def _setupTree2(self):
examples1 = [['p1', 0.1, 1, 0.1, 0], ['p2', 0.1, 0, 0.1, 1], ['p3', 0.1, 0, 1.1, 2],
['p4', 0.1, 1, 1.1, 2], ['p5', 1.1, 0, 0.1, 2], ['p6', 1.1, 0, 1.1, 2],
['p7', 1.1, 1, 0.1, 2], ['p8', 1.1, 1, 1.1, 0]]
attrs = range(1, len(examples1[0]) - 1)
nPossibleVals = [0, 0, 2, 0, 3]
boundsPerVar = [0, 1, 0, 1, 0]
self.t2 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar)
self.examples2 = examples1
def _setupTree1a(self):
examples1 = [['p1', 0, 1, 0.1, 4.0, 0], ['p2', 0, 0, 0.1, 4.1, 1], ['p3', 0, 0, 1.1, 4.2, 2],
['p4', 0, 1, 1.1, 4.2, 2], ['p5', 1, 0, 0.1, 4.2, 2], ['p6', 1, 0, 1.1, 4.2, 2],
['p7', 1, 1, 0.1, 4.2, 2], ['p8', 1, 1, 1.1, 4.0, 0]]
attrs = range(1, len(examples1[0]) - 1)
nPossibleVals = [0, 2, 2, 0, 0, 3]
boundsPerVar = [0, 0, 0, 1, -1, 0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar)
self.examples1 = examples1
def test0Cmp(self):
" testing tree comparisons "
self._setupTree1()
self._setupTree2()
assert self.t1 == self.t1, 'self equals failed'
assert self.t2 == self.t2, 'self equals failed'
assert self.t1 != self.t2, 'not equals failed'
def test1Tree(self):
" testing tree1 "
self._setupTree1()
with open(self.qTree1Name, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
def test2Tree(self):
" testing tree2 "
self._setupTree2()
with open(self.qTree2Name, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = cPickle.load(inFile)
assert self.t2 == t2, 'Incorrect tree generated.'
def test3Classify(self):
" testing classification "
self._setupTree1()
self._setupTree2()
for i in xrange(len(self.examples1)):
assert self.t1.ClassifyExample(self.examples1[i])==self.examples1[i][-1],\
'examples1[%d] misclassified'%i
for i in xrange(len(self.examples2)):
assert self.t2.ClassifyExample(self.examples2[i])==self.examples2[i][-1],\
'examples2[%d] misclassified'%i
def test4UnusedVars(self):
" testing unused variables "
self._setupTree1a()
with open(self.qTree1Name, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
for i in xrange(len(self.examples1)):
assert self.t1.ClassifyExample(self.examples1[i])==self.examples1[i][-1],\
'examples1[%d] misclassified'%i
def test5Bug29(self):
""" a more extensive test of the cmp stuff using hand-built trees """
import copy
t1 = QuantTreeNode(None, 't1')
t1.SetQuantBounds([1.])
c1 = QuantTreeNode(t1, 'c1')
c1.SetQuantBounds([2.])
t1.AddChildNode(c1)
c2 = QuantTreeNode(t1, 'c2')
c2.SetQuantBounds([2.])
t1.AddChildNode(c2)
c11 = QuantTreeNode(c1, 'c11')
c11.SetQuantBounds([3.])
c1.AddChildNode(c11)
c12 = QuantTreeNode(c1, 'c12')
c12.SetQuantBounds([3.])
c1.AddChildNode(c12)
assert not cmp(t1, copy.deepcopy(t1)), 'self equality failed'
t2 = QuantTreeNode(None, 't1')
t2.SetQuantBounds([1.])
c1 = QuantTreeNode(t2, 'c1')
c1.SetQuantBounds([2.])
t2.AddChildNode(c1)
c2 = QuantTreeNode(t2, 'c2')
c2.SetQuantBounds([2.])
t2.AddChildNode(c2)
c11 = QuantTreeNode(c1, 'c11')
c11.SetQuantBounds([3.])
c1.AddChildNode(c11)
c12 = QuantTreeNode(c1, 'c12')
c12.SetQuantBounds([3.00003])
c1.AddChildNode(c12)
assert cmp(t1, t2), 'inequality failed'
def test6Bug29_2(self):
""" a more extensive test of the cmp stuff using pickled trees"""
import os
with open(os.path.join(RDConfig.RDCodeDir, 'ML', 'DecTree', 'test_data', 'CmpTree1.pkl'),
'r') as t1TFile:
buf = t1TFile.read().replace('\r\n', '\n').encode('utf-8')
t1TFile.close()
with io.BytesIO(buf) as t1File:
t1 = cPickle.load(t1File)
with open(os.path.join(RDConfig.RDCodeDir, 'ML', 'DecTree', 'test_data', 'CmpTree2.pkl'),
'r') as t2TFile:
buf = t2TFile.read().replace('\r\n', '\n').encode('utf-8')
t2TFile.close()
with io.BytesIO(buf) as t2File:
t2 = cPickle.load(t2File)
assert cmp(t1, t2), 'equality failed'
def test7Recycle(self):
""" try recycling descriptors """
examples1 = [[3, 0, 0],
[3, 1, 1],
[1, 0, 0],
[0, 0, 1],
[1, 1, 0], ]
attrs = range(2)
nPossibleVals = [2, 2, 2]
boundsPerVar = [1, 0, 0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar,
recycleVars=1)
assert self.t1.GetLabel() == 0, self.t1.GetLabel()
assert self.t1.GetChildren()[0].GetLabel() == 1
assert self.t1.GetChildren()[1].GetLabel() == 1
assert self.t1.GetChildren()[1].GetChildren()[0].GetLabel() == 0
assert self.t1.GetChildren()[1].GetChildren()[1].GetLabel() == 0
def test8RandomForest(self):
""" try random forests descriptors """
import random
random.seed(23)
nAttrs = 100
nPts = 10
examples = []
for i in range(nPts):
descrs = [int(random.random() > 0.5) for x in range(nAttrs)]
act = sum(descrs) > nAttrs / 2
examples.append(descrs + [act])
attrs = list(range(nAttrs))
nPossibleVals = [2] * (nAttrs + 1)
boundsPerVar = [0] * nAttrs + [0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples, attrs, nPossibleVals, boundsPerVar, maxDepth=1,
recycleVars=1, randomDescriptors=3)
self.assertEqual(self.t1.GetLabel(), 49)
self.assertEqual(self.t1.GetChildren()[0].GetLabel(), 3)
self.assertEqual(self.t1.GetChildren()[1].GetLabel(), 54)
if __name__ == '__main__':
unittest.main()
|
jandom/rdkit
|
rdkit/ML/DecTree/UnitTestQuantTree.py
|
Python
|
bsd-3-clause
| 7,485
|
[
"RDKit"
] |
9195f8ce8890cf43e0c5e320804baaafebcded0c86002c21bd7cf368a7a56df9
|
# Author: Gael Varoquaux <gael.varoquaux at normalesup.org>
# Copyright (c) 2007-2015, Enthought, Inc.
# License: BSD Style.
print("!! mayavi.tools.mlab is obsolete and has been replaced by !!")
print("!! mayavi.mlab. Please update your code. !!")
from mayavi.mlab import *
|
dmsurti/mayavi
|
mayavi/tools/mlab.py
|
Python
|
bsd-3-clause
| 293
|
[
"Mayavi"
] |
d2636ce08034708f23a4c306276d89da0667052481958ffde2e55db613b0d22b
|
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import math
# draw a line from pt1 to pt2 with the given color
def drawLine(myscreen, pt1, pt2, lineColor):
myscreen.addActor(ovdvtk.Line(p1=(pt1.x, pt1.y, 0), p2=(pt2.x, pt2.y, 0), color=lineColor))
def drawLinesegs(myscreen, points, lines):
for l in lines:
pt1 = ovd.Point(points[l[0]][0], points[l[0]][1])
pt2 = ovd.Point(points[l[1]][0], points[l[1]][1])
drawLine(myscreen, pt1, pt2, ovdvtk.yellow)
# rotate a point in 2D by cos/sin. from emc2 gcodemodule.cc
def rotate(x, y, c, s):
    tx = x * c - y * s
    y = x * s + y * c
    x = tx
return [x, y]
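# Worked example (illustrative): rotating the x-axis unit vector by +90 degrees,
# i.e. with c = cos(pi/2) = 0 and s = sin(pi/2) = 1, gives
#   rotate(1, 0, 0, 1) -> [0, 1]
# drawArc() and arc_pts() below call this with small per-step values dc, ds to
# walk along an arc as many short line segments.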
# draw an arc using many line-segments
# start at pt1, and at pt2, center at cen
# radius r, cw is a bool cw/ccw flag
def drawArc(myscreen, pt1, pt2, r, cen, cw, arcColor):
start = pt1 - cen
end = pt2 - cen
theta1 = math.atan2(start.x, start.y)
theta2 = math.atan2(end.x, end.y)
alfa = [] # the list of angles
da = 0.1
CIRCLE_FUZZ = 1e-9
# idea from emc2 / cutsim g-code interp G2/G3
if (cw == False):
while ((theta2 - theta1) > -CIRCLE_FUZZ):
theta2 -= 2 * math.pi
else:
while ((theta2 - theta1) < CIRCLE_FUZZ):
theta2 += 2 * math.pi
dtheta = theta2 - theta1
arclength = r * dtheta
dlength = min(0.01, arclength / 10)
steps = int(float(arclength) / float(dlength))
rsteps = float(1) / float(steps)
dc = math.cos(-dtheta * rsteps) # delta-cos
ds = math.sin(-dtheta * rsteps) # delta-sin
previous = pt1
tr = [start.x, start.y]
for i in range(steps):
tr = rotate(tr[0], tr[1], dc, ds) # ; // rotate center-start vector by a small amount
x = cen.x + tr[0]
y = cen.y + tr[1]
current = ovd.Point(x, y)
myscreen.addActor(ovdvtk.Line(p1=(previous.x, previous.y, 0), p2=(current.x, current.y, 0), color=arcColor))
previous = current
# draw many offsets
def drawOffsets(myscreen, ofs):
# draw loops
nloop = 0
lineColor = ovdvtk.green
arcColor = ovdvtk.grass
for lop in ofs:
n = 0
N = len(lop)
first_point = []
previous = []
for p in lop:
# p[0] is the Point
# p[1] is -1 for lines, and r for arcs
if n == 0: # don't draw anything on the first iteration
previous = p[0]
else:
cw = p[3]
cen = p[2]
r = p[1]
p = p[0]
if r == -1: # this offset element is a line
drawLine(myscreen, previous, p, lineColor)
else: # this offset element is an arc
drawArc(myscreen, previous, p, r, cen, cw, arcColor)
previous = p
n = n + 1
print "rendered loop ", nloop, " with ", len(lop), " points"
nloop = nloop + 1
# return a list of points corresponding to an arc
# so that we can draw the arc as many short line-segments
def arc_pts(pt1, pt2, r, cen, cw): # (start, end, radius, center, cw )
# draw arc as many line-segments
start = pt1 - cen
end = pt2 - cen
theta1 = math.atan2(start.x, start.y)
theta2 = math.atan2(end.x, end.y)
alfa = [] # the list of angles
da = 0.1
CIRCLE_FUZZ = 1e-9
# idea from emc2 / cutsim g-code interp G2/G3
if (cw == False):
while ((theta2 - theta1) > -CIRCLE_FUZZ):
theta2 -= 2 * math.pi
else:
while ((theta2 - theta1) < CIRCLE_FUZZ):
theta2 += 2 * math.pi
dtheta = theta2 - theta1
arclength = r * dtheta
dlength = min(0.001, arclength / 10)
steps = int(float(arclength) / float(dlength))
rsteps = float(1) / float(steps)
dc = math.cos(-dtheta * rsteps) # delta-cos
ds = math.sin(-dtheta * rsteps) # delta-sin
previous = pt1
tr = [start.x, start.y]
pts = []
for i in range(steps):
# f = (i+1) * rsteps #; // varies from 1/rsteps..1 (?)
# theta = theta1 + i* dtheta
tr = rotate(tr[0], tr[1], dc, ds) # ; // rotate center-start vector by a small amount
x = cen.x + tr[0]
y = cen.y + tr[1]
current = ovd.Point(x, y)
# myscreen.addActor( ovdvtk.Line(p1=(previous.x,previous.y,0),p2=(current.x,current.y,0),color=arcColor) )
pts.extend([previous, current])
previous = current
return pts
# faster drawing of offsets using vtkPolyData
def drawOffsets2(myscreen, ofs):
# draw loops
nloop = 0
lineColor = ovdvtk.lgreen
arcColor = ovdvtk.green # grass
ofs_points = []
for lop in ofs:
points = []
n = 0
N = len(lop)
first_point = []
previous = []
for p in lop:
# p[0] is the Point
# p[1] is -1 for lines, and r for arcs
if n == 0: # don't draw anything on the first iteration
previous = p[0]
# first_point = p[0]
else:
cw = p[3] # cw/ccw flag
cen = p[2] # center
r = p[1] # radius
p = p[0] # target point
if r == -1: # r=-1 means line-segment
points.extend([previous, p]) # drawLine(myscreen, previous, p, lineColor)
else: # otherwise we have an arc
points.extend(arc_pts(previous, p, r, cen, cw))
previous = p
n = n + 1
ofs_points.append(points)
# print "rendered loop ",nloop, " with ", len(lop), " points"
nloop = nloop + 1
# now draw each loop with polydata
oPoints = vtk.vtkPoints()
lineCells = vtk.vtkCellArray()
# self.colorLUT = vtk.vtkLookupTable()
print "offset2vtk.drawOffsets2(): ", len(ofs_points), " loops to render:"
idx = 0
last_idx = 0
for of in ofs_points:
epts = of
segs = []
first = 1
print " loop with ", len(epts), " points"
for p in epts:
oPoints.InsertNextPoint(p.x, p.y, 0)
if first == 0:
seg = [last_idx, idx]
segs.append(seg)
first = 0
last_idx = idx
idx = idx + 1
# create line and cells
for seg in segs:
line = vtk.vtkLine()
line.GetPointIds().SetId(0, seg[0])
line.GetPointIds().SetId(1, seg[1])
# print " indexes: ", seg[0]," to ",seg[1]
lineCells.InsertNextCell(line)
linePolyData = vtk.vtkPolyData()
linePolyData.SetPoints(oPoints)
linePolyData.SetLines(lineCells)
linePolyData.Modified()
# linePolyData.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(linePolyData)
edge_actor = vtk.vtkActor()
edge_actor.SetMapper(mapper)
edge_actor.GetProperty().SetColor(ovdvtk.lgreen)
myscreen.addActor(edge_actor)
if __name__ == "__main__":
# w=2500 # screen resolution for big screens
# h=1500
# w=1920
# h=1080
w = 1024
h = 1024
myscreen = ovdvtk.VTKScreen(width=w, height=h) # a VTK window for drawing
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version()) # the OpenVoronoi text, revision, and date
scale = 1
myscreen.render()
far = 1
camPos = far
zmult = 3
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
# these actions on the vod object control how the VD is drawn using VTK
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 1
vod.drawGenerators = 1
vod.offsetEdges = 0 # for debug. a bool flag to set null-edge drawing on/off. use together with setEdgeOffset()
vd.setEdgeOffset(0.05) # for debug. a non-zero value will draw null-edges as circular arcs
# null-edges are an internal openvoronoi construction to avoid high-degree vertices in the VD-graph
# they are not relevant for upstream or downstream algorithms
# input points (vertices/sites)
p1 = ovd.Point(-0.1, -0.2)
p2 = ovd.Point(0.2, 0.1)
p3 = ovd.Point(0.4, 0.2)
p4 = ovd.Point(0.6, 0.6)
p5 = ovd.Point(-0.6, 0.3)
pts = [p1, p2, p3, p4, p5] # a list of all points in the input
# t_after = time.time()
# print ".done in {0:.3f} s.".format( t_after-t_before )
times = []
id_list = []
m = 0
t_before = time.time()
for p in pts: # add all points before adding line-segments
id_list.append(vd.addVertexSite(p))
# print m," added vertex", seg_id[0]
m = m + 1
t_after = time.time()
times.append(t_after - t_before)
print "all point sites inserted. "
print "VD check: ", vd.check()
t_before = time.time()
# now add line-segments, by using the integer indexes returned by vd.addVertexSite() above
vd.addLineSite(id_list[0], id_list[1])
vd.addLineSite(id_list[1], id_list[2])
vd.addLineSite(id_list[2], id_list[3])
vd.addLineSite(id_list[3], id_list[4])
vd.addLineSite(id_list[4], id_list[0])
vd.check()
t_after = time.time()
line_time = t_after - t_before
if line_time < 1e-3:
line_time = 1
times.append(line_time)
# we now have a VD of the input sites
# we can now run downstream algorithms on the VD such as
# Offset, MedialAxis, etc.
of = ovd.Offset(vd.getGraph()) # pass the created graph to the Offset class
of.str() # text output, for debug
ofs = of.offset(0.123) # generate offsets at the given distance.
drawOffsets(myscreen, ofs) # draw the generated offsets
vod.setVDText2(times)
vod.setAll()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
|
aewallin/openvoronoi
|
python_examples/ma-offset/offset2vtk.py
|
Python
|
lgpl-2.1
| 10,097
|
[
"VTK"
] |
635fb89a56fd49297f7496147befe7b43f2739893908ade99ccd7879bd38b9e1
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.284776
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/getcurrent.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getcurrent(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getcurrent, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_75148847 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2currentserviceinformation>
\t<e2service>
\t\t<e2servicereference>''')
_v = VFFSL(SL,"info.ref",True) # u'$info.ref' on line 5, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$info.ref')) # from line 5, col 23.
write(u'''</e2servicereference>
\t\t<e2servicename>''')
_v = VFFSL(SL,"info.name",True) # u'$info.name' on line 6, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$info.name')) # from line 6, col 18.
write(u'''</e2servicename>
\t\t<e2providername>''')
_v = VFFSL(SL,"info.provider",True) # u'$info.provider' on line 7, col 19
if _v is not None: write(_filter(_v, rawExpr=u'$info.provider')) # from line 7, col 19.
write(u'''</e2providername>
\t\t<e2videowidth>''')
_v = VFFSL(SL,"info.width",True) # u'$info.width' on line 8, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$info.width')) # from line 8, col 17.
write(u'''</e2videowidth>
\t\t<e2videoheight>''')
_v = VFFSL(SL,"info.height",True) # u'$info.height' on line 9, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$info.height')) # from line 9, col 18.
write(u'''</e2videoheight>
\t\t<e2servicevideosize>''')
_v = VFFSL(SL,"info.width",True) # u'${info.width}' on line 10, col 23
if _v is not None: write(_filter(_v, rawExpr=u'${info.width}')) # from line 10, col 23.
write(u'''x''')
_v = VFFSL(SL,"info.height",True) # u'${info.height}' on line 10, col 37
if _v is not None: write(_filter(_v, rawExpr=u'${info.height}')) # from line 10, col 37.
write(u'''</e2servicevideosize>
\t\t<e2iswidescreen>
''')
if VFFSL(SL,"info.iswidescreen",True) : # generated from line 12, col 4
_v = "1"
if _v is not None: write(_filter(_v))
else:
_v = "0"
if _v is not None: write(_filter(_v))
write(u'''\t\t</e2iswidescreen>
\t\t<e2apid>''')
_v = VFFSL(SL,"info.apid",True) # u'$info.apid' on line 14, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$info.apid')) # from line 14, col 11.
write(u'''</e2apid>
\t\t<e2vpid>''')
_v = VFFSL(SL,"info.vpid",True) # u'$info.vpid' on line 15, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$info.vpid')) # from line 15, col 11.
write(u'''</e2vpid>
\t\t<e2pcrpid>''')
_v = VFFSL(SL,"info.pcrpid",True) # u'$info.pcrpid' on line 16, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$info.pcrpid')) # from line 16, col 13.
write(u'''</e2pcrpid>
\t\t<e2pmtpid>''')
_v = VFFSL(SL,"info.pmtpid",True) # u'$info.pmtpid' on line 17, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$info.pmtpid')) # from line 17, col 13.
write(u'''</e2pmtpid>
\t\t<e2txtpid>''')
_v = VFFSL(SL,"info.txtpid",True) # u'$info.txtpid' on line 18, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$info.txtpid')) # from line 18, col 13.
write(u'''</e2txtpid>
\t\t<e2tsid>''')
_v = VFFSL(SL,"info.tsid",True) # u'$info.tsid' on line 19, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$info.tsid')) # from line 19, col 11.
write(u'''</e2tsid>
\t\t<e2onid>''')
_v = VFFSL(SL,"info.onid",True) # u'$info.onid' on line 20, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$info.onid')) # from line 20, col 11.
write(u'''</e2onid>
\t\t<e2sid>''')
_v = VFFSL(SL,"info.sid",True) # u'$info.sid' on line 21, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$info.sid')) # from line 21, col 10.
write(u'''</e2sid>
\t</e2service>
\t<e2eventlist>
\t\t<e2event>
\t\t\t<e2eventservicereference>''')
_v = VFFSL(SL,"now.sref",True) # u'$now.sref' on line 25, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$now.sref')) # from line 25, col 29.
write(u'''</e2eventservicereference>
\t\t\t<e2eventservicename>''')
_v = VFFSL(SL,"now.sname",True) # u'$now.sname' on line 26, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$now.sname')) # from line 26, col 24.
write(u'''</e2eventservicename>
\t\t\t<e2eventprovidername>''')
_v = VFFSL(SL,"now.provider",True) # u'$now.provider' on line 27, col 25
if _v is not None: write(_filter(_v, rawExpr=u'$now.provider')) # from line 27, col 25.
write(u'''</e2eventprovidername>
\t\t\t<e2eventid>''')
_v = VFFSL(SL,"now.id",True) # u'$now.id' on line 28, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$now.id')) # from line 28, col 15.
write(u'''</e2eventid>
\t\t\t<e2eventname>''')
_v = VFFSL(SL,"now.title",True) # u'$now.title' on line 29, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$now.title')) # from line 29, col 17.
write(u'''</e2eventname>
\t\t\t<e2eventtitle>''')
_v = VFFSL(SL,"now.title",True) # u'$now.title' on line 30, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$now.title')) # from line 30, col 18.
write(u'''</e2eventtitle>
\t\t\t<e2eventdescription>''')
_v = VFFSL(SL,"now.shortdesc",True) # u'$now.shortdesc' on line 31, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$now.shortdesc')) # from line 31, col 24.
write(u'''</e2eventdescription>
\t\t\t<e2eventstart>''')
_v = VFFSL(SL,"now.begin_timestamp",True) # u'$now.begin_timestamp' on line 32, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$now.begin_timestamp')) # from line 32, col 18.
write(u'''</e2eventstart>
\t\t\t<e2eventduration>''')
_v = VFFSL(SL,"now.duration_sec",True) # u'$now.duration_sec' on line 33, col 21
if _v is not None: write(_filter(_v, rawExpr=u'$now.duration_sec')) # from line 33, col 21.
write(u'''</e2eventduration>
\t\t\t<e2eventremaining>''')
_v = VFFSL(SL,"now.remaining",True) # u'$now.remaining' on line 34, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$now.remaining')) # from line 34, col 22.
write(u'''</e2eventremaining>
\t\t\t<e2eventcurrenttime>''')
_v = VFFSL(SL,"now.now_timestamp",True) # u'$now.now_timestamp' on line 35, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$now.now_timestamp')) # from line 35, col 24.
write(u'''</e2eventcurrenttime>
\t\t\t<e2eventdescriptionextended>''')
_v = VFFSL(SL,"now.longdesc",True) # u'$now.longdesc' on line 36, col 32
if _v is not None: write(_filter(_v, rawExpr=u'$now.longdesc')) # from line 36, col 32.
write(u'''</e2eventdescriptionextended>
\t\t</e2event>
\t\t<e2event>
\t\t\t<e2eventservicereference>''')
_v = VFFSL(SL,"next.sref",True) # u'$next.sref' on line 39, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$next.sref')) # from line 39, col 29.
write(u'''</e2eventservicereference>
\t\t\t<e2eventservicename>''')
_v = VFFSL(SL,"next.sname",True) # u'$next.sname' on line 40, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$next.sname')) # from line 40, col 24.
write(u'''</e2eventservicename>
\t\t\t<e2eventprovidername>''')
_v = VFFSL(SL,"next.provider",True) # u'$next.provider' on line 41, col 25
if _v is not None: write(_filter(_v, rawExpr=u'$next.provider')) # from line 41, col 25.
write(u'''</e2eventprovidername>
\t\t\t<e2eventid>''')
_v = VFFSL(SL,"next.id",True) # u'$next.id' on line 42, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$next.id')) # from line 42, col 15.
write(u'''</e2eventid>
\t\t\t<e2eventname>''')
_v = VFFSL(SL,"next.title",True) # u'$next.title' on line 43, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$next.title')) # from line 43, col 17.
write(u'''</e2eventname>
\t\t\t<e2eventtitle>''')
_v = VFFSL(SL,"next.title",True) # u'$next.title' on line 44, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$next.title')) # from line 44, col 18.
write(u'''</e2eventtitle>
\t\t\t<e2eventdescription>''')
_v = VFFSL(SL,"next.shortdesc",True) # u'$next.shortdesc' on line 45, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$next.shortdesc')) # from line 45, col 24.
write(u'''</e2eventdescription>
\t\t\t<e2eventstart>''')
_v = VFFSL(SL,"next.begin_timestamp",True) # u'$next.begin_timestamp' on line 46, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$next.begin_timestamp')) # from line 46, col 18.
write(u'''</e2eventstart>
\t\t\t<e2eventduration>''')
_v = VFFSL(SL,"next.duration_sec",True) # u'$next.duration_sec' on line 47, col 21
if _v is not None: write(_filter(_v, rawExpr=u'$next.duration_sec')) # from line 47, col 21.
write(u'''</e2eventduration>
\t\t\t<e2eventremaining>''')
_v = VFFSL(SL,"next.remaining",True) # u'$next.remaining' on line 48, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$next.remaining')) # from line 48, col 22.
write(u'''</e2eventremaining>
\t\t\t<e2eventcurrenttime>''')
_v = VFFSL(SL,"next.now_timestamp",True) # u'$next.now_timestamp' on line 49, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$next.now_timestamp')) # from line 49, col 24.
write(u'''</e2eventcurrenttime>
\t\t\t<e2eventdescriptionextended>''')
_v = VFFSL(SL,"next.longdesc",True) # u'$next.longdesc' on line 50, col 32
if _v is not None: write(_filter(_v, rawExpr=u'$next.longdesc')) # from line 50, col 32.
write(u'''</e2eventdescriptionextended>
\t\t</e2event>
\t</e2eventlist>
</e2currentserviceinformation>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_75148847
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getcurrent= 'respond'
## END CLASS DEFINITION
if not hasattr(getcurrent, '_initCheetahAttributes'):
templateAPIClass = getattr(getcurrent, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getcurrent)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getcurrent()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/getcurrent.py
|
Python
|
gpl-2.0
| 14,438
|
[
"VisIt"
] |
5e8c7875f5ee9f4e21238ff523a7395623e4d78badfbc1f12bb08e4effb1160a
|
#!/usr/bin/env python
from __future__ import division
import copy
import imp
import os
import traceback
from optparse import OptionParser
import cv2
import rospy
from cv_bridge import CvBridge
from cv_bridge import CvBridgeError
from sensor_msgs.msg import Image
from multi_tracker.msg import DeltaVid
# for basler ace cameras, use camera_aravis
# https://github.com/ssafarik/camera_aravis
# rosrun camera_aravis camnode
# default image: /camera/image_raw
# for firefly cameras, camera1394 does not provide timestamps but otherwise
# works. use point grey drivers.
# http://wiki.ros.org/pointgrey_camera_driver
# rosrun pointgrey_camera_driver camera_node
# default image: /camera/image_mono
from distutils.version import LooseVersion, StrictVersion
print 'Using open cv: ' + cv2.__version__
if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"):
OPENCV_VERSION = 3
print 'Open CV 3'
else:
OPENCV_VERSION = 2
print 'Open CV 2'
# The main tracking class, a ROS node
class DeCompressor:
def __init__(self, topic_in, topic_out, directory, config=None, mode='mono',
saveto='', fps=5.0):
'''
Default image_topic for:
Basler ace cameras with camera_aravis driver: camera/image_raw
Pt Grey Firefly cameras with pt grey driver : camera/image_mono
'''
# initialize the node
rospy.init_node('delta_decompressor')
# Publishers - publish contours
self.pubDeltaVid = rospy.Publisher(topic_out, Image, queue_size=30)
self.subDeltaVid = rospy.Subscriber(topic_in, DeltaVid,
self.delta_image_callback, queue_size=30)
self.cvbridge = CvBridge()
#rospy.get_param('multi_tracker/delta_video/directory', default='')
self.directory = directory
self.backgroundImage = None
self.background_img_filename = 'none'
self.config = config
self.mode = mode
if len(saveto) > 0:
self.saveto = saveto
self.videowriter = None
else:
self.saveto = None
self.videowriter = None
self.fps = fps
sim_time = rospy.get_param('/use_sim_time', False)
# TODO TODO How to get original images (to republish w/o bg image in
# this case). I should... right? Just integrate r.t. decompression into the
# compressor?
self.real_time = (not sim_time)
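# The callback below rebuilds each full frame by applying the changed-pixel
# deltas (xpixels, ypixels, values) from the DeltaVid message onto the most
# recently loaded background image, optionally draws overlays / writes video,
# and republishes the result as a sensor_msgs/Image.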
def delta_image_callback(self, delta_vid):
if (self.background_img_filename != delta_vid.background_image or
self.backgroundImage is None):
self.background_img_filename = delta_vid.background_image
basename = os.path.basename(self.background_img_filename)
directory_with_basename = os.path.join(self.directory, basename)
try:
if not os.path.exists(directory_with_basename):
raise IOError('background image file ' +
directory_with_basename + ' did not exist')
if not os.path.getsize(directory_with_basename) > 0:
raise IOError('background image file ' +
directory_with_basename + ' was empty')
except IOError:
traceback.print_exc()
# this (should) just shutdown the current node, which can be
# marked as required in the launch file (bringing everything
# down if it goes down)
rospy.signal_shutdown(
'cannot proceed without background images.')
self.backgroundImage = cv2.imread(directory_with_basename,
cv2.CV_8UC1)
try:
# for hydro
self.backgroundImage = self.backgroundImage.reshape(
[self.backgroundImage.shape[0], self.backgroundImage.shape[1], 1])
# TODO handle cases by version explicitly or at least specify
# expected error
except:
# for indigo
pass
if self.backgroundImage is not None:
new_image = copy.copy(self.backgroundImage)
if delta_vid.values is not None:
if len(delta_vid.values) > 0:
# TODO TODO check whether range of delta_vid.<>pixels is
# same as that of original frame, or cropped. trying to set
# values outside of frame? what's behavior in that case?
# assertion?
try:
# for hydro
new_image[delta_vid.xpixels, delta_vid.ypixels, 0] = \
delta_vid.values
except:
# for indigo
new_image[delta_vid.xpixels, delta_vid.ypixels] = \
delta_vid.values
if self.mode == 'color':
new_image = cv2.cvtColor(new_image, cv2.COLOR_GRAY2RGB)
if self.config is not None:
# just use ros time conversion func
t = (delta_vid.header.stamp.secs +
delta_vid.header.stamp.nsecs * 1e-9)
self.config.draw(new_image, t)
if self.saveto is not None:
# TODO why not move this to init?
if self.videowriter is not None:
self.videowriter.write(new_image)
else:
'''
if OPENCV_VERSION == 2:
# works on Linux and Windows
self.videowriter = cv2.VideoWriter(self.saveto,
cv2.cv.CV_FOURCC('m','p','4','v'), 300,
(new_image.shape[1], new_image.shape[0]), True)
elif OPENCV_VERSION == 3:
self.videowriter = cv2.VideoWriter(self.saveto,
cv2.VideoWriter_fourcc('m','p','4','v'), 300,
(new_image.shape[1], new_image.shape[0]), True)
'''
# TODO handle iscolor flag appropriately
self.videowriter = cv2.VideoWriter(self.saveto, \
cv2.VideoWriter_fourcc(*'XVID'), self.fps, \
(new_image.shape[1], new_image.shape[0]), False)
#self.videowriter.open(self.saveto,
# cv.CV_FOURCC('P','I','M','1'), 30,
# (new_image.shape[0], new_image.shape[1]))
if self.mode == 'mono':
image_message = self.cvbridge.cv2_to_imgmsg(new_image,
encoding="mono8")
elif self.mode == 'color':
image_message = self.cvbridge.cv2_to_imgmsg(new_image,
encoding="bgr8")
image_message.header = delta_vid.header
self.pubDeltaVid.publish(image_message)
def main(self):
rospy.spin()
if self.videowriter is not None:
self.videowriter.release()
print ('Note: use this command to make a mac / quicktime friendly ' +
'video: avconv -i test.avi -c:v libx264 -c:a copy ' +
'outputfile.mp4')
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--in", type="str", dest="input",
default='multi_tracker/delta_video', help="input topic name")
parser.add_option("--out", type="str", dest="output",
default='camera/image_decompressed', help="output topic name")
parser.add_option("--directory", type="str", dest="directory",
default=os.getcwd(),
help="directory where background images can be found")
# TODO --draw or something less generic than config? previous uses?
parser.add_option("--config", type="str", dest="config", default='',
help=("configuration file, which should describe a class " +
"that has a method draw"))
parser.add_option("--mode", type="str", dest="mode", default='mono',
help="color if desired to convert to color image")
parser.add_option("--saveto", type="str", dest="saveto", default='',
help=("filename where to save video, default is none. Note: use this" +
"command to make a mac / quicktime friendly video: avconv -i " +
"test.avi -c:v libx264 -c:a copy outputfile.mp4"))
# TODO add fps option
(options, args) = parser.parse_args()
if len(options.config) > 0:
config = imp.load_source('config', options.config)
c = config.Config(options.config)
else:
c = None
decompressor = DeCompressor(options.input, options.output,
options.directory, c, options.mode, options.saveto)
decompressor.main()
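# Illustrative invocation (assuming the node lives in a ROS package named
# multi_tracker; topics and the background-image directory are placeholders):
# rosrun multi_tracker delta_video_player.py --in multi_tracker/delta_video \
# --out camera/image_decompressed --directory /path/to/backgrounds --mode color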
|
tom-f-oconnell/multi_tracker
|
nodes/delta_video_player.py
|
Python
|
mit
| 9,129
|
[
"Firefly"
] |
a08bea5b655706e18ae1322e49c326d2128ddd38979128daf6ba17e66fff794c
|
#
# Copyright 2014-2015, 2017, 2019-2021 Lars Pastewka (U. Freiburg)
# 2019-2020 Wolfram G. Nöhring (U. Freiburg)
# 2020 Johannes Hoermann (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import gzip
import random
import unittest
import numpy as np
from numpy.linalg import norm
import ase.io as io
from ase.calculators.test import numeric_force
from ase.constraints import StrainFilter, UnitCellFilter
from ase.lattice.compounds import B1, B2, L1_0, L1_2
from ase.lattice.cubic import FaceCenteredCubic
from ase.lattice.hexagonal import HexagonalClosedPacked
from ase.optimize import FIRE
from ase.units import GPa
import matscipytest
from matscipy.calculators.eam import EAM
from matscipy.elasticity import fit_elastic_constants, Voigt_6x6_to_cubic
from matscipy.neighbours import neighbour_list
from matscipy.numerical import numerical_forces, numerical_stress
###
class TestEAMCalculator(matscipytest.MatSciPyTestCase):
disp = 1e-6
tol = 2e-6
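# test_forces below cross-checks the analytic EAM forces against numeric
# finite-difference forces (numeric_force) for every 100th atom of the
# 923-atom Au cluster read from Au_923.xyz.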
def test_forces(self):
for calc in [EAM('Au-Grochola-JCP05.eam.alloy')]:
a = io.read('Au_923.xyz')
a.center(vacuum=10.0)
a.set_calculator(calc)
f = a.get_forces()
for i in range(9):
atindex = i*100
fn = [numeric_force(a, atindex, 0, self.disp),
numeric_force(a, atindex, 1, self.disp),
numeric_force(a, atindex, 2, self.disp)]
self.assertArrayAlmostEqual(f[atindex], fn, tol=self.tol)
def test_stress(self):
a = FaceCenteredCubic('Au', size=[2,2,2])
calc = EAM('Au-Grochola-JCP05.eam.alloy')
a.set_calculator(calc)
self.assertArrayAlmostEqual(a.get_stress(), numerical_stress(a), tol=self.tol)
sx, sy, sz = a.cell.diagonal()
a.set_cell([sx, 0.9*sy, 1.2*sz], scale_atoms=True)
self.assertArrayAlmostEqual(a.get_stress(), numerical_stress(a), tol=self.tol)
a.set_cell([[sx, 0.1*sx, 0], [0, 0.9*sy, 0], [0, -0.1*sy, 1.2*sz]], scale_atoms=True)
self.assertArrayAlmostEqual(a.get_stress(), numerical_stress(a), tol=self.tol)
def test_Grochola(self):
a = FaceCenteredCubic('Au', size=[2,2,2])
calc = EAM('Au-Grochola-JCP05.eam.alloy')
a.set_calculator(calc)
FIRE(StrainFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
a0 = a.cell.diagonal().mean()/2
self.assertTrue(abs(a0-4.0701)<2e-5)
self.assertTrue(abs(a.get_potential_energy()/len(a)+3.924)<0.0003)
C, C_err = fit_elastic_constants(a, symmetry='cubic', verbose=False)
C11, C12, C44 = Voigt_6x6_to_cubic(C)
self.assertTrue(abs((C11-C12)/GPa-32.07)<0.7)
self.assertTrue(abs(C44/GPa-45.94)<0.5)
def test_direct_evaluation(self):
a = FaceCenteredCubic('Au', size=[2,2,2])
a.rattle(0.1)
calc = EAM('Au-Grochola-JCP05.eam.alloy')
a.set_calculator(calc)
f = a.get_forces()
calc2 = EAM('Au-Grochola-JCP05.eam.alloy')
i_n, j_n, dr_nc, abs_dr_n = neighbour_list('ijDd', a, cutoff=calc2.cutoff)
epot, virial, f2 = calc2.energy_virial_and_forces(a.numbers, i_n, j_n, dr_nc, abs_dr_n)
self.assertArrayAlmostEqual(f, f2)
a = FaceCenteredCubic('Cu', size=[2,2,2])
calc = EAM('CuAg.eam.alloy')
a.set_calculator(calc)
FIRE(StrainFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
e_Cu = a.get_potential_energy()/len(a)
a = FaceCenteredCubic('Ag', size=[2,2,2])
a.set_calculator(calc)
FIRE(StrainFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
e_Ag = a.get_potential_energy()/len(a)
self.assertTrue(abs(e_Ag+2.85)<1e-6)
a = L1_2(['Ag', 'Cu'], size=[2,2,2], latticeconstant=4.0)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
e = a.get_potential_energy()
syms = np.array(a.get_chemical_symbols())
self.assertTrue(abs((e-(syms=='Cu').sum()*e_Cu-
(syms=='Ag').sum()*e_Ag)/len(a)-0.096)<0.0005)
a = B1(['Ag', 'Cu'], size=[2,2,2], latticeconstant=4.0)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
e = a.get_potential_energy()
syms = np.array(a.get_chemical_symbols())
self.assertTrue(abs((e-(syms=='Cu').sum()*e_Cu-
(syms=='Ag').sum()*e_Ag)/len(a)-0.516)<0.0005)
a = B2(['Ag', 'Cu'], size=[2,2,2], latticeconstant=4.0)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
e = a.get_potential_energy()
syms = np.array(a.get_chemical_symbols())
self.assertTrue(abs((e-(syms=='Cu').sum()*e_Cu-
(syms=='Ag').sum()*e_Ag)/len(a)-0.177)<0.0003)
a = L1_2(['Cu', 'Ag'], size=[2,2,2], latticeconstant=4.0)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
e = a.get_potential_energy()
syms = np.array(a.get_chemical_symbols())
self.assertTrue(abs((e-(syms=='Cu').sum()*e_Cu-
(syms=='Ag').sum()*e_Ag)/len(a)-0.083)<0.0005)
def test_CuZr(self):
# This is a test for the potential published in:
# Mendelev, Sordelet, Kramer, J. Appl. Phys. 102, 043501 (2007)
a = FaceCenteredCubic('Cu', size=[2,2,2])
calc = EAM('CuZr_mm.eam.fs', kind='eam/fs')
a.set_calculator(calc)
FIRE(StrainFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
a_Cu = a.cell.diagonal().mean()/2
#print('a_Cu (3.639) = ', a_Cu)
self.assertAlmostEqual(a_Cu, 3.639, 3)
a = HexagonalClosedPacked('Zr', size=[2,2,2])
a.set_calculator(calc)
FIRE(StrainFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
a, b, c = a.cell/2
#print('a_Zr (3.220) = ', norm(a), norm(b))
#print('c_Zr (5.215) = ', norm(c))
self.assertAlmostEqual(norm(a), 3.220, 3)
self.assertAlmostEqual(norm(b), 3.220, 3)
self.assertAlmostEqual(norm(c), 5.215, 3)
# CuZr3
a = L1_2(['Cu', 'Zr'], size=[2,2,2], latticeconstant=4.0)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
self.assertAlmostEqual(a.cell.diagonal().mean()/2, 4.324, 3)
# Cu3Zr
a = L1_2(['Zr', 'Cu'], size=[2,2,2], latticeconstant=4.0)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
self.assertAlmostEqual(a.cell.diagonal().mean()/2, 3.936, 3)
# CuZr
a = B2(['Zr', 'Cu'], size=[2,2,2], latticeconstant=3.3)
a.set_calculator(calc)
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.001)
self.assertAlmostEqual(a.cell.diagonal().mean()/2, 3.237, 3)
def test_forces_CuZr_glass(self):
"""Calculate interatomic forces in CuZr glass
Reference: tabulated forces from a calculation
with Lammps (git version patch_29Mar2019-2-g585403d65)
The forces can be re-calculated using the following
Lammps commands:
units metal
atom_style atomic
boundary p p p
read_data CuZr_glass_460_atoms.lammps.data.gz
pair_style eam/alloy
pair_coeff * * ZrCu.onecolumn.eam.alloy Zr Cu
# The initial configuration is in equilibrium
# and the remaining forces are small
# Swap atom types to bring system out of
# equilibrium and create nonzero forces
group originally_Zr type 1
group originally_Cu type 2
set group originally_Zr type 2
set group originally_Cu type 1
run 0
write_dump all custom &
CuZr_glass_460_atoms_forces.lammps.dump.gz &
id type x y z fx fy fz &
modify sort id format float "%.14g"
"""
format = "lammps-dump" if "lammps-dump" in io.formats.all_formats.keys() else "lammps-dump-text"
atoms = io.read("CuZr_glass_460_atoms_forces.lammps.dump.gz", format=format)
old_atomic_numbers = atoms.get_atomic_numbers()
sel, = np.where(old_atomic_numbers == 1)
new_atomic_numbers = np.zeros_like(old_atomic_numbers)
new_atomic_numbers[sel] = 40 # Zr
sel, = np.where(old_atomic_numbers == 2)
new_atomic_numbers[sel] = 29 # Cu
atoms.set_atomic_numbers(new_atomic_numbers)
calculator = EAM('ZrCu.onecolumn.eam.alloy')
atoms.set_calculator(calculator)
atoms.pbc = [True, True, True]
forces = atoms.get_forces()
# Read tabulated forces and compare
with gzip.open("CuZr_glass_460_atoms_forces.lammps.dump.gz") as file:
for line in file:
if line.startswith(b"ITEM: ATOMS "): # ignore header
break
dump = np.loadtxt(file)
forces_dump = dump[:, 5:8]
self.assertArrayAlmostEqual(forces, forces_dump, tol=1e-3)
def test_funcfl(self):
"""Test eam kind 'eam' (DYNAMO funcfl format)
variable da equal 0.02775
variable amin equal 2.29888527117067752084
variable amax equal 5.55*sqrt(2.0)
variable i loop 201
label loop_head
clear
variable lattice_parameter equal ${amin}+${da}*${i}
units metal
atom_style atomic
boundary p p p
lattice fcc ${lattice_parameter}
region box block 0 5 0 5 0 5
create_box 1 box
pair_style eam
pair_coeff * * Au_u3.eam
create_atoms 1 box
thermo 1
run 0
variable x equal pe
variable potential_energy_fmt format x "%.14f"
print "#a,E: ${lattice_parameter} ${potential_energy_fmt}"
next i
jump SELF loop_head
# use
# grep '^#a,E' log.lammps | awk '{print $2,$3}' > aE.txt
# to extract info from log file
The reference data was calculated using Lammps
(git commit a73f1d4f037f670cd4295ecc1a576399a31680d2).
"""
da = 0.02775
amin = 2.29888527117067752084
amax = 5.55 * np.sqrt(2.0)
for i in range(1, 202):
latticeconstant = amin + i * da
atoms = FaceCenteredCubic(symbol='Au', size=[5,5,5], pbc=(1,1,1), latticeconstant=latticeconstant)
calc = EAM('Au_u3.eam', kind='eam')  # funcfl file, matching the docstring's pair_coeff
atoms.set_calculator(calc)
energy = atoms.get_potential_energy()
print(energy)
###
if __name__ == '__main__':
unittest.main()
|
libAtoms/matscipy
|
tests/test_eam_calculator.py
|
Python
|
lgpl-2.1
| 12,830
|
[
"ASE",
"LAMMPS",
"Matscipy"
] |
7f275e2674878dc7907a440638ad840b44a45d108652658fa1b3430ebf450131
|
##############################################################################
# Copyright (c) 2017-2018 Mark Olesen, OpenCFD Ltd.
#
# This file was authored by Mark Olesen <mark.olesen@esi-group.com>
# and is released as part of spack under the LGPL license.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for the LLNL notice and LGPL.
#
# License
# -------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - mpi handling: WM_MPLIB=USERMPI and use spack to populate an appropriate
# configuration and generate wmake rules for 'USER' and 'USERMPI'
# mpi implementations.
#
# - Resolution of flex, zlib needs more attention (within OpenFOAM)
# - +paraview:
# depends_on should just be 'paraview+plugins' but that resolves poorly.
# Workaround: use preferred variants "+plugins +qt"
# packages:
# paraview:
# variants: +plugins +qt
# in ~/.spack/packages.yaml
#
# Known issues
# - Combining +zoltan with +int64 has not been tested, but probably won't work.
# - Combining +mgridgen with +int64 or +float32 probably won't work.
#
# The spack 'develop' version of openfoam-com retains the upstream
# WM_PROJECT_VERSION=plus naming internally.
#
##############################################################################
import glob
import re
import shutil
import os
from spack import *
from spack.environment import EnvironmentModifications
import llnl.util.tty as tty
# Not the nice way of doing things, but is a start for refactoring
__all__ = [
'add_extra_files',
'write_environ',
'rewrite_environ_files',
'mplib_content',
'foamAddPath',
'foamAddLib',
'OpenfoamArch',
]
def add_extra_files(foam_pkg, common, local, **kwargs):
"""Copy additional common and local files into the stage.source_path
from the openfoam-com/common and the package/assets directories,
respectively
"""
outdir = foam_pkg.stage.source_path
indir = join_path(os.path.dirname(__file__), 'common')
for f in common:
tty.info('Added file {0}'.format(f))
install(join_path(indir, f), join_path(outdir, f))
indir = join_path(foam_pkg.package_dir, 'assets')
for f in local:
tty.info('Added file {0}'.format(f))
install(join_path(indir, f), join_path(outdir, f))
def format_export(key, value):
"""Format key,value pair as 'export' with newline for POSIX shell.
A leading '#' for key adds a comment character to the entire line.
A value of 'None' corresponds to 'unset'.
"""
if key.startswith('#'):
return '## export {0}={1}\n'.format(re.sub(r'^#+\s*', '', key), value)
elif value is None:
return 'unset {0}\n'.format(key)
else:
return 'export {0}={1}\n'.format(key, value)
def format_setenv(key, value):
"""Format key,value pair as 'setenv' with newline for C-shell.
A leading '#' for key adds a comment character to the entire line.
A value of 'None' corresponds to 'unsetenv'.
"""
if key.startswith('#'):
return '## setenv {0} {1}\n'.format(re.sub(r'^#+\s*', '', key), value)
elif value is None:
return 'unsetenv {0}\n'.format(key)
else:
return 'setenv {0} {1}\n'.format(key, value)
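# Examples of the formatters above, traced from the code for clarity:
# format_export('WM_COMPILER', 'Gcc') -> 'export WM_COMPILER=Gcc\n'
# format_export('FOO', None) -> 'unset FOO\n'
# format_setenv('WM_COMPILER', 'Gcc') -> 'setenv WM_COMPILER Gcc\n'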
def _write_environ_entries(outfile, environ, formatter):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Also descends into sub-dict and sub-list, but drops the key.
"""
if isinstance(environ, dict):
for key in sorted(environ):
entry = environ[key]
if isinstance(entry, dict):
_write_environ_entries(outfile, entry, formatter)
elif isinstance(entry, list):
_write_environ_entries(outfile, entry, formatter)
else:
outfile.write(formatter(key, entry))
elif isinstance(environ, list):
for item in environ:
outfile.write(formatter(item[0], item[1]))
def _write_environ_file(output, environ, formatter):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Also descends into sub-dict and sub-list, but drops the key.
"""
with open(output, 'w') as outfile:
outfile.write('# spack generated\n')
_write_environ_entries(outfile, environ, formatter)
outfile.write('# spack\n')
def write_environ(environ, **kwargs):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Keyword Options:
posix[=None] If set, the name of the POSIX file to rewrite.
cshell[=None] If set, the name of the C-shell file to rewrite.
"""
rcfile = kwargs.get('posix', None)
if rcfile:
_write_environ_file(rcfile, environ, format_export)
rcfile = kwargs.get('cshell', None)
if rcfile:
_write_environ_file(rcfile, environ, format_setenv)
def rewrite_environ_files(environ, **kwargs):
"""Use filter_file to rewrite (existing) POSIX shell or C-shell files.
Keyword Options:
posix[=None] If set, the name of the POSIX file to rewrite.
cshell[=None] If set, the name of the C-shell file to rewrite.
"""
rcfile = kwargs.get('posix', None)
if rcfile and os.path.isfile(rcfile):
for k, v in environ.items():
regex = r'^(\s*export\s+{0})=.*$'.format(k)
if not v:
replace = r'unset {0} #SPACK: unset'.format(k)
elif v.startswith('#'):
replace = r'unset {0} {1}'.format(k, v)
else:
replace = r'\1={0}'.format(v)
filter_file(regex, replace, rcfile, backup=False)
rcfile = kwargs.get('cshell', None)
if rcfile and os.path.isfile(rcfile):
for k, v in environ.items():
regex = r'^(\s*setenv\s+{0})\s+.*$'.format(k)
if not v:
replace = r'unsetenv {0} #SPACK: unset'.format(k)
elif v.startswith('#'):
replace = r'unsetenv {0} {1}'.format(k, v)
else:
replace = r'\1 {0}'.format(v)
filter_file(regex, replace, rcfile, backup=False)
def foamAddPath(*args):
"""A string with args prepended to 'PATH'"""
return '"' + ':'.join(args) + ':${PATH}"'
def foamAddLib(*args):
"""A string with args prepended to 'LD_LIBRARY_PATH'"""
return '"' + ':'.join(args) + ':${LD_LIBRARY_PATH}"'
def pkglib(package, pre=None):
"""Get lib64 or lib from package prefix.
Optional parameter 'pre' to provide alternative prefix
"""
libdir = package.prefix.lib64
if not os.path.isdir(libdir):
libdir = package.prefix.lib
if pre:
return join_path(pre, os.path.basename(libdir))
else:
return libdir
def mplib_content(spec, pre=None):
"""The mpi settings (from spack) for the OpenFOAM wmake includes, which
allows later reuse within OpenFOAM.
Optional parameter 'pre' to provide alternative prefix
"""
mpi_spec = spec['mpi']
bin = mpi_spec.prefix.bin
inc = mpi_spec.prefix.include
lib = pkglib(mpi_spec)
libname = 'mpi'
if 'mpich' in mpi_spec.name:
libname = 'mpich'
if pre:
bin = join_path(pre, os.path.basename(bin))
inc = join_path(pre, os.path.basename(inc))
lib = join_path(pre, os.path.basename(lib))
else:
pre = mpi_spec.prefix
info = {
'name': '{0}-{1}'.format(mpi_spec.name, mpi_spec.version),
'prefix': pre,
'include': inc,
'bindir': bin,
'libdir': lib,
'FLAGS': '-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX',
'PINC': '-I{0}'.format(inc),
'PLIBS': '-L{0} -l{1}'.format(lib, libname),
}
return info
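# Illustrative return value for an openmpi dependency with
# pre='${MPI_ARCH_PATH}' (exact name/version and lib vs lib64 depend on the spec):
# {'name': 'openmpi-3.1.3', 'prefix': '${MPI_ARCH_PATH}',
# 'include': '${MPI_ARCH_PATH}/include', 'bindir': '${MPI_ARCH_PATH}/bin',
# 'libdir': '${MPI_ARCH_PATH}/lib',
# 'FLAGS': '-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX',
# 'PINC': '-I${MPI_ARCH_PATH}/include',
# 'PLIBS': '-L${MPI_ARCH_PATH}/lib -lmpi'}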
# -----------------------------------------------------------------------------
class OpenfoamCom(Package):
"""OpenFOAM is a GPL-opensource C++ CFD-toolbox.
This offering is supported by OpenCFD Ltd,
producer and distributor of the OpenFOAM software via www.openfoam.com,
and owner of the OPENFOAM trademark.
OpenCFD Ltd has been developing and releasing OpenFOAM since its debut
in 2004.
"""
maintainers = ['olesenm']
homepage = "http://www.openfoam.com/"
gitrepo = "https://develop.openfoam.com/Development/OpenFOAM-plus.git"
url = "https://sourceforge.net/projects/openfoamplus/files/v1706/OpenFOAM-v1706.tgz"
list_url = "https://sourceforge.net/projects/openfoamplus/files/"
list_depth = 2
version('1712', '6ad92df051f4d52c7d0ec34f4b8eb3bc')
version('1706', '630d30770f7b54d6809efbf94b7d7c8f')
version('1612', 'ca02c491369150ab127cbb88ec60fbdf')
version('develop', branch='develop', git=gitrepo) # Needs credentials
variant('float32', default=False,
description='Use single-precision')
variant('int64', default=False,
description='With 64-bit labels')
variant('knl', default=False,
description='Use KNL compiler settings')
variant('kahip', default=True,
description='With kahip decomposition')
variant('metis', default=False,
description='With metis decomposition')
variant('scotch', default=True,
description='With scotch/ptscotch decomposition')
variant('zoltan', default=False,
description='With zoltan renumbering')
# TODO?# variant('scalasca', default=False,
# TODO?# description='With scalasca profiling')
variant('mgridgen', default=False, description='With mgridgen support')
variant('paraview', default=True,
description='Build paraview plugins and runtime post-processing')
variant('source', default=True,
description='Install library/application sources and tutorials')
provides('openfoam')
depends_on('mpi')
# After 1712, could suggest openmpi+thread_multiple for collated output
# but particular mixes of mpi versions and InfiniBand may not work so well
# conflicts('^openmpi~thread_multiple', when='@1712:')
depends_on('zlib')
depends_on('fftw')
depends_on('boost')
depends_on('cgal')
depends_on('flex', type='build')
depends_on('cmake', type='build')
# Require scotch with ptscotch - corresponds to standard OpenFOAM setup
depends_on('scotch~metis+mpi~int64', when='+scotch~int64')
depends_on('scotch~metis+mpi+int64', when='+scotch+int64')
depends_on('kahip', when='+kahip')
depends_on('metis@5:', when='+metis')
depends_on('metis+int64', when='+metis+int64')
# mgridgen is statically linked
depends_on('parmgridgen', when='+mgridgen', type='build')
depends_on('zoltan', when='+zoltan')
# TODO?# depends_on('scalasca', when='+scalasca')
# For OpenFOAM plugins and run-time post-processing this should just be
# 'paraview+plugins' but that resolves poorly.
# Workaround: use preferred variants "+plugins +qt" in
# ~/.spack/packages.yaml
# 1706 ok with newer paraview but avoid pv-5.2, pv-5.3 readers
depends_on('paraview@5.4:', when='@1706:+paraview')
# 1612 plugins need older paraview
depends_on('paraview@:5.0.1', when='@1612+paraview')
# General patches
common = ['spack-Allwmake', 'README-spack']
assets = []
# Version-specific patches
patch('1612-spack-patches.patch', when='@1612')
# Some user config settings
# default: 'compile-option': 'RpathOpt',
# default: 'mplib': 'USERMPI', # Use user mpi for spack
config = {
# Add links into bin/, lib/ (eg, for other applications)
'link': False
}
# The openfoam architecture, compiler information etc
_foam_arch = None
# Content for etc/prefs.{csh,sh}
etc_prefs = {}
# Content for etc/config.{csh,sh}/ files
etc_config = {}
phases = ['configure', 'build', 'install']
build_script = './spack-Allwmake' # From patch() method.
#
# - End of definitions / setup -
#
def url_for_version(self, version):
# Prior to 'v1706' there was an additional '+' in the naming
fmt = self.list_url
if version <= Version('1612'):
fmt += 'v{0}+/OpenFOAM-v{0}+.tgz'
else:
fmt += 'v{0}/OpenFOAM-v{0}.tgz'
return fmt.format(version, version)
def setup_environment(self, spack_env, run_env):
"""Add environment variables to the generated module file.
These environment variables come from running:
.. code-block:: console
$ . $WM_PROJECT_DIR/etc/bashrc
"""
# NOTE: Spack runs setup_environment twice.
# 1) pre-build to set up the build environment
# 2) post-install to determine runtime environment variables
# The etc/bashrc is only available (with correct content)
# post-installation.
bashrc = join_path(self.projectdir, 'etc', 'bashrc')
minimal = True
if os.path.isfile(bashrc):
# post-install: source the installed bashrc
try:
mods = EnvironmentModifications.from_sourcing_file(
bashrc,
clean=True, # Remove duplicate entries
blacklist=[ # Blacklist these
# Inadvertent changes
# -------------------
'PS1', # Leave unaffected
'MANPATH', # Leave unaffected
# Unneeded bits
# -------------
'FOAM_SETTINGS', # Do not use with modules
'FOAM_INST_DIR', # Old
'FOAM_(APP|ETC|SRC|SOLVERS|UTILITIES)',
# 'FOAM_TUTORIALS', # can be useful
'WM_OSTYPE', # Purely optional value
# Third-party cruft - only used for orig compilation
# -----------------
'[A-Z].*_ARCH_PATH',
'(KAHIP|METIS|SCOTCH)_VERSION',
# User-specific
# -------------
'FOAM_RUN',
'(FOAM|WM)_.*USER_.*',
],
whitelist=[ # Whitelist these
'MPI_ARCH_PATH', # Can be needed for compilation
])
run_env.extend(mods)
minimal = False
tty.info('OpenFOAM bashrc env: {0}'.format(bashrc))
except Exception:
minimal = True
if minimal:
# pre-build or minimal environment
tty.info('OpenFOAM minimal env {0}'.format(self.prefix))
run_env.set('FOAM_PROJECT_DIR', self.projectdir)
run_env.set('WM_PROJECT_DIR', self.projectdir)
for d in ['wmake', self.archbin]: # bin added automatically
run_env.prepend_path('PATH', join_path(self.projectdir, d))
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
"""Location of the OpenFOAM project directory.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.
"""
spack_env.set('FOAM_PROJECT_DIR', self.projectdir)
@property
def projectdir(self):
"""Absolute location of project directory: WM_PROJECT_DIR/"""
return self.prefix # <- install directly under prefix
@property
def foam_arch(self):
if not self._foam_arch:
self._foam_arch = OpenfoamArch(self.spec, **self.config)
return self._foam_arch
@property
def archbin(self):
"""Relative location of architecture-specific executables"""
return join_path('platforms', self.foam_arch, 'bin')
@property
def archlib(self):
"""Relative location of architecture-specific libraries"""
return join_path('platforms', self.foam_arch, 'lib')
def patch(self):
"""Adjust OpenFOAM build for spack.
Where needed, apply filter as an alternative to normal patching."""
add_extra_files(self, self.common, self.assets)
# Avoid WM_PROJECT_INST_DIR for ThirdParty, site or jobControl.
#
# Filtering: bashrc,cshrc (using a patch is less flexible)
edits = {
'WM_THIRD_PARTY_DIR':
r'$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party',
}
rewrite_environ_files( # etc/{bashrc,cshrc}
edits,
posix=join_path('etc', 'bashrc'),
cshell=join_path('etc', 'cshrc'))
# Filtering: settings
edits = {
'FOAM_EXT_LIBBIN': '#SPACK: No separate third-party', # ie, unset
}
rewrite_environ_files( # etc/config.{csh,sh}/settings
edits,
posix=join_path('etc', 'config.sh', 'settings'),
cshell=join_path('etc', 'config.csh', 'settings'))
# The following filtering is non-vital. It simply prevents 'site' dirs
# from the wrong level (likely non-existent anyhow) from being
# added to PATH, LD_LIBRARY_PATH.
for rcdir in ['config.sh', 'config.csh']:
rcfile = join_path('etc', rcdir, 'settings')
if os.path.isfile(rcfile):
filter_file(
'WM_PROJECT_INST_DIR/',
'WM_PROJECT_DIR/',
rcfile,
backup=False)
def configure(self, spec, prefix):
"""Make adjustments to the OpenFOAM configuration files in their various
locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
don't properly fit get placed in the etc/prefs.sh file (similarly for
csh).
"""
# Filtering bashrc, cshrc
edits = {}
edits.update(self.foam_arch.foam_dict())
rewrite_environ_files( # etc/{bashrc,cshrc}
edits,
posix=join_path('etc', 'bashrc'),
cshell=join_path('etc', 'cshrc'))
# Content for etc/prefs.{csh,sh}
self.etc_prefs = {
# TODO
# 'CMAKE_ARCH_PATH': spec['cmake'].prefix,
# 'FLEX_ARCH_PATH': spec['flex'].prefix,
# 'ZLIB_ARCH_PATH': spec['zlib'].prefix,
}
# MPI content, using MPI_ARCH_PATH
user_mpi = mplib_content(spec, '${MPI_ARCH_PATH}')
# Content for etc/config.{csh,sh}/ files
self.etc_config = {
'CGAL': [
('BOOST_ARCH_PATH', spec['boost'].prefix),
('CGAL_ARCH_PATH', spec['cgal'].prefix),
('LD_LIBRARY_PATH',
foamAddLib(
pkglib(spec['boost'], '${BOOST_ARCH_PATH}'),
pkglib(spec['cgal'], '${CGAL_ARCH_PATH}'))),
],
'FFTW': [
('FFTW_ARCH_PATH', spec['fftw'].prefix), # Absolute
('LD_LIBRARY_PATH',
foamAddLib(
pkglib(spec['fftw'], '${FFTW_ARCH_PATH}'))),
],
# User-defined MPI
'mpi-user': [
('MPI_ARCH_PATH', spec['mpi'].prefix), # Absolute
('LD_LIBRARY_PATH', foamAddLib(user_mpi['libdir'])),
('PATH', foamAddPath(user_mpi['bindir'])),
],
'scotch': {},
'kahip': {},
'metis': {},
'ensight': {}, # Disable settings
'paraview': [],
'gperftools': [], # Currently unused
}
if '+scotch' in spec:
self.etc_config['scotch'] = {
'SCOTCH_ARCH_PATH': spec['scotch'].prefix,
# For src/parallel/decompose/Allwmake
'SCOTCH_VERSION': 'scotch-{0}'.format(spec['scotch'].version),
}
if '+kahip' in spec:
self.etc_config['kahip'] = {
'KAHIP_ARCH_PATH': spec['kahip'].prefix,
}
if '+metis' in spec:
self.etc_config['metis'] = {
'METIS_ARCH_PATH': spec['metis'].prefix,
}
if '+paraview' in spec:
pvMajor = 'paraview-{0}'.format(spec['paraview'].version.up_to(2))
self.etc_config['paraview'] = [
('ParaView_DIR', spec['paraview'].prefix),
('ParaView_INCLUDE_DIR', '${ParaView_DIR}/include/' + pvMajor),
('PV_PLUGIN_PATH', '$FOAM_LIBBIN/' + pvMajor),
('PATH', foamAddPath('${ParaView_DIR}/bin')),
]
# Optional
if '+mgridgen' in spec:
self.etc_config['mgridgen'] = {
'MGRIDGEN_ARCH_PATH': spec['parmgridgen'].prefix
}
# Optional
if '+zoltan' in spec:
self.etc_config['zoltan'] = {
'ZOLTAN_ARCH_PATH': spec['zoltan'].prefix
}
# Write prefs files according to the configuration.
# Only need prefs.sh for building, but install both for end-users
if self.etc_prefs:
write_environ(
self.etc_prefs,
posix=join_path('etc', 'prefs.sh'),
cshell=join_path('etc', 'prefs.csh'))
# Adjust components to use SPACK variants
for component, subdict in self.etc_config.items():
write_environ(
subdict,
posix=join_path('etc', 'config.sh', component),
cshell=join_path('etc', 'config.csh', component))
def build(self, spec, prefix):
"""Build using the OpenFOAM Allwmake script, with a wrapper to source
its environment first.
Only build if the compiler is known to be supported.
"""
self.foam_arch.has_rule(self.stage.source_path)
self.foam_arch.create_rules(self.stage.source_path, self)
args = ['-silent']
if self.parallel: # Build in parallel? - pass as an argument
args.append('-j{0}'.format(make_jobs))
builder = Executable(self.build_script)
builder(*args)
def install(self, spec, prefix):
"""Install under the projectdir"""
mkdirp(self.projectdir)
projdir = os.path.basename(self.projectdir)
# Filtering: bashrc, cshrc
edits = {
'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
}
# All top-level files, except spack build info and possibly Allwmake
if '+source' in spec:
ignored = re.compile(r'^spack-.*')
else:
ignored = re.compile(r'^(Allwmake|spack-).*')
files = [
f for f in glob.glob("*")
if os.path.isfile(f) and not ignored.search(f)
]
for f in files:
install(f, self.projectdir)
# Having wmake and ~source is actually somewhat pointless...
# Install 'etc' before 'bin' (for symlinks)
dirs = ['etc', 'bin', 'wmake']
if '+source' in spec:
dirs.extend(['applications', 'src', 'tutorials'])
for d in dirs:
install_tree(
d,
join_path(self.projectdir, d),
symlinks=True)
dirs = ['platforms']
if '+source' in spec:
dirs.extend(['doc'])
# Install platforms (and doc) skipping intermediate targets
ignored = ['src', 'applications', 'html', 'Guides']
for d in dirs:
install_tree(
d,
join_path(self.projectdir, d),
ignore=shutil.ignore_patterns(*ignored),
symlinks=True)
etc_dir = join_path(self.projectdir, 'etc')
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path(etc_dir, 'bashrc'),
cshell=join_path(etc_dir, 'cshrc'))
self.install_links()
def install_links(self):
"""Add symlinks into bin/, lib/ (eg, for other applications)"""
# Make build log visible - it contains OpenFOAM-specific information
with working_dir(self.projectdir):
os.symlink(
join_path('.spack', 'build.out'),
join_path('log.' + str(self.foam_arch)))
if not self.config['link']:
return
# ln -s platforms/linux64GccXXX/lib lib
with working_dir(self.projectdir):
if os.path.isdir(self.archlib):
os.symlink(self.archlib, 'lib')
# (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .)
with working_dir(join_path(self.projectdir, 'bin')):
for f in [
f for f in glob.glob(join_path('..', self.archbin, "*"))
if os.path.isfile(f)
]:
os.symlink(f, os.path.basename(f))
# -----------------------------------------------------------------------------
class OpenfoamArch(object):
"""OpenfoamArch represents architecture/compiler settings for OpenFOAM.
The string representation is WM_OPTIONS.
Keywords
label-size=[True] supports int32/int64
compile-option[=RpathOpt]
mplib[=USERMPI]
"""
#: Map spack compiler names to OpenFOAM compiler names
# By default, simply capitalize the first letter
compiler_mapping = {'intel': 'icc'}
def __init__(self, spec, **kwargs):
# Some user settings, to be adjusted manually or via variants
self.compiler = None # <- %compiler
self.arch_option = '64' # (32/64-bit on x86_64)
self.label_size = None # <- +int64
self.precision_option = 'DP' # <- +float32
self.compile_option = kwargs.get('compile-option', 'RpathOpt')
self.arch = None
self.options = None
self.rule = None
self.mplib = kwargs.get('mplib', 'USERMPI')
# Normally support WM_LABEL_OPTION, but not yet for foam-extend
if '+int64' in spec:
self.label_size = '64'
elif kwargs.get('label-size', True):
self.label_size = '32'
if '+float32' in spec:
self.precision_option = 'SP'
# spec.architecture.platform is like `uname -s`, but lower-case
platform = spec.architecture.platform
# spec.architecture.target is like `uname -m`
target = spec.architecture.target
if platform == 'linux':
if target == 'i686':
self.arch_option = '32' # Force consistency
elif target == 'x86_64':
if self.arch_option == '64':
platform += '64'
elif target == 'ia64':
platform += 'ia64'
elif target == 'armv7l':
platform += 'ARM7'
elif target == 'aarch64':
platform += 'ARM64'
elif target == 'ppc64':
platform += 'PPC64'
elif target == 'ppc64le':
platform += 'PPC64le'
elif platform == 'darwin':
if target == 'x86_64':
platform += 'Intel'
if self.arch_option == '64':
platform += '64'
# ... and others?
self.arch = platform
# Capitalized version of the compiler name, which usually corresponds
# to how OpenFOAM will camel-case things.
# Use compiler_mapping to handle special cases.
# Also handle special compiler options (eg, KNL)
comp = spec.compiler.name
if comp in self.compiler_mapping:
comp = self.compiler_mapping[comp]
comp = comp.capitalize()
if '+knl' in spec:
comp += 'KNL'
self.compiler = comp
self.rule = self.arch + self.compiler
# Build WM_OPTIONS
# ----
# WM_LABEL_OPTION=Int$WM_LABEL_SIZE
# WM_OPTIONS=$WM_ARCH$WM_COMPILER$WM_PRECISION_OPTION$WM_LABEL_OPTION$WM_COMPILE_OPTION
# or
# WM_OPTIONS=$WM_ARCH$WM_COMPILER$WM_PRECISION_OPTION$WM_COMPILE_OPTION
# ----
self.options = ''.join([
self.rule,
self.precision_option,
('Int' + self.label_size if self.label_size else ''),
self.compile_option])
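# For example, on linux x86_64 with gcc, double precision and 32-bit labels
# this yields WM_OPTIONS = 'linux64GccDPInt32RpathOpt'.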
def __str__(self):
return self.options
def __repr__(self):
return str(self)
def foam_dict(self):
"""Returns a dictionary for OpenFOAM prefs, bashrc, cshrc."""
return dict([
('WM_COMPILER', self.compiler),
('WM_ARCH_OPTION', self.arch_option),
('WM_LABEL_SIZE', self.label_size),
('WM_PRECISION_OPTION', self.precision_option),
('WM_COMPILE_OPTION', self.compile_option),
('WM_MPLIB', self.mplib),
])
def _rule_directory(self, projdir=None, general=False):
"""The wmake/rules/ compiler directory"""
if general:
relative = os.path.join('wmake', 'rules', 'General')
else:
relative = os.path.join('wmake', 'rules', self.rule)
if projdir:
return os.path.join(projdir, relative)
else:
return relative
def has_rule(self, projdir):
"""Verify that a wmake/rules/ compiler rule exists in the project
directory.
"""
# Insist on a wmake rule for this architecture/compiler combination
rule_dir = self._rule_directory(projdir)
if not os.path.isdir(rule_dir):
raise InstallError(
'No wmake rule for {0}'.format(self.rule))
if not re.match(r'.+Opt$', self.compile_option):
raise InstallError(
"WM_COMPILE_OPTION={0} is not type '*Opt'"
.format(self.compile_option))
return True
def create_rules(self, projdir, foam_pkg):
""" Create cRpathOpt,c++RpathOpt and mplibUSER,mplibUSERMPI
rules in the specified project directory.
The compiler rules are based on the respective cOpt,c++Opt rules
but with additional rpath information for the OpenFOAM libraries.
The rpath rules allow wmake to use spack information with minimal
modification to OpenFOAM.
The rpath is used for the installed libpath (continue to use
LD_LIBRARY_PATH for values during the build).
"""
# Note: the 'c' rules normally don't need rpath, since they are just
# used for statically linked wmake utilities, but left in anyhow.
# rpath for installed OpenFOAM libraries
rpath = '{0}{1}'.format(
foam_pkg.compiler.cxx_rpath_arg,
join_path(foam_pkg.projectdir, foam_pkg.archlib))
user_mpi = mplib_content(foam_pkg.spec)
rule_dir = self._rule_directory(projdir)
with working_dir(rule_dir):
# Compiler: copy existing cOpt,c++Opt and modify '*DBUG' value
for lang in ['c', 'c++']:
src = '{0}Opt'.format(lang)
dst = '{0}{1}'.format(lang, self.compile_option)
with open(src, 'r') as infile:
with open(dst, 'w') as outfile:
for line in infile:
line = line.rstrip()
outfile.write(line)
if re.match(r'^\S+DBUG\s*=', line):
outfile.write(' ')
outfile.write(rpath)
outfile.write('\n')
# MPI rules
for mplib in ['mplibUSER', 'mplibUSERMPI']:
with open(mplib, 'w') as out:
out.write("""# Use mpi from spack ({name})\n
PFLAGS = {FLAGS}
PINC = {PINC}
PLIBS = {PLIBS}
""".format(**user_mpi))
# -----------------------------------------------------------------------------
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/openfoam-com/package.py
|
Python
|
lgpl-2.1
| 33,458
|
[
"ParaView"
] |
f0d5a3d50338f022cee5e2c824477882bf832b49cc069f2926717bf61f6da812
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1950-2020/0.5x0.5/combined_sources_SO2_high_1950-2020.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i126: SO2 low level surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i126'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='SO2_high'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
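# (AreaWeighted regridding needs cell bounds and matching coordinate systems
# on both grids, hence the guess_bounds() and coord_system assignments above)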
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='SO2 high level emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_sulfur_dioxide_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='high_level'
ocube.attributes['highest_level']='8'
ocube.attributes['lowest_level']='8'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_SO2_high_1950-2020.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of sulfur dioxide from 1950 to 2020 (from selected anthropogenic source sectors only)'
ocube.attributes['File_version']='v3'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1950-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945, 21975, 22005, 22035, 22065, 22095, 22125, 22155, 22185,
22215, 22245, 22275, 22305, 22335, 22365, 22395, 22425, 22455, 22485,
22515, 22545, 22575, 22605, 22635, 22665, 22695, 22725, 22755, 22785,
22815, 22845, 22875, 22905, 22935, 22965, 22995, 23025, 23055, 23085,
23115, 23145, 23175, 23205, 23235, 23265, 23295, 23325, 23355, 23385,
23415, 23445, 23475, 23505, 23535, 23565, 23595, 23625, 23655, 23685,
23715, 23745, 23775, 23805, 23835, 23865, 23895, 23925, 23955, 23985,
24015, 24045, 24075, 24105, 24135, 24165, 24195, 24225, 24255, 24285,
24315, 24345, 24375, 24405, 24435, 24465, 24495, 24525, 24555, 24585,
24615, 24645, 24675, 24705, 24735, 24765, 24795, 24825, 24855, 24885,
24915, 24945, 24975, 25005, 25035, 25065, 25095, 25125, 25155, 25185,
25215, 25245, 25275, 25305, 25335, 25365, 25395, 25425, 25455, 25485,
25515, 25545 ])
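# (Editor note, illustration only and not part of the original script: the
#  explicit list above is simply every 30 days from day 15 in the 360-day
#  calendar, i.e. 852 mid-month values covering 1950-2020, so it could
#  equivalently be generated as
#      ocube.coord(axis='t').points = numpy.arange(15.0, 30.0 * 852, 30.0)
#  The hard-coded list is kept as the authoritative source.)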
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945, 21975, 22005, 22035, 22065, 22095, 22125, 22155, 22185,
22215, 22245, 22275, 22305, 22335, 22365, 22395, 22425, 22455, 22485,
22515, 22545, 22575, 22605, 22635, 22665, 22695, 22725, 22755, 22785,
22815, 22845, 22875, 22905, 22935, 22965, 22995, 23025, 23055, 23085,
23115, 23145, 23175, 23205, 23235, 23265, 23295, 23325, 23355, 23385,
23415, 23445, 23475, 23505, 23535, 23565, 23595, 23625, 23655, 23685,
23715, 23745, 23775, 23805, 23835, 23865, 23895, 23925, 23955, 23985,
24015, 24045, 24075, 24105, 24135, 24165, 24195, 24225, 24255, 24285,
24315, 24345, 24375, 24405, 24435, 24465, 24495, 24525, 24555, 24585,
24615, 24645, 24675, 24705, 24735, 24765, 24795, 24825, 24855, 24885,
24915, 24945, 24975, 25005, 25035, 25065, 25095, 25125, 25155, 25185,
25215, 25245, 25275, 25305, 25335, 25365, 25395, 25425, 25455, 25485,
25515, 25545 ], dtype='float64')
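# (Editor note: frt deliberately repeats the time coordinate points set above;
#  reusing ocube.coord(axis='t').points.copy() would avoid the duplication.
#  This is an observation only, not a change to the original workflow.)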
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be an unlimited (concatenatable) dimension, as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
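# (Editor note, hedged: iris normally rejects 'missing_value' as a cube
#  attribute key, which is presumably why the base dict setter is used here
#  to bypass that check.)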
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name','highest_level','lowest_level'])
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1950-2020/regrid_SO2_high_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 19,293
|
[
"NetCDF"
] |
41dea4b978444291e784df3fe32a10cdfff619cd53a98b12340b419fa14ee3e8
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 16:21:06 2017
@author: Gerardo A. Rivera Tello
"""
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib as mpl
from osgeo import gdal,osr
import numpy as np
import os
#%%
def plot_map(data,lat,lon,band=None,title=None,savefig=None,name='image'):
fig, axis = plt.subplots(figsize=(10,20))
m = Basemap(projection = 'cyl', resolution = 'l',
llcrnrlat=lat.min()-1,urcrnrlat=lat.max()+1,
llcrnrlon=lon.min()-1, urcrnrlon=lon.max()+1)
m.drawcoastlines(linewidth = 0.5)
m.drawcountries()
m.drawparallels(np.arange(-90.0,90.0,2.0), labels = [1,0,0,0])
m.drawmeridians(np.arange(-180.0,180.0,2.0), labels = [0,0,0,0],linewidth=0.5)
m.drawmeridians(np.arange(-180.0,180.0,10.0), labels = [0,0,0,1],linewidth=0.5)
x, y =m(lon, lat)
    if band is None:
mmap=m.pcolormesh(x, y, data, vmin=data.min(),vmax=data.max(),cmap=plt.cm.bwr)
else:
mmap=m.pcolormesh(x, y, data[band], vmin=data.min(),vmax=data.max(),cmap=plt.cm.bwr)
cbar = m.colorbar(mmap,location='bottom',size='10%',pad='15%')
cbar.set_label('°C')
    if title is not None:
axis.set_title(title)
    if savefig is not None:
fig.savefig("{}.png".format(name),dpi=1000,bbox_inches='tight')
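# Example usage (editor illustration only; the array names below are hypothetical):
#   plot_map(temp_grid, lat2d, lon2d, title='Surface temperature', savefig=True, name='temp_map')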
#%%
def plot_data(data,cbar=0,save_img=0,name='image',norm = 0):
plot,axs = plt.subplots()
if norm == 1:
norm = mpl.colors.Normalize(vmin=-0.5, vmax=0.5)
cmap = mpl.cm.get_cmap('jet')
raw_data = axs.imshow(data,interpolation="gaussian",cmap=cmap,norm=norm)
else:
raw_data = axs.imshow(data,interpolation="gaussian",cmap='Greens')
if cbar == 1:
cbar = plot.colorbar(raw_data)
if save_img == 1:
plt.savefig("{}.png".format(name),dpi=1000,bbox_inches='tight')
#%%
def metadata_as_dict(metadata_txt):
    print("\nStarting to read the metadata")
meta = {}
with open(metadata_txt) as txt:
for line in txt:
(key, val) = line.split(None,1)
meta[key] = val[:-1]
    print("Done\n")
return meta
def filter_cloud_flag(quality):
    print("Computing the cloud mask")
    flags = gdal.Open(quality).ReadAsArray()
    cloud_mask = flags & 3
    land = flags & 8
    # floor division keeps the result binary: 1 where the land bit is set and the cloud bits are zero
    land = np.abs(land - cloud_mask) // 8
    flags = None
    print("Done\n")
return land
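# (Editor note, hedged: the bit arithmetic above appears to assume the SPOT
#  VEGETATION status-map layout in which bits 0-1 carry the cloud flag and
#  bit 3 the land flag, so the returned mask selects cloud-free land pixels.)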
def create_tiff(raster_out,meta,data,quality):
driver = gdal.GetDriverByName('GTiff')
res = float(meta['MAP_PROJ_RESOLUTION'])
outRaster = driver.Create(raster_out, data.shape[1], data.shape[0], 1, gdal.GDT_Byte,['INTERLEAVE=BAND'])
outRaster.SetGeoTransform((float(meta['CARTO_UPPER_LEFT_X'])-res/2, res, 0, float(meta['CARTO_UPPER_LEFT_Y'])+res/2, 0, -res))
outband = outRaster.GetRasterBand(1)
data *= quality
outband.WriteArray(data)
outband.SetDescription('NDVI_DATA')
outRaster.SetMetadata(meta)
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(4326)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
def main(data_hdf,quality_hdf,metadata_txt,path=os.getcwd()):
data = gdal.Open(data_hdf).ReadAsArray().astype(np.int)
# data = data*0.004-0.1
create_tiff(os.path.join(path,'{}.tif'.format(data_hdf[:-8])),metadata_as_dict(metadata_txt),data,filter_cloud_flag(quality_hdf))
    print('GeoTIFF generated')
data = None
#%%
if __name__ == '__main__':
dir_files = [files for files in os.listdir(os.getcwd())]
hdf_files = [hdf for hdf in dir_files if hdf.endswith('.HDF')]
txt_files = [txt for txt in dir_files if txt.endswith('_LOG.TXT')]
if 'NDV' in hdf_files[0]:
main(hdf_files[0],hdf_files[1],txt_files[0])
else:
main(hdf_files[1],hdf_files[0],txt_files[0])
|
DangoMelon0701/PyRemote-Sensing
|
SPOT Vegetation/combine_files.py
|
Python
|
mit
| 3,849
|
[
"Gaussian"
] |
67cb58df9640fa38e90d8a0b35758e21bcf93e78df176fee0f4b1d957358424f
|
#!/usr/bin/env python
# Test Harness Tools
from sklearn import preprocessing
from sklearn import cross_validation
# Classifiers to spot check
from sklearn import linear_model
from sklearn import svm
from sklearn import tree
from sklearn import neural_network
from sklearn import naive_bayes
from sknn import mlp
# Other Useful Dependencies
import numpy
import arff
from tabulate import tabulate
from operator import itemgetter
# ------------------------------------------------------------------------------
# Dataset Loading
# ------------------------------------------------------------------------------
def createLabelValueMapping(labelTypes):
vals = {}
count = 0
    # Sort the given label types so the mapping is identical whenever the
    # same types are supplied across multiple runs
for label in sorted(labelTypes):
vals[label] = count
count += 1
return vals
def assignValuesToLabels(rawLabels, labelValueMapping):
return [ labelValueMapping[l] for l in rawLabels ]
def loadArffDataset(filename, normalise, displayData=False):
with open(filename) as f:
data = arff.load(f)
    # rows is needed below even when displayData is False
    rows = data['data']
    if displayData:
        attributeHeaders = [ attr[0] for attr in data['attributes'] ]
        print tabulate(rows, headers=attributeHeaders)
# Assign each label a numerical value
# (required by most classifiers)
rawLabels = [ item[-1] for item in data['data'] ]
labelValueMapping = createLabelValueMapping(set(rawLabels))
# Use mapping to convert labels to a number (for sklearn)
labelValues = assignValuesToLabels(rawLabels, labelValueMapping)
# Structure input/label data in a format sklearn understands
featureVecs = numpy.array([ item[:-1] for item in rows ]) # features
labels = numpy.array(labelValues)
numInputFeatures = len(data['attributes'])
numLabelTypes = len(labelValueMapping)
if normalise:
featureVecs = preprocessing.normalize(featureVecs)
return featureVecs, labels, numInputFeatures, numLabelTypes
# ------------------------------------------------------------------------------
# Evaluation
# ------------------------------------------------------------------------------
def evaluateClassifiers(classifiers, featureVecs, labels, kFolds):
results = {}
for name, classifier in classifiers.items():
results[name] = cross_validation.cross_val_score(
classifier, featureVecs, labels, cv=kFolds)
return results
def computeOverallScores(results):
overallScores = []
for clsName, scores in results.items():
mean = scores.mean()
confidenceInterval = scores.std() * 2
worstCase = mean - confidenceInterval
overallScores.append( [clsName, mean, confidenceInterval, worstCase] )
# Sort by mean score descending before returning
overallScores.sort(key=itemgetter(1), reverse=True)
return overallScores
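# (Editor illustration: each entry of the returned list has the form
#  [classifier_name, mean_accuracy, confidence_interval, worst_case], e.g.
#  ['SVM', 0.91, 0.05, 0.86] -- the numbers shown here are made up.)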
# ------------------------------------------------------------------------------
# Test Harness
# ------------------------------------------------------------------------------
if __name__ == '__main__':
# Load dataset
featureVecs, labels, numFeatures, numLabelTypes = loadArffDataset(
'data/faces_vegetables_dataset.arff',
normalise=True,
displayData=True)
# Construct all classifiers we wish to test, with 'standard' parameters
classifiers = {
'SVM':
svm.SVC(kernel='linear', C=1),
'Decision Tree':
tree.DecisionTreeClassifier(criterion='gini', splitter='best'),
'Feed-Forward Neural Network (Sigmoid)':
mlp.Classifier(layers=[
mlp.Layer('Sigmoid', units=numFeatures),
mlp.Layer('Sigmoid', units=numLabelTypes),
],
n_iter=100),
'Gaussian Naive Bayes':
naive_bayes.GaussianNB(),
'Multi-Nomial Naive Bayes':
naive_bayes.MultinomialNB(),
'Bernoulli Naive Bayes':
naive_bayes.BernoulliNB(),
}
# Test classifiers and compute their mean scores
results = evaluateClassifiers(classifiers, featureVecs, labels, 10)
scores = computeOverallScores(results)
# Output scores in tabular format
    # Note that the overall scores list is already sorted from highest mean
# score to lowest
print tabulate(
scores,
headers=['Classifier', 'Mean Acc.', 'Conf. Interval', 'Worst Acc.'])
|
DonaldWhyte/intro-to-ml
|
demo/spotcheck_facesvegetables.py
|
Python
|
mit
| 4,593
|
[
"Gaussian"
] |
646787b9e77d88269c8f490b874791782513db1f00c91207b24588f5ce31276f
|
ABERRANT_PLURAL_MAP = {
'appendix': 'appendices',
'barracks': 'barracks',
'cactus': 'cacti',
'child': 'children',
'criterion': 'criteria',
'deer': 'deer',
'echo': 'echoes',
'elf': 'elves',
'embargo': 'embargoes',
'focus': 'foci',
'fungus': 'fungi',
'goose': 'geese',
'hero': 'heroes',
'hoof': 'hooves',
'index': 'indices',
'knife': 'knives',
'leaf': 'leaves',
'life': 'lives',
'man': 'men',
'mouse': 'mice',
'nucleus': 'nuclei',
'person': 'people',
'phenomenon': 'phenomena',
'potato': 'potatoes',
'self': 'selves',
'syllabus': 'syllabi',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'woman': 'women',
}
VOWELS = set('aeiou')
def pluralize(singular):
"""
Taken from ActiveState recipe
http://code.activestate.com/recipes/577781-pluralize-word-convert-singular-word-to-its-plural/
Original code follows:
Return plural form of given lowercase singular word (English only). Based on
ActiveState recipe http://code.activestate.com/recipes/413172/
>>> pluralize('')
''
>>> pluralize('goose')
'geese'
>>> pluralize('dolly')
'dollies'
>>> pluralize('genius')
'genii'
>>> pluralize('jones')
'joneses'
>>> pluralize('pass')
'passes'
>>> pluralize('zero')
'zeros'
>>> pluralize('casino')
'casinos'
>>> pluralize('hero')
'heroes'
>>> pluralize('church')
'churches'
>>> pluralize('x')
'xs'
>>> pluralize('car')
'cars'
"""
if not singular:
return ''
plural = ABERRANT_PLURAL_MAP.get(singular)
if plural:
return plural
root = singular
try:
if singular[-1] == 'y' and singular[-2] not in VOWELS:
root = singular[:-1]
suffix = 'ies'
elif singular[-1] == 's':
if singular[-2] in VOWELS:
if singular[-3:] == 'ius':
root = singular[:-2]
suffix = 'i'
else:
root = singular[:-1]
suffix = 'ses'
else:
suffix = 'es'
elif singular[-2:] in ('ch', 'sh'):
suffix = 'es'
else:
suffix = 's'
except IndexError:
suffix = 's'
plural = root + suffix
return plural
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Govexec/django-odd-utilities
|
odd_utilities/text_utilities.py
|
Python
|
mit
| 2,465
|
[
"CASINO"
] |
ebf19f8f5dcd1e50182500e8115b835a84f4a8d402627fe5b5cf1dc518c233b4
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#####################################################################################
"""
Program functions:
1. Compute gene expression levels
2. randCheck_gene
3. randCheck_mRNA
Design approach:
Statistics are computed with the gffutils and HTSeq packages
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
import signal
from matplotlib import pyplot
#sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../")
# print(sys.path)
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = "/".join(scriptPath.split("/")[0:-2]) # absolute bin path
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../")
from ablib.utils.tools import *
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option('-g', '--gff', dest='gff', action='store', type='string', help='gff file,do not have to provide it if db is exited')
p.add_option('-d', '--db', dest='db', default='gffdb', action='store', type='string', help='the gff database file to create or use')
p.add_option('-b', '--bamorbed', dest='bamorbed', action='store', type='string', help='bam or bed file, Important: the bamfile\'s suffix must be ".bam"')
p.add_option('-o', '--outfile', dest='outfile', default='Mapping_distribution.txt', action='store', type='string', help='gene expression file')
p.add_option('-n', '--samplename', dest='samplename', default='', action='store', type='string', help='sample name,default is ""')
p.add_option('-m', '--mapinfo', dest='mapinfo', default='', action='store', type='string', help='output which region peak is located on')
p.add_option('-u', '--unstrand', dest='unstrand', default=False, action='store_true', help='unstrand library,antisense will not be considered.')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option('-O', '--outDir', dest='outDir', default='./', action='store', type='string', help='output directory', metavar="DIR")
group.add_option('-L', '--logDir', dest='logDir', default='', action='store', type='string', help='log dir ,default is same as outDir')
group.add_option('-P', '--logPrefix', dest='logPrefix', default='', action='store', type='string', help='log file prefix')
group.add_option('-E', '--email', dest='email', default='none', action='store', type='string', help='email address, if you want get a email when this job is finished,default is no email', metavar="EMAIL")
group.add_option('-Q', '--quiet', dest='quiet', default=False, action='store_true', help='do not print messages to stdout')
group.add_option('-K', '--keepTemp', dest='keepTemp', default=False, action='store_true', help='keep temp dir')
group.add_option('-T', '--test', dest='isTest', default=False, action='store_true', help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
    """Assemble the complete command string
    """
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sample = ""
if opt.samplename != "":
sample = opt.samplename + '_'
if opt.outfile == 'Mapping_distribution.txt':
opt.outfile = sample + opt.outfile
intype = "bam"
match = re.search(r'\.bam$', opt.bamorbed)
if not match:
intype = "bed"
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = "/".join(scriptPath.split("/")[0:-1]) # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
#os.mkdir(outPath) if not os.path.isdir(outPath) else None
os.system('mkdir -p ' + outPath)
logPath = os.path.abspath(opt.logDir)
#os.mkdir(logPath) if not os.path.isdir(logPath) else None
os.system('mkdir -p ' + logPath)
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M', filename=logFilename, filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("Timer: Program started at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# def invert_strand(iv):
# """
# :param iv: HTSeq.GenomicInterval object
# :return: HTSeq.GenomicInterval - strand is reversed
# """
# iv2 = iv.copy()
# if iv2.strand == "+":
# iv2.strand = "-"
# elif iv2.strand == "-":
# iv2.strand = "+"
# else:
# raise ValueError, "Illegal strand"
# return iv2
def getTotalBase(iv, coverage):
totalbases = 0
for iv2, value2 in coverage[iv].steps():
if value2 > 0:
totalbases += value2 * iv2.length
return totalbases
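# (Editor note: getTotalBase sums value * interval length over the coverage
#  steps that overlap iv, i.e. the depth-weighted number of covered bases.)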
# @profile
def readChrwithBam(chr, reads):
print(chr)
reads_dict = {}
anti_dict = {}
db = gffutils.FeatureDB(opt.db)
bamfile = HTSeq.BAM_Reader(opt.bamorbed)
usedreads = {}
forward_end = 0
i = 0
    ## mapping statistics
genes = ('gene','lincRNA_gene','miRNA_gene','mt_gene','processed_pseudogene','pseudogene','rRNA_gene','snoRNA_gene','snRNA_gene')
trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
exons = ('three_prime_UTR', 'five_prime_UTR', 'CDS', 'exon')
for gene in db.features_of_type(genes, seqid=chr, order_by='start'):
# print(gene)
## gene info
# if not gene.seqid == chr:
# continue
gene_id = gene.id
gene_strand = gene.strand
gene_start = gene.start
gene_end = gene.end
# if gene_start-forward_end>2500000:
# usedreads.clear()
forward_end = gene_end
reads_dict[gene_id] = {}
for e in exons:
reads_dict[gene_id][e] = 0
reads_dict[gene_id]['intron'] = 0
reads_dict[gene_id]['sense'] = 0
reads_dict[gene_id]['antisense'] = 0
reads_dict[gene_id]['noncoding_exon'] = 0
# gene_iv = HTSeq.GenomicInterval(chr, gene.start - 1, gene.end, gene.strand)
# for r in bamfile[gene_iv]:
# r_name = r.read.name
# if not r.aligned:
# continue
#
# if not opt.unstrand:
# if r.iv.strand == gene_strand:
# if usedreads.has_key(r_name):
# if anti_dict.has_key(r_name):
# gid = anti_dict[r_name]
# reads_dict[gid]['antisense'] -= 1
# anti_dict.pop(r_name)
# usedreads.pop(r_name)
# else:
# continue
# reads_dict[gene_id]['sense'] += 1
# else:
# if usedreads.has_key(r_name):
# continue
# reads_dict[gene_id]['antisense'] += 1
# anti_dict[r_name] = gene_id
# usedreads[r_name] = ""
# else:
# if usedreads.has_key(r_name):
# continue
# reads_dict[gene_id]['sense'] += 1
gas = HTSeq.GenomicArrayOfSets([chr], stranded=False)
for isoform in db.children(gene_id, level=1):
for gu in db.children(isoform.id, level=1, featuretype=exons):
gu_type = gu.featuretype
gu_start = gu.start
gu_end = gu.end
gu_strand = gu.strand
gu_iv = HTSeq.GenomicInterval(chr, gu_start - 1, gu_end, gu_strand)
gas[gu_iv] += gu_type
for isoform in db.children(gene_id, level=1):
# gas = HTSeq.GenomicArrayOfSets([chr], stranded=False)
isoform_iv = HTSeq.GenomicInterval(chr, isoform.start - 1, isoform.end, isoform.strand)
# for gu in db.children(isoform.id, level=1, featuretype=exons):
# gu_type = gu.featuretype
# gu_start = gu.start
# gu_end = gu.end
# gu_strand = gu.strand
# gu_iv = HTSeq.GenomicInterval(chr, gu_start - 1, gu_end, gu_strand)
# gas[gu_iv] += gu_type
for r in bamfile[isoform_iv]:
r_name = r.read.name
if r.paired_end:
r_name = r.read.name + r.pe_which
if not r.aligned:
continue
if not opt.unstrand:
# if r.iv.strand == gene_strand:
if ((r.iv.strand == isoform_iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand == isoform_iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand != isoform_iv.strand and r.pe_which == "second")):
if usedreads.has_key(r_name):
if anti_dict.has_key(r_name):
gid = anti_dict[r_name]
reads_dict[gid]['antisense'] -= 1
anti_dict.pop(r_name)
usedreads.pop(r_name)
else:
continue
reads_dict[gene_id]['sense'] += 1
else:
if usedreads.has_key(r_name):
continue
reads_dict[gene_id]['antisense'] += 1
anti_dict[r_name] = gene_id
usedreads[r_name] = ""
else:
if usedreads.has_key(r_name):
continue
reads_dict[gene_id]['sense'] += 1
if usedreads.has_key(r_name):
continue
else:
usedreads[r_name] = ""
# print("no")
r_len = len(r.read)
iv_seq = (co.ref_iv for co in r.cigar if co.type == "M" and co.size > 0)
for iv in iv_seq:
# print(iv)
for iv2, fs in gas[iv].steps():
iv_len = iv2.length
if len(fs) == 0:
reads_dict[gene_id]['intron'] += float(iv_len) / r_len
elif len(fs) == 1 and list(fs)[0] == "exon":
reads_dict[gene_id]['noncoding_exon'] += float(iv_len) / r_len
elif len(fs) >= 1 and "CDS" in list(fs):
reads_dict[gene_id]['CDS'] += float(iv_len) / r_len
elif len(fs) >= 1 and "five_prime_UTR" in list(fs) and "three_prime_UTR" in list(fs) :
for s in list(fs):
if s == "exon":
continue
else:
reads_dict[gene_id][s] += float(iv_len) / r_len / 2
elif len(fs) >= 1 :
for s in list(fs):
if s == "exon":
continue
else:
reads_dict[gene_id][s] += float(iv_len) / r_len
i += 1
if i > 0 and i % 1000 == 0:
sys.stderr.write("%s : %d gene processed.\n" % (chr, i))
# if i==400:
# break
reads[chr] = reads_dict.copy()
del reads_dict
logging.info("done %s" % chr)
def readChrwithBed(chr, reads, peaks):
print(chr)
reads_dict = {}
peaks_dict = {}
anti_dict = {}
genes = ('gene','lincRNA_gene','miRNA_gene','mt_gene','processed_pseudogene','pseudogene','rRNA_gene','snoRNA_gene','snRNA_gene')
trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
exons = ('three_prime_UTR', 'five_prime_UTR', 'CDS', 'exon')
db = gffutils.FeatureDB(opt.db)
bedfile = HTSeq.BED_Reader(opt.bamorbed)
bedga = HTSeq.GenomicArrayOfSets([chr], stranded=False)
n = 0
bed_dict = {}
for r in bedfile:
if r.iv.chrom != chr:
continue
# if r.name == "--":
# r.name = r.iv.chrom + "\t" + str(r.iv.start) + "\t" + str(
# r.iv.end) + "\t" + r.name + "\t" + str(r.score) + "\t" + r.iv.strand
# r.name = r.iv.chrom + "\t" + str(r.iv.start) + "\t" + str(
# r.iv.end) + "\t" + r.name + "\t" + str(r.score) + "\t" + r.iv.strand
r.name = r.line
# print(r.line)
n += 1
bed_dict[str(n)] = r
bedga[r.iv] += str(n)
peaks_dict[r.name] = {}
for e in exons:
peaks_dict[r.name][e] = 0
peaks_dict[r.name]['intron'] = 0
peaks_dict[r.name]['sense'] = 0
peaks_dict[r.name]['antisense'] = 0
peaks_dict[r.name]['noncoding_exon'] = 0
peaks_dict[r.name]['gene'] = "--"
usedreads = {}
forward_end = 0
i = 0
for gene in db.features_of_type(genes, seqid=chr, order_by='start'):
# print(gene)
## gene info
# if not gene.seqid == chr:
# continue
gene_id = gene.id
gene_strand = gene.strand
gene_start = gene.start
gene_end = gene.end
# if gene_start-forward_end>2500000:
# usedreads.clear()
forward_end = gene_end
        ## mapping statistics
reads_dict[gene_id] = {}
for e in exons:
reads_dict[gene_id][e] = 0
reads_dict[gene_id]['intron'] = 0
reads_dict[gene_id]['sense'] = 0
reads_dict[gene_id]['antisense'] = 0
reads_dict[gene_id]['noncoding_exon'] = 0
gene_iv = HTSeq.GenomicInterval(chr, gene.start - 1, gene.end, gene.strand)
bfs = set()
for biv, fs in bedga[gene_iv].steps():
bfs = bfs.union(fs)
# for n in bfs:
# bed = bed_dict[n]
# bedname = bed.name
# bediv = bed.iv
# if not opt.unstrand:
# if bediv.strand == gene_strand:
# if usedreads.has_key(bedname):
# if anti_dict.has_key(bedname):
# gid = anti_dict[bedname]
# reads_dict[gid]['antisense'] -= 1
# peaks_dict[bedname]['antisense'] = 0
# anti_dict.pop(bedname)
# usedreads.pop(bedname)
# else:
# continue
# peaks_dict[bedname]['gene'] = gene_id
# reads_dict[gene_id]['sense'] += 1
# else:
# if usedreads.has_key(bedname):
# continue
# reads_dict[gene_id]['antisense'] += 1
# peaks_dict[bedname]['antisense'] = 1
# anti_dict[bedname] = gene_id
# peaks_dict[bedname]['gene'] = gene_id
# usedreads[bedname] = ""
# else:
# if usedreads.has_key(bedname):
# continue
# peaks_dict[bedname]['gene'] = gene_id
# reads_dict[gene_id]['sense'] += 1
for isoform in db.children(gene_id, level=1):
gas = HTSeq.GenomicArrayOfSets([chr], stranded=False)
isoform_iv = HTSeq.GenomicInterval(chr, isoform.start - 1, isoform.end, isoform.strand)
for gu in db.children(isoform.id, level=1, featuretype=exons):
gu_type = gu.featuretype
gu_start = gu.start
gu_end = gu.end
gu_strand = gu.strand
gu_iv = HTSeq.GenomicInterval(chr, gu_start - 1, gu_end, gu_strand)
gas[gu_iv] += gu_type
bfs = set()
for biv, fs in bedga[isoform_iv].steps():
bfs = bfs.union(fs)
for n in bfs:
bed = bed_dict[n]
bedname = bed.name
bediv = bed.iv
if not opt.unstrand:
if bediv.strand == gene_strand:
if usedreads.has_key(bedname):
if anti_dict.has_key(bedname):
gid = anti_dict[bedname]
reads_dict[gid]['antisense'] -= 1
peaks_dict[bedname]['antisense'] = 0
anti_dict.pop(bedname)
usedreads.pop(bedname)
else:
continue
peaks_dict[bedname]['gene'] = gene_id
reads_dict[gene_id]['sense'] += 1
else:
if usedreads.has_key(bedname):
continue
reads_dict[gene_id]['antisense'] += 1
peaks_dict[bedname]['antisense'] = 1
anti_dict[bedname] = gene_id
peaks_dict[bedname]['gene'] = gene_id
usedreads[bedname] = ""
else:
if usedreads.has_key(bedname):
continue
peaks_dict[bedname]['gene'] = gene_id
reads_dict[gene_id]['sense'] += 1
if usedreads.has_key(bedname):
continue
else:
usedreads[bedname] = ""
r_len = bediv.length
for iv, fs in gas[bediv].steps():
iv_len = iv.length
if len(fs) == 0:
reads_dict[gene_id]['intron'] += float(iv_len) / r_len
peaks_dict[bedname]['intron'] += float(iv_len) / r_len
elif len(fs) == 1 and list(fs)[0] == "exon":
reads_dict[gene_id]['noncoding_exon'] += float(iv_len) / r_len
peaks_dict[bedname]['noncoding_exon'] += float(iv_len) / r_len
elif len(fs) >= 1 and "CDS" in list(fs):
reads_dict[gene_id]['CDS'] += float(iv_len) / r_len
peaks_dict[bedname]['CDS'] += float(iv_len) / r_len
elif len(fs) >= 1:
for s in list(fs):
if s == "exon":
continue
else:
reads_dict[gene_id][s] += float(iv_len) / r_len
peaks_dict[bedname][s] += float(iv_len) / r_len
i += 1
if i > 0 and i % 1000 == 0:
sys.stderr.write("%s : %d gene processed.\n" % (chr, i))
# if i==400:
# break
reads[chr] = reads_dict.copy()
peaks[chr] = peaks_dict.copy()
del reads_dict
del peaks_dict
logging.info("done %s" % chr)
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
if opt.gff:
db = gffutils.create_db(opt.gff, opt.db, merge_strategy="create_unique", verbose=False, force=True)
db = gffutils.FeatureDB(opt.db)
TMR = 0
bedtitle = ""
if intype == "bam":
if not os.path.isfile(opt.bamorbed + '.bai'):
os.system("samtools index " + opt.bamorbed)
TMR = getBamReadsNumber(opt.bamorbed,model='single')
print(TMR)
else:
bedtitle = getTitle(opt.bamorbed)
for line in open(opt.bamorbed):
if line.startswith("#") or line.startswith("track") or line.startswith("\n"):
continue
TMR += 1
print(TMR)
# for chr in db.seqids():
# # print(chr)
# reads2={}
# peaks2={}
# reads2[chr] = {}
# readChrwithBed(chr, reads2, peaks2)
Watcher()
pool = multiprocessing.Pool(processes=15)
server = multiprocessing.Manager()
reads = server.dict()
peaks = server.dict()
if intype == "bam":
chr_dict = readBamHeader(opt.bamorbed)
for chr in db.seqids():
if not chr in chr_dict:
continue
# print(chr)
reads[chr] = {}
# runjobs(readChrwithBam,arglist,10)
pool.apply_async(readChrwithBam, args=(chr, reads))
# pool.apply_async(func, (chr,))
else:
for chr in db.seqids():
# print(chr)
reads[chr] = {}
pool.apply_async(readChrwithBed, args=(chr, reads, peaks))
pool.close()
pool.join()
d = dict(reads).copy()
p = dict(peaks).copy()
server.shutdown()
types = ('three_prime_UTR', 'five_prime_UTR', 'CDS', 'noncoding_exon', 'intron')
ori = ('sense', 'antisense')
total_dict = {}
for k in types:
total_dict[k] = 0
for k in ori:
total_dict[k] = 0
total_dict["intergenic"] = 0
for chr in d:
# print(chr)
for gene in d[chr]:
# print(gene)
for t in types:
total_dict[t] += d[chr][gene][t]
for o in ori:
total_dict[o] += d[chr][gene][o]
print(total_dict["sense"])
# total_dict["intergenic"] = TMR - total_dict["sense"] - total_dict["antisense"]
total_dict["intergenic"] = TMR - total_dict["three_prime_UTR"] - total_dict["five_prime_UTR"] - total_dict["CDS"] - total_dict["noncoding_exon"] - total_dict["intron"] - total_dict["antisense"]
if total_dict["intergenic"] < 0:
total_dict["intergenic"] = 0
os.chdir(opt.outDir)
fout = open(opt.outfile, 'w')
# fout.writelines("+Type\tReads\n")
    fout.writelines("+Type\tReads\tFraction\n") ##2016.09.12 modified
    Percent = 0 ##2016.09.12 modified
for k, v in total_dict.items():
if k == "sense":
continue
if opt.unstrand and k == "antisense":
continue
        Percent = '{:.2f}'.format(int(v)*100/float(TMR)) ##2016.09.12 modified
fout.writelines(k + '\t' + str(int(v)) + '\t' + str(Percent) + '%' + '\n')
# fout.writelines("%s\t%s\n" % (k, int(v)))
fout.close()
cmd = "cd " + outPath + "&& Rscript " + binPath + "/plot/Bar_single_Mapping_distribution.r -f " + opt.outfile + " -t " + sample + "Mapping_distribution -n " + sample + "Mapping_distribution -o ./ \n"
os.system(cmd)
if opt.mapinfo != "":
w = open(opt.mapinfo, 'w')
w.writelines(bedtitle + "\tregionInfo\tGeneID\n")
for chr in p:
for peak in p[chr]:
w.writelines(peak + "\t")
pflag = 0
for t in types:
if p[chr][peak][t] > 0:
w.writelines(t + ":" + str(round(p[chr][peak][t], 2)) + ";")
pflag = 1
if p[chr][peak]["antisense"] > 0:
# w.writelines("antisense:" + str(round(p[chr][peak]["antisense"], 2)) + ";")
w.writelines("antisense;")
pflag = 1
if pflag == 0:
w.writelines("intergenic;")
w.writelines("\t" + p[chr][peak]['gene'] + "\n")
w.close()
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("Timer: Program started at %s" % startTime)
logging.debug("Timer: Program ended at %s" % currentTime)
logging.debug("Timer: Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
    logging.info("Sent email notification to %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
|
ablifedev/ABLIRC
|
ABLIRC/bin/public/mapping_distribution_analyse.py
|
Python
|
mit
| 29,245
|
[
"HTSeq"
] |
d4b659112fad729c0e2e3a74ae39ce5453b9b50ea55900cc1a25224d03dbda53
|
#!/usr/bin/env python
# Disco-cake
# Copyright (C) 2017 Maugere Lucas
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from expected_wrap import ExpectedList
from test_base import TestBase
class TestShirobakoOst(TestBase):
cfg = TestBase.default_config()
input_data = TestBase.make_input_data_with_album_id('vgmdb', 48776)
def test(self):
self.execute(self.input_data, self.initial_names, self.cfg)
self.assert_all_fields(None, self.album)
def setUp(self):
self.album = ExpectedList.from_names(self.expected_names)
self.album['track_number'] = self.album.track_numbers()
self.album['title'] = self.titles
self.album['disc_name'] = "SHIROBAKO ORIGINAL SOUNDTRACK 1"
self.album['year'] = "2015"
self.album['category'] = "Animation"
self.album['composer'] = "Shiro Hamaguchi"
self.album.sort()
initial_names = [
'01. SHIROBAKO.mp3',
'02. オンエアまで全裸待機.mp3',
'03. 制作進行の宮森あおいです.mp3',
'04. 疲れたので家に帰ろう.mp3',
'05. かれん味.mp3',
'06. クリエイターたち.mp3',
'07. アイドリングストップなんていらない.mp3',
'08. 新人原画はつらいよ.mp3',
'09. 悩みのない人生なんて.mp3',
'10. ガヤデビュー.mp3',
'11. どんどんドーナツどーんと行こう!.mp3',
'12. 絵コンテの上がらない日々.mp3',
'13. しゃーっす.mp3',
'14. 何だか不条理です.mp3',
'15. とりあえず偉い人に抗ってみる.mp3',
'16. けんけんがくがく.mp3',
'17. 原画回収に行ってきまーす.mp3',
'18. つながっているんだよ.mp3',
'19. 皆さん、お疲れ様でした.mp3',
'20. あるぴんはいます!.mp3',
'21. 萌えアニメ NOW ON SALE.mp3',
'22. ロボットアニメ NOW ON SALE.mp3',
'23. 次回『SHIROBAKO』お楽しみに!.mp3',
'24. ぼくらのトレーシー.mp3',
'25. 愛のトレーシー.mp3',
'26. がんばれトレーシー.mp3',
'27. 何やら不穏な気配ですぜ旦那.mp3',
'28. 後の車、怪しくない?.mp3',
'29. 執念の恋ヶ窪警部.mp3',
'30. 本日二度目のEXODUS.mp3',
'31. ルート66、ホントは青梅街道.mp3',
'32. 紛らわしすぎるダイイングメッセージ.mp3',
'33. あるぴんの感情につけてみました.mp3',
'34. もう一回、あるぴんの感情.mp3',
'35. 百頭の馬と複葉機.mp3',
]
expected_names = [
'01 - SHIROBAKO.mp3',
'02 - On Air made Zenra Taiki.mp3',
'03 - Seisaku Shinkou no Miyamori Aoi desu.mp3',
'04 - Tsukareta node Ie ni Kaerou.mp3',
'05 - Kerenmi.mp3',
'06 - Creator-tachi.mp3',
'07 - Idling Stop Nante Iranai.mp3',
'08 - Shinjin Genga wa Tsurai yo.mp3',
'09 - Nayami no nai Jinsei Nante.mp3',
'10 - Gaya Debut.mp3',
'11 - Don Don Donuts Do-n to Ikou!.mp3',
'12 - E-conte no Agaranai Hibi.mp3',
'13 - Sha-ssu.mp3',
'14 - Nandaka Fujouri desu.mp3',
'15 - Toriaezu Erai Hito ni Aragatte Miru.mp3',
'16 - Kenkengakugaku.mp3',
'17 - Genga Kaishuu ni Ittekima-su.mp3',
'18 - Tsunagatte Irunda yo.mp3',
'19 - Minasan, Otsukaresama Deshita.mp3',
'20 - Arupin wa Imasu!.mp3',
'21 - Moe Anime NOW ON SALE.mp3',
'22 - Robot Anime NOW ON SALE.mp3',
'23 - Jikai SHIROBAKO Otanoshimi ni!.mp3',
'24 - Bokura no Tracy.mp3',
'25 - Ai no Tracy.mp3',
'26 - Ganbare Tracy.mp3',
'27 - Naniyara Fuon na Kehai desu ze Danna.mp3',
'28 - Ushiro no Kuruma, Ayashikunai.mp3',
'29 - Shuunen no Koigakubo Keibu.mp3',
'30 - Honjiysu Nidome no EXODUS.mp3',
'31 - Route 66, Honto wa Oume Kaidou.mp3',
'32 - Magirawashisugiru Dying Message.mp3',
'33 - Arupin no Kanjou ni Tsukete Mimashita.mp3',
'34 - Mou Ikkai, Arupin no Kanjou.mp3',
'35 - Hyakutou no Uma to Fukuyouki.mp3',
]
titles = [
'SHIROBAKO',
'On Air made Zenra Taiki',
'Seisaku Shinkou no Miyamori Aoi desu',
'Tsukareta node Ie ni Kaerou',
'Kerenmi',
'Creator-tachi',
'Idling Stop Nante Iranai',
'Shinjin Genga wa Tsurai yo',
'Nayami no nai Jinsei Nante',
'Gaya Debut',
'Don Don Donuts Do-n to Ikou!',
'E-conte no Agaranai Hibi',
'Sha-ssu',
'Nandaka Fujouri desu',
'Toriaezu Erai Hito ni Aragatte Miru',
'Kenkengakugaku',
'Genga Kaishuu ni Ittekima-su',
'Tsunagatte Irunda yo',
'Minasan, Otsukaresama Deshita',
'Arupin wa Imasu!',
'Moe Anime NOW ON SALE',
'Robot Anime NOW ON SALE',
'Jikai "SHIROBAKO" Otanoshimi ni!', # this title uses ""
'Bokura no Tracy',
'Ai no Tracy',
'Ganbare Tracy',
'Naniyara Fuon na Kehai desu ze Danna',
'Ushiro no Kuruma, Ayashikunai?', # this title uses ?
'Shuunen no Koigakubo Keibu',
'Honjiysu Nidome no EXODUS',
'Route 66, Honto wa Oume Kaidou',
'Magirawashisugiru Dying Message',
'Arupin no Kanjou ni Tsukete Mimashita',
'Mou Ikkai, Arupin no Kanjou',
'Hyakutou no Uma to Fukuyouki',
]
if __name__ == '__main__':
unittest.main()
|
Cqfuj/disco-cake
|
tests/test_shirobako_ost.py
|
Python
|
gpl-3.0
| 6,221
|
[
"MOE"
] |
42bbddd1ef1730189fb39e2b69bd16eff86f43dc8bad67c0610d2d022fa710e9
|
import logging
from neuro import reshape
import neuro
import numpy
from reikna.algorithms import PureParallel
from reikna.core import Parameter
from reikna.core.signature import Annotation
log = logging.getLogger("classification")
def classification_delta_kernel(ctx, outputs, targets, deltas):
kernel_cache, thread = ctx.kernel_cache, ctx.thread
assert outputs.shape[0] == targets.shape[0] == deltas.shape[0]
assert len(targets.shape) == 1
assert targets.dtype == numpy.int32
assert outputs.shape[1] == deltas.shape[1]
key = (classification_delta_kernel, outputs.shape)
if not key in kernel_cache.keys():
log.info("compiling " + str(key))
kernel = PureParallel(
[
Parameter('outputs', Annotation(outputs, 'i')),
Parameter('targets', Annotation(targets, 'i')),
Parameter('deltas', Annotation(deltas, 'o'))
],
"""
${outputs.ctype} out = ${outputs.load_same};
SIZE_T t = ${targets.load_idx}(${idxs[0]});
SIZE_T idx = ${idxs[1]};
${deltas.ctype} d;
if (t == idx) {
d = 1.0f - out;
} else {
d = -out;
}
${deltas.store_same}(d);
""", guiding_array='deltas')
kernel_cache[key] = kernel.compile(thread)
# Run kernel
kernel_cache[key](outputs, targets, deltas)
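# For reference, the kernel above matches this NumPy sketch (editor
# illustration only, not used by the module):
#     deltas = -outputs
#     deltas[numpy.arange(len(targets)), targets] += 1.0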
def class_errors(ctx, expected, actual, errors):
""" expected int32, actual float, errors int32 """
kernel_cache, thread = ctx.kernel_cache, ctx.thread
key = (class_errors, expected.shape)
if key not in kernel_cache.keys():
# target should be an integer
logging.info("compiling " + str(key))
assert expected.shape == errors.shape # one neuron per class
assert expected.shape == (actual.shape[0],) # index of the class
assert actual.dtype == numpy.float32
assert expected.dtype == numpy.int32
assert errors.dtype == numpy.int32
kernel = PureParallel(
[
Parameter('expected', Annotation(expected, 'i')),
Parameter('actual', Annotation(actual, 'i')),
Parameter('errors', Annotation(errors, 'o'))
],
"""
SIZE_T expected = ${expected.load_idx}(${idxs[0]});;
float maximum=0.0f;
float value;
SIZE_T maxindex = 0;
SIZE_T tl = ${target_length};
// calculate argmax
for(SIZE_T j=0; j < tl; j++) {
value = ${actual.load_idx}(${idxs[0]}, j);
if (value > maximum) {
maximum = value;
maxindex = j;
}
}
// If the confidence is too low, return an error
if (maximum < (1.0f / ${target_length}.0f + 0.001f)) {
${errors.store_same}(1);
return;
};
// compare argmax
if (maxindex != expected) {
${errors.store_same}(1);
} else {
${errors.store_same}(0);
}
""", guiding_array='expected', render_kwds={'target_length' : numpy.int32(actual.shape[1])})
kernel_cache[key] = kernel.compile(thread)
kernel_cache[key](expected, actual, errors)
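# NumPy sketch of the same logic (editor illustration only, not used by the
# module):
#     pred = actual.argmax(axis=1)
#     low_conf = actual.max(axis=1) < (1.0 / actual.shape[1] + 0.001)
#     errors = ((pred != expected) | low_conf).astype(numpy.int32)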
class ClassificationNetwork(object):
"""
    Defines the output of a neural network for solving a classification task.
"""
def __init__(self, **kwargs):
super(ClassificationNetwork, self).__init__(**kwargs)
log.info("Classification constructor")
self.targets_dtype = numpy.int32
self.error_measure = "Classification Errors"
def create_state(self, num_patterns):
state = super(ClassificationNetwork, self).create_state(num_patterns)
ctx = self.context
shp = (num_patterns,)
state.classification_errors = ctx.thread.array(shp, dtype=numpy.int32)
return state
def get_target_shape(self):
# in a classification network, the target values are just the index of the correct class
return ()
def get_target_dtype(self):
return numpy.int32
def delta(self, network_state, targets):
"""
Classes must be coded as integers. Each integer is one class.
"""
super(ClassificationNetwork, self).delta(network_state, targets)
outputs = network_state.layers[-1].activations
deltas = network_state.layers[-1].deltas
classification_delta_kernel(self.context, outputs, targets, deltas)
def add_layer(self, LayerClass, **kwargs):
super(ClassificationNetwork, self).add_layer(LayerClass, **kwargs)
def error(self, inputs, targets, network_state):
"""
Calculate the classification error.
"""
self.propagate(network_state, inputs)
outputs = network_state.layers[-1].activations
class_errors(self.context, targets, outputs, network_state.classification_errors)
self.context.sum(network_state.classification_errors, network_state.error)
return network_state.error.get()
|
schreon/neuronaut
|
neuro/classification.py
|
Python
|
mit
| 5,154
|
[
"NEURON"
] |
268162bad927419b438572b8c5c784b186e803f2c927123874b3cf7aa0acc0db
|
__author__ = 'jiataogu'
import theano
import logging
import copy
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),os.path.pardir)))
import emolga.basic.objectives as objectives
import emolga.basic.optimizers as optimizers
from theano.compile.nanguardmode import NanGuardMode
from emolga.utils.generic_utils import visualize_
from emolga.layers.core import Dropout, Dense, Dense2, Identity
from emolga.layers.recurrent import *
from emolga.layers.ntm_minibatch import Controller
from emolga.layers.embeddings import *
from emolga.layers.attention import *
from core import Model
logger = logging.getLogger(__name__)
RNN = GRU # change it here for other RNN models.
err = 1e-9
class Encoder(Model):
"""
Recurrent Neural Network-based Encoder
It is used to compute the context vector.
"""
def __init__(self,
config, rng, prefix='enc',
mode='Evaluation', embed=None, use_context=False):
super(Encoder, self).__init__()
self.config = config
self.rng = rng
self.prefix = prefix
self.mode = mode
self.name = prefix
self.use_context = use_context
self.return_embed = False
self.return_sequence = False
"""
Create all elements of the Encoder's Computational graph
"""
# create Embedding layers
logger.info("{}_create embedding layers.".format(self.prefix))
if embed:
self.Embed = embed
else:
self.Embed = Embedding(
self.config['enc_voc_size'],
self.config['enc_embedd_dim'],
name="{}_embed".format(self.prefix))
self._add(self.Embed)
if self.use_context:
self.Initializer = Dense(
config['enc_contxt_dim'],
config['enc_hidden_dim'],
activation='tanh',
name="{}_init".format(self.prefix)
)
self._add(self.Initializer)
"""
Encoder Core
"""
# create RNN cells
if not self.config['bidirectional']:
logger.info("{}_create RNN cells.".format(self.prefix))
self.RNN = RNN(
self.config['enc_embedd_dim'],
self.config['enc_hidden_dim'],
None if not use_context
else self.config['enc_contxt_dim'],
name="{}_cell".format(self.prefix)
)
self._add(self.RNN)
else:
logger.info("{}_create forward RNN cells.".format(self.prefix))
self.forwardRNN = RNN(
self.config['enc_embedd_dim'],
self.config['enc_hidden_dim'],
None if not use_context
else self.config['enc_contxt_dim'],
name="{}_fw_cell".format(self.prefix)
)
self._add(self.forwardRNN)
logger.info("{}_create backward RNN cells.".format(self.prefix))
self.backwardRNN = RNN(
self.config['enc_embedd_dim'],
self.config['enc_hidden_dim'],
None if not use_context
else self.config['enc_contxt_dim'],
name="{}_bw_cell".format(self.prefix)
)
self._add(self.backwardRNN)
logger.info("create encoder ok.")
def build_encoder(self, source, context=None, return_embed=False,
return_sequence=False,
return_gates=False,
clean_mask=False):
"""
Build the Encoder Computational Graph
"""
        # clean_mask means the hidden states at masked positions are set to 0.
        # This sometimes helps downstream computations.
        # Note that this option only takes effect when return_sequence is True.
        # We recommend leaving at least one masked position at the end of the encoded sequence.
# Initial state
Init_h = None
if self.use_context:
Init_h = self.Initializer(context)
# word embedding
if not self.config['bidirectional']:
X, X_mask = self.Embed(source, True)
if return_gates:
X_out, Z, R = self.RNN(X, X_mask, C=context, init_h=Init_h,
return_sequence=return_sequence,
return_gates=True)
else:
X_out = self.RNN(X, X_mask, C=context, init_h=Init_h,
return_sequence=return_sequence,
return_gates=False)
if return_sequence:
X_tail = X_out[:, -1]
if clean_mask:
X_out = X_out * X_mask[:, :, None]
else:
X_tail = X_out
else:
source2 = source[:, ::-1]
X, X_mask = self.Embed(source, True)
X2, X2_mask = self.Embed(source2, True)
if not return_gates:
X_out1 = self.backwardRNN(X, X_mask, C=context, init_h=Init_h, return_sequence=return_sequence)
X_out2 = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h, return_sequence=return_sequence)
else:
X_out1, Z1, R1 = self.backwardRNN(X, X_mask, C=context, init_h=Init_h,
return_sequence=return_sequence,
return_gates=True)
X_out2, Z2, R2 = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h,
return_sequence=return_sequence,
return_gates=True)
Z = T.concatenate([Z1, Z2[:, ::-1, :]], axis=2)
R = T.concatenate([R1, R2[:, ::-1, :]], axis=2)
if not return_sequence:
X_out = T.concatenate([X_out1, X_out2], axis=1)
X_tail = X_out
else:
X_out = T.concatenate([X_out1, X_out2[:, ::-1, :]], axis=2)
X_tail = T.concatenate([X_out1[:, -1], X_out2[:, -1]], axis=1)
if clean_mask:
X_out = X_out * X_mask[:, :, None]
X_mask = T.cast(X_mask, dtype='float32')
if not return_gates:
if return_embed:
return X_out, X, X_mask, X_tail
return X_out
else:
if return_embed:
return X_out, X, X_mask, X_tail, Z, R
return X_out, Z, R
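    # Illustrative note: in the bidirectional branch above, one RNN reads the source in
    # its original order and the other reads the reversed copy (source[:, ::-1]); the
    # second stream is flipped back with [:, ::-1, :] before the per-step outputs are
    # concatenated along the feature axis, so X_out carries 2 * enc_hidden_dim features
    # per position and X_tail stitches together the two final states.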
def compile_encoder(self, with_context=False, return_embed=False, return_sequence=False):
source = T.imatrix()
self.return_embed = return_embed
self.return_sequence = return_sequence
if with_context:
context = T.matrix()
self.encode = theano.function([source, context],
self.build_encoder(source, context,
return_embed=return_embed,
return_sequence=return_sequence))
self.gtenc = theano.function([source, context],
self.build_encoder(source, context,
return_embed=return_embed,
return_sequence=return_sequence,
return_gates=True))
else:
self.encode = theano.function([source],
self.build_encoder(source, None,
return_embed=return_embed,
return_sequence=return_sequence))
self.gtenc = theano.function([source],
self.build_encoder(source, None,
return_embed=return_embed,
return_sequence=return_sequence,
return_gates=True))
class Decoder(Model):
"""
Recurrent Neural Network-based Decoder.
It is used for:
(1) Evaluation: compute the probability P(Y|X)
(2) Prediction: sample the best result based on P(Y|X)
(3) Beam-search
(4) Scheduled Sampling (how to implement it?)
"""
def __init__(self,
config, rng, prefix='dec',
mode='RNN', embed=None,
highway=False):
"""
mode = RNN: use a RNN Decoder
"""
super(Decoder, self).__init__()
self.config = config
self.rng = rng
self.prefix = prefix
self.name = prefix
self.mode = mode
self.highway = highway
self.init = initializations.get('glorot_uniform')
self.sigmoid = activations.get('sigmoid')
# use standard drop-out for input & output.
        # I believe it should not be used for the context vector.
self.dropout = config['dropout']
if self.dropout > 0:
logger.info('Use standard-dropout!!!!')
self.D = Dropout(rng=self.rng, p=self.dropout, name='{}_Dropout'.format(prefix))
"""
Create all elements of the Decoder's computational graph.
"""
# create Embedding layers
logger.info("{}_create embedding layers.".format(self.prefix))
if embed:
self.Embed = embed
else:
self.Embed = Embedding(
self.config['dec_voc_size'],
self.config['dec_embedd_dim'],
name="{}_embed".format(self.prefix))
self._add(self.Embed)
# create Initialization Layers
logger.info("{}_create initialization layers.".format(self.prefix))
if not config['bias_code']:
self.Initializer = Zero()
else:
self.Initializer = Dense(
config['dec_contxt_dim'],
config['dec_hidden_dim'],
activation='tanh',
name="{}_init".format(self.prefix)
)
# create RNN cells
logger.info("{}_create RNN cells.".format(self.prefix))
if 'location_embed' in self.config:
if config['location_embed']:
dec_embedd_dim = 2 * self.config['dec_embedd_dim']
else:
dec_embedd_dim = self.config['dec_embedd_dim']
else:
dec_embedd_dim = self.config['dec_embedd_dim']
self.RNN = RNN(
dec_embedd_dim,
self.config['dec_hidden_dim'],
self.config['dec_contxt_dim'],
name="{}_cell".format(self.prefix)
)
self._add(self.Initializer)
self._add(self.RNN)
# HighWay Gating
if highway:
logger.info("HIGHWAY CONNECTION~~~!!!")
assert self.config['context_predict']
assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']
self.C_x = self.init((self.config['dec_contxt_dim'],
self.config['dec_hidden_dim']))
self.H_x = self.init((self.config['dec_hidden_dim'],
self.config['dec_hidden_dim']))
self.b_x = initializations.get('zero')(self.config['dec_hidden_dim'])
self.C_x.name = '{}_Cx'.format(self.prefix)
self.H_x.name = '{}_Hx'.format(self.prefix)
self.b_x.name = '{}_bx'.format(self.prefix)
self.params += [self.C_x, self.H_x, self.b_x]
# create readout layers
logger.info("_create Readout layers")
# 1. hidden layers readout.
self.hidden_readout = Dense(
self.config['dec_hidden_dim'],
self.config['output_dim']
if self.config['deep_out']
else self.config['dec_voc_size'],
activation='linear',
name="{}_hidden_readout".format(self.prefix)
)
# 2. previous word readout
self.prev_word_readout = None
if self.config['bigram_predict']:
self.prev_word_readout = Dense(
dec_embedd_dim,
self.config['output_dim']
if self.config['deep_out']
else self.config['dec_voc_size'],
activation='linear',
name="{}_prev_word_readout".format(self.prefix),
learn_bias=False
)
# 3. context readout
self.context_readout = None
if self.config['context_predict']:
if not self.config['leaky_predict']:
self.context_readout = Dense(
self.config['dec_contxt_dim'],
self.config['output_dim']
if self.config['deep_out']
else self.config['dec_voc_size'],
activation='linear',
name="{}_context_readout".format(self.prefix),
learn_bias=False
)
else:
assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']
self.context_readout = self.hidden_readout
# option: deep output (maxout)
if self.config['deep_out']:
self.activ = Activation(config['deep_out_activ'])
# self.dropout = Dropout(rng=self.rng, p=config['dropout'])
self.output_nonlinear = [self.activ] # , self.dropout]
self.output = Dense(
self.config['output_dim'] / 2
if config['deep_out_activ'] == 'maxout2'
else self.config['output_dim'],
self.config['dec_voc_size'],
activation='softmax',
name="{}_output".format(self.prefix),
learn_bias=False
)
else:
self.output_nonlinear = []
self.output = Activation('softmax')
# registration:
self._add(self.hidden_readout)
if not self.config['leaky_predict']:
self._add(self.context_readout)
self._add(self.prev_word_readout)
self._add(self.output)
if self.config['deep_out']:
self._add(self.activ)
# self._add(self.dropout)
logger.info("create decoder ok.")
@staticmethod
def _grab_prob(probs, X, block_unk=False):
assert probs.ndim == 3
batch_size = probs.shape[0]
max_len = probs.shape[1]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
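    # Illustrative note: _grab_prob flattens the (batch, time) axes so a single
    # advanced-indexing lookup picks probs[b, t, X[b, t]] for every position. For
    # instance, with probs of shape (2, 3, V) and X of shape (2, 3), the reshape yields
    # a (6, V) matrix, probs[T.arange(6), X.flatten()] selects the 6 target-word
    # probabilities, and the result is reshaped back to (2, 3).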
"""
Build the decoder for evaluation
"""
def prepare_xy(self, target):
# Word embedding
Y, Y_mask = self.Embed(target, True) # (nb_samples, max_len, embedding_dim)
if self.config['use_input']:
X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)
else:
X = 0 * Y
# option ## drop words.
X_mask = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)
Count = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)
return X, X_mask, Y, Y_mask, Count
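    # Illustrative note: prepare_xy sets up teacher forcing. X is the target embedding
    # sequence shifted right by one step (an all-zero vector stands in for the
    # begin-of-sequence symbol), so at step t the RNN consumes gold word t-1 and is
    # trained to predict word t. Count holds the number of valid steps per sample and
    # later converts the summed log-likelihood into a per-token value.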
def build_decoder(self, target, context=None,
return_count=False,
train=True):
"""
Build the Decoder Computational Graph
For training/testing
"""
X, X_mask, Y, Y_mask, Count = self.prepare_xy(target)
# input drop-out if any.
if self.dropout > 0:
X = self.D(X, train=train)
# Initial state of RNN
Init_h = self.Initializer(context)
if not self.highway:
X_out = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=True)
# Readout
readout = self.hidden_readout(X_out)
if self.dropout > 0:
readout = self.D(readout, train=train)
if self.config['context_predict']:
readout += self.context_readout(context).dimshuffle(0, 'x', 1)
else:
X = X.dimshuffle((1, 0, 2))
X_mask = X_mask.dimshuffle((1, 0))
def _recurrence(x, x_mask, prev_h, c):
# compute the highway gate for context vector.
xx = dot(c, self.C_x, self.b_x) + dot(prev_h, self.H_x) # highway gate.
xx = self.sigmoid(xx)
cy = xx * c # the path without using RNN
x_out = self.RNN(x, mask=x_mask, C=c, init_h=prev_h, one_step=True)
hx = (1 - xx) * x_out
return x_out, hx, cy
outputs, _ = theano.scan(
_recurrence,
sequences=[X, X_mask],
outputs_info=[Init_h, None, None],
non_sequences=[context]
)
# hidden readout + context readout
readout = self.hidden_readout( outputs[1].dimshuffle((1, 0, 2)))
if self.dropout > 0:
readout = self.D(readout, train=train)
readout += self.context_readout(outputs[2].dimshuffle((1, 0, 2)))
# return to normal size.
X = X.dimshuffle((1, 0, 2))
X_mask = X_mask.dimshuffle((1, 0))
if self.config['bigram_predict']:
readout += self.prev_word_readout(X)
for l in self.output_nonlinear:
readout = l(readout)
prob_dist = self.output(readout) # (nb_samples, max_len, vocab_size)
# log_old = T.sum(T.log(self._grab_prob(prob_dist, target)), axis=1)
log_prob = T.sum(T.log(self._grab_prob(prob_dist, target) + err) * X_mask, axis=1)
log_ppl = log_prob / Count
if return_count:
return log_prob, Count
else:
return log_prob, log_ppl
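    # Illustrative note: log_ppl is the length-normalised (per-token) log-likelihood;
    # the training code in this file reports perplexity as T.exp(T.mean(-logPPL)), e.g.
    # an average per-token log-probability of -2.3 corresponds to a perplexity of about
    # exp(2.3) ~= 10.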
"""
Sample one step
"""
def _step_sample(self, prev_word, prev_stat, context):
# word embedding (note that for the first word, embedding should be all zero)
if self.config['use_input']:
X = T.switch(
prev_word[:, None] < 0,
alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),
self.Embed(prev_word)
)
else:
X = alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim'])
if self.dropout > 0:
X = self.D(X, train=False)
# apply one step of RNN
if not self.highway:
X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)
next_stat = X_proj
# compute the readout probability distribution and sample it
# here the readout is a matrix, different from the learner.
readout = self.hidden_readout(next_stat)
if self.dropout > 0:
readout = self.D(readout, train=False)
if self.config['context_predict']:
readout += self.context_readout(context)
else:
xx = dot(context, self.C_x, self.b_x) + dot(prev_stat, self.H_x) # highway gate.
xx = self.sigmoid(xx)
X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)
next_stat = X_proj
readout = self.hidden_readout((1 - xx) * X_proj)
if self.dropout > 0:
readout = self.D(readout, train=False)
readout += self.context_readout(xx * context)
if self.config['bigram_predict']:
readout += self.prev_word_readout(X)
for l in self.output_nonlinear:
readout = l(readout)
next_prob = self.output(readout)
next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)
return next_prob, next_sample, next_stat
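    # Illustrative note: _step_sample is compiled into the `sample_next` theano function
    # in build_sampler below and is then driven step by step from numpy code in
    # get_sample; a prev_word value of -1 marks the begin-of-sequence position, whose
    # embedding is replaced by zeros via T.switch.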
"""
Build the sampler for sampling/greedy search/beam search
"""
def build_sampler(self):
"""
Build a sampler which only steps once.
        Typically it only works on one word at a time.
"""
logger.info("build sampler ...")
if self.config['sample_stoch'] and self.config['sample_argmax']:
logger.info("use argmax search!")
elif self.config['sample_stoch'] and (not self.config['sample_argmax']):
logger.info("use stochastic sampling!")
elif self.config['sample_beam'] > 1:
logger.info("use beam search! (beam_size={})".format(self.config['sample_beam']))
# initial state of our Decoder.
context = T.matrix() # theano variable.
init_h = self.Initializer(context)
logger.info('compile the function: get_init_state')
self.get_init_state \
= theano.function([context], init_h, name='get_init_state')
logger.info('done.')
# word sampler: 1 x 1
prev_word = T.vector('prev_word', dtype='int64')
prev_stat = T.matrix('prev_state', dtype='float32')
next_prob, next_sample, next_stat \
= self._step_sample(prev_word, prev_stat, context)
# next word probability
logger.info('compile the function: sample_next')
inputs = [prev_word, prev_stat, context]
outputs = [next_prob, next_sample, next_stat]
self.sample_next = theano.function(inputs, outputs, name='sample_next')
logger.info('done')
pass
"""
Build a Stochastic Sampler which can use SCAN to work on GPU.
However it cannot be used in Beam-search.
"""
def build_stochastic_sampler(self):
context = T.matrix()
init_h = self.Initializer(context)
logger.info('compile the function: sample')
pass
"""
Generate samples, either with stochastic sampling or beam-search!
"""
def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):
# beam size
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling!!'
# fix length cannot use beam search
# if fixlen:
# assert k == 1
# prepare for searching
sample = []
score = []
if stochastic:
score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = np.zeros(live_k).astype(theano.config.floatX)
hyp_states = []
# get initial state of decoder RNN with context
next_state = self.get_init_state(context)
next_word = -1 * np.ones((1,)).astype('int64') # indicator for the first target word (bos target)
# Start searching!
for ii in xrange(maxlen):
# print next_word
ctx = np.tile(context, [live_k, 1])
next_prob, next_word, next_state \
= self.sample_next(next_word, next_state, ctx) # wtf.
if stochastic:
# using stochastic sampling (or greedy sampling.)
if argmax:
nw = next_prob[0].argmax()
next_word[0] = nw
else:
nw = next_word[0]
sample.append(nw)
score += next_prob[0, nw]
if (not fixlen) and (nw == 0): # sample reached the end
break
else:
# using beam-search
                # we can only compute it in a flattened way!
cand_scores = hyp_scores[:, None] - np.log(next_prob)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k - dead_k)]
# fetch the best results.
voc_size = next_prob.shape[1]
trans_index = ranks_flat / voc_size
word_index = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
# get the new hyp samples
new_hyp_samples = []
new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):
new_hyp_samples.append(hyp_samples[ti] + [wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):
sample.append(new_hyp_samples[idx])
score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = np.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_word = np.array([w[-1] for w in hyp_samples])
next_state = np.array(hyp_states)
pass
pass
# end.
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
score.append(hyp_scores[idx])
return sample, score
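    # Minimal usage sketch (the variable names below are illustrative assumptions, not
    # from the original source):
    #
    #   ctx = np.zeros((1, config['dec_contxt_dim']), dtype='float32')
    #   sample, score = decoder.get_sample(ctx, k=5, maxlen=30, stochastic=False)
    #
    # With stochastic=False the scores are accumulated negative log-probabilities;
    # RNNLM.generate_ below length-normalises them and keeps the hypothesis with the
    # smallest (i.e. most probable) score.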
class DecoderAtt(Decoder):
"""
Recurrent Neural Network-based Decoder [for CopyNet-b Only]
with Attention Mechanism
"""
def __init__(self,
config, rng, prefix='dec',
mode='RNN', embed=None,
copynet=False, identity=False):
super(DecoderAtt, self).__init__(
config, rng, prefix,
mode, embed, False)
self.init = initializations.get('glorot_uniform')
self.copynet = copynet
self.identity = identity
# attention reader
self.attention_reader = Attention(
self.config['dec_hidden_dim'],
self.config['dec_contxt_dim'],
1000,
name='source_attention',
coverage=self.config['coverage']
)
self._add(self.attention_reader)
# if use copynet
if self.copynet:
if not self.identity:
self.Is = Dense(
self.config['dec_contxt_dim'],
self.config['dec_embedd_dim'],
name='in-trans'
)
else:
assert self.config['dec_contxt_dim'] == self.config['dec_embedd_dim']
self.Is = Identity(name='ini')
self.Os = Dense(
self.config['dec_readout_dim']
if not self.config['location_embed']
else self.config['dec_readout_dim'] + self.config['dec_embedd_dim'],
self.config['dec_contxt_dim'],
name='out-trans'
)
if self.config['copygate']:
self.Gs = Dense(
self.config['dec_readout_dim'] + self.config['dec_embedd_dim'],
1,
name='copy-gate',
activation='linear',
learn_bias=True,
negative_bias=True
)
self._add(self.Gs)
if self.config['location_embed']:
self._add(self.Is)
self._add(self.Os)
logger.info('adjust decoder ok.')
"""
Build the decoder for evaluation
"""
def prepare_xy(self, target, cc_matrix):
# target: (nb_samples, index_seq)
# cc_matrix: (nb_samples, maxlen_t, maxlen_s)
# context: (nb_samples)
Y, Y_mask = self.Embed(target, True) # (nb_samples, maxlen_t, embedding_dim)
X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)
# LL = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, cc_matrix.shape[2]),
# cc_matrix[:, :-1, :]], axis=1)
LL = cc_matrix
XL_mask = T.cast(T.gt(T.sum(LL, axis=2), 0), dtype='float32')
if not self.config['use_input']:
X *= 0
X_mask = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)
Count = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)
return X, X_mask, LL, XL_mask, Y_mask, Count
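    # Illustrative note: cc_matrix is the copy-alignment tensor. LL[i, t, j] is nonzero
    # exactly when target word t of sample i also appears at source position j, and
    # XL_mask marks the target steps that can be produced by copying at all. Both feed
    # the copy-mode probabilities computed in build_decoder below.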
"""
    The most different part. Be cautious!!
    Very different from a traditional RNN search.
"""
def build_decoder(self,
target,
cc_matrix,
context,
c_mask,
return_count=False,
train=True):
"""
Build the Computational Graph ::> Context is essential
"""
assert c_mask is not None, 'context must be supplied for this decoder.'
        assert context.ndim == 3, 'context must have 3 dimensions.'
# context: (nb_samples, max_len, contxt_dim)
context_A = self.Is(context) # (nb_samples, max_len, embed_dim)
X, X_mask, LL, XL_mask, Y_mask, Count = self.prepare_xy(target, cc_matrix)
# input drop-out if any.
if self.dropout > 0:
X = self.D(X, train=train)
# Initial state of RNN
Init_h = self.Initializer(context[:, 0, :]) # default order ->
Init_a = T.zeros((context.shape[0], context.shape[1]), dtype='float32')
coverage = T.zeros((context.shape[0], context.shape[1]), dtype='float32')
X = X.dimshuffle((1, 0, 2))
X_mask = X_mask.dimshuffle((1, 0))
LL = LL.dimshuffle((1, 0, 2)) # (maxlen_t, nb_samples, maxlen_s)
XL_mask = XL_mask.dimshuffle((1, 0)) # (maxlen_t, nb_samples)
def _recurrence(x, x_mask, ll, xl_mask, prev_h, prev_a, cov, cc, cm, ca):
"""
x: (nb_samples, embed_dims)
x_mask: (nb_samples, )
ll: (nb_samples, maxlen_s)
xl_mask:(nb_samples, )
-----------------------------------------
prev_h: (nb_samples, hidden_dims)
prev_a: (nb_samples, maxlen_s)
cov: (nb_samples, maxlen_s) *** coverage ***
-----------------------------------------
cc: (nb_samples, maxlen_s, cxt_dim)
cm: (nb_samples, maxlen_s)
ca: (nb_samples, maxlen_s, ebd_dim)
"""
# compute the attention and get the context vector
prob = self.attention_reader(prev_h, cc, Smask=cm, Cov=cov)
ncov = cov + prob
cxt = T.sum(cc * prob[:, :, None], axis=1)
# compute input word embedding (mixed)
x_in = T.concatenate([x, T.sum(ca * prev_a[:, :, None], axis=1)], axis=-1)
# compute the current hidden states of the RNN.
x_out = self.RNN(x_in, mask=x_mask, C=cxt, init_h=prev_h, one_step=True)
# compute the current readout vector.
r_in = [x_out]
if self.config['context_predict']:
r_in += [cxt]
if self.config['bigram_predict']:
r_in += [x_in]
# copynet decoding
r_in = T.concatenate(r_in, axis=-1)
r_out = self.hidden_readout(x_out) # (nb_samples, voc_size)
if self.config['context_predict']:
r_out += self.context_readout(cxt)
if self.config['bigram_predict']:
r_out += self.prev_word_readout(x_in)
for l in self.output_nonlinear:
r_out = l(r_out)
key = self.Os(r_in) # (nb_samples, cxt_dim) :: key
Eng = T.sum(key[:, None, :] * cc, axis=-1)
# # gating
if self.config['copygate']:
gt = self.sigmoid(self.Gs(r_in)) # (nb_samples, 1)
r_out += T.log(gt.flatten()[:, None])
Eng += T.log(1 - gt.flatten()[:, None])
# r_out *= gt.flatten()[:, None]
# Eng *= 1 - gt.flatten()[:, None]
EngSum = logSumExp(Eng, axis=-1, mask=cm, c=r_out)
next_p = T.concatenate([T.exp(r_out - EngSum), T.exp(Eng - EngSum) * cm], axis=-1)
next_c = next_p[:, self.config['dec_voc_size']:] * ll # (nb_samples, maxlen_s)
next_b = next_p[:, :self.config['dec_voc_size']]
sum_a = T.sum(next_c, axis=1, keepdims=True) # (nb_samples,)
            next_a = (next_c / (sum_a + err)) * xl_mask[:, None]    # numerical-stability consideration
return x_out, next_a, ncov, sum_a, next_b
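        # Illustrative note: the step above merges generate-mode scores r_out (over the
        # decoder vocabulary) and copy-mode scores Eng (one per source position) under a
        # single softmax: EngSum is the shared log-partition, so
        # next_p = [exp(r_out - EngSum), exp(Eng - EngSum) * cm] sums to one.
        # next_c keeps only the copy mass falling on source positions that match the
        # gold target word (via ll), and next_a renormalises it into the
        # "copied from where" distribution fed to the next step.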
outputs, _ = theano.scan(
_recurrence,
sequences=[X, X_mask, LL, XL_mask],
outputs_info=[Init_h, Init_a, coverage, None, None],
non_sequences=[context, c_mask, context_A]
)
X_out, source_prob, coverages, source_sum, prob_dist = [z.dimshuffle((1, 0, 2)) for z in outputs]
X = X.dimshuffle((1, 0, 2))
X_mask = X_mask.dimshuffle((1, 0))
XL_mask = XL_mask.dimshuffle((1, 0))
# unk masking
U_mask = T.ones_like(target) * (1 - T.eq(target, 1))
U_mask += (1 - U_mask) * (1 - XL_mask)
# The most different part is here !!
log_prob = T.sum(T.log(
self._grab_prob(prob_dist, target) * U_mask +
source_sum.sum(axis=-1) + err
) * X_mask, axis=1)
log_ppl = log_prob / (Count + err)
if return_count:
return log_prob, Count
else:
return log_prob, log_ppl
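    # Illustrative note: the likelihood above credits each target word with its
    # generate-mode probability (zeroed by U_mask when the target is UNK but copyable)
    # plus source_sum, the total copy-mode mass on matching source positions, so a word
    # that can only be copied still receives probability.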
"""
Sample one step
"""
def _step_sample(self,
prev_word,
prev_stat,
prev_loc,
prev_cov,
context,
c_mask,
context_A):
assert c_mask is not None, 'we need the source mask.'
# word embedding (note that for the first word, embedding should be all zero)
X = T.switch(
prev_word[:, None] < 0,
alloc_zeros_matrix(prev_word.shape[0], 2 * self.config['dec_embedd_dim']),
T.concatenate([self.Embed(prev_word),
T.sum(context_A * prev_loc[:, :, None], axis=1)
], axis=-1)
)
if self.dropout > 0:
X = self.D(X, train=False)
# apply one step of RNN
Probs = self.attention_reader(prev_stat, context, c_mask, Cov=prev_cov)
ncov = prev_cov + Probs
cxt = T.sum(context * Probs[:, :, None], axis=1)
X_proj, zz, rr = self.RNN(X, C=cxt,
init_h=prev_stat,
one_step=True,
return_gates=True)
next_stat = X_proj
# compute the readout probability distribution and sample it
# here the readout is a matrix, different from the learner.
readin = [next_stat]
if self.config['context_predict']:
readin += [cxt]
if self.config['bigram_predict']:
readin += [X]
readin = T.concatenate(readin, axis=-1)
# if gating
# if self.config['copygate']:
# gt = self.sigmoid(self.Gs(readin)) # (nb_samples, dim)
# readin *= 1 - gt
# readout = self.hidden_readout(next_stat * gt[:, :self.config['dec_hidden_dim']])
# if self.config['context_predict']:
# readout += self.context_readout(
# cxt * gt[:, self.config['dec_hidden_dim']:
# self.config['dec_hidden_dim'] + self.config['dec_contxt_dim']])
# if self.config['bigram_predict']:
# readout += self.prev_word_readout(
# X * gt[:, -2 * self.config['dec_embedd_dim']:])
# else:
readout = self.hidden_readout(next_stat)
if self.config['context_predict']:
readout += self.context_readout(cxt)
if self.config['bigram_predict']:
readout += self.prev_word_readout(X)
for l in self.output_nonlinear:
readout = l(readout)
key = self.Os(readin)
Eng = T.sum(key[:, None, :] * context, axis=-1)
# # gating
if self.config['copygate']:
gt = self.sigmoid(self.Gs(readin)) # (nb_samples, 1)
readout += T.log(gt.flatten()[:, None])
Eng += T.log(1 - gt.flatten()[:, None])
EngSum = logSumExp(Eng, axis=-1, mask=c_mask, c=readout)
next_prob = T.concatenate([T.exp(readout - EngSum), T.exp(Eng - EngSum) * c_mask], axis=-1)
next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)
return next_prob, next_sample, next_stat, ncov, next_stat
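    # Illustrative note: compared with Decoder._step_sample, this step also threads the
    # copy and attention state: prev_loc (the previous copy distribution) is mixed into
    # the input embedding through context_A, prev_cov is the accumulated coverage handed
    # to the attention reader, and the updated coverage ncov is returned so the sampling
    # loop can feed it back in.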
def build_sampler(self):
"""
Build a sampler which only steps once.
Typically it only works for one word a time?
"""
logger.info("build sampler ...")
if self.config['sample_stoch'] and self.config['sample_argmax']:
logger.info("use argmax search!")
elif self.config['sample_stoch'] and (not self.config['sample_argmax']):
logger.info("use stochastic sampling!")
elif self.config['sample_beam'] > 1:
logger.info("use beam search! (beam_size={})".format(self.config['sample_beam']))
# initial state of our Decoder.
context = T.tensor3() # theano variable.
c_mask = T.matrix() # mask of the input sentence.
context_A = self.Is(context)
init_h = self.Initializer(context[:, 0, :])
init_a = T.zeros((context.shape[0], context.shape[1]))
cov = T.zeros((context.shape[0], context.shape[1]))
logger.info('compile the function: get_init_state')
self.get_init_state \
= theano.function([context], [init_h, init_a, cov], name='get_init_state')
logger.info('done.')
# word sampler: 1 x 1
prev_word = T.vector('prev_word', dtype='int64')
prev_stat = T.matrix('prev_state', dtype='float32')
prev_a = T.matrix('prev_a', dtype='float32')
prev_cov = T.matrix('prev_cov', dtype='float32')
next_prob, next_sample, next_stat, ncov, alpha \
= self._step_sample(prev_word,
prev_stat,
prev_a,
prev_cov,
context,
c_mask,
context_A)
# next word probability
logger.info('compile the function: sample_next')
inputs = [prev_word, prev_stat, prev_a, prev_cov, context, c_mask]
outputs = [next_prob, next_sample, next_stat, ncov, alpha]
self.sample_next = theano.function(inputs, outputs, name='sample_next')
logger.info('done')
"""
Generate samples, either with stochastic sampling or beam-search!
[:-:] I have to think over how to modify the BEAM-Search!!
"""
def get_sample(self,
context,
c_mask,
source,
k=1, maxlen=30, stochastic=True,
argmax=False, fixlen=False,
return_attend=False
):
# beam size
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling!!'
# fix length cannot use beam search
# if fixlen:
# assert k == 1
# prepare for searching
Lmax = self.config['dec_voc_size']
sample = []
ppp = []
attend = []
score = []
if stochastic:
score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = np.zeros(live_k).astype(theano.config.floatX)
hyp_ppps = [[]] * live_k
hyp_attends = [[]] * live_k
# get initial state of decoder RNN with context
next_state, ss_prob, coverage = self.get_init_state(context)
next_word = -1 * np.ones((1,)).astype('int64') # indicator for the first target word (bos target)
# Start searching!
for ii in xrange(maxlen):
# print next_word
ctx = np.tile(context, [live_k, 1, 1])
cmk = np.tile(c_mask, [live_k, 1])
sss = np.tile(source, [live_k, 1])
# # process word
def process_():
# caution for index_0: UNK
ll = np.zeros((sss.shape[0], sss.shape[1]), dtype='float32')
for i in xrange(next_word.shape[0]):
if next_word[i] >= Lmax:
ll[i][next_word[i] - Lmax] = 1.
next_word[i] = sss[i][next_word[i] - Lmax]
else:
ll[i] = (sss[i] == next_word[i, None])
# for k in xrange(sss.shape[1]):
# ll[i][k] = (sss[i][k] == next_word[i])
return ll, next_word
# print next_word
ll, next_word = process_()
ll_mask = (np.sum(ll, axis=1, keepdims=True) > 0)
next_a = ss_prob * ll
next_a = next_a / (err + np.sum(next_a, axis=1, keepdims=True)) * ll_mask
next_prob0, next_word, next_state, coverage, alpha \
= self.sample_next(next_word, next_state, next_a, coverage, ctx, cmk)
# print next_prob0.shape[1]
if not self.config['decode_unk']:
next_prob0[:, 1] = 0.
next_prob0 /= np.sum(next_prob0, axis=1, keepdims=True)
def merge_():
# merge the probabilities
temple_prob = copy.copy(next_prob0)
source_prob = copy.copy(next_prob0[:, Lmax:])
for i in xrange(next_prob0.shape[0]):
for j in xrange(sss.shape[1]):
if (sss[i, j] < Lmax) and (sss[i, j] != 1):
temple_prob[i, sss[i, j]] += source_prob[i, j]
temple_prob[i, Lmax + j] = 0.
return temple_prob, source_prob
next_prob, ss_prob = merge_()
next_prob0[:, Lmax:] = 0.
# print '0', next_prob0[:, 3165]
# print '01', next_prob[:, 3165]
# # print next_prob[0, Lmax:]
# print ss_prob[0, :]
if stochastic:
# using stochastic sampling (or greedy sampling.)
if argmax:
nw = next_prob[0].argmax()
next_word[0] = nw
else:
nw = self.rng.multinomial(pvals=next_prob).argmax(1)
sample.append(nw)
score += next_prob[0, nw]
if (not fixlen) and (nw == 0): # sample reached the end
break
else:
# using beam-search
                # we can only compute it in a flattened way!
cand_scores = hyp_scores[:, None] - np.log(next_prob)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k - dead_k)]
# fetch the best results.
voc_size = next_prob.shape[1]
trans_index = ranks_flat / voc_size
word_index = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
# get the new hyp samples
new_hyp_samples = []
new_hyp_ppps = []
new_hyp_attends = []
new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)
new_hyp_states = []
new_hyp_coverage = []
new_hyp_ss = []
for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):
new_hyp_samples.append(hyp_samples[ti] + [wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append(copy.copy(next_state[ti]))
new_hyp_coverage.append(copy.copy(coverage[ti]))
new_hyp_ss.append(copy.copy(ss_prob[ti]))
if not return_attend:
new_hyp_ppps.append(hyp_ppps[ti] + [[next_prob0[ti][wi], next_prob[ti][wi]]])
else:
new_hyp_ppps.append(hyp_ppps[ti] + [(ss_prob[ti], alpha[ti])])
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
hyp_coverage = []
hyp_ppps = []
hyp_ss = []
for idx in xrange(len(new_hyp_samples)):
                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):
sample.append(new_hyp_samples[idx])
ppp.append(new_hyp_ppps[idx])
score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_ppps.append(new_hyp_ppps[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_coverage.append(new_hyp_coverage[idx])
hyp_ss.append(new_hyp_ss[idx])
hyp_scores = np.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_word = np.array([w[-1] for w in hyp_samples])
next_state = np.array(hyp_states)
coverage = np.array(hyp_coverage)
ss_prob = np.array(hyp_ss)
pass
# end.
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
ppp.append(hyp_ppps[idx])
score.append(hyp_scores[idx])
return sample, score, ppp
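    # Illustrative note: in beam-search mode each entry of ppp records, per generated
    # word, either the pair [generate-mode-only probability, merged probability] or,
    # when return_attend is set, the (copy distribution, attention record) tuples;
    # NRM.evaluate_ and NRM.analyse_ below use these to estimate how much of each word
    # came from copying.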
class FnnDecoder(Model):
def __init__(self, config, rng, prefix='fnndec'):
"""
mode = RNN: use a RNN Decoder
"""
super(FnnDecoder, self).__init__()
self.config = config
self.rng = rng
self.prefix = prefix
self.name = prefix
"""
Create Dense Predictor.
"""
self.Tr = Dense(self.config['dec_contxt_dim'],
self.config['dec_hidden_dim'],
activation='maxout2',
name='{}_Tr'.format(prefix))
self._add(self.Tr)
self.Pr = Dense(self.config['dec_hidden_dim'] / 2,
self.config['dec_voc_size'],
activation='softmax',
name='{}_Pr'.format(prefix))
self._add(self.Pr)
logger.info("FF decoder ok.")
@staticmethod
def _grab_prob(probs, X):
assert probs.ndim == 3
batch_size = probs.shape[0]
max_len = probs.shape[1]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
def build_decoder(self, target, context):
"""
Build the Decoder Computational Graph
"""
prob_dist = self.Pr(self.Tr(context[:, None, :]))
log_prob = T.sum(T.log(self._grab_prob(prob_dist, target) + err), axis=1)
return log_prob
def build_sampler(self):
context = T.matrix()
prob_dist = self.Pr(self.Tr(context))
next_sample = self.rng.multinomial(pvals=prob_dist).argmax(1)
self.sample_next = theano.function([context], [prob_dist, next_sample], name='sample_next_{}'.format(self.prefix))
logger.info('done')
def get_sample(self, context, argmax=True):
prob, sample = self.sample_next(context)
if argmax:
return prob[0].argmax()
else:
return sample[0]
########################################################################################################################
# Encoder-Decoder Models ::::
#
class RNNLM(Model):
"""
RNN-LM, with context vector = 0.
    It is very similar to the implementation of VAE.
"""
def __init__(self,
config, n_rng, rng,
mode='Evaluation'):
super(RNNLM, self).__init__()
self.config = config
self.n_rng = n_rng # numpy random stream
self.rng = rng # Theano random stream
self.mode = mode
self.name = 'rnnlm'
def build_(self):
logger.info("build the RNN-decoder")
self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)
# registration:
self._add(self.decoder)
# objectives and optimizers
self.optimizer = optimizers.get('adadelta')
        # save the initial memories
if self.config['mode'] == 'NTM':
self.memory = initializations.get('glorot_uniform')(
(self.config['dec_memory_dim'], self.config['dec_memory_wdth']))
logger.info("create the RECURRENT language model. ok")
def compile_(self, mode='train', contrastive=False):
# compile the computational graph.
# INFO: the parameters.
# mode: 'train'/ 'display'/ 'policy' / 'all'
ps = 'params: {\n'
for p in self.params:
ps += '{0}: {1}\n'.format(p.name, p.eval().shape)
ps += '}.'
logger.info(ps)
param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])
logger.info("total number of the parameters of the model: {}".format(param_num))
if mode == 'train' or mode == 'all':
if not contrastive:
self.compile_train()
else:
self.compile_train_CE()
if mode == 'display' or mode == 'all':
self.compile_sample()
if mode == 'inference' or mode == 'all':
self.compile_inference()
def compile_train(self):
# questions (theano variables)
inputs = T.imatrix() # padded input word sequence (for training)
if self.config['mode'] == 'RNN':
context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])
elif self.config['mode'] == 'NTM':
context = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)
else:
raise NotImplementedError
# decoding.
target = inputs
logPxz, logPPL = self.decoder.build_decoder(target, context)
# reconstruction loss
loss_rec = T.mean(-logPxz)
loss_ppl = T.exp(T.mean(-logPPL))
L1 = T.sum([T.sum(abs(w)) for w in self.params])
loss = loss_rec
updates = self.optimizer.get_updates(self.params, loss)
logger.info("compiling the compuational graph ::training function::")
train_inputs = [inputs]
self.train_ = theano.function(train_inputs,
[loss_rec, loss_ppl],
updates=updates,
name='train_fun')
logger.info("pre-training functions compile done.")
# add monitoring:
self.monitor['context'] = context
self._monitoring()
# compiling monitoring
self.compile_monitoring(train_inputs)
@abstractmethod
def compile_train_CE(self):
pass
def compile_sample(self):
# context vectors (as)
self.decoder.build_sampler()
logger.info("display functions compile done.")
@abstractmethod
def compile_inference(self):
pass
def default_context(self):
if self.config['mode'] == 'RNN':
return np.zeros(shape=(1, self.config['dec_contxt_dim']), dtype=theano.config.floatX)
elif self.config['mode'] == 'NTM':
memory = self.memory.get_value()
memory = memory.reshape((1, memory.shape[0], memory.shape[1]))
return memory
def generate_(self, context=None, max_len=None, mode='display'):
"""
:param action: action vector to guide the question.
If None, use a Gaussian to simulate the action.
:return: question sentence in natural language.
"""
# assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'
# assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'
if context is None:
context = self.default_context()
args = dict(k=self.config['sample_beam'],
maxlen=self.config['max_len'] if not max_len else max_len,
stochastic=self.config['sample_stoch'] if mode == 'display' else None,
argmax=self.config['sample_argmax'] if mode == 'display' else None)
sample, score = self.decoder.get_sample(context, **args)
if not args['stochastic']:
score = score / np.array([len(s) for s in sample])
sample = sample[score.argmin()]
score = score.min()
else:
score /= float(len(sample))
return sample, np.exp(score)
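    # Illustrative note: with beam search (stochastic=False) the hypothesis scores are
    # length-normalised and the smallest one (highest average probability) is kept;
    # np.exp(score) is returned next to the chosen sample.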
class AutoEncoder(RNNLM):
"""
Regular Auto-Encoder: RNN Encoder/Decoder
"""
def __init__(self,
config, n_rng, rng,
mode='Evaluation'):
super(RNNLM, self).__init__()
self.config = config
self.n_rng = n_rng # numpy random stream
self.rng = rng # Theano random stream
self.mode = mode
self.name = 'vae'
def build_(self):
logger.info("build the RNN auto-encoder")
self.encoder = Encoder(self.config, self.rng, prefix='enc')
if self.config['shared_embed']:
self.decoder = Decoder(self.config, self.rng, prefix='dec', embed=self.encoder.Embed)
else:
self.decoder = Decoder(self.config, self.rng, prefix='dec')
"""
Build the Transformation
"""
if self.config['nonlinear_A']:
self.action_trans = Dense(
self.config['enc_hidden_dim'],
self.config['action_dim'],
activation='tanh',
name='action_transform'
)
else:
assert self.config['enc_hidden_dim'] == self.config['action_dim'], \
'hidden dimension must match action dimension'
self.action_trans = Identity(name='action_transform')
if self.config['nonlinear_B']:
self.context_trans = Dense(
self.config['action_dim'],
self.config['dec_contxt_dim'],
activation='tanh',
name='context_transform'
)
else:
assert self.config['dec_contxt_dim'] == self.config['action_dim'], \
'action dimension must match context dimension'
self.context_trans = Identity(name='context_transform')
# registration
self._add(self.action_trans)
self._add(self.context_trans)
self._add(self.encoder)
self._add(self.decoder)
# objectives and optimizers
self.optimizer = optimizers.get(self.config['optimizer'], kwargs={'lr': self.config['lr']})
logger.info("create Helmholtz RECURRENT neural network. ok")
def compile_train(self, mode='train'):
# questions (theano variables)
inputs = T.imatrix() # padded input word sequence (for training)
context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])
assert context.ndim == 2
# decoding.
target = inputs
logPxz, logPPL = self.decoder.build_decoder(target, context)
# reconstruction loss
loss_rec = T.mean(-logPxz)
loss_ppl = T.exp(T.mean(-logPPL))
L1 = T.sum([T.sum(abs(w)) for w in self.params])
loss = loss_rec
updates = self.optimizer.get_updates(self.params, loss)
logger.info("compiling the compuational graph ::training function::")
train_inputs = [inputs]
self.train_ = theano.function(train_inputs,
[loss_rec, loss_ppl],
updates=updates,
name='train_fun')
logger.info("pre-training functions compile done.")
if mode == 'display' or mode == 'all':
"""
build the sampler function here <:::>
"""
# context vectors (as)
self.decoder.build_sampler()
logger.info("display functions compile done.")
# add monitoring:
self._monitoring()
# compiling monitoring
self.compile_monitoring(train_inputs)
class NRM(Model):
"""
Neural Responding Machine
    An Encoder-Decoder based responding model.
"""
def __init__(self,
config, n_rng, rng,
mode='Evaluation',
use_attention=False,
copynet=False,
identity=False):
super(NRM, self).__init__()
self.config = config
self.n_rng = n_rng # numpy random stream
self.rng = rng # Theano random stream
self.mode = mode
self.name = 'nrm'
self.attend = use_attention
self.copynet = copynet
self.identity = identity
def build_(self, lr=None, iterations=None):
logger.info("build the Neural Responding Machine")
# encoder-decoder:: <<==>>
self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)
if not self.attend:
self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)
else:
self.decoder = DecoderAtt(self.config, self.rng, prefix='dec', mode=self.mode,
copynet=self.copynet, identity=self.identity)
self._add(self.encoder)
self._add(self.decoder)
# objectives and optimizers
if self.config['optimizer'] == 'adam':
self.optimizer = optimizers.get(self.config['optimizer'],
kwargs=dict(rng=self.rng,
save=False))
else:
self.optimizer = optimizers.get(self.config['optimizer'])
if lr is not None:
self.optimizer.lr.set_value(floatX(lr))
self.optimizer.iterations.set_value(floatX(iterations))
logger.info("build ok.")
def compile_(self, mode='all', contrastive=False):
# compile the computational graph.
# INFO: the parameters.
# mode: 'train'/ 'display'/ 'policy' / 'all'
ps = 'params: {\n'
for p in self.params:
ps += '{0}: {1}\n'.format(p.name, p.eval().shape)
ps += '}.'
logger.info(ps)
param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])
logger.info("total number of the parameters of the model: {}".format(param_num))
if mode == 'train' or mode == 'all':
self.compile_train()
if mode == 'display' or mode == 'all':
self.compile_sample()
if mode == 'inference' or mode == 'all':
self.compile_inference()
def compile_train(self):
# questions (theano variables)
inputs = T.imatrix() # padded input word sequence (for training)
target = T.imatrix() # padded target word sequence (for training)
cc_matrix = T.tensor3()
# encoding & decoding
code, _, c_mask, _ = self.encoder.build_encoder(inputs, None, return_sequence=True, return_embed=True)
# code: (nb_samples, max_len, contxt_dim)
if 'explicit_loc' in self.config:
if self.config['explicit_loc']:
print 'use explicit location!!'
max_len = code.shape[1]
expLoc = T.eye(max_len, self.config['encode_max_len'], dtype='float32')[None, :, :]
expLoc = T.repeat(expLoc, code.shape[0], axis=0)
code = T.concatenate([code, expLoc], axis=2)
logPxz, logPPL = self.decoder.build_decoder(target, cc_matrix,
code, c_mask)
# responding loss
loss_rec = T.mean(-logPxz)
loss_ppl = T.exp(T.mean(-logPPL))
loss = loss_rec
updates = self.optimizer.get_updates(self.params, loss)
logger.info("compiling the compuational graph ::training function::")
train_inputs = [inputs, target, cc_matrix]
self.train_ = theano.function(train_inputs,
[loss_rec, loss_ppl],
updates=updates,
name='train_fun')
self.train_guard = theano.function(train_inputs,
[loss_rec, loss_ppl],
updates=updates,
name='train_fun',
mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
logger.info("training functions compile done.")
# # add monitoring:
# self.monitor['context'] = context
# self._monitoring()
#
# # compiling monitoring
# self.compile_monitoring(train_inputs)
def compile_sample(self):
if not self.attend:
self.encoder.compile_encoder(with_context=False)
else:
self.encoder.compile_encoder(with_context=False, return_sequence=True, return_embed=True)
self.decoder.build_sampler()
logger.info("sampling functions compile done.")
def compile_inference(self):
pass
def generate_(self, inputs, mode='display', return_attend=False, return_all=False):
# assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'
# assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'
args = dict(k=self.config['sample_beam'],
maxlen=self.config['max_len'],
stochastic=self.config['sample_stoch'] if mode == 'display' else None,
argmax=self.config['sample_argmax'] if mode == 'display' else None,
return_attend=return_attend)
context, _, c_mask, _, Z, R = self.encoder.gtenc(inputs)
# c_mask[0, 3] = c_mask[0, 3] * 0
# L = context.shape[1]
# izz = np.concatenate([np.arange(3), np.asarray([1,2]), np.arange(3, L)])
# context = context[:, izz, :]
# c_mask = c_mask[:, izz]
# inputs = inputs[:, izz]
# context, _, c_mask, _ = self.encoder.encode(inputs)
# import pylab as plt
# # visualize_(plt.subplots(), Z[0][:, 300:], normal=False)
# visualize_(plt.subplots(), context[0], normal=False)
if 'explicit_loc' in self.config:
if self.config['explicit_loc']:
max_len = context.shape[1]
expLoc = np.eye(max_len, self.config['encode_max_len'], dtype='float32')[None, :, :]
expLoc = np.repeat(expLoc, context.shape[0], axis=0)
context = np.concatenate([context, expLoc], axis=2)
sample, score, ppp = self.decoder.get_sample(context, c_mask, inputs, **args)
if return_all:
return sample, score, ppp
if not args['stochastic']:
score = score / np.array([len(s) for s in sample])
idz = score.argmin()
sample = sample[idz]
score = score.min()
ppp = ppp[idz]
else:
score /= float(len(sample))
return sample, np.exp(score), ppp
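    # Illustrative note: generate_ first runs the gate-returning encoder (gtenc) to get
    # the per-position context and source mask, optionally appends the explicit location
    # features, then delegates to DecoderAtt.get_sample; under beam search it keeps only
    # the best hypothesis together with its per-word copy statistics ppp.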
def evaluate_(self, inputs, outputs, idx2word, inputs_unk=None, encode=True):
def cut_zero_yes(sample, idx2word, ppp=None, Lmax=None):
if Lmax is None:
Lmax = self.config['dec_voc_size']
if ppp is None:
if 0 not in sample:
return ['{}'.format(idx2word[w].encode('utf-8'))
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample]
return ['{}'.format(idx2word[w].encode('utf-8'))
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample[:sample.index(0)]]
else:
if 0 not in sample:
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]].encode('utf-8'), p)
for w, p in zip(sample, ppp)]
idz = sample.index(0)
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]].encode('utf-8'), p)
for w, p in zip(sample[:idz], ppp[:idz])]
def cut_zero_no(sample, idx2word, ppp=None, Lmax=None):
if Lmax is None:
Lmax = self.config['dec_voc_size']
if ppp is None:
if 0 not in sample:
return ['{}'.format(idx2word[w])
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample]
return ['{}'.format(idx2word[w])
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample[:sample.index(0)]]
else:
if 0 not in sample:
return ['{0} ({1:1.1f})'.format(
idx2word[w], p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]], p)
for w, p in zip(sample, ppp)]
idz = sample.index(0)
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]], p)
for w, p in zip(sample[:idz], ppp[:idz])]
if inputs_unk is None:
result, _, ppp = self.generate_(inputs[None, :])
else:
result, _, ppp = self.generate_(inputs_unk[None, :])
if encode:
cut_zero = cut_zero_yes
else:
cut_zero = cut_zero_no
pp0, pp1 = [np.asarray(p) for p in zip(*ppp)]
pp = (pp1 - pp0) / pp1
# pp = (pp1 - pp0) / pp1
print len(ppp)
print '<Environment> [lr={0}][iter={1}]'.format(self.optimizer.lr.get_value(),
self.optimizer.iterations.get_value())
a = '[SOURCE]: {}\n'.format(' '.join(cut_zero(inputs.tolist(), idx2word, Lmax=len(idx2word))))
b = '[TARGET]: {}\n'.format(' '.join(cut_zero(outputs.tolist(), idx2word, Lmax=len(idx2word))))
c = '[DECODE]: {}\n'.format(' '.join(cut_zero(result, idx2word)))
d = '[CpRate]: {}\n'.format(' '.join(cut_zero(result, idx2word, pp.tolist())))
e = '[CpRate]: {}\n'.format(' '.join(cut_zero(result, idx2word, result)))
print a
print '{0} -> {1}'.format(len(a.split()), len(b.split()))
if inputs_unk is not None:
k = '[_INPUT]: {}\n'.format(' '.join(cut_zero(inputs_unk.tolist(), idx2word, Lmax=len(idx2word))))
print k
a += k
print b
print c
print d
# print e
a += b + c + d
return a
def analyse_(self, inputs, outputs, idx2word, inputs_unk=None, return_attend=False, name=None, display=False):
def cut_zero(sample, idx2word, ppp=None, Lmax=None):
if Lmax is None:
Lmax = self.config['dec_voc_size']
if ppp is None:
if 0 not in sample:
return ['{}'.format(idx2word[w].encode('utf-8'))
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample]
return ['{}'.format(idx2word[w].encode('utf-8'))
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample[:sample.index(0)]]
else:
if 0 not in sample:
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]].encode('utf-8'), p)
for w, p in zip(sample, ppp)]
idz = sample.index(0)
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]].encode('utf-8'), p)
for w, p in zip(sample[:idz], ppp[:idz])]
if inputs_unk is None:
result, _, ppp = self.generate_(inputs[None, :],
return_attend=return_attend)
else:
result, _, ppp = self.generate_(inputs_unk[None, :],
return_attend=return_attend)
source = '{}'.format(' '.join(cut_zero(inputs.tolist(), idx2word, Lmax=len(idx2word))))
target = '{}'.format(' '.join(cut_zero(outputs.tolist(), idx2word, Lmax=len(idx2word))))
decode = '{}'.format(' '.join(cut_zero(result, idx2word)))
if display:
print source
print target
print decode
idz = result.index(0)
p1, p2 = [np.asarray(p) for p in zip(*ppp)]
print p1.shape
import pylab as plt
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
visualize_(plt.subplots(), 1 - p1[:idz, :].T, grid=True, name=name)
visualize_(plt.subplots(), 1 - p2[:idz, :].T, name=name)
# visualize_(plt.subplots(), 1 - np.mean(p2[:idz, :], axis=1, keepdims=True).T)
return target == decode
def analyse_cover(self, inputs, outputs, idx2word, inputs_unk=None, return_attend=False, name=None, display=False):
def cut_zero(sample, idx2word, ppp=None, Lmax=None):
if Lmax is None:
Lmax = self.config['dec_voc_size']
if ppp is None:
if 0 not in sample:
return ['{}'.format(idx2word[w].encode('utf-8'))
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample]
return ['{}'.format(idx2word[w].encode('utf-8'))
if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))
for w in sample[:sample.index(0)]]
else:
if 0 not in sample:
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]].encode('utf-8'), p)
for w, p in zip(sample, ppp)]
idz = sample.index(0)
return ['{0} ({1:1.1f})'.format(
idx2word[w].encode('utf-8'), p)
if w < Lmax
else '{0} ({1:1.1f})'.format(
idx2word[inputs[w - Lmax]].encode('utf-8'), p)
for w, p in zip(sample[:idz], ppp[:idz])]
if inputs_unk is None:
results, _, ppp = self.generate_(inputs[None, :],
return_attend=return_attend,
return_all=True)
else:
results, _, ppp = self.generate_(inputs_unk[None, :],
return_attend=return_attend,
return_all=True)
source = '{}'.format(' '.join(cut_zero(inputs.tolist(), idx2word, Lmax=len(idx2word))))
target = '{}'.format(' '.join(cut_zero(outputs.tolist(), idx2word, Lmax=len(idx2word))))
# decode = '{}'.format(' '.join(cut_zero(result, idx2word)))
score = [target == '{}'.format(' '.join(cut_zero(result, idx2word))) for result in results]
return max(score)
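    # Minimal end-to-end sketch (the call sequence is inferred from this file and the
    # argument values are illustrative assumptions, not from the original source):
    #
    #   model = NRM(config, n_rng, rng, use_attention=True, copynet=True)
    #   model.build_()
    #   model.compile_('all')
    #   loss_rec, loss_ppl = model.train_(inputs, target, cc_matrix)
    #   sample, prob, ppp = model.generate_(inputs[None, :])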
|
MingyuanXie/CopyNet
|
emolga/models/covc_encdec.py
|
Python
|
mit
| 74,715
|
[
"Gaussian"
] |
090c43c0c3e09b796e739128bb4d3ed666824fa7da3ac06a9984d9d932f80f39
|
import os
import logging
import tool_shed.repository_types.util as rt_util
from galaxy.model.orm import and_
from galaxy.model.orm import or_
from galaxy.webapps.tool_shed import model
from tool_shed.util import hg_util
from tool_shed.util import metadata_util
from tool_shed.util import shed_util_common as suc
log = logging.getLogger( __name__ )
class Registry( object ):
def __init__( self, app ):
log.debug( "Loading the repository registry..." )
self.app = app
self.certified_level_one_clause_list = self.get_certified_level_one_clause_list()
# The following lists contain tuples like ( repository.name, repository.user.username, changeset_revision )
        # where the changeset_revision entry is always the latest installable changeset_revision.
self.certified_level_one_repository_and_suite_tuples = []
self.certified_level_one_suite_tuples = []
# These category dictionaries contain entries where the key is the category and the value is the integer count
# of viewable repositories within that category.
self.certified_level_one_viewable_repositories_and_suites_by_category = {}
self.certified_level_one_viewable_suites_by_category = {}
self.certified_level_two_repository_and_suite_tuples = []
self.certified_level_two_suite_tuples = []
self.certified_level_two_viewable_repositories_and_suites_by_category = {}
self.certified_level_two_viewable_suites_by_category = {}
self.repository_and_suite_tuples = []
self.suite_tuples = []
self.viewable_repositories_and_suites_by_category = {}
self.viewable_suites_by_category = {}
self.viewable_valid_repositories_and_suites_by_category = {}
self.viewable_valid_suites_by_category = {}
self.load_viewable_repositories_and_suites_by_category()
self.load_repository_and_suite_tuples()
def add_category_entry( self, category ):
category_name = str( category.name )
if category_name not in self.viewable_repositories_and_suites_by_category:
self.viewable_repositories_and_suites_by_category[ category_name ] = 0
if category_name not in self.viewable_suites_by_category:
self.viewable_suites_by_category[ category_name ] = 0
if category_name not in self.viewable_valid_repositories_and_suites_by_category:
self.viewable_valid_repositories_and_suites_by_category[ category_name ] = 0
if category_name not in self.viewable_valid_suites_by_category:
self.viewable_valid_suites_by_category[ category_name ] = 0
if category_name not in self.certified_level_one_viewable_repositories_and_suites_by_category:
self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] = 0
if category_name not in self.certified_level_one_viewable_suites_by_category:
self.certified_level_one_viewable_suites_by_category[ category_name ] = 0
def add_entry( self, repository ):
try:
if repository:
is_valid = self.is_valid( repository )
certified_level_one_tuple = self.get_certified_level_one_tuple( repository )
latest_installable_changeset_revision, is_level_one_certified = certified_level_one_tuple
for rca in repository.categories:
category = rca.category
category_name = str( category.name )
if category_name in self.viewable_repositories_and_suites_by_category:
self.viewable_repositories_and_suites_by_category[ category_name ] += 1
else:
self.viewable_repositories_and_suites_by_category[ category_name ] = 1
if is_valid:
if category_name in self.viewable_valid_repositories_and_suites_by_category:
self.viewable_valid_repositories_and_suites_by_category[ category_name ] += 1
else:
self.viewable_valid_repositories_and_suites_by_category[ category_name ] = 1
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if category_name in self.viewable_suites_by_category:
self.viewable_suites_by_category[ category_name ] += 1
else:
self.viewable_suites_by_category[ category_name ] = 1
if is_valid:
if category_name in self.viewable_valid_suites_by_category:
self.viewable_valid_suites_by_category[ category_name ] += 1
else:
self.viewable_valid_suites_by_category[ category_name ] = 1
if is_level_one_certified:
if category_name in self.certified_level_one_viewable_repositories_and_suites_by_category:
self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] += 1
else:
self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] = 1
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if category_name in self.certified_level_one_viewable_suites_by_category:
self.certified_level_one_viewable_suites_by_category[ category_name ] += 1
else:
self.certified_level_one_viewable_suites_by_category[ category_name ] = 1
self.load_repository_and_suite_tuple( repository )
if is_level_one_certified:
self.load_certified_level_one_repository_and_suite_tuple( repository )
except Exception, e:
            # The viewable repository numbers and the categorized (filtered) lists of repository tuples
            # may be slightly skewed, but that is not a reason to risk a server error.  All
            # will be corrected at the next server start.
log.exception( "Handled error adding entry to repository registry: %s." % str( e ) )
def edit_category_entry( self, old_name, new_name ):
if old_name in self.viewable_repositories_and_suites_by_category:
val = self.viewable_repositories_and_suites_by_category[ old_name ]
del self.viewable_repositories_and_suites_by_category[ old_name ]
self.viewable_repositories_and_suites_by_category[ new_name ] = val
else:
self.viewable_repositories_and_suites_by_category[ new_name ] = 0
if old_name in self.viewable_valid_repositories_and_suites_by_category:
val = self.viewable_valid_repositories_and_suites_by_category[ old_name ]
del self.viewable_valid_repositories_and_suites_by_category[ old_name ]
self.viewable_valid_repositories_and_suites_by_category[ new_name ] = val
else:
self.viewable_valid_repositories_and_suites_by_category[ new_name ] = 0
if old_name in self.viewable_suites_by_category:
val = self.viewable_suites_by_category[ old_name ]
del self.viewable_suites_by_category[ old_name ]
self.viewable_suites_by_category[ new_name ] = val
else:
self.viewable_suites_by_category[ new_name ] = 0
if old_name in self.viewable_valid_suites_by_category:
val = self.viewable_valid_suites_by_category[ old_name ]
del self.viewable_valid_suites_by_category[ old_name ]
self.viewable_valid_suites_by_category[ new_name ] = val
else:
self.viewable_valid_suites_by_category[ new_name ] = 0
if old_name in self.certified_level_one_viewable_repositories_and_suites_by_category:
val = self.certified_level_one_viewable_repositories_and_suites_by_category[ old_name ]
del self.certified_level_one_viewable_repositories_and_suites_by_category[ old_name ]
self.certified_level_one_viewable_repositories_and_suites_by_category[ new_name ] = val
else:
self.certified_level_one_viewable_repositories_and_suites_by_category[ new_name ] = 0
if old_name in self.certified_level_one_viewable_suites_by_category:
val = self.certified_level_one_viewable_suites_by_category[ old_name ]
del self.certified_level_one_viewable_suites_by_category[ old_name ]
self.certified_level_one_viewable_suites_by_category[ new_name ] = val
else:
self.certified_level_one_viewable_suites_by_category[ new_name ] = 0
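    # Illustrative note (not part of the original module): renaming a category simply re-keys the
    # six per-category counters handled above, carrying any accumulated counts over to the new name,
    # e.g.
    #
    #     registry.edit_category_entry( "Assembly", "Genome Assembly" )   # hypothetical call
    #
    # If the old name had never been counted, the new name starts at 0.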
def get_certified_level_one_clause_list( self ):
certified_level_one_tuples = []
clause_list = []
for repository in self.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == False,
model.Repository.table.c.deprecated == False ) ):
certified_level_one_tuple = self.get_certified_level_one_tuple( repository )
latest_installable_changeset_revision, is_level_one_certified = certified_level_one_tuple
if is_level_one_certified:
certified_level_one_tuples.append( certified_level_one_tuple )
clause_list.append( "%s=%d and %s='%s'" % ( model.RepositoryMetadata.table.c.repository_id,
repository.id,
model.RepositoryMetadata.table.c.changeset_revision,
latest_installable_changeset_revision ) )
return clause_list
def get_certified_level_one_tuple( self, repository ):
"""
        Return a tuple ( latest_installable_changeset_revision, is_level_one_certified ) where the
        flag is True only if the latest installable changeset_revision of the received repository
        is level one certified.
"""
if repository is None:
return ( None, False )
if repository.deleted or repository.deprecated:
return ( None, False )
repo = hg_util.get_repo_for_repository( self.app, repository=repository, repo_path=None, create=False )
# Get the latest installable changeset revision since that is all that is currently configured for testing.
latest_installable_changeset_revision = suc.get_latest_downloadable_changeset_revision( self.app, repository, repo )
if latest_installable_changeset_revision not in [ None, hg_util.INITIAL_CHANGELOG_HASH ]:
encoded_repository_id = self.app.security.encode_id( repository.id )
repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app,
encoded_repository_id,
latest_installable_changeset_revision )
if repository_metadata:
# Filter out repository revisions that have not been tested.
if repository_metadata.time_last_tested is not None and repository_metadata.tool_test_results is not None:
if repository.type in [ rt_util.REPOSITORY_SUITE_DEFINITION, rt_util.TOOL_DEPENDENCY_DEFINITION ]:
# Look in the tool_test_results dictionary for installation errors.
try:
tool_test_results_dict = repository_metadata.tool_test_results[ 0 ]
except Exception, e:
message = 'Error attempting to retrieve install and test results for repository %s:\n' % str( repository.name )
message += '%s' % str( e )
log.exception( message )
return ( latest_installable_changeset_revision, False )
if 'installation_errors' in tool_test_results_dict:
return ( latest_installable_changeset_revision, False )
return ( latest_installable_changeset_revision, True )
else:
# We have a repository with type Unrestricted.
if repository_metadata.includes_tools:
if repository_metadata.tools_functionally_correct:
return ( latest_installable_changeset_revision, True )
return ( latest_installable_changeset_revision, False )
else:
# Look in the tool_test_results dictionary for installation errors.
try:
tool_test_results_dict = repository_metadata.tool_test_results[ 0 ]
except Exception, e:
message = 'Error attempting to retrieve install and test results for repository %s:\n' % str( repository.name )
message += '%s' % str( e )
log.exception( message )
return ( latest_installable_changeset_revision, False )
if 'installation_errors' in tool_test_results_dict:
return ( latest_installable_changeset_revision, False )
return ( latest_installable_changeset_revision, True )
else:
# No test results.
return ( latest_installable_changeset_revision, False )
else:
# No repository_metadata.
return ( latest_installable_changeset_revision, False )
else:
# No installable changeset_revision.
return ( None, False )
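    # Illustrative sketch (names assumed, not part of the original module): callers unpack the
    # 2-tuple returned above roughly as follows.
    #
    #     changeset, certified = registry.get_certified_level_one_tuple( repository )
    #     if certified:
    #         pass   # changeset is the latest installable revision and it passed install/test
    #     else:
    #         pass   # no installable revision (changeset is None), or it failed / was never tested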
def is_level_one_certified( self, repository_metadata ):
if repository_metadata:
repository = repository_metadata.repository
if repository:
if repository.deprecated or repository.deleted:
return False
tuple = ( str( repository.name ), str( repository.user.username ), str( repository_metadata.changeset_revision ) )
if repository.type in [ rt_util.REPOSITORY_SUITE_DEFINITION ]:
return tuple in self.certified_level_one_suite_tuples
else:
return tuple in self.certified_level_one_repository_and_suite_tuples
return False
def is_valid( self, repository ):
if repository and not repository.deleted and not repository.deprecated and repository.downloadable_revisions:
return True
return False
def load_certified_level_one_repository_and_suite_tuple( self, repository ):
# The received repository has been determined to be level one certified.
name = str( repository.name )
owner = str( repository.user.username )
tip_changeset_hash = repository.tip( self.app )
if tip_changeset_hash != hg_util.INITIAL_CHANGELOG_HASH:
certified_level_one_tuple = ( name, owner, tip_changeset_hash )
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if certified_level_one_tuple not in self.certified_level_one_suite_tuples:
self.certified_level_one_suite_tuples.append( certified_level_one_tuple )
else:
if certified_level_one_tuple not in self.certified_level_one_repository_and_suite_tuples:
self.certified_level_one_repository_and_suite_tuples.append( certified_level_one_tuple )
def load_repository_and_suite_tuple( self, repository ):
name = str( repository.name )
owner = str( repository.user.username )
for repository_metadata in repository.metadata_revisions:
changeset_revision = str( repository_metadata.changeset_revision )
tuple = ( name, owner, changeset_revision )
if tuple not in self.repository_and_suite_tuples:
self.repository_and_suite_tuples.append( tuple )
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if tuple not in self.suite_tuples:
self.suite_tuples.append( tuple )
def load_repository_and_suite_tuples( self ):
# Load self.certified_level_one_repository_and_suite_tuples and self.certified_level_one_suite_tuples.
for repository in self.sa_session.query( model.Repository ) \
.join( model.RepositoryMetadata.table ) \
.filter( or_( *self.certified_level_one_clause_list ) ) \
.join( model.User.table ):
self.load_certified_level_one_repository_and_suite_tuple( repository )
# Load self.repository_and_suite_tuples and self.suite_tuples
for repository in self.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == False,
model.Repository.table.c.deprecated == False ) ) \
.join( model.User.table ):
self.load_repository_and_suite_tuple( repository )
def load_viewable_repositories_and_suites_by_category( self ):
# Clear all dictionaries just in case they were previously loaded.
self.certified_level_one_viewable_repositories_and_suites_by_category = {}
self.certified_level_one_viewable_suites_by_category = {}
self.certified_level_two_viewable_repositories_and_suites_by_category = {}
self.certified_level_two_viewable_suites_by_category = {}
self.viewable_repositories_and_suites_by_category = {}
self.viewable_suites_by_category = {}
self.viewable_valid_repositories_and_suites_by_category = {}
self.viewable_valid_suites_by_category = {}
for category in self.sa_session.query( model.Category ):
category_name = str( category.name )
            if category_name not in self.certified_level_one_viewable_repositories_and_suites_by_category:
                self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] = 0
            if category_name not in self.certified_level_one_viewable_suites_by_category:
                self.certified_level_one_viewable_suites_by_category[ category_name ] = 0
            if category_name not in self.viewable_repositories_and_suites_by_category:
                self.viewable_repositories_and_suites_by_category[ category_name ] = 0
            if category_name not in self.viewable_suites_by_category:
                self.viewable_suites_by_category[ category_name ] = 0
            if category_name not in self.viewable_valid_repositories_and_suites_by_category:
                self.viewable_valid_repositories_and_suites_by_category[ category_name ] = 0
            if category_name not in self.viewable_valid_suites_by_category:
                self.viewable_valid_suites_by_category[ category_name ] = 0
for rca in category.repositories:
repository = rca.repository
if not repository.deleted and not repository.deprecated:
is_valid = self.is_valid( repository )
encoded_repository_id = self.app.security.encode_id( repository.id )
tip_changeset_hash = repository.tip( self.app )
repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app,
encoded_repository_id,
tip_changeset_hash )
self.viewable_repositories_and_suites_by_category[ category_name ] += 1
if is_valid:
self.viewable_valid_repositories_and_suites_by_category[ category_name ] += 1
if repository.type in [ rt_util.REPOSITORY_SUITE_DEFINITION ]:
self.viewable_suites_by_category[ category_name ] += 1
if is_valid:
self.viewable_valid_suites_by_category[ category_name ] += 1
if self.is_level_one_certified( repository_metadata ):
self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] += 1
if repository.type in [ rt_util.REPOSITORY_SUITE_DEFINITION ]:
self.certified_level_one_viewable_suites_by_category[ category_name ] += 1
def remove_category_entry( self, category ):
        category_name = str( category.name )
        if category_name in self.viewable_repositories_and_suites_by_category:
            del self.viewable_repositories_and_suites_by_category[ category_name ]
        if category_name in self.viewable_valid_repositories_and_suites_by_category:
            del self.viewable_valid_repositories_and_suites_by_category[ category_name ]
        if category_name in self.viewable_suites_by_category:
            del self.viewable_suites_by_category[ category_name ]
        if category_name in self.viewable_valid_suites_by_category:
            del self.viewable_valid_suites_by_category[ category_name ]
        if category_name in self.certified_level_one_viewable_repositories_and_suites_by_category:
            del self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ]
        if category_name in self.certified_level_one_viewable_suites_by_category:
            del self.certified_level_one_viewable_suites_by_category[ category_name ]
def remove_entry( self, repository ):
try:
if repository:
is_valid = self.is_valid( repository )
certified_level_one_tuple = self.get_certified_level_one_tuple( repository )
latest_installable_changeset_revision, is_level_one_certified = certified_level_one_tuple
for rca in repository.categories:
category = rca.category
category_name = str( category.name )
if category_name in self.viewable_repositories_and_suites_by_category:
if self.viewable_repositories_and_suites_by_category[ category_name ] > 0:
self.viewable_repositories_and_suites_by_category[ category_name ] -= 1
else:
self.viewable_repositories_and_suites_by_category[ category_name ] = 0
if is_valid:
if category_name in self.viewable_valid_repositories_and_suites_by_category:
if self.viewable_valid_repositories_and_suites_by_category[ category_name ] > 0:
self.viewable_valid_repositories_and_suites_by_category[ category_name ] -= 1
else:
self.viewable_valid_repositories_and_suites_by_category[ category_name ] = 0
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if category_name in self.viewable_suites_by_category:
if self.viewable_suites_by_category[ category_name ] > 0:
self.viewable_suites_by_category[ category_name ] -= 1
else:
self.viewable_suites_by_category[ category_name ] = 0
if is_valid:
if category_name in self.viewable_valid_suites_by_category:
if self.viewable_valid_suites_by_category[ category_name ] > 0:
self.viewable_valid_suites_by_category[ category_name ] -= 1
else:
self.viewable_valid_suites_by_category[ category_name ] = 0
if is_level_one_certified:
if category_name in self.certified_level_one_viewable_repositories_and_suites_by_category:
if self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] > 0:
self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] -= 1
else:
self.certified_level_one_viewable_repositories_and_suites_by_category[ category_name ] = 0
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if category_name in self.certified_level_one_viewable_suites_by_category:
if self.certified_level_one_viewable_suites_by_category[ category_name ] > 0:
self.certified_level_one_viewable_suites_by_category[ category_name ] -= 1
else:
self.certified_level_one_viewable_suites_by_category[ category_name ] = 0
self.unload_repository_and_suite_tuple( repository )
if is_level_one_certified:
self.unload_certified_level_one_repository_and_suite_tuple( repository )
except Exception, e:
            # The viewable repository numbers and the categorized (filtered) lists of repository tuples
            # may be slightly skewed, but that is not a reason to risk a server error.  Everything
            # will be corrected at the next server start.
log.exception( "Handled error removing entry from repository registry: %s." % str( e ) )
@property
def sa_session( self ):
return self.app.model.context.current
def unload_certified_level_one_repository_and_suite_tuple( self, repository ):
# The received repository has been determined to be level one certified.
name = str( repository.name )
owner = str( repository.user.username )
tip_changeset_hash = repository.tip( self.app )
if tip_changeset_hash != hg_util.INITIAL_CHANGELOG_HASH:
certified_level_one_tuple = ( name, owner, tip_changeset_hash )
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if certified_level_one_tuple in self.certified_level_one_suite_tuples:
self.certified_level_one_suite_tuples.remove( certified_level_one_tuple )
else:
if certified_level_one_tuple in self.certified_level_one_repository_and_suite_tuples:
self.certified_level_one_repository_and_suite_tuples.remove( certified_level_one_tuple )
def unload_repository_and_suite_tuple( self, repository ):
name = str( repository.name )
owner = str( repository.user.username )
for repository_metadata in repository.metadata_revisions:
changeset_revision = str( repository_metadata.changeset_revision )
tuple = ( name, owner, changeset_revision )
if tuple in self.repository_and_suite_tuples:
self.repository_and_suite_tuples.remove( tuple )
if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
if tuple in self.suite_tuples:
self.suite_tuples.remove( tuple )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/tool_shed/repository_registry.py
|
Python
|
gpl-3.0
| 27,672
|
[
"Galaxy"
] |
c37b9eb51e6e64d73c662024da7b6cd1f2bd80650290cb502706c1657ff9c217
|
from unittest import TestCase
from octopus.core import app, initialise
import time, esprit
# switch out the live index for the test index
app.config['ELASTIC_SEARCH_INDEX'] = app.config['ELASTIC_SEARCH_TEST_INDEX']
# if a test on a previous run has totally failed and tearDown has not run, then make sure the index is gone first
TEST_CONN = esprit.raw.Connection(app.config.get('ELASTIC_SEARCH_HOST'), app.config.get('ELASTIC_SEARCH_INDEX'))
esprit.raw.delete(TEST_CONN)
time.sleep(1)
class ESTestCase(TestCase):
def setUp(self):
initialise()
time.sleep(1)
def tearDown(self):
esprit.raw.delete(TEST_CONN)
time.sleep(1)
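# Usage sketch (assumed, not part of this module): concrete test cases subclass ESTestCase so that
# every test runs against the throw-away test index configured above rather than the live one.
#
#     from octopus.modules.es.testindex import ESTestCase
#
#     class TestMyDAO(ESTestCase):              # hypothetical test class
#         def test_save_and_retrieve(self):
#             pass                              # exercise code that reads/writes the test index
#
# setUp() re-initialises the app against the test index and tearDown() deletes it again, so tests
# cannot leak documents into the live Elasticsearch index.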
|
JiscPER/magnificent-octopus
|
octopus/modules/es/testindex.py
|
Python
|
apache-2.0
| 663
|
[
"Octopus"
] |
bd5bf211f1f2f6a82a677f0b9e274a0e31e7f93c7736a15a3d838a36ff63a354
|
"""
Computational Neurodynamics
Exercise 2
Simulates the movement of a robot with differential wheels under the
control of a spiking neural network. The simulation runs for a very
long time --- if you get bored, press Ctrl+C a couple of times.
(C) Murray Shanahan et al, 2015
"""
import numpy as np
import numpy.random as rn
import matplotlib.pyplot as plt
from Environment import Environment
from RobotConnect4L import RobotConnect4L
from RobotUpdate import RobotUpdate
# Create the environment
print 'Initialising environment'
xmax = 100
ymax = 100
Env = Environment(15, 10, 20, xmax, ymax)
# Robot controller
print 'Initialising Robot Controller'
Ns = 4 # Sensor neurons. Try 1, 4, and 8
Nm = 4 # Motor neurons. Try 1, 4, and 8
net = RobotConnect4L(Ns, Nm)
Dmax = 5 # Maximum synaptic delay
Ib = 30 # Base current
Rmax = 40 # Estimated peak motor firing rate in Hz
Umin = 0.025 # Minimum wheel velocity in cm/ms
Umax = Umin + Umin / 6.0 # Maximum wheel velocity
# Initialise layers
for lr in xrange(net.Nlayers):
net.layer[lr].v = -65 * np.ones(net.layer[lr].N)
net.layer[lr].u = net.layer[lr].b * net.layer[lr].v
net.layer[lr].firings = np.array([])
# Simulation parameters
Tmax = 20000 # Simulation time in milliseconds
dt = 100 # Robot step size in milliseconds
# Initialise record of membrane potentials
v = {}
for lr in xrange(net.Nlayers):
v[lr] = np.zeros([dt, net.layer[lr].N])
# Initialise record of robot positions
T = np.arange(0, Tmax, dt)
x = np.zeros(len(T) + 1)
y = np.zeros(len(T) + 1)
w = np.zeros(len(T) + 1)
w[0] = np.pi / 4
# Size of layers
N0 = net.layer[0].N
N1 = net.layer[1].N
N2 = net.layer[2].N
N3 = net.layer[3].N
L = net.Nlayers
print 'Preparing Simulation'
# Draw Environment
plt.figure(2)
plt.xlim(0, xmax)
plt.ylim(0, ymax)
plt.title('Robot controlled by spiking neurons')
plt.xlabel('X')
plt.ylabel('Y')
for Ob in Env.Obs:
plt.scatter(Ob['x'], Ob['y'], s=np.pi * (Ob['r'] ** 2), c='lime')
plt.ion()
plt.show()
# SIMULATE
print 'Start Simulation'
for t in xrange(len(T)):
# Input from Sensors
# SL, SR = RobotGetSensors(Env, x[t], y[t], w[t], xmax, ymax)
SL, SR = Env.GetSensors(x[t], y[t], w[t])
# Carry over firings that might not have reached their targets yet
for lr in xrange(L):
firings = []
for f in net.layer[lr].firings:
# discard all earlier firings
if f[0] > dt - Dmax:
# also decrease the time so that it is in -Dmax to -1
f[0] = f[0] - dt
firings.append(f)
net.layer[lr].firings = np.array(firings)
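  # Worked example (illustrative): with dt = 100 and Dmax = 5, a spike recorded at local time 98
  # satisfies 98 > 95, so it is kept and re-timed to -2; its delayed synaptic effect can then still
  # arrive during the first few milliseconds of the next 100 ms window.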
for t2 in xrange(dt):
# Deliver stimulus as a Poisson spike stream
net.layer[0].I = rn.poisson(SL * 15, N0)
net.layer[1].I = rn.poisson(SR * 15, N1)
# Deliver noisy base current
net.layer[2].I = 5 * rn.randn(N2)
net.layer[3].I = 5 * rn.randn(N3)
# Update network
net.Update(t2)
# Maintain record of membrane potential
for lr in xrange(L):
v[lr][t2, :] = net.layer[lr].v
# Discard carried over firings with time less than 0
for lr in xrange(L):
firings = []
for f in net.layer[lr].firings:
if f[0] > 0:
firings.append(f)
net.layer[lr].firings = np.array(firings)
  # Add Dirac pulses (mainly for presentation)
for lr in xrange(L):
firings = net.layer[lr].firings
if firings.size != 0:
v[lr][firings[:, 0], firings[:, 1]] = 30
# Output to motors
# Calculate motor firing rates in Hz
RL = 1.0 * len(net.layer[2].firings) / dt / N2 * 1000
RR = 1.0 * len(net.layer[3].firings) / dt / N3 * 1000
print([RL, RR])
# Set wheel velocities (as fractions of Umax)
UL = (Umin / Umax + RL / Rmax * (1 - Umin / Umax))
UR = (Umin / Umax + RR / Rmax * (1 - Umin / Umax))
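  # Worked example (illustrative): Umin/Umax is 6/7 (~0.857) with the values above, so a motor
  # population firing at RL = 20 Hz (half of Rmax) gives UL ~ 0.857 + 0.5 * (1 - 0.857) ~ 0.93.
  # Actual wheel speeds (UL * Umax) therefore stay roughly within the narrow band [Umin, Umax]
  # and scale linearly with firing rate.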
# Update Environment
x[t + 1], y[t + 1], w[t + 1] = RobotUpdate(x[t], y[t], w[t], UL, UR,
Umax, dt, xmax, ymax)
# PLOTTING
# Plot membrane potential
plt.figure(1)
plt.clf()
plt.subplot(221)
plt.plot(v[0])
plt.subplot(221)
plt.title('Left sensory neurons')
plt.ylabel('Membrane potential (mV)')
plt.ylim(-90, 40)
plt.subplot(222)
plt.plot(v[1])
plt.title('Right sensory neurons')
plt.ylim(-90, 40)
plt.subplot(223)
plt.plot(v[2])
plt.title('Left motor neurons')
plt.ylabel('Membrane potential (mV)')
plt.ylim(-90, 40)
plt.xlabel('Time (ms)')
plt.subplot(224)
plt.plot(v[3])
plt.title('Right motor neurons')
plt.ylim(-90, 40)
plt.xlabel('Time (ms)')
plt.draw()
# Plot robot trajectory
plt.figure(2)
plt.scatter(x, y, marker='.')
plt.draw()
# Pause for screen to update
plt.pause(.1)
|
lawrencejones/neuro
|
Exercise_2/RobotRun4L.py
|
Python
|
gpl-3.0
| 4,878
|
[
"DIRAC"
] |
160365767b8818b7513237a36bc2cb2a59ccdc67ebbc4a10dd48777c180e110c
|
from vtk import *
from tonic.camera import *
import json, os, math, gzip, shutil
# -----------------------------------------------------------------------------
# Helper function
# -----------------------------------------------------------------------------
def getScalarFromRGB(rgb, scalarRange=[-1.0, 1.0]):
delta = (scalarRange[1] - scalarRange[0]) / 16777215.0 # 2^24 - 1 => 16,777,215
if rgb[0] != 0 or rgb[1] != 0 or rgb[2] != 0:
# Decode encoded value
return scalarRange[0] + delta * float(rgb[0]*65536 + rgb[1]*256 + rgb[2] - 1)
else:
# No value
return float('NaN')
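# Worked example (illustrative): with scalarRange=[-1.0, 1.0] the step size is delta = 2 / 16777215.
# The encoded pixel rgb = (0, 0, 1) decodes to exactly scalarRange[0], rgb = (0, 0, 2) to
# scalarRange[0] + delta, and so on up the 24-bit code; a pure black pixel (0, 0, 0) is reserved
# for "no value" and maps to NaN.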
def convertImageToFloat(srcPngImage, destFile, scalarRange=[0.0, 1.0]):
reader = vtkPNGReader()
reader.SetFileName(srcPngImage)
reader.Update()
rgbArray = reader.GetOutput().GetPointData().GetArray(0)
stackSize = rgbArray.GetNumberOfTuples()
size = reader.GetOutput().GetDimensions()
outputArray = vtkFloatArray()
outputArray.SetNumberOfComponents(1)
outputArray.SetNumberOfTuples(stackSize)
for idx in range(stackSize):
outputArray.SetTuple1(
idx,
getScalarFromRGB(rgbArray.GetTuple(idx), scalarRange)
)
# Write float file
with open(destFile, 'wb') as f:
f.write(buffer(outputArray))
return size
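# Usage sketch (illustrative, file names assumed): convert a scalar image encoded as 24-bit RGB
# into a raw float32 buffer next to it.
#
#     size = convertImageToFloat('/data/view_0/depth.png', '/data/view_0/depth.float32',
#                                scalarRange=[0.0, 1.0])
#     # size is the (width, height, depth) tuple reported by the PNG reader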
# -----------------------------------------------------------------------------
# Composite.json To order.array
# -----------------------------------------------------------------------------
class CompositeJSON(object):
def __init__(self, numberOfLayers):
self.nbLayers = numberOfLayers
self.encoding = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def load(self, file):
with open(file, "r") as f:
composite = json.load(f)
self.width = composite["dimensions"][0]
self.height = composite["dimensions"][1]
self.pixels = composite["pixel-order"].split('+')
self.imageSize = self.width * self.height
self.stackSize = self.imageSize * self.nbLayers
def getImageSize(self):
return self.imageSize
def getStackSize(self):
return self.stackSize
def writeOrderSprite(self,path):
ds = vtkImageData()
ds.SetDimensions(self.width, self.height, self.nbLayers)
ds.GetPointData().AddArray(self.getSortedOrderArray())
writer = vtkDataSetWriter()
writer.SetInputData(ds)
writer.SetFileName(path)
writer.Update()
def getSortedOrderArray(self):
sortedOrder = vtkUnsignedCharArray()
        sortedOrder.SetName('layerIdx')
sortedOrder.SetNumberOfTuples(self.stackSize)
# Reset content
for idx in range(self.stackSize):
sortedOrder.SetValue(idx, 255)
idx = 0
for pixel in self.pixels:
x = (idx % self.width)
y = (idx / self.width)
flipYIdx = self.width * (self.height - y - 1) + x
if '@' in pixel:
idx += int(pixel[1:])
else:
# Need to decode the order
layerIdx = 0
for layer in pixel:
sortedOrder.SetValue(flipYIdx + self.imageSize * layerIdx, self.encoding.index(layer))
layerIdx += 1
# Move to next pixel
idx += 1
return sortedOrder
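    # Decoding sketch (illustrative): the "pixel-order" string is a '+'-separated run-length
    # encoding. An entry such as "@120" skips 120 pixels that carry no layers at all, while an
    # entry such as "CAB" lists, for one pixel, the compositing order of layers 2, 0 and 1 (each
    # letter is looked up in self.encoding). Pixels that never receive an entry keep the sentinel
    # value 255 written in the reset loop above, and flipYIdx converts from the image's row order
    # to VTK's bottom-left origin.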
# -----------------------------------------------------------------------------
# Composite Sprite to Sorted Composite Dataset Builder
# -----------------------------------------------------------------------------
class ConvertCompositeSpriteToSortedStack(object):
def __init__(self, directory):
self.basePath = directory
self.layers = []
self.data = []
self.imageReader = vtkPNGReader()
# Load JSON metadata
with open(os.path.join(directory, "config.json"), "r") as f:
self.config = json.load(f)
self.nbLayers = len(self.config['scene'])
while len(self.layers) < self.nbLayers:
self.layers.append({})
with open(os.path.join(directory, "index.json"), "r") as f:
self.info = json.load(f)
with open(os.path.join(directory, "offset.json"), "r") as f:
offsets = json.load(f)
for key, value in offsets.iteritems():
meta = key.split('|')
if len(meta) == 2:
self.layers[int(meta[0])][meta[1]] = value
elif meta[1] in self.layers[int(meta[0])]:
self.layers[int(meta[0])][meta[1]][int(meta[2])] = value
else:
self.layers[int(meta[0])][meta[1]] = [value, value, value]
self.composite = CompositeJSON(len(self.layers))
def listData(self):
return self.data
def convert(self):
for root, dirs, files in os.walk(self.basePath):
if 'rgb.png' in files:
print 'Process', root
self.processDirectory(root)
def processDirectory(self, directory):
self.imageReader.SetFileName(os.path.join(directory, 'rgb.png'))
self.imageReader.Update()
rgbArray = self.imageReader.GetOutput().GetPointData().GetArray(0)
self.composite.load(os.path.join(directory, 'composite.json'))
orderArray = self.composite.getSortedOrderArray()
imageSize = self.composite.getImageSize()
stackSize = self.composite.getStackSize()
# Write order (sorted order way)
with open(os.path.join(directory, 'order.uint8'), 'wb') as f:
f.write(buffer(orderArray))
self.data.append({'name': 'order', 'type': 'array', 'fileName': '/order.uint8'})
# Encode Normals (sorted order way)
if 'normal' in self.layers[0]:
sortedNormal = vtkUnsignedCharArray()
sortedNormal.SetNumberOfComponents(3) # x,y,z
sortedNormal.SetNumberOfTuples(stackSize)
# Get Camera orientation and rotation information
camDir = [0,0,0]
worldUp = [0,0,0]
with open(os.path.join(directory, "camera.json"), "r") as f:
camera = json.load(f)
camDir = normalize([ camera['position'][i] - camera['focalPoint'][i] for i in range(3) ])
worldUp = normalize(camera['viewUp'])
# [ camRight, camUp, camDir ] will be our new orthonormal basis for normals
camRight = vectProduct(camDir, worldUp)
camUp = vectProduct(camRight, camDir)
# Tmp structure to capture (x,y,z) normal
normalByLayer = vtkFloatArray()
normalByLayer.SetNumberOfComponents(3)
normalByLayer.SetNumberOfTuples(stackSize)
# Capture all layer normals
layerIdx = 0
zPosCount = 0
zNegCount = 0
for layer in self.layers:
normalOffset = layer['normal']
for idx in range(imageSize):
normalByLayer.SetTuple3(
layerIdx * imageSize + idx,
getScalarFromRGB(rgbArray.GetTuple(idx + normalOffset[0] * imageSize)),
getScalarFromRGB(rgbArray.GetTuple(idx + normalOffset[1] * imageSize)),
getScalarFromRGB(rgbArray.GetTuple(idx + normalOffset[2] * imageSize))
)
# Re-orient normal to be view based
vect = normalByLayer.GetTuple3(layerIdx * imageSize + idx)
if not math.isnan(vect[0]):
# Express normal in new basis we computed above
rVect = normalize([ -dotProduct(vect, camRight), dotProduct(vect, camUp), dotProduct(vect, camDir) ])
# Need to reverse vector ?
if rVect[2] < 0:
normalByLayer.SetTuple3(layerIdx * imageSize + idx, -rVect[0], -rVect[1], -rVect[2])
else:
normalByLayer.SetTuple3(layerIdx * imageSize + idx, rVect[0], rVect[1], rVect[2])
layerIdx += 1
# Sort normals and encode them as 3 bytes ( -1 < xy < 1 | 0 < z < 1)
for idx in range(stackSize):
layerIdx = int(orderArray.GetValue(idx))
if layerIdx == 255:
# No normal => same as view direction
sortedNormal.SetTuple3(idx, 128, 128, 255)
else:
offset = layerIdx * imageSize
imageIdx = idx % imageSize
vect = normalByLayer.GetTuple3(imageIdx + offset)
if not math.isnan(vect[0]) and not math.isnan(vect[1]) and not math.isnan(vect[2]):
sortedNormal.SetTuple3(idx, int(127.5 * (vect[0] + 1)), int(127.5 * (vect[1] + 1)), int(255 * vect[2]))
else:
print 'WARNING: encountered NaN in normal of layer ',layerIdx,': [',vect[0],',',vect[1],',',vect[2],']'
sortedNormal.SetTuple3(idx, 128, 128, 255)
# Write the sorted data
with open(os.path.join(directory, 'normal.uint8'), 'wb') as f:
f.write(buffer(sortedNormal))
self.data.append({'name': 'normal', 'type': 'array', 'fileName': '/normal.uint8', 'categories': ['normal']})
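            # Encoding sketch (illustrative): each sorted normal is packed into 3 unsigned bytes.
            # The x and y components lie in [-1, 1] and are mapped with int(127.5 * (v + 1)), so
            # -1 -> 0 and +1 -> 255; z has been made non-negative above, lies in [0, 1] and is
            # mapped with int(255 * z). The neutral "no layer / view-aligned" normal is therefore
            # stored as (128, 128, 255).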
# Encode Intensity (sorted order way)
if 'intensity' in self.layers[0]:
intensityOffsets = []
sortedIntensity = vtkUnsignedCharArray()
sortedIntensity.SetNumberOfTuples(stackSize)
for layer in self.layers:
intensityOffsets.append(layer['intensity'])
for idx in range(stackSize):
layerIdx = int(orderArray.GetValue(idx))
if layerIdx == 255:
sortedIntensity.SetValue(idx, 255)
else:
offset = 3 * intensityOffsets[layerIdx] * imageSize
imageIdx = idx % imageSize
sortedIntensity.SetValue(idx, rgbArray.GetValue(imageIdx * 3 + offset))
with open(os.path.join(directory, 'intensity.uint8'), 'wb') as f:
f.write(buffer(sortedIntensity))
self.data.append({'name': 'intensity', 'type': 'array', 'fileName': '/intensity.uint8', 'categories': ['intensity']})
# Encode Each layer Scalar
layerIdx = 0
for layer in self.layers:
for scalar in layer:
if scalar not in ['intensity', 'normal']:
offset = imageSize * layer[scalar]
scalarRange = self.config['scene'][layerIdx]['colors'][scalar]['range']
delta = (scalarRange[1] - scalarRange[0]) / 16777215.0 # 2^24 - 1 => 16,777,215
scalarArray = vtkFloatArray()
scalarArray.SetNumberOfTuples(imageSize)
for idx in range(imageSize):
rgb = rgbArray.GetTuple(idx + offset)
if rgb[0] != 0 or rgb[1] != 0 or rgb[2] != 0:
# Decode encoded value
value = scalarRange[0] + delta * float(rgb[0]*65536 + rgb[1]*256 + rgb[2] - 1)
scalarArray.SetValue(idx, value)
else:
# No value
scalarArray.SetValue(idx, float('NaN'))
with open(os.path.join(directory, '%d_%s.float32' % (layerIdx, scalar)), 'wb') as f:
f.write(buffer(scalarArray))
self.data.append({'name': '%d_%s' % (layerIdx, scalar), 'type': 'array', 'fileName': '/%d_%s.float32' % (layerIdx, scalar), 'categories': ['%d_%s' % (layerIdx, scalar)]})
layerIdx += 1
|
Kitware/tonic-data-generator
|
python/tonic/paraview/data_converter.py
|
Python
|
bsd-3-clause
| 11,934
|
[
"VTK"
] |
dab39c9c80f3f59417122373813b10cf616832bf5fcfe02e2e37dea04812a6c0
|
"""
Django module container for classes and operations related to the "Course Module" content type
"""
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList
import json
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from .fields import Date
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
DEFAULT_START_DATE = datetime(2030, 1, 1, tzinfo=UTC())
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except:
return value
if result is None:
return value
else:
return result
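# Illustrative behaviour of StringOrDate (a sketch, not part of the upstream field API): values
# that parse as dates round-trip through Date, anything else is passed through untouched.
#
#     field = StringOrDate()
#     field.from_json("2013-07-17T12:00")   # -> a parsed datetime
#     field.from_json("TBD")                # -> "TBD", kept as a plain string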
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
                age = datetime.now(UTC()) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s" % toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course",
default=[], scope=Scope.content)
wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content)
enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings)
enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
start = Date(help="Start time when this module is visible",
default=DEFAULT_START_DATE,
scope=Scope.settings)
end = Date(help="Date that this class ends", scope=Scope.settings)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help="Grading policy definition for this class",
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
help=_("Enter the name of the course as it should appear in the edX.org course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
        deprecated=True # Deprecated because this value is not intended to be edited within Studio.
)
show_chat = Boolean(
display_name=_("Show Chat Widget"),
help=_("Enter true or false. When true, students can see the chat widget in the course."),
default=False,
scope=Scope.settings
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Each pair should be '
'formatted as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, format each pair '
'as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"] (be sure to include the "T" between the date and '
'time). An entry defining more than one blackout period might look like this: '
'[["2014-09-15", "2014-09-21"], ["2014-10-01", "2014-10-08"]]'
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2014_T1"}. The "id" '
'value for each category must be unique.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
help=_("Enter the unique identifier for your course's video files provided by edX."),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
checklists = List(
scope=Scope.settings,
default=[
{
"short_description": _("Getting Started With Studio"),
"items": [
{
"short_description": _("Add Course Team Members"),
"long_description": _(
"Grant your collaborators permission to edit your course so you can work together."
),
"is_checked": False,
"action_url": "ManageUsers",
"action_text": _("Edit Course Team"),
"action_external": False,
},
{
"short_description": _("Set Important Dates for Your Course"),
"long_description": _(
"Establish your course's student enrollment and launch dates on the Schedule and Details "
"page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Details & Schedule"),
"action_external": False,
},
{
"short_description": _("Draft Your Course's Grading Policy"),
"long_description": _(
"Set up your assignment types and grading policy even if you haven't created all your "
"assignments."
),
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": _("Edit Grading Settings"),
"action_external": False,
},
{
"short_description": _("Explore the Other Studio Checklists"),
"long_description": _(
"Discover other available course authoring tools, and find help when you need it."
),
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False,
},
],
},
{
"short_description": _("Draft a Rough Course Outline"),
"items": [
{
"short_description": _("Create Your First Section and Subsection"),
"long_description": _("Use your course outline to build your first Section and Subsection."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Set Section Release Dates"),
"long_description": _(
"Specify the release dates for each Section in your course. Sections become visible to "
"students on their release dates."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Designate a Subsection as Graded"),
"long_description": _(
"Set a Subsection to be graded as a specific assignment type. Assignments within graded "
"Subsections count toward a student's final grade."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Reordering Course Content"),
"long_description": _("Use drag and drop to reorder the content in your course."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Renaming Sections"),
"long_description": _("Rename Sections by clicking the Section name from the Course Outline."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Deleting Course Content"),
"long_description": _(
"Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is "
"no Undo function."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Add an Instructor-Only Section to Your Outline"),
"long_description": _(
"Some course authors find using a section for unsorted, in-progress work useful. To do "
"this, create a section and set the release date to the distant future."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
],
},
{
"short_description": _("Explore edX's Support Tools"),
"items": [
{
"short_description": _("Explore the Studio Help Forum"),
"long_description": _(
"Access the Studio Help forum from the menu that appears when you click your user name "
"in the top right corner of Studio."
),
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": _("Visit Studio Help"),
"action_external": True,
},
{
"short_description": _("Enroll in edX 101"),
"long_description": _("Register for edX 101, edX's primer for course creation."),
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": _("Register for edX 101"),
"action_external": True,
},
{
"short_description": _("Download the Studio Documentation"),
"long_description": _("Download the searchable Studio reference documentation in PDF form."),
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": _("Download Documentation"),
"action_external": True,
},
],
},
{
"short_description": _("Draft Your Course About Page"),
"items": [
{
"short_description": _("Draft a Course Description"),
"long_description": _(
"Courses on edX have an About page that includes a course video, description, and more. "
"Draft the text students will read before deciding to enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Staff Bios"),
"long_description": _(
"Showing prospective students who their instructor will be is helpful. "
"Include staff bios on the course About page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course FAQs"),
"long_description": _("Include a short list of frequently asked questions about your course."),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course Prerequisites"),
"long_description": _(
"Let students know what knowledge and/or skills they should have before "
"they enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
],
},
],
)
info_sidebar_name = String(
display_name=_("Course Info Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Info page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format due dates are displayed in. Due dates must be in MM-DD-YYYY, DD-MM-YYYY, YYYY-MM-DD, "
"or YYYY-DD-MM format."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Has three possible states: 'end', 'early_with_info', 'early_no_info'. 'end' is the default behavior, "
"where certificates will only appear after a course has ended. 'early_with_info' will display all "
"certificate information before a course has ended. 'early_no_info' will hide all certificate "
"information unless a student has earned a certificate."
),
scope=Scope.settings,
default="end"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
"Between quotation marks, enter the short name of the course to use on the certificate that "
"students receive when they complete the course."
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
"Between quotation marks, enter the long name of the course to use on the certificate that students "
"receive when they complete the course."
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, edX users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content."
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content."
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
class CourseDescriptor(CourseFields, SequenceDescriptor):
module_class = SequenceModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
if self.wiki_slug is None:
self.wiki_slug = self.location.course
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = "DATE_TIME"
delattr(self, 'show_timezone')
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
if not getattr(self, "tabs", []):
CourseTabList.initialize_default(self)
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
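    # Merge sketch (illustrative): if a course's policy JSON only overrides the cutoffs, e.g.
    #
    #     course_policy = {"GRADE_CUTOFFS": {"A": 0.9, "B": 0.7, "Pass": 0.5}}
    #
    # then set_grading_policy() keeps the GRADER assignment types already held in grading_policy
    # (the CourseFields default if nothing was set) and only replaces GRADE_CUTOFFS; an empty
    # (or None) policy leaves the existing values in place.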
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except (IOError):
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
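# Worked example (added for clarity, not in the original source): for a course
# XML element whose url_name is "2014_Fall", the paths probed above would be,
# in order:
#
#     ['policies/2014_Fall/grading_policy.json', 'grading_policy.json']
#
# The first file that exists wins; if none exists or the JSON fails to parse,
# the empty policy {} is used and set_grading_policy() falls back to defaults.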
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
textbook_xml_object = etree.Element('textbook')
for textbook in self.textbooks:
textbook_xml_object.set('title', textbook.title)
textbook_xml_object.set('book_url', textbook.book_url)
xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
if self.end is None:
return False
return datetime.now(UTC()) > self.end
def may_certify(self):
"""
Return True if it is acceptable to show the student a certificate download link
"""
show_early = self.certificates_display_behavior in ('early_with_info', 'early_no_info') or self.certificates_show_before_end
return show_early or self.has_ended()
def has_started(self):
return datetime.now(UTC()) > self.start
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
This allows changing the default cohorting behavior of inline discussions. By
setting this to False, all inline discussions are non-cohorted unless their
ids are specified in cohorted_discussions.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
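# Illustrative cohort_config value exercising the properties above (added for
# clarity; the key names follow what the code reads, the group names and
# discussion id are made up):
#
#     cohort_config = {
#         "cohorted": True,
#         "auto_cohort": True,
#         "auto_cohort_groups": ["Group A", "Group B"],
#         "cohorted_discussions": ["discussion_topic_1"],
#         "always_cohort_inline_discussions": False,
#     }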
@property
def is_newish(self):
"""
Returns whether the course has been flagged as new. If
there is no flag, return a heuristic value considering the
announcement and the start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
# The course has been announced for less than a month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
Returns a score that can be used to sort the courses according
to how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertised) start dates of the course if available.
The lower the number the "newer" the course.
"""
# Make courses that have an announcement date have a lower
# score than courses that don't; older courses should have a
# higher score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
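# Worked example of the heuristic above (added for illustration): with
# scale = 300 days, a course announced 30 days ago scores
# -exp(-30/300) ~= -0.905, while an unannounced course that started 60 days
# ago scores exp(60/300) ~= 1.221. Lower scores sort as "newer", so the
# recently announced course sorts first.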
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
announcement = self.announcement
if announcement is not None:
announcement = announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
affect grading a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children():
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for c in self.get_children():
for s in c.get_children():
if s.graded:
xmoduledescriptors = list(yield_descriptor_descendents(s))
xmoduledescriptors.append(s)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': s,
'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)
}
section_format = s.format if s.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(s)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
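# Illustrative shape of the dict returned above (structure taken from the
# code, example values invented):
#
#     {
#         'graded_sections': {
#             'Homework': [
#                 {'section_descriptor': <sequential descriptor>,
#                  'xmoduledescriptors': [<scored problem descriptor>, ...]},
#             ],
#         },
#         'all_descriptors': [<descriptor>, ..., <sequential descriptor>],
#     }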
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
def start_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the desired text corresponding to the course's start date and time in UTC. Prefers .advertised_start,
then falls back to .start
"""
i18n = self.runtime.service(self, "i18n")
_ = i18n.ugettext
strftime = i18n.strftime
def try_parse_iso_8601(text):
try:
result = Date().from_json(text)
if result is None:
result = text.title()
else:
result = strftime(result, format_string)
if format_string == "DATE_TIME":
result = self._add_timezone_string(result)
except ValueError:
result = text.title()
return result
if isinstance(self.advertised_start, basestring):
return try_parse_iso_8601(self.advertised_start)
elif self.start_date_is_still_default:
# Translators: TBD stands for 'To Be Determined' and is used when a course
# does not yet have an announced start date.
return _('TBD')
else:
when = self.advertised_start or self.start
if format_string == "DATE_TIME":
return self._add_timezone_string(strftime(when, format_string))
return strftime(when, format_string)
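# Examples of the fallback behaviour above (added for illustration):
# advertised_start = "2015-03-02" parses as an ISO 8601 date and is formatted
# through the i18n service, while a free-form value such as "spring 2015"
# cannot be parsed and falls through to text.title(), giving "Spring 2015".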
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e. .start has not been modified,
and .advertised_start has not been set.
"""
return self.advertised_start is None and self.start == CourseFields.start.default
def end_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the end date or date_time for the course formatted as a string.
If the course does not have an end date set (course.end is None), an empty string will be returned.
"""
if self.end is None:
return ''
else:
strftime = self.runtime.service(self, "i18n").strftime
date_time = strftime(self.end, format_string)
return date_time if format_string == "SHORT_DATE" else self._add_timezone_string(date_time)
def _add_timezone_string(self, date_time):
"""
Adds 'UTC' string to the end of start/end date and time texts.
"""
return date_time + u" UTC"
@property
def forum_posts_allowed(self):
date_proxy = Date()
try:
blackout_periods = [(date_proxy.from_json(start),
date_proxy.from_json(end))
for start, end
in self.discussion_blackouts]
now = datetime.now(UTC())
for start, end in blackout_periods:
if start <= now <= end:
return False
except:
log.exception("Error parsing discussion_blackouts for course {0}".format(self.id))
return True
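# Example discussion_blackouts value consumed above (format inferred from the
# parsing code, dates invented): a list of [start, end] pairs, e.g.
#
#     [["2013-10-29T04:00", "2013-11-03T04:00"],
#      ["2013-12-20T00:00", "2014-01-02T00:00"]]
#
# Posting is disallowed whenever "now" falls inside one of the windows.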
@property
def number(self):
return self.location.course
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
@property
def video_pipeline_configured(self):
"""
Returns whether the video pipeline advanced setting is configured for this course.
"""
return (
self.video_upload_pipeline is not None and
'course_video_upload_token' in self.video_upload_pipeline
)
|
jswope00/griffinx
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 53,489
|
[
"VisIt"
] |
23909c3b3cdb3a9a3ed02afc7cc14f43a99e8c6cc4c07ef56882972a336d0fc0
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for compiled languages.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from string import Template
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Literal
from pygments.scanner import Scanner
# backwards compatibility
from pygments.lexers.functional import OcamlLexer
from pygments.lexers.jvm import JavaLexer, ScalaLexer
__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer',
'DylanLexer', 'ObjectiveCLexer', 'FortranLexer', 'GLShaderLexer',
'PrologLexer', 'CythonLexer', 'ValaLexer', 'OocLexer', 'GoLexer',
'FelixLexer', 'AdaLexer', 'Modula2Lexer', 'BlitzMaxLexer',
'NimrodLexer', 'FantomLexer']
class CLexer(RegexLexer):
"""
For C source code with preprocessor directives.
"""
name = 'C'
aliases = ['c']
filenames = ['*.c', '*.h', '*.idc']
mimetypes = ['text/x-chdr', 'text/x-csrc']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws + r')(#if\s+0)',
bygroups(using(this), Comment.Preproc), 'if0'),
('^(' + _ws + ')(#)',
bygroups(using(this), Comment.Preproc), 'macro'),
(r'^(\s*)([a-zA-Z_][a-zA-Z0-9_]*:(?!:))',
bygroups(Text, Name.Label)),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(auto|break|case|const|continue|default|do|else|enum|extern|'
r'for|goto|if|register|restricted|return|sizeof|static|struct|'
r'switch|typedef|union|volatile|virtual|while)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void)\b',
Keyword.Type),
(r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
(r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
(r'(true|false|NULL)\b', Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
('', Text, 'statement'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
'wctrans_t', 'wint_t', 'wctype_t']
c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
'int_least16_t', 'int_least32_t', 'int_least64_t',
'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t']
def __init__(self, **options):
self.stdlibhighlighting = get_bool_opt(options,
'stdlibhighlighting', True)
self.c99highlighting = get_bool_opt(options,
'c99highlighting', True)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.stdlibhighlighting and value in self.stdlib_types:
token = Keyword.Type
elif self.c99highlighting and value in self.c99_types:
token = Keyword.Type
yield index, token, value
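# Minimal usage sketch (not part of the original lexer module): running C
# source through CLexer with the standard pygments highlight() pipeline.
# TerminalFormatter is just one of the available formatters.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#
#     c_source = 'int main(void) { return 0; }\n'
#     print(highlight(c_source, CLexer(stdlibhighlighting=False),
#                     TerminalFormatter()))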
class CppLexer(RegexLexer):
"""
For C++ source code with preprocessor directives.
"""
name = 'C++'
aliases = ['cpp', 'c++']
filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
'*.cc', '*.hh', '*.cxx', '*.hxx']
mimetypes = ['text/x-c++hdr', 'text/x-c++src']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws + r')(#if\s+0)',
bygroups(using(this), Comment.Preproc), 'if0'),
('^(' + _ws + ')(#)',
bygroups(using(this), Comment.Preproc), 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(asm|auto|break|case|catch|const|const_cast|continue|'
r'default|delete|do|dynamic_cast|else|enum|explicit|export|'
r'extern|for|friend|goto|if|mutable|namespace|new|operator|'
r'private|protected|public|register|reinterpret_cast|return|'
r'restrict|sizeof|static|static_cast|struct|switch|template|'
r'this|throw|throws|try|typedef|typeid|typename|union|using|'
r'volatile|virtual|while)\b', Keyword),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(bool|int|long|float|short|double|char|unsigned|signed|'
r'void|wchar_t)\b', Keyword.Type),
(r'(_{0,2}inline|naked|thread)\b', Keyword.Reserved),
(r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
r'declspec|finally|int64|try|leave|wchar_t|w64|virtual_inheritance|'
r'uuidof|unaligned|super|single_inheritance|raise|noop|'
r'multiple_inheritance|m128i|m128d|m128|m64|interface|'
r'identifier|forceinline|event|assume)\b', Keyword.Reserved),
# Offload C++ extensions, http://offload.codeplay.com/
(r'(__offload|__blockingoffload|__outer)\b', Keyword.Pseudo),
(r'(true|false)\b', Keyword.Constant),
(r'NULL\b', Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'classname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
class ECLexer(RegexLexer):
"""
For eC source code with preprocessor directives.
*New in Pygments 1.5.*
"""
name = 'eC'
aliases = ['ec']
filenames = ['*.ec', '*.eh']
mimetypes = ['text/x-echdr', 'text/x-ecsrc']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws + '#', Comment.Preproc, 'macro'),
(r'^(\s*)([a-zA-Z_][a-zA-Z0-9_]*:(?!:))', bygroups(Text, Name.Label)),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(auto|break|case|const|continue|default|do|else|enum|extern|'
r'for|goto|if|register|restricted|return|sizeof|static|struct|'
r'switch|typedef|union|volatile|virtual|while|class|private|public|'
r'property|import|delete|new|new0|renew|renew0|define|get|set|remote|dllexport|dllimport|stdcall|'
r'subclass|__on_register_module|namespace|using|typed_object|any_object|incref|register|watch|'
r'stopwatching|firewatchers|watchable|class_designer|class_fixed|class_no_expansion|isset|'
r'class_default_property|property_category|class_data|class_property|virtual|thisclass|'
r'dbtable|dbindex|database_open|dbfield)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void)\b',
Keyword.Type),
(r'(uint|uint16|uint32|uint64|bool|byte|unichar|int64)\b',
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
(r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
(r'(true|false|null|value|this|NULL)\b', Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
('', Text, 'statement'),
],
'classname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
'wctrans_t', 'wint_t', 'wctype_t']
c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
'int_least16_t', 'int_least32_t', 'int_least64_t',
'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t']
def __init__(self, **options):
self.stdlibhighlighting = get_bool_opt(options,
'stdlibhighlighting', True)
self.c99highlighting = get_bool_opt(options,
'c99highlighting', True)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.stdlibhighlighting and value in self.stdlib_types:
token = Keyword.Type
elif self.c99highlighting and value in self.c99_types:
token = Keyword.Type
yield index, token, value
class DLexer(RegexLexer):
"""
For D source.
*New in Pygments 1.2.*
"""
name = 'D'
filenames = ['*.d', '*.di']
aliases = ['d']
mimetypes = ['text/x-dsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
#(r'\\\n', Text), # line continuations
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nested_comment'),
# Keywords
(r'(abstract|alias|align|asm|assert|auto|body|break|case|cast'
r'|catch|class|const|continue|debug|default|delegate|delete'
r'|deprecated|do|else|enum|export|extern|finally|final'
r'|foreach_reverse|foreach|for|function|goto|if|import|inout'
r'|interface|invariant|in|is|lazy|mixin|module|new|nothrow|out'
r'|override|package|pragma|private|protected|public|pure|ref|return'
r'|scope|static|struct|super|switch|synchronized|template|this'
r'|throw|try|typedef|typeid|typeof|union|unittest|version|volatile'
r'|while|with|__traits)\b', Keyword
),
(r'(bool|byte|cdouble|cent|cfloat|char|creal|dchar|double|float'
r'|idouble|ifloat|int|ireal|long|real|short|ubyte|ucent|uint|ulong'
r'|ushort|void|wchar)\b', Keyword.Type
),
(r'(false|true|null)\b', Keyword.Constant),
(r'macro\b', Keyword.Reserved),
(r'(string|wstring|dstring)\b', Name.Builtin),
# FloatLiteral
# -- HexFloat
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number),
# -- Octal
(r'0[0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
String.Char
),
# StringLiteral
# -- WysiwygString
(r'r"[^"]*"[cwd]?', String),
# -- AlternateWysiwygString
(r'`[^`]*`[cwd]?', String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"[cwd]?', String),
# -- EscapeSequence
(r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
String
),
# -- HexString
(r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
# -- DelimitedString
(r'q"\[', String, 'delimited_bracket'),
(r'q"\(', String, 'delimited_parenthesis'),
(r'q"<', String, 'delimited_angle'),
(r'q"{', String, 'delimited_curly'),
(r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
(r'q"(.).*?\1"', String),
# -- TokenString
(r'q{', String, 'token_string'),
# Tokens
(r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation
),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'nested_comment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
'token_string': [
(r'{', Punctuation, 'token_string_nest'),
(r'}', String, '#pop'),
include('root'),
],
'token_string_nest': [
(r'{', Punctuation, '#push'),
(r'}', Punctuation, '#pop'),
include('root'),
],
'delimited_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, 'delimited_inside_bracket'),
(r'\]"', String, '#pop'),
],
'delimited_inside_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, '#push'),
(r'\]', String, '#pop'),
],
'delimited_parenthesis': [
(r'[^\(\)]+', String),
(r'\(', String, 'delimited_inside_parenthesis'),
(r'\)"', String, '#pop'),
],
'delimited_inside_parenthesis': [
(r'[^\(\)]+', String),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'delimited_angle': [
(r'[^<>]+', String),
(r'<', String, 'delimited_inside_angle'),
(r'>"', String, '#pop'),
],
'delimited_inside_angle': [
(r'[^<>]+', String),
(r'<', String, '#push'),
(r'>', String, '#pop'),
],
'delimited_curly': [
(r'[^{}]+', String),
(r'{', String, 'delimited_inside_curly'),
(r'}"', String, '#pop'),
],
'delimited_inside_curly': [
(r'[^{}]+', String),
(r'{', String, '#push'),
(r'}', String, '#pop'),
],
}
class DelphiLexer(Lexer):
"""
For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas']
mimetypes = ['text/x-pascal']
TURBO_PASCAL_KEYWORDS = [
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
]
DELPHI_KEYWORDS = [
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
]
FREE_PASCAL_KEYWORDS = [
'dispose', 'exit', 'false', 'new', 'true'
]
BLOCK_KEYWORDS = set([
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
])
FUNCTION_MODIFIERS = set([
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
])
# XXX: those aren't global, but currently we know of no way to define
# them just for the type context.
DIRECTIVES = set([
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
])
BUILTIN_TYPES = set([
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
])
BUILTIN_UNITS = {
'System': [
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
],
'SysUtils': [
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
],
'Classes': [
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
],
'Math': [
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
]
}
ASM_REGISTERS = set([
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
])
ASM_INSTRUCTIONS = set([
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
])
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
for unit in get_list_opt(options, 'units', self.BUILTIN_UNITS.keys()):
self.builtins.update(self.BUILTIN_UNITS[unit])
def get_tokens_unprocessed(self, text):
scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
stack = ['initial']
in_function_block = False
in_property_block = False
was_dot = False
next_token_is_function = False
next_token_is_property = False
collect_labels = False
block_labels = set()
brace_balance = [0, 0]
while not scanner.eos:
token = Error
if stack[-1] == 'initial':
if scanner.scan(r'\s+'):
token = Text
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
token = Operator
# stop label highlighting on next ";"
if collect_labels and scanner.match == ';':
collect_labels = False
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
# abort function naming ``foo = Function(...)``
next_token_is_function = False
# if we are in a function block we count the open
# braces because otherwise it's impossible to
# determine the end of the modifier context
if in_function_block or in_property_block:
if scanner.match == '(':
brace_balance[0] += 1
elif scanner.match == ')':
brace_balance[0] -= 1
elif scanner.match == '[':
brace_balance[1] += 1
elif scanner.match == ']':
brace_balance[1] -= 1
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name == 'result':
token = Name.Builtin.Pseudo
elif lowercase_name in self.keywords:
token = Keyword
# if we are in a special block and a
# block ending keyword occurs (and the parenthesis
# is balanced) we end the current block context
if (in_function_block or in_property_block) and \
lowercase_name in self.BLOCK_KEYWORDS and \
brace_balance[0] <= 0 and \
brace_balance[1] <= 0:
in_function_block = False
in_property_block = False
brace_balance = [0, 0]
block_labels = set()
if lowercase_name in ('label', 'goto'):
collect_labels = True
elif lowercase_name == 'asm':
stack.append('asm')
elif lowercase_name == 'property':
in_property_block = True
next_token_is_property = True
elif lowercase_name in ('procedure', 'operator',
'function', 'constructor',
'destructor'):
in_function_block = True
next_token_is_function = True
# we are in a function block and the current name
# is in the set of registered modifiers. highlight
# it as pseudo keyword
elif in_function_block and \
lowercase_name in self.FUNCTION_MODIFIERS:
token = Keyword.Pseudo
# if we are in a property highlight some more
# modifiers
elif in_property_block and \
lowercase_name in ('read', 'write'):
token = Keyword.Pseudo
next_token_is_function = True
# if the last iteration set next_token_is_function
# to true we now want this name highlighted as
# function. so do that and reset the state
elif next_token_is_function:
# Look if the next token is a dot. If yes it's
# not a function, but a class name and the
# part after the dot a function name
if scanner.test(r'\s*\.\s*'):
token = Name.Class
# it's not a dot, our job is done
else:
token = Name.Function
next_token_is_function = False
# same for properties
elif next_token_is_property:
token = Name.Property
next_token_is_property = False
# Highlight this token as label and add it
# to the list of known labels
elif collect_labels:
token = Name.Label
block_labels.add(scanner.match.lower())
# name is in list of known labels
elif lowercase_name in block_labels:
token = Name.Label
elif lowercase_name in self.BUILTIN_TYPES:
token = Keyword.Type
elif lowercase_name in self.DIRECTIVES:
token = Keyword.Pseudo
# builtins are just builtins if the token
# before isn't a dot
elif not was_dot and lowercase_name in self.builtins:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
token = String.Char
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
# if the stack depth is deeper than once, pop
if len(stack) > 1:
stack.pop()
scanner.get_char()
elif stack[-1] == 'string':
if scanner.scan(r"''"):
token = String.Escape
elif scanner.scan(r"'"):
token = String
stack.pop()
elif scanner.scan(r"[^']*"):
token = String
else:
scanner.get_char()
stack.pop()
elif stack[-1] == 'asm':
if scanner.scan(r'\s+'):
token = Text
elif scanner.scan(r'end'):
token = Keyword
stack.pop()
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
token = Name.Label
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name in self.ASM_INSTRUCTIONS:
token = Keyword
elif lowercase_name in self.ASM_REGISTERS:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
token = Operator
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
scanner.get_char()
stack.pop()
# save the dot!!!11
if scanner.match.strip():
was_dot = scanner.match == '.'
yield scanner.start_pos, token, scanner.match or ''
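# Usage sketch for the options documented in the DelphiLexer docstring
# (illustrative only; pascal_source is assumed to hold the program text):
#
#     lexer = DelphiLexer(units=['System', 'Math'], freepascal=False)
#     for pos, token, value in lexer.get_tokens_unprocessed(pascal_source):
#         print(pos, token, repr(value))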
class DylanLexer(RegexLexer):
"""
For the `Dylan <http://www.opendylan.org/>`_ language.
*New in Pygments 0.7.*
"""
name = 'Dylan'
aliases = ['dylan']
filenames = ['*.dylan', '*.dyl']
mimetypes = ['text/x-dylan']
flags = re.DOTALL
tokens = {
'root': [
(r'\b(subclass|abstract|block|c(on(crete|stant)|lass)|domain'
r'|ex(c(eption|lude)|port)|f(unction(al)?)|generic|handler'
r'|i(n(herited|line|stance|terface)|mport)|library|m(acro|ethod)'
r'|open|primary|sealed|si(deways|ngleton)|slot'
r'|v(ariable|irtual))\b', Name.Builtin),
(r'<\w+>', Keyword.Type),
(r'//.*?\n', Comment.Single),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'=>|\b(a(bove|fterwards)|b(e(gin|low)|y)|c(ase|leanup|reate)'
r'|define|else(if)?|end|f(inally|or|rom)|i[fn]|l(et|ocal)|otherwise'
r'|rename|s(elect|ignal)|t(hen|o)|u(n(less|til)|se)|wh(en|ile))\b',
Keyword),
(r'([ \t])([!\$%&\*\/:<=>\?~_^a-zA-Z0-9.+\-]*:)',
bygroups(Text, Name.Variable)),
(r'([ \t]*)(\S+[^:])([ \t]*)(\()([ \t]*)',
bygroups(Text, Name.Function, Text, Punctuation, Text)),
(r'-?[0-9.]+', Number),
(r'[(),;]', Punctuation),
(r'\$[a-zA-Z0-9-]+', Name.Constant),
(r'[!$%&*/:<>=?~^.+\[\]{}-]+', Operator),
(r'\s+', Text),
(r'#"[a-zA-Z0-9-]+"', Keyword),
(r'#[a-zA-Z0-9-]+', Keyword),
(r'#(\(|\[)', Punctuation),
(r'[a-zA-Z0-9-_]+', Name.Variable),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
class ObjectiveCLexer(RegexLexer):
"""
For Objective-C source code with preprocessor directives.
"""
name = 'Objective-C'
aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']
#XXX: objc has .h files too :-/
filenames = ['*.m']
mimetypes = ['text/x-objective-c']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'(auto|break|case|const|continue|default|do|else|enum|extern|'
r'for|goto|if|register|restricted|return|sizeof|static|struct|'
r'switch|typedef|union|volatile|virtual|while|in|@selector|'
r'@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|IBOutlet|IBAction|SEL)\b', Keyword.Type),
(r'(_{0,2}inline|naked|restrict|thread|typename)\b',
Keyword.Reserved),
(r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
(r'(TRUE|FALSE|nil|NULL)\b', Name.Builtin),
('[a-zA-Z$_][a-zA-Z0-9$_]*:(?!:)', Name.Label),
('[a-zA-Z$_][a-zA-Z0-9$_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z$_][a-zA-Z0-9$_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function,
using(this), Text, Punctuation),
'function'),
# methods
(r'^([-+])(\s*)' # method marker
r'(\(.*?\))?(\s*)' # return type
r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)', # begin of method name
bygroups(Keyword, Text, using(this),
Text, Name.Function),
'method'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z$_][a-zA-Z0-9$_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function,
using(this), Text, Punctuation)),
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
'classname'),
(r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
('', Text, 'statement'),
],
'classname' : [
# interface definition that inherits
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
bygroups(Name.Class, Text, Name.Class), '#pop'),
# interface definition for a category
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
bygroups(Name.Class, Text, Name.Label), '#pop'),
# simple interface / implementation
('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
],
'forward_classname' : [
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
bygroups(Name.Class, Text), 'forward_classname'),
('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop')
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'method': [
include('whitespace'),
(r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this),
Name.Variable)),
(r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
(';', Punctuation, '#pop'),
('{', Punctuation, 'function'),
('', Text, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
if '@import' in text or '@interface' in text or \
'@implementation' in text:
return True
elif '@"' in text: # strings
return True
elif re.match(r'\[[a-zA-Z0-9.]:', text): # message
return True
return False
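# Sketch of how analyse_text() above feeds lexer guessing (illustrative only):
#
#     from pygments.lexers import guess_lexer
#     lexer = guess_lexer('@interface Foo : NSObject\n@end\n')
#     # analyse_text() returns True for this snippet, so the guess should
#     # resolve to ObjectiveCLexer.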
class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
*New in Pygments 0.10.*
"""
name = 'Fortran'
aliases = ['fortran']
filenames = ['*.f', '*.f90', '*.F', '*.F90']
mimetypes = ['text/x-fortran']
flags = re.IGNORECASE
# Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
# Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
# Logical (?): NOT, AND, OR, EQV, NEQV
# Builtins:
# http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
tokens = {
'root': [
(r'!.*\n', Comment),
include('strings'),
include('core'),
(r'[a-z][a-z0-9_]*', Name.Variable),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(r'\b(ACCEPT|ALLOCATABLE|ALLOCATE|ARRAY|ASSIGN|ASYNCHRONOUS|'
r'BACKSPACE|BIND|BLOCK DATA|BYTE|CALL|CASE|CLOSE|COMMON|CONTAINS|'
r'CONTINUE|CYCLE|DATA|DEALLOCATE|DECODE|DEFERRED|DIMENSION|DO|'
r'ELSE|ENCODE|END FILE|ENDIF|END|ENTRY|ENUMERATOR|EQUIVALENCE|'
r'EXIT|EXTERNAL|EXTRINSIC|FINAL|FORALL|FORMAT|FUNCTION|GENERIC|'
r'GOTO|IF|IMPLICIT|IMPORT|INCLUDE|INQUIRE|INTENT|INTERFACE|'
r'INTRINSIC|MODULE|NAMELIST|NULLIFY|NONE|NON_INTRINSIC|'
r'NON_OVERRIDABLE|NOPASS|OPEN|OPTIONAL|OPTIONS|PARAMETER|PASS|'
r'PAUSE|POINTER|PRINT|PRIVATE|PROGRAM|PROTECTED|PUBLIC|PURE|READ|'
r'RECURSIVE|RETURN|REWIND|SAVE|SELECT|SEQUENCE|STOP|SUBROUTINE|'
r'TARGET|THEN|TYPE|USE|VALUE|VOLATILE|WHERE|WRITE|WHILE)\s*\b',
Keyword),
# Data Types
(r'\b(CHARACTER|COMPLEX|DOUBLE PRECISION|DOUBLE COMPLEX|INTEGER|'
r'LOGICAL|REAL|C_INT|C_SHORT|C_LONG|C_LONG_LONG|C_SIGNED_CHAR|'
r'C_SIZE_T|C_INT8_T|C_INT16_T|C_INT32_T|C_INT64_T|C_INT_LEAST8_T|'
r'C_INT_LEAST16_T|C_INT_LEAST32_T|C_INT_LEAST64_T|C_INT_FAST8_T|'
r'C_INT_FAST16_T|C_INT_FAST32_T|C_INT_FAST64_T|C_INTMAX_T|'
r'C_INTPTR_T|C_FLOAT|C_DOUBLE|C_LONG_DOUBLE|C_FLOAT_COMPLEX|'
r'C_DOUBLE_COMPLEX|C_LONG_DOUBLE_COMPLEX|C_BOOL|C_CHAR|C_PTR|'
r'C_FUNPTR)\s*\b',
Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'(::)', Keyword.Declaration),
(r'[(),:&%;]', Punctuation),
# Intrinsics
(r'\b(Abort|Abs|Access|AChar|ACos|AdjustL|AdjustR|AImag|AInt|Alarm|'
r'All|Allocated|ALog|AMax|AMin|AMod|And|ANInt|Any|ASin|Associated|'
r'ATan|BesJ|BesJN|BesY|BesYN|Bit_Size|BTest|CAbs|CCos|Ceiling|'
r'CExp|Char|ChDir|ChMod|CLog|Cmplx|Command_Argument_Count|Complex|'
r'Conjg|Cos|CosH|Count|CPU_Time|CShift|CSin|CSqRt|CTime|C_Funloc|'
r'C_Loc|C_Associated|C_Null_Ptr|C_Null_Funptr|C_F_Pointer|'
r'C_Null_Char|C_Alert|C_Backspace|C_Form_Feed|C_New_Line|'
r'C_Carriage_Return|C_Horizontal_Tab|C_Vertical_Tab|'
r'DAbs|DACos|DASin|DATan|Date_and_Time|DbesJ|'
r'DbesJ|DbesJN|DbesY|DbesY|DbesYN|Dble|DCos|DCosH|DDiM|DErF|DErFC|'
r'DExp|Digits|DiM|DInt|DLog|DLog|DMax|DMin|DMod|DNInt|Dot_Product|'
r'DProd|DSign|DSinH|DSin|DSqRt|DTanH|DTan|DTime|EOShift|Epsilon|'
r'ErF|ErFC|ETime|Exit|Exp|Exponent|Extends_Type_Of|FDate|FGet|'
r'FGetC|Float|Floor|Flush|FNum|FPutC|FPut|Fraction|FSeek|FStat|'
r'FTell|GError|GetArg|Get_Command|Get_Command_Argument|'
r'Get_Environment_Variable|GetCWD|GetEnv|GetGId|GetLog|GetPId|'
r'GetUId|GMTime|HostNm|Huge|IAbs|IAChar|IAnd|IArgC|IBClr|IBits|'
r'IBSet|IChar|IDate|IDiM|IDInt|IDNInt|IEOr|IErrNo|IFix|Imag|'
r'ImagPart|Index|Int|IOr|IRand|IsaTty|IShft|IShftC|ISign|'
r'Iso_C_Binding|Is_Iostat_End|Is_Iostat_Eor|ITime|Kill|Kind|'
r'LBound|Len|Len_Trim|LGe|LGt|Link|LLe|LLt|LnBlnk|Loc|Log|'
r'Logical|Long|LShift|LStat|LTime|MatMul|Max|MaxExponent|MaxLoc|'
r'MaxVal|MClock|Merge|Move_Alloc|Min|MinExponent|MinLoc|MinVal|'
r'Mod|Modulo|MvBits|Nearest|New_Line|NInt|Not|Or|Pack|PError|'
r'Precision|Present|Product|Radix|Rand|Random_Number|Random_Seed|'
r'Range|Real|RealPart|Rename|Repeat|Reshape|RRSpacing|RShift|'
r'Same_Type_As|Scale|Scan|Second|Selected_Int_Kind|'
r'Selected_Real_Kind|Set_Exponent|Shape|Short|Sign|Signal|SinH|'
r'Sin|Sleep|Sngl|Spacing|Spread|SqRt|SRand|Stat|Sum|SymLnk|'
r'System|System_Clock|Tan|TanH|Time|Tiny|Transfer|Transpose|Trim|'
r'TtyNam|UBound|UMask|Unlink|Unpack|Verify|XOr|ZAbs|ZCos|ZExp|'
r'ZLog|ZSin|ZSqRt)\s*\b',
Name.Builtin),
# Booleans
(r'\.(true|false)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
],
'nums': [
(r'\d+(?![.Ee])', Number.Integer),
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
],
}
class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
*New in Pygments 1.1.*
"""
name = 'GLSL'
aliases = ['glsl']
filenames = ['*.vert', '*.frag', '*.geo']
mimetypes = ['text/x-glslsrc']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),\[\]]', Punctuation),
#FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(r'\b(attribute|const|uniform|varying|centroid|break|continue|'
r'do|for|while|if|else|in|out|inout|float|int|void|bool|true|'
r'false|invariant|discard|return|mat[234]|mat[234]x[234]|'
r'vec[234]|[ib]vec[234]|sampler[123]D|samplerCube|'
r'sampler[12]DShadow|struct)\b', Keyword),
(r'\b(asm|class|union|enum|typedef|template|this|packed|goto|'
r'switch|default|inline|noinline|volatile|public|static|extern|'
r'external|interface|long|short|double|half|fixed|unsigned|'
r'lowp|mediump|highp|precision|input|output|hvec[234]|'
r'[df]vec[234]|sampler[23]DRect|sampler2DRectShadow|sizeof|'
r'cast|namespace|using)\b', Keyword), #future use
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
(r'\.', Punctuation),
(r'\s+', Text),
],
}
class PrologLexer(RegexLexer):
"""
Lexer for Prolog files.
"""
name = 'Prolog'
aliases = ['prolog']
filenames = ['*.prolog', '*.pro', '*.pl']
mimetypes = ['text/x-prolog']
flags = re.UNICODE
tokens = {
'root': [
(r'^#.*', Comment.Single),
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
(r'[0-9]+', Number),
(r'[\[\](){}|.,;!]', Punctuation),
(r':-|-->', Punctuation),
(r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\[0-7]+\\|\\[\w\W]|[^"])*"', String.Double),
(r"'(?:''|[^'])*'", String.Atom), # quoted atom
# Needs to not be followed by an atom.
#(r'=(?=\s|[a-zA-Z\[])', Operator),
(r'(is|<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
Operator),
(r'(mod|div|not)\b', Operator),
(r'_', Keyword), # The don't-care variable
(r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(:-|-->)',
bygroups(Name.Function, Text, Operator)), # function defn
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(\\()',
bygroups(Name.Function, Text, Punctuation)),
(u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
String.Atom), # atom, characters
# This one includes !
(u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
String.Atom), # atom, graphics
(r'[A-Z_][A-Za-z0-9_]*', Name.Variable),
(u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
],
'nested-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'/\*', Comment.Multiline, '#push'),
(r'[^*/]+', Comment.Multiline),
(r'[*/]', Comment.Multiline),
],
}
def analyse_text(text):
return ':-' in text
class CythonLexer(RegexLexer):
"""
For Pyrex and `Cython <http://cython.org>`_ source code.
*New in Pygments 1.1.*
"""
name = 'Cython'
aliases = ['cython', 'pyx']
filenames = ['*.pyx', '*.pxd', '*.pxi']
mimetypes = ['text/x-cython', 'application/x-cython']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'(<)([a-zA-Z0-9.?]+)(>)',
bygroups(Punctuation, Keyword.Type, Punctuation)),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
(r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
bygroups(Keyword, Number.Integer, Operator, Name, Operator,
Name, Punctuation)),
include('keywords'),
(r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
(r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(r'(assert|break|by|continue|ctypedef|del|elif|else|except\??|exec|'
r'finally|for|gil|global|if|include|lambda|nogil|pass|print|raise|'
r'return|try|while|yield|as|with)\b', Keyword),
(r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
],
'builtins': [
(r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
r'vars|xrange|zip)\b', Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
r')\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplemented|NotImplementedError|OSError|OverflowError|'
r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|Warning|ZeroDivisionError'
r')\b', Name.Exception),
],
'numbers': [
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[a-zA-Z0-9_]+', Name.Decorator),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'cdef': [
(r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
(r'(struct|enum|union|class)\b', Keyword),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(?=[(:#=]|$)',
bygroups(Name.Function, Text), '#pop'),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(,)',
bygroups(Name.Function, Text, Punctuation)),
(r'from\b', Keyword, '#pop'),
(r'as\b', Keyword),
(r':', Punctuation, '#pop'),
(r'(?=["\'])', Text, '#pop'),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Keyword.Type),
(r'.', Text),
],
'classname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
],
'fromimport': [
(r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
(r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
# ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
(r'', Text, '#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
*New in Pygments 1.1.*
"""
name = 'Vala'
aliases = ['vala', 'vapi']
filenames = ['*.vala', '*.vapi']
mimetypes = ['text/x-vala']
tokens = {
'whitespace': [
(r'^\s*#if\s+0', Comment.Preproc, 'if0'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation)),
# TODO: "correctly" parse complex code attributes
(r'(\[)(CCode|(?:Integer|Floating)Type)',
bygroups(Punctuation, Name.Decorator)),
(r'[()\[\],.]', Punctuation),
(r'(as|base|break|case|catch|construct|continue|default|delete|do|'
r'else|enum|finally|for|foreach|get|if|in|is|lock|new|out|params|'
r'return|set|sizeof|switch|this|throw|try|typeof|while|yield)\b',
Keyword),
(r'(abstract|const|delegate|dynamic|ensures|extern|inline|internal|'
r'override|owned|private|protected|public|ref|requires|signal|'
r'static|throws|unowned|var|virtual|volatile|weak|yields)\b',
Keyword.Declaration),
(r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
'namespace'),
(r'(class|errordomain|interface|struct)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
# void is an actual keyword, others are in glib-2.0.vapi
(r'(void|bool|char|double|float|int|int8|int16|int32|int64|long|'
r'short|size_t|ssize_t|string|time_t|uchar|uint|uint8|uint16|'
r'uint32|uint64|ulong|unichar|ushort)\b', Keyword.Type),
(r'(true|false|null)\b', Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
('', Text, 'statement'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
],
}
class OocLexer(RegexLexer):
"""
For `Ooc <http://ooc-lang.org/>`_ source code
*New in Pygments 1.2.*
"""
name = 'Ooc'
aliases = ['ooc']
filenames = ['*.ooc']
mimetypes = ['text/x-ooc']
tokens = {
'root': [
(r'\b(class|interface|implement|abstract|extends|from|'
r'this|super|new|const|final|static|import|use|extern|'
r'inline|proto|break|continue|fallthrough|operator|if|else|for|'
r'while|do|switch|case|as|in|version|return|true|false|null)\b',
Keyword),
(r'include\b', Keyword, 'include'),
(r'(cover)([ \t]+)(from)([ \t]+)([a-zA-Z0-9_]+[*@]?)',
bygroups(Keyword, Text, Keyword, Text, Name.Class)),
(r'(func)((?:[ \t]|\\\n)+)(~[a-z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Function)),
(r'\bfunc\b', Keyword),
# Note: %= and ^= not listed on http://ooc-lang.org/syntax
(r'//.*', Comment),
(r'(?s)/\*.*?\*/', Comment.Multiline),
(r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
r'&&?|\|\|?|\^=?)', Operator),
(r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
Name.Function)),
(r'[A-Z][A-Z0-9_]+', Name.Constant),
(r'[A-Z][a-zA-Z0-9_]*([@*]|\[[ \t]*\])?', Name.Class),
(r'([a-z][a-zA-Z0-9_]*(?:~[a-z][a-zA-Z0-9_]*)?)((?:[ \t]|\\\n)*)(?=\()',
bygroups(Name.Function, Text)),
(r'[a-z][a-zA-Z0-9_]*', Name.Variable),
# : introduces types
(r'[:(){}\[\];,]', Punctuation),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'0c[0-9]+', Number.Oct),
(r'0b[01]+', Number.Binary),
(r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
(r'[0-9_]+', Number.Decimal),
(r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\"])*"',
String.Double),
(r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'@', Punctuation), # pointer dereference
(r'\.', Punctuation), # imports or chain operator
(r'\\[ \t\n]', Text),
(r'[ \t]+', Text),
],
'include': [
(r'[\w/]+', Name),
(r',', Punctuation),
(r'[ \t]', Text),
(r'[;\n]', Text, '#pop'),
],
}
class GoLexer(RegexLexer):
"""
For `Go <http://golang.org>`_ source.
"""
name = 'Go'
filenames = ['*.go']
aliases = ['go']
mimetypes = ['text/x-gosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuations
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(break|default|func|interface|select'
r'|case|defer|go|map|struct'
r'|chan|else|goto|package|switch'
r'|const|fallthrough|if|range|type'
r'|continue|for|import|return|var)\b', Keyword
),
# It seems the builtin types aren't actually keywords.
(r'(uint8|uint16|uint32|uint64'
r'|int8|int16|int32|int64'
r'|float32|float64|byte'
r'|uint|int|float|uintptr'
r'|string|close|closed|len|cap|new|make)\b', Name.Builtin
),
# float_lit
(r'\d+(\.\d+[eE][+\-]?\d+|'
r'\.\d*|[eE][+\-]?\d+)', Number.Float),
(r'\.\d+([eE][+\-]?\d+)?', Number.Float),
# int_lit
# -- octal_lit
(r'0[0-7]+', Number.Oct),
# -- hex_lit
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- decimal_lit
(r'(0|[1-9][0-9]*)', Number.Integer),
# char_lit
(r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
String.Char
),
# StringLiteral
# -- raw_string_lit
(r'`[^`]*`', String),
# -- interpreted_string_lit
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
r'|<-|\+\+|--|==|!=|:=|\.\.\.)|[+\-*/%&|^<>=!()\[\]{}.,;:]',
Punctuation
),
# identifier
(r'[a-zA-Z_]\w*', Name),
]
}
class FelixLexer(RegexLexer):
"""
For `Felix <http://www.felix-lang.org>`_ source code.
*New in Pygments 1.2.*
"""
name = 'Felix'
aliases = ['felix', 'flx']
filenames = ['*.flx', '*.flxh']
mimetypes = ['text/x-felix']
preproc = [
'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
]
keywords = [
'_', '_deref', 'all', 'as',
'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
'when', 'whilst', 'with', 'yield',
]
keyword_directives = [
'_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
'package', 'private', 'pod', 'property', 'public', 'publish',
'requires', 'todo', 'virtual', 'use',
]
keyword_declarations = [
'def', 'let', 'ref', 'val', 'var',
]
keyword_types = [
'unit', 'void', 'any', 'bool',
'byte', 'offset',
'address', 'caddress', 'cvaddress', 'vaddress',
'tiny', 'short', 'int', 'long', 'vlong',
'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float', 'double', 'ldouble',
'complex', 'dcomplex', 'lcomplex',
'imaginary', 'dimaginary', 'limaginary',
'char', 'wchar', 'uchar',
'charp', 'charcp', 'ucharp', 'ucharcp',
'string', 'wstring', 'ustring',
'cont',
'array', 'varray', 'list',
'lvalue', 'opt', 'slice',
]
keyword_constants = [
'false', 'true',
]
operator_words = [
'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
]
name_builtins = [
'_svc', 'while',
]
name_pseudo = [
'root', 'self', 'this',
]
decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
tokens = {
'root': [
include('whitespace'),
# Keywords
(r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword,
'funcname'),
(r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'),
(r'(instance|module|typeclass)\b', Keyword, 'modulename'),
(r'(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator),
(r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration),
(r'(%s)\b' % '|'.join(keyword_types), Keyword.Type),
(r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant),
# Operators
include('operators'),
# Float Literal
# -- Hex Float
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+%s' % decimal_suffixes, Number),
# -- Octal
(r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
# Strings
('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
('([rR][cC]?|[cC][rR])"', String, 'dqs'),
("([rR][cC]?|[cC][rR])'", String, 'sqs'),
('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
# Punctuation
(r'[\[\]{}:(),;?]', Punctuation),
# Labels
(r'[a-zA-Z_]\w*:>', Name.Label),
# Identifiers
(r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
(r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
include('comment'),
# Preprocessor
(r'#\s*if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
],
'operators': [
(r'(%s)\b' % '|'.join(operator_words), Operator.Word),
(r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
],
'comment': [
(r'//(.*?)\n', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment2'),
],
'comment2': [
(r'[^\/*]', Comment.Multiline),
(r'/[*]', Comment.Multiline, '#push'),
(r'[*]/', Comment.Multiline, '#pop'),
(r'[\/*]', Comment.Multiline),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
(r'.*?\n', Comment),
],
'macro': [
include('comment'),
(r'(import|include)(\s+)(<[^>]*?>)',
bygroups(Comment.Preproc, Text, String), '#pop'),
(r'(import|include)(\s+)("[^"]*?")',
bygroups(Comment.Preproc, Text, String), '#pop'),
(r"(import|include)(\s+)('[^']*?')",
bygroups(Comment.Preproc, Text, String), '#pop'),
(r'[^/\n]+', Comment.Preproc),
##(r'/[*](.|\n)*?[*]/', Comment),
##(r'//.*?\n', Comment, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'funcname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
# anonymous functions
(r'(?=\()', Text, '#pop'),
],
'classname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# anonymous classes
(r'(?=\{)', Text, '#pop'),
],
'modulename': [
include('whitespace'),
(r'\[', Punctuation, ('modulename2', 'tvarlist')),
(r'', Error, 'modulename2'),
],
'modulename2': [
include('whitespace'),
(r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
],
'tvarlist': [
include('whitespace'),
include('operators'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r',', Punctuation),
(r'(with|where)\b', Keyword),
(r'[a-zA-Z_]\w*', Name),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
# included here again for raw strings
(r'\\\\|\\"|\\\n', String.Escape),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
# included here again for raw strings
(r"\\\\|\\'|\\\n", String.Escape),
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
class AdaLexer(RegexLexer):
"""
For Ada source code.
*New in Pygments 1.3.*
"""
name = 'Ada'
    aliases = ['ada', 'ada95', 'ada2005']
filenames = ['*.adb', '*.ads', '*.ada']
mimetypes = ['text/x-ada']
flags = re.MULTILINE | re.I # Ignore case
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'--.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
(r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
(r'(subtype|type)(\s+)([a-z0-9_]+)',
bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
(r'task|protected', Keyword.Declaration),
(r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
(r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
(r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text,
Comment.Preproc)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(Byte|Character|Float|Integer|Long_Float|Long_Integer|'
r'Long_Long_Float|Long_Long_Integer|Natural|Positive|Short_Float|'
r'Short_Integer|Short_Short_Float|Short_Short_Integer|String|'
r'Wide_String|Duration)\b', Keyword.Type),
            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
(r'generic|private', Keyword.Declaration),
(r'package', Keyword.Declaration, 'package'),
(r'array\b', Keyword.Reserved, 'array_def'),
(r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)',
bygroups(Name.Constant, Text, Punctuation, Text,
Keyword.Reserved)),
(r'<<[a-z0-9_]+>>', Name.Label),
(r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
(r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|'
r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|'
r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|'
r'null|of|or|others|out|overriding|pragma|protected|raise|range|'
r'record|renames|requeue|return|reverse|select|separate|subtype|'
r'synchronized|task|tagged|terminate|then|type|until|when|while|'
r'xor)\b',
Keyword.Reserved),
(r'"[^"]*"', String),
include('attribute'),
include('numbers'),
(r"'[^']'", String.Character),
(r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))),
(r"(<>|=>|:=|[()|:;,.'])", Punctuation),
(r'[*<>+=/&-]', Operator),
(r'\n+', Text),
],
'numbers' : [
(r'[0-9_]+#[0-9a-f]+#', Number.Hex),
(r'[0-9_]+\.[0-9_]*', Number.Float),
(r'[0-9_]+', Number.Integer),
],
'attribute' : [
(r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)),
],
'subprogram' : [
(r'\(', Punctuation, ('#pop', 'formal_part')),
(r';', Punctuation, '#pop'),
(r'is\b', Keyword.Reserved, '#pop'),
(r'"[^"]+"|[a-z0-9_]+', Name.Function),
include('root'),
],
'end' : [
('(if|case|record|loop|select)', Keyword.Reserved),
('"[^"]+"|[a-zA-Z0-9_.]+', Name.Function),
('\s+', Text),
(';', Punctuation, '#pop'),
],
'type_def': [
(r';', Punctuation, '#pop'),
(r'\(', Punctuation, 'formal_part'),
(r'with|and|use', Keyword.Reserved),
(r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
(r'record\b', Keyword.Reserved, ('formal_part')),
include('root'),
],
'array_def' : [
(r';', Punctuation, '#pop'),
(r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text,
Keyword.Reserved)),
include('root'),
],
'import': [
(r'[a-z0-9_.]+', Name.Namespace, '#pop'),
(r'', Text, '#pop'),
],
'formal_part' : [
(r'\)', Punctuation, '#pop'),
(r'[a-z0-9_]+', Name.Variable),
(r',|:[^=]', Punctuation),
(r'(in|not|null|out|access)\b', Keyword.Reserved),
include('root'),
],
'package': [
('body', Keyword.Declaration),
('is\s+new|renames', Keyword.Reserved),
('is', Keyword.Reserved, '#pop'),
(';', Punctuation, '#pop'),
('\(', Punctuation, 'package_instantiation'),
('([a-zA-Z0-9_.]+)', Name.Class),
include('root'),
],
'package_instantiation': [
(r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable,
Text, Punctuation)),
(r'[a-z0-9._\'"]', Text),
(r'\)', Punctuation, '#pop'),
include('root'),
],
}
class Modula2Lexer(RegexLexer):
"""
For `Modula-2 <http://www.modula2.org/>`_ source code.
Additional options that determine which keywords are highlighted:
`pim`
Select PIM Modula-2 dialect (default: True).
`iso`
Select ISO Modula-2 dialect (default: False).
`objm2`
Select Objective Modula-2 dialect (default: False).
`gm2ext`
Also highlight GNU extensions (default: False).
*New in Pygments 1.3.*
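    Illustrative usage sketch (the options above are simply keyword
    arguments consumed by the constructor; not an exhaustive example):
        >>> from pygments.lexers import Modula2Lexer
        >>> lexer = Modula2Lexer(iso=True, gm2ext=True)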
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
filenames = ['*.def', '*.mod']
mimetypes = ['text/x-modula2']
flags = re.MULTILINE | re.DOTALL
tokens = {
'whitespace': [
(r'\n+', Text), # blank lines
(r'\s+', Text), # whitespace
],
'identifiers': [
(r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name),
],
'numliterals': [
(r'[01]+B', Number.Binary), # binary number (ObjM2)
(r'[0-7]+B', Number.Oct), # octal number (PIM + ISO)
(r'[0-7]+C', Number.Oct), # char code (PIM + ISO)
(r'[0-9A-F]+C', Number.Hex), # char code (ObjM2)
(r'[0-9A-F]+H', Number.Hex), # hexadecimal number
(r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number
(r'[0-9]+\.[0-9]+', Number.Float), # real number
(r'[0-9]+', Number.Integer), # decimal whole number
],
'strings': [
(r"'(\\\\|\\'|[^'])*'", String), # single quoted string
(r'"(\\\\|\\"|[^"])*"', String), # double quoted string
],
'operators': [
(r'[*/+=#~&<>\^-]', Operator),
(r':=', Operator), # assignment
(r'@', Operator), # pointer deref (ISO)
(r'\.\.', Operator), # ellipsis or range
(r'`', Operator), # Smalltalk message (ObjM2)
(r'::', Operator), # type conversion (ObjM2)
],
'punctuation': [
(r'[\(\)\[\]{},.:;|]', Punctuation),
],
'comments': [
(r'//.*?\n', Comment.Single), # ObjM2
(r'/\*(.*?)\*/', Comment.Multiline), # ObjM2
(r'\(\*([^\$].*?)\*\)', Comment.Multiline),
# TO DO: nesting of (* ... *) comments
],
'pragmas': [
(r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM
(r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2
],
'root': [
include('whitespace'),
include('comments'),
include('pragmas'),
include('identifiers'),
include('numliterals'),
include('strings'),
include('operators'),
include('punctuation'),
]
}
pim_reserved_words = [
# 40 reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION',
'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR',
'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD',
'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED',
'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
'UNTIL', 'VAR', 'WHILE', 'WITH',
]
pim_pervasives = [
# 31 pervasives
'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC',
'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL',
'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD',
'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL',
]
iso_reserved_words = [
# 46 reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY',
'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN',
'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER',
'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY',
'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
'WITH',
]
iso_pervasives = [
# 42 pervasives
'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX',
'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH',
'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH',
'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW',
'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE',
        'TRUE', 'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
]
objm2_reserved_words = [
# base language, 42 reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF',
'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD',
'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE',
'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
'UNTIL', 'VAR', 'VARIADIC', 'WHILE',
# OO extensions, 16 reserved words
'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
'SUPER', 'TRY',
]
objm2_pervasives = [
# base language, 38 pervasives
'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE',
'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD',
'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL',
'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX',
'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF',
# OO extensions, 3 pervasives
'OBJECT', 'NO', 'YES',
]
gnu_reserved_words = [
# 10 additional reserved words
'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
'__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
]
gnu_pervasives = [
# 21 identifiers, actually from pseudo-module SYSTEM
# but we will highlight them as if they were pervasives
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
]
def __init__(self, **options):
self.reserved_words = set()
self.pervasives = set()
# ISO Modula-2
if get_bool_opt(options, 'iso', False):
self.reserved_words.update(self.iso_reserved_words)
self.pervasives.update(self.iso_pervasives)
# Objective Modula-2
elif get_bool_opt(options, 'objm2', False):
self.reserved_words.update(self.objm2_reserved_words)
self.pervasives.update(self.objm2_pervasives)
# PIM Modula-2 (DEFAULT)
else:
self.reserved_words.update(self.pim_reserved_words)
self.pervasives.update(self.pim_pervasives)
# GNU extensions
if get_bool_opt(options, 'gm2ext', False):
self.reserved_words.update(self.gnu_reserved_words)
self.pervasives.update(self.gnu_pervasives)
# initialise
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# check for reserved words and pervasives
if token is Name:
if value in self.reserved_words:
token = Keyword.Reserved
elif value in self.pervasives:
token = Keyword.Pervasive
# return result
yield index, token, value
class BlitzMaxLexer(RegexLexer):
"""
For `BlitzMax <http://blitzbasic.com>`_ source code.
*New in Pygments 1.4.*
"""
name = 'BlitzMax'
aliases = ['blitzmax', 'bmax']
filenames = ['*.bmx']
mimetypes = ['text/x-bmx']
bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
bmax_sktypes = r'@{1,2}|[!#$%]'
bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
bmax_name = r'[a-z_][a-z0-9_]*'
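    # bmax_var matches an identifier optionally followed by a type annotation
    # (a shortcut sigil such as @, @@, !, #, $, % or a ':' plus a type name)
    # and an optional 'Ptr' suffix; bmax_func additionally requires a
    # trailing '('.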
    bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)|'
                r'([ \t]*)([:])([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)'
                % (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name))
bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
(r'\.\.\n', Text), # Line continuation
# Comments
(r"'.*?\n", Comment.Single),
(r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]*(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number), # Binary
# Other
(r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
(bmax_vopwords), Operator),
(r'[(),.:\[\]]', Punctuation),
(r'(?:#[\w \t]*)', Name.Label),
(r'(?:\?[\w \t]*)', Comment.Preproc),
# Identifiers
(r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
(r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
(bmax_name, bmax_name),
bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
(bmax_func, bygroups(Name.Function, Text, Keyword.Type,
Operator, Text, Punctuation, Text,
Keyword.Type, Name.Class, Text,
Keyword.Type, Text, Punctuation)),
(bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
Text, Punctuation, Text, Keyword.Type,
Name.Class, Text, Keyword.Type)),
(r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Ptr)\b', Keyword.Type),
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
(r'\b(TNullMethodException|TNullFunctionException|'
r'TNullObjectException|TArrayBoundsException|'
r'TRuntimeException)\b', Name.Exception),
(r'\b(Strict|SuperStrict|Module|ModuleInfo|'
r'End|Return|Continue|Exit|Public|Private|'
r'Var|VarPtr|Chr|Len|Asc|SizeOf|Sgn|Abs|Min|Max|'
r'New|Release|Delete|'
r'Incbin|IncbinPtr|IncbinLen|'
r'Framework|Include|Import|Extern|EndExtern|'
r'Function|EndFunction|'
r'Type|EndType|Extends|'
r'Method|EndMethod|'
r'Abstract|Final|'
r'If|Then|Else|ElseIf|EndIf|'
r'For|To|Next|Step|EachIn|'
r'While|Wend|EndWhile|'
r'Repeat|Until|Forever|'
r'Select|Case|Default|EndSelect|'
r'Try|Catch|EndTry|Throw|Assert|'
r'Goto|DefData|ReadData|RestoreData)\b', Keyword.Reserved),
# Final resolve (for variable names and such)
(r'(%s)' % (bmax_name), Name.Variable),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class NimrodLexer(RegexLexer):
"""
For `Nimrod <http://nimrod-code.org/>`_ source code.
*New in Pygments 1.5.*
"""
name = 'Nimrod'
aliases = ['nimrod', 'nim']
filenames = ['*.nim', '*.nimrod']
mimetypes = ['text/x-nimrod']
flags = re.MULTILINE | re.IGNORECASE | re.UNICODE
def underscorize(words):
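        # Join the given words into a regex alternation in which every
        # character may be followed by an optional underscore, since Nimrod
        # treats underscores in identifiers as insignificant.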
newWords = []
new = ""
for word in words:
for ch in word:
new += (ch + "_?")
newWords.append(new)
new = ""
return "|".join(newWords)
keywords = [
'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break',
'case', 'cast', 'const', 'continue', 'converter', 'discard',
'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally',
'for', 'generic', 'if', 'implies', 'in', 'yield',
'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method',
'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc',
'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try',
'tuple', 'type' , 'when', 'while', 'with', 'without', 'xor'
]
keywordsPseudo = [
'nil', 'true', 'false'
]
opWords = [
'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
'notin', 'is', 'isnot'
]
types = [
'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
]
tokens = {
'root': [
(r'##.*$', String.Doc),
(r'#.*$', Comment),
(r'\*|=|>|<|\+|-|/|@|\$|~|&|%|\!|\?|\||\\|\[|\]', Operator),
(r'\.\.|\.|,|\[\.|\.\]|{\.|\.}|\(\.|\.\)|{|}|\(|\)|:|\^|`|;',
Punctuation),
# Strings
(r'(?:[\w]+)"', String, 'rdqs'),
(r'"""', String, 'tdqs'),
('"', String, 'dqs'),
# Char
("'", String.Char, 'chars'),
# Keywords
(r'(%s)\b' % underscorize(opWords), Operator.Word),
(r'(p_?r_?o_?c_?\s)(?![\(\[\]])', Keyword, 'funcname'),
(r'(%s)\b' % underscorize(keywords), Keyword),
(r'(%s)\b' % underscorize(['from', 'import', 'include']),
Keyword.Namespace),
(r'(v_?a_?r)\b', Keyword.Declaration),
(r'(%s)\b' % underscorize(types), Keyword.Type),
(r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
# Identifiers
(r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
# Numbers
(r'[0-9][0-9_]*(?=([eE.]|\'[fF](32|64)))',
Number.Float, ('float-suffix', 'float-number')),
(r'0[xX][a-fA-F0-9][a-fA-F0-9_]*', Number.Hex, 'int-suffix'),
(r'0[bB][01][01_]*', Number, 'int-suffix'),
(r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
(r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
# Whitespace
(r'\s+', Text),
(r'.+$', Error),
],
'chars': [
(r'\\([\\abcefnrtvl"\']|x[a-fA-F0-9]{2}|[0-9]{1,3})', String.Escape),
(r"'", String.Char, '#pop'),
(r".", String.Char)
],
'strings': [
(r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
(r'[^\\\'"\$\n]+', String),
# quotes, dollars and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'\$', String)
# newlines are an error (use "nl" state)
],
'dqs': [
(r'\\([\\abcefnrtvl"\']|\n|x[a-fA-F0-9]{2}|[0-9]{1,3})',
String.Escape),
(r'"', String, '#pop'),
include('strings')
],
'rdqs': [
(r'"(?!")', String, '#pop'),
(r'""', String.Escape),
include('strings')
],
'tdqs': [
(r'"""(?!")', String, '#pop'),
include('strings'),
include('nl')
],
'funcname': [
(r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
(r'`.+`', Name.Function, '#pop')
],
'nl': [
(r'\n', String)
],
'float-number': [
(r'\.(?!\.)[0-9_]*', Number.Float),
(r'[eE][+-]?[0-9][0-9_]*', Number.Float),
(r'', Text, '#pop')
],
'float-suffix': [
(r'\'[fF](32|64)', Number.Float),
(r'', Text, '#pop')
],
'int-suffix': [
(r'\'[iI](32|64)', Number.Integer.Long),
(r'\'[iI](8|16)', Number.Integer),
(r'', Text, '#pop')
],
}
class FantomLexer(RegexLexer):
"""
For Fantom source code.
*New in Pygments 1.5.*
"""
name = 'Fantom'
aliases = ['fan']
filenames = ['*.fan']
mimetypes = ['application/x-fantom']
# often used regexes
def s(str):
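        # Expand the $pod/$id/$type placeholders used in the token
        # definitions below into their concrete sub-regexes via ``Template``
        # substitution.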
return Template(str).substitute(
dict (
pod = r'[\"\w\.]+',
eos = r'\n|;',
id = r'[a-zA-Z_][a-zA-Z0-9_]*',
# all chars which can be part of type definition. Starts with
# either letter, or [ (maps), or | (funcs)
type = r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]\|\->\?]*?',
)
)
tokens = {
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline), #Multiline
(r'//.*?\n', Comment.Single), #Single line
#todo: highlight references in fandocs
(r'\*\*.*?\n', Comment.Special), #Fandoc
(r'#.*\n', Comment.Single) #Shell-style
],
'literals': [
(r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), #Duration
(r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number),
#Duration with dot
(r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), #Float/Decimal
(r'\b-?0x[0-9a-fA-F_]+', Number.Hex), #Hex
(r'\b-?[\d_]+', Number.Integer), #Int
(r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), #Char
(r'"', Punctuation, 'insideStr'), #Opening quote
(r'`', Punctuation, 'insideUri'), #Opening accent
(r'\b(true|false|null)\b', Keyword.Constant), #Bool & null
(r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', #DSL
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, String, Punctuation)),
(r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', #Type/slot literal
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, Name.Function)),
(r'\[,\]', Literal), # Empty list
(s(r'($type)(\[,\])'), # Typed empty list
bygroups(using(this, state = 'inType'), Literal)),
(r'\[:\]', Literal), # Empty Map
(s(r'($type)(\[:\])'),
bygroups(using(this, state = 'inType'), Literal)),
],
'insideStr': [
(r'\\\\', String.Escape), #Escaped backslash
(r'\\"', String.Escape), #Escaped "
(r'\\`', String.Escape), #Escaped `
(r'\$\w+', String.Interpol), #Subst var
(r'\${.*?}', String.Interpol), #Subst expr
(r'"', Punctuation, '#pop'), #Closing quot
(r'.', String) #String content
],
'insideUri': [ #TODO: remove copy/paste str/uri
(r'\\\\', String.Escape), #Escaped backslash
(r'\\"', String.Escape), #Escaped "
(r'\\`', String.Escape), #Escaped `
(r'\$\w+', String.Interpol), #Subst var
(r'\${.*?}', String.Interpol), #Subst expr
(r'`', Punctuation, '#pop'), #Closing tick
(r'.', String.Backtick) #URI content
],
'protectionKeywords': [
(r'\b(public|protected|private|internal)\b', Keyword),
],
'typeKeywords': [
(r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
],
'methodKeywords': [
(r'\b(abstract|native|once|override|static|virtual|final)\b',
Keyword),
],
'fieldKeywords': [
(r'\b(abstract|const|final|native|override|static|virtual|'
r'readonly)\b', Keyword)
],
'otherKeywords': [
(r'\b(try|catch|throw|finally|for|if|else|while|as|is|isnot|'
r'switch|case|default|continue|break|do|return|get|set)\b',
Keyword),
(r'\b(it|this|super)\b', Name.Builtin.Pseudo),
],
'operators': [
(r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
],
'inType': [
(r'[\[\]\|\->:\?]', Punctuation),
(s(r'$id'), Name.Class),
(r'', Text, '#pop'),
],
'root': [
include('comments'),
include('protectionKeywords'),
include('typeKeywords'),
include('methodKeywords'),
include('fieldKeywords'),
include('literals'),
include('otherKeywords'),
include('operators'),
(r'using\b', Keyword.Namespace, 'using'), # Using stmt
(r'@\w+', Name.Decorator, 'facet'), # Symbol
(r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class),
'inheritance'), # Inheritance list
### Type var := val
(s(r'($type)([ \t]+)($id)(\s*)(:=)'),
bygroups(using(this, state = 'inType'), Text,
Name.Variable, Text, Operator)),
### var := val
(s(r'($id)(\s*)(:=)'),
bygroups(Name.Variable, Text, Operator)),
### .someId( or ->someId( ###
(s(r'(\.|(?:\->))($id)(\s*)(\()'),
bygroups(Operator, Name.Function, Text, Punctuation),
'insideParen'),
### .someId or ->someId
(s(r'(\.|(?:\->))($id)'),
bygroups(Operator, Name.Function)),
### new makeXXX ( ####
(r'(new)(\s+)(make\w*)(\s*)(\()',
bygroups(Keyword, Text, Name.Function, Text, Punctuation),
'insideMethodDeclArgs'),
### Type name ( ####
(s(r'($type)([ \t]+)' #Return type and whitespace
r'($id)(\s*)(\()'), #method name + open brace
bygroups(using(this, state = 'inType'), Text,
Name.Function, Text, Punctuation),
'insideMethodDeclArgs'),
### ArgType argName, #####
(s(r'($type)(\s+)($id)(\s*)(,)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation)),
#### ArgType argName) ####
## Covered in 'insideParen' state
### ArgType argName -> ArgType| ###
(s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation, Text, using(this, state = 'inType'),
Punctuation)),
### ArgType argName| ###
(s(r'($type)(\s+)($id)(\s*)(\|)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation)),
### Type var
(s(r'($type)([ \t]+)($id)'),
bygroups(using(this, state='inType'), Text,
Name.Variable)),
(r'\(', Punctuation, 'insideParen'),
(r'\{', Punctuation, 'insideBrace'),
(r'.', Text)
],
'insideParen': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'insideMethodDeclArgs': [
(r'\)', Punctuation, '#pop'),
(s(r'($type)(\s+)($id)(\s*)(\))'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation), '#pop'),
include('root'),
],
'insideBrace': [
(r'\}', Punctuation, '#pop'),
include('root'),
],
'inheritance': [
(r'\s+', Text), #Whitespace
(r':|,', Punctuation),
(r'(?:(\w+)(::))?(\w+)',
bygroups(Name.Namespace, Punctuation, Name.Class)),
(r'{', Punctuation, '#pop')
],
'using': [
(r'[ \t]+', Text), # consume whitespaces
(r'(\[)(\w+)(\])',
bygroups(Punctuation, Comment.Special, Punctuation)), #ffi
(r'(\")?([\w\.]+)(\")?',
bygroups(Punctuation, Name.Namespace, Punctuation)), #podname
(r'::', Punctuation, 'usingClass'),
(r'', Text, '#pop')
],
'usingClass': [
(r'[ \t]+', Text), # consume whitespaces
(r'(as)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'),
(r'[\w\$]+', Name.Class),
(r'', Text, '#pop:2') # jump out to root state
],
'facet': [
(r'\s+', Text),
(r'{', Punctuation, 'facetFields'),
(r'', Text, '#pop')
],
'facetFields': [
include('comments'),
include('literals'),
include('operators'),
(r'\s+', Text),
(r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)),
(r'}', Punctuation, '#pop'),
(r'.', Text)
],
}
|
sergioska/EvernoteSyntaxHighlight
|
pygments/lexers/compiled.py
|
Python
|
apache-2.0
| 123,851
|
[
"ASE"
] |
22694aaf9f766a5606adc82a41f7637f3840cccbf8b018219276442bedfcbdca
|
"""
Read a single output file and return its parsed contents.
"""
import logging
import os
import re
import csv
import string
import pandas as pd
from .. import io
from .. import parsing
from .. import util
logger = logging.getLogger(__name__)
class OutputReader(object):
"""
Given a path to an output folder or list of files, combine parsed data
from files and write CSV.
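    Illustrative usage sketch (the path and library ID below are
    hypothetical):
        >>> reader = OutputReader('/mnt/genomics/project1/metrics')
        >>> data = reader.read_data('lib1234')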
"""
def __init__(self, path, output_type=None):
logger.debug("creating `OutputReader` for path '{}'".format(path))
self.path = path
        if output_type is None:
            self.type = self._sniff_output_type()
        else:
            # honor an explicitly provided output type
            self.type = output_type
def _sniff_output_type(self):
"""
Return predicted output type based on specified path.
"""
output_types = ['metrics', 'QC', 'counts', 'validation']
output_match = [t.lower() for t in output_types
if re.search(t, self.path)]
if len(output_match):
return output_match[0]
def _get_outputs(self, library, output_type):
"""
Return list of outputs of specified type.
"""
output_filetypes = {'metrics': 'txt|html',
'qc': 'txt',
'counts': 'txt',
'validation': 'csv'}
return [os.path.join(self.path, f)
for f in os.listdir(self.path)
if re.search(output_type, f)
and re.search(library, f)
and not re.search('combined', f)
and re.search(output_filetypes[output_type], os.path.splitext(f)[-1])]
def _get_parser(self, output_type, output_source):
"""
Return the appropriate parser for the current output file.
"""
parsers = {
'metrics': {'htseq': getattr(io, 'HtseqMetricsFile'),
'picard-rnaseq': getattr(io, 'PicardMetricsFile'),
'picard-markdups': getattr(io, 'PicardMetricsFile'),
'picard-align': getattr(io, 'PicardMetricsFile'),
'picard-alignment': getattr(io, 'PicardMetricsFile'),
'tophat-stats': getattr(io, 'TophatStatsFile')},
'qc': {'fastqc': getattr(io, 'FastQCFile')},
'counts': {'htseq': getattr(io, 'HtseqCountsFile')},
'validation': {'sexcheck': getattr(io, 'SexcheckFile')}
}
logger.debug("matched parser '{}' for output type '{}' and source '{}'"
.format(parsers[output_type][output_source],
output_type, output_source))
return parsers[output_type][output_source]
def read_data(self, seqlib_id):
"""
Parse and store data for a output file.
"""
outputs = self._get_outputs(seqlib_id, self.type)
self.data = {}
for o in outputs:
logger.debug("parsing output file '{}'".format(o))
out_items = parsing.parse_output_filename(o)
proclib_id = out_items['sample_id']
out_type = out_items['type']
out_source = out_items['source']
logger.debug("storing data from '{}' in '{}' '{}'".format(out_source, proclib_id, out_type))
out_parser = self._get_parser(out_type, out_source)(path=o)
#self.data.setdefault(out_type, {}).setdefault(proclib_id, []).append({out_source: out_parser.parse()})
dataframe = out_parser.parse()
#logger.info("dataframe: {}".format(dataframe))
if self.type == 'counts':
self.data = dataframe.set_index('geneName')['count'].to_dict()
else:
mod_source = out_source.replace("-", "_")
self.data.setdefault(out_type, []).append({mod_source: out_parser.parse()})
return self.data
|
jaeddy/bripipetools
|
bripipetools/postprocessing/reading.py
|
Python
|
mit
| 3,872
|
[
"HTSeq"
] |
5b4b4841ca1912241927491c9a118e6c0b23288c3736e9af302d3b7e184ab913
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
from math import log, sqrt
import warnings
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils import atleast2d_or_csr
from ..utils.extmath import fast_logdet, safe_sparse_dot, randomized_svd, \
fast_dot
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
    n_features: int,
        embedding/empirical dimension (number of features)
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.
`n_components_` : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
""" Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = array2d(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = (self.explained_variance_ /
self.explained_variance_.sum())
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(self.explained_variance_,
n_samples, n_features)
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = self.explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
self.components_ = self.components_[:n_components, :]
self.explained_variance_ = \
self.explained_variance_[:n_components]
self.explained_variance_ratio_ = \
self.explained_variance_ratio_[:n_components]
self.n_components_ = n_components
return (U, S, V)
def transform(self, X):
"""Apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = array2d(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation as transform.
"""
return fast_dot(X, self.components_) + self.mean_
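# Illustrative sketch (not part of the original module): the intended
# fit / transform / inverse_transform round trip. When n_components equals the
# full rank of the data, inverse_transform recovers the input exactly (up to
# floating point error); with fewer components it returns the best rank-k
# least-squares reconstruction. The helper name and toy data are assumptions
# made purely for illustration.
def _demo_pca_round_trip():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    pca = PCA(n_components=5).fit(X)
    X_reduced = pca.transform(X)
    X_back = pca.inverse_transform(X_reduced)
    # full-rank reconstruction matches the original data
    assert np.allclose(X, X_back)
    return X_reduced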
class ProbabilisticPCA(PCA):
"""Additional layer on top of PCA that adds a probabilistic evaluation"""
__doc__ += PCA.__doc__
def fit(self, X, y=None, homoscedastic=True):
"""Additionally to PCA.fit, learns a covariance model
Parameters
----------
X : array of shape(n_samples, n_features)
The data to fit
homoscedastic : bool, optional,
If True, average variance across remaining dimensions
"""
PCA.fit(self, X)
n_samples, n_features = X.shape
self._dim = n_features
Xr = X - self.mean_
Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
n_components = self.n_components
if n_components is None:
n_components = n_features
# Make the low rank part of the estimated covariance
self.covariance_ = np.dot(self.components_[:n_components].T *
self.explained_variance_[:n_components],
self.components_[:n_components])
if n_features == n_components:
delta = 0.
elif homoscedastic:
delta = (Xr ** 2).sum() / (n_samples * n_features)
else:
delta = (Xr ** 2).mean(axis=0) / (n_features - n_components)
# Add delta to the diagonal without extra allocation
self.covariance_.flat[::n_features + 1] += delta
return self
def score(self, X, y=None):
"""Return a score associated to new data
Parameters
----------
X: array of shape(n_samples, n_features)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(self.covariance_)
log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
log_like -= .5 * (fast_logdet(self.covariance_)
+ n_features * log(2. * np.pi))
return log_like
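# Illustrative sketch (not part of the original module): ProbabilisticPCA fits
# a Gaussian model (low-rank covariance plus isotropic noise), so score()
# returns a per-row log-likelihood that can be used to compare held-out data
# against the training distribution. The helper name and toy data are
# assumptions made purely for illustration.
def _demo_probabilistic_pca_score():
    rng = np.random.RandomState(0)
    X_train = rng.randn(50, 4)
    X_test = rng.randn(10, 4)
    ppca = ProbabilisticPCA(n_components=2).fit(X_train)
    ll = ppca.score(X_test)  # one log-likelihood value per row of X_test
    return ll.mean()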
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values and multiplied by the square root of n_samples
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
Percentage of variance explained by each of the selected components.
If n_components is not set then all components are stored and the sum
of explained variances is equal to 1.0
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
ProbabilisticPCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Notes
-----
This class supports sparse matrix input for backward compatibility, but
actually computes a truncated SVD instead of a PCA in that case (i.e. no
centering is performed). This support is deprecated; use the class
TruncatedSVD for sparse matrix support.
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.mean_ = None
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
if hasattr(X, 'todense'):
warnings.warn("Sparse matrix support is deprecated"
" and will be dropped in 0.16."
" Use TruncatedSVD instead.",
DeprecationWarning)
else:
# not a sparse matrix, ensure this is a 2D array
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
if not hasattr(X, 'todense'):
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
self.explained_variance_ratio_ = exp_var / exp_var.sum()
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# XXX remove scipy.sparse support here in 0.16
X = atleast2d_or_csr(X)
if self.mean_ is not None:
X = X - self.mean_
X = safe_sparse_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = self._fit(atleast2d_or_csr(X))
X = safe_sparse_dot(X, self.components_.T)
return X
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
# XXX remove scipy.sparse support here in 0.16
X_original = safe_sparse_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
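# Illustrative sketch (not part of the original module): on dense data the
# randomized solver approximates the exact decomposition, so the leading
# explained variances of the two estimators should agree closely. Note that
# explained_variance_ratio_ is normalised over the truncated spectrum in
# RandomizedPCA and over the full spectrum in PCA, so the ratios themselves
# are not directly comparable. The helper name and toy data are assumptions
# made purely for illustration.
def _demo_exact_vs_randomized():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    exact = PCA(n_components=3).fit(X)
    approx = RandomizedPCA(n_components=3, random_state=0).fit(X)
    return exact.explained_variance_, approx.explained_variance_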
|
depet/scikit-learn
|
sklearn/decomposition/pca.py
|
Python
|
bsd-3-clause
| 20,538
|
[
"Gaussian"
] |
6135a9b77771b51dcc5b54a44a69531f9a11103e75a647bb12b9c71bf94e4395
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import csv
import json
import os
import sys
from datetime import datetime, timedelta
from pathlib import Path
csv_columns_to_json_keys_movie = {"id": "@id", "name": "name", "description": "description", "uri": "url", "duration": "duration", "EIDR": "titleEIDR"}
csv_columns_to_json_keys_episode = {"id": "@id", "name": "name", "description": "description", "uri": "url", "episodeNumber": "episodeNumber", "duration": "duration", "EIDR": "titleEIDR"}
context_json_key = "@context"
context_value_media = ["http://schema.org", {"@language": "en"}]
context_value = "http://schema.org"
id_json_key = "@id"
type_json_key = "@type"
type_json_value = "DataFeed"
dateModified_json_key = "dateModified"
dateModified_json_value = datetime.now().replace(microsecond=0).isoformat()+"Z"
dataFeed_json_key = "dataFeedElement"
partOfSeries_json_key = "partOfSeries"
partOfSeason_json_key = "partOfSeason"
tvSeriesUri_csv_key = "TVSeriesUri"
tvSeasonUri_csv_key = "TVSeasonUri"
seasonNumber_csv_key = "seasonNumber"
category_csv_key = "category"
potential_action_key = "potentialAction"
csv_type_to_media_action_type = {"clip" : "Clip", "episode": "TVEpisode", "movie": "Movie"}
def main():
# Parse the output file argument
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("input_file", help="CSV file to read from")
arg_parser.add_argument("output_movie_file", help="File to output media actions feed for movies in json such as movies.json")
arg_parser.add_argument("output_episodes_file", help="File to output media actions feed for TV episodes in json such as episodes.json")
args = arg_parser.parse_args()
# Avoid overwriting an existing movies file
if Path(args.output_movie_file).exists():
sys.exit(args.output_movie_file + " already exists; aborting")
# Avoid overwriting an existing episodes file
if Path(args.output_episodes_file).exists():
sys.exit(args.output_episodes_file + " already exists; aborting")
# Read in the csv and process it
try:
with open(args.input_file) as csv_file, open(args.output_movie_file, 'w') as movie_json_file, open(args.output_episodes_file,'w') as episodes_json_file:
# Read each row and convert to JSON
dict_reader = csv.DictReader(csv_file)
csv_content_list = list(dict_reader)
# Create content list for movies for the media actions feed
json_movies_list = create_json_movies_list(csv_content_list)
# Generate wrapper with content for movies
movies_dict = {context_json_key: context_value, type_json_key: type_json_value,
dateModified_json_key: dateModified_json_value, dataFeed_json_key: json_movies_list}
json.dump(obj=movies_dict, fp=movie_json_file, indent=4)
# get list of unique TV series
csv_tv_series_tuple_list = get_unique_tv_series(csv_content_list)
# Create content list for TV Series for the media actions feed
json_tv_series_list = create_tv_series_list(csv_tv_series_tuple_list)
# Create content list for TV Episodes for the media actions feed
json_episodes_list = create_json_episodes_list(csv_content_list)
# Generate wrapper with content for TV episodes
episodes_dict = {context_json_key: context_value, type_json_key: type_json_value,
dateModified_json_key: dateModified_json_value, dataFeed_json_key: json_tv_series_list + json_episodes_list}
json.dump(obj=episodes_dict, fp=episodes_json_file, indent=4)
except FileNotFoundError as err:
sys.exit("File doesn't exist: {0}".format(err))
except PermissionError as err:
sys.exit("Permission error, failed to read file: (0)".format(err))
except KeyError as err:
# Remove the output files since they weren't written to successfully
os.remove(args.output_movie_file)
os.remove(args.output_episodes_file)
sys.exit("Missing key in input CSV: {0}".format(err))
print("Movies media action feed JSON written to " + args.output_movie_file)
print("TV episodes media action feed JSON written to " + args.output_episodes_file)
def create_json_movies_list(csv_content_list):
'''Takes a list of dicts from the CSV input and returns a list of movie items for the media actions feed'''
json_movies_list = []
for content in csv_content_list:
if csv_type_to_media_action_type[content["type"]] != "Movie":
continue
movie_item = {}
movie_item[context_json_key] = context_value_media
movie_item[type_json_key] = csv_type_to_media_action_type[content["type"]]
for csv_key, json_key in csv_columns_to_json_keys_movie.items():
movie_item[json_key] = content[csv_key]
movie_item[potential_action_key] = create_potentialAction_object(content["uri"])
json_movies_list.append(movie_item)
return json_movies_list
def create_json_episodes_list(csv_content_list):
'''Takes a list of dicts from the CSV input and returns a list of TV episode items for the media actions feed'''
json_episodes_list = []
for content in csv_content_list:
if csv_type_to_media_action_type[content["type"]] != "TVEpisode":
continue
episode_item = {}
episode_item[context_json_key] = context_value_media
episode_item[type_json_key] = csv_type_to_media_action_type[content["type"]]
for csv_key, json_key in csv_columns_to_json_keys_episode.items():
episode_item[json_key] = content[csv_key]
episode_item[partOfSeason_json_key] = create_partOfSeason_object(content)
episode_item[partOfSeries_json_key] = create_partOfSeries_object(content)
episode_item[potential_action_key] = create_potentialAction_object(content["uri"])
json_episodes_list.append(episode_item)
return json_episodes_list
def create_tv_series_list(tv_series_tuple_list):
tv_series_list = []
for series_tuple in tv_series_tuple_list:
series_item = {}
series_item[context_json_key] = context_value_media
series_item[type_json_key] = "TVSeries"
series_item[id_json_key] = series_tuple[tvSeriesUri_csv_key]
series_item["url"] = series_tuple[tvSeriesUri_csv_key]
series_item["name"] = series_tuple[category_csv_key]
series_item[potential_action_key] = \
create_potentialAction_object(series_tuple[tvSeriesUri_csv_key])
tv_series_list.append(series_item)
return tv_series_list
def create_partOfSeason_object(csv_content):
partOfSeason = {}
partOfSeason[type_json_key] = "TVSeason"
partOfSeason[id_json_key] = csv_content[tvSeasonUri_csv_key]
partOfSeason["seasonNumber"] = csv_content[seasonNumber_csv_key]
return partOfSeason
def create_partOfSeries_object(csv_content):
partOfSeries = {}
partOfSeries[type_json_key] = "TVSeries"
partOfSeries[id_json_key] = csv_content[tvSeriesUri_csv_key]
partOfSeries["name"] = csv_content[category_csv_key]
return partOfSeries
def create_potentialAction_object(csv_content_uri):
potentialAction = {}
potentialAction[type_json_key] = "WatchAction"
target = {}
target[type_json_key] = "EntryPoint"
target["urlTemplate"] = csv_content_uri
target["actionPlatform"] = ["http://schema.org/AndroidTVPlatform",
"http://schema.googleapis.com/GoogleVideoCast"]
potentialAction["target"] = target
accessibility_requirement = {}
accessibility_requirement[type_json_key] = "ActionAccessSpecification"
accessibility_requirement[category_csv_key] = "nologinrequired"
accessibility_requirement["availabilityStarts"] = datetime.now().replace(microsecond=0) \
.isoformat()+"Z"
"""
Since the content is always available after it's published, the availability end date property is
set to be 20 years from the current date. For further details on the properties, visit the link -
https://developers.google.com/actions/media/reference/data-specification/watch-actions-common-specification.
"""
accessibility_requirement["availabilityEnds"] = (datetime.now() + timedelta(days=(20*365))) \
.replace(microsecond=0).isoformat()+"Z"
accessibility_requirement["eligibleRegion"] = "EARTH"
potentialAction["actionAccessibilityRequirement"] = accessibility_requirement
return potentialAction
def get_unique_tv_series(csv_content_list):
unique_tv_series = []
for content in csv_content_list:
if csv_type_to_media_action_type[content["type"]] != "TVEpisode":
continue
tv_series = {tvSeriesUri_csv_key: content[tvSeriesUri_csv_key], category_csv_key: content[category_csv_key]}
if tv_series not in unique_tv_series:
unique_tv_series.append(tv_series)
return unique_tv_series
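# Illustrative sketch (not part of the original script): a minimal in-memory
# example of the CSV row shape this script expects and the feed element it
# produces for a movie entry. The sample values (URLs, EIDR, duration) are
# assumptions made purely for illustration.
def _demo_movie_feed_element():
    sample_rows = [{
        "type": "movie",
        "id": "http://example.com/movies/1",
        "name": "Sample Movie",
        "description": "A short description.",
        "uri": "http://example.com/movies/1/watch",
        "duration": "PT1H30M",
        "EIDR": "10.5240/XXXX-XXXX-XXXX-XXXX-XXXX-X",
    }]
    # returns a single-element list containing the schema.org Movie item,
    # including its WatchAction potentialAction
    return create_json_movies_list(sample_rows)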
if __name__ == "__main__":
main()
|
android/tv-samples
|
ReferenceAppKotlin/scripts/atv-create-media-feed.py
|
Python
|
apache-2.0
| 9,780
|
[
"VisIt"
] |
f4ffc71d12922d6bdd15263b36ca99fe75ac49e5c2dbba61ed05a26aabba2ba5
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import pytz
from datetime import timedelta
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from temba.contacts.models import Contact, ContactField, ContactURN, TEL_SCHEME
from temba.channels.models import Channel, ChannelEvent, ChannelLog
from temba.msgs.models import Msg, ExportMessagesTask, RESENT, FAILED, OUTGOING, PENDING, WIRED, DELIVERED, ERRORED
from temba.msgs.models import Broadcast, Label, SystemLabel, UnreachableException
from temba.msgs.models import HANDLED, QUEUED, SENT, INCOMING, INBOX, FLOW
from temba.msgs.tasks import purge_broadcasts_task
from temba.orgs.models import Language
from temba.schedules.models import Schedule
from temba.tests import TembaTest, AnonymousOrg
from temba.utils import dict_to_struct, datetime_to_str
from temba.utils.expressions import get_function_listing
from temba.values.models import Value
from redis_cache import get_redis_connection
from xlrd import open_workbook
from .management.commands.msg_console import MessageConsole
from .tasks import squash_systemlabels
class MsgTest(TembaTest):
def setUp(self):
super(MsgTest, self).setUp()
self.joe = self.create_contact("Joe Blow", "123")
self.frank = self.create_contact("Frank Blow", "321")
self.kevin = self.create_contact("Kevin Durant", "987")
self.just_joe = self.create_group("Just Joe", [self.joe])
self.joe_and_frank = self.create_group("Joe and Frank", [self.joe, self.frank])
def test_archive_and_release(self):
msg1 = Msg.create_incoming(self.channel, 'tel:123', "Incoming")
label = Label.get_or_create(self.org, self.admin, "Spam")
label.toggle_label([msg1], add=True)
msg1.archive()
msg1 = Msg.all_messages.get(pk=msg1.pk)
self.assertEqual(msg1.visibility, Msg.VISIBILITY_ARCHIVED)
self.assertEqual(set(msg1.labels.all()), {label}) # don't remove labels
msg1.restore()
msg1 = Msg.all_messages.get(pk=msg1.pk)
self.assertEqual(msg1.visibility, Msg.VISIBILITY_VISIBLE)
msg1.release()
msg1 = Msg.all_messages.get(pk=msg1.pk)
self.assertEqual(msg1.visibility, Msg.VISIBILITY_DELETED)
self.assertEqual(set(msg1.labels.all()), set()) # do remove labels
self.assertTrue(Label.label_objects.filter(pk=label.pk).exists()) # though don't delete the label object
# can't archive outgoing messages
msg2 = Msg.create_outgoing(self.org, self.admin, self.joe, "Outgoing")
self.assertRaises(ValueError, msg2.archive)
def assertReleaseCount(self, direction, status, visibility, msg_type, label):
if direction == OUTGOING:
msg = Msg.create_outgoing(self.org, self.admin, self.joe, "Whattup Joe")
else:
msg = Msg.create_incoming(self.channel, "tel:+250788123123", "Hey hey")
Msg.all_messages.filter(id=msg.id).update(status=status, direction=direction,
visibility=visibility, msg_type=msg_type)
# assert our folder count is right
counts = SystemLabel.get_counts(self.org)
self.assertEqual(counts[label], 1)
# recalculate, check the count again
SystemLabel.recalculate_counts(self.org, label)
counts = SystemLabel.get_counts(self.org)
self.assertEqual(counts[label], 1)
# release the msg, count should now be 0
msg.release()
counts = SystemLabel.get_counts(self.org)
self.assertEqual(counts[label], 0)
# more recalculations
SystemLabel.recalculate_counts(self.org, label)
counts = SystemLabel.get_counts(self.org)
self.assertEqual(counts[label], 0)
def test_release_counts(self):
# outgoing labels
self.assertReleaseCount(OUTGOING, SENT, Msg.VISIBILITY_VISIBLE, INBOX, SystemLabel.TYPE_SENT)
self.assertReleaseCount(OUTGOING, QUEUED, Msg.VISIBILITY_VISIBLE, INBOX, SystemLabel.TYPE_OUTBOX)
self.assertReleaseCount(OUTGOING, FAILED, Msg.VISIBILITY_VISIBLE, INBOX, SystemLabel.TYPE_FAILED)
# incoming labels
self.assertReleaseCount(INCOMING, HANDLED, Msg.VISIBILITY_VISIBLE, INBOX, SystemLabel.TYPE_INBOX)
self.assertReleaseCount(INCOMING, HANDLED, Msg.VISIBILITY_ARCHIVED, INBOX, SystemLabel.TYPE_ARCHIVED)
self.assertReleaseCount(INCOMING, HANDLED, Msg.VISIBILITY_VISIBLE, FLOW, SystemLabel.TYPE_FLOWS)
def test_erroring(self):
# test with real message
msg = Msg.create_outgoing(self.org, self.admin, self.joe, "Test 1")
r = get_redis_connection()
Msg.mark_error(r, self.channel, msg)
msg = Msg.all_messages.get(pk=msg.id)
self.assertEqual(msg.status, 'E')
self.assertEqual(msg.error_count, 1)
self.assertIsNotNone(msg.next_attempt)
Msg.mark_error(r, self.channel, msg)
msg = Msg.all_messages.get(pk=msg.id)
self.assertEqual(msg.status, 'E')
self.assertEqual(msg.error_count, 2)
self.assertIsNotNone(msg.next_attempt)
Msg.mark_error(r, self.channel, msg)
msg = Msg.all_messages.get(pk=msg.id)
self.assertEqual(msg.status, 'F')
# test with mock message
msg = dict_to_struct('MsgStruct', Msg.create_outgoing(self.org, self.admin, self.joe, "Test 2").as_task_json())
Msg.mark_error(r, self.channel, msg)
msg = Msg.all_messages.get(pk=msg.id)
self.assertEqual(msg.status, 'E')
self.assertEqual(msg.error_count, 1)
self.assertIsNotNone(msg.next_attempt)
Msg.mark_error(r, self.channel, msg)
msg = Msg.all_messages.get(pk=msg.id)
self.assertEqual(msg.status, 'E')
self.assertEqual(msg.error_count, 2)
self.assertIsNotNone(msg.next_attempt)
Msg.mark_error(r, self.channel, msg)
msg = Msg.all_messages.get(pk=msg.id)
self.assertEqual(msg.status, 'F')
def test_send_message_auto_completion_processor(self):
outbox_url = reverse('msgs.msg_outbox')
# login in as manager, with contacts but without extra contactfields yet
self.login(self.admin)
completions = [dict(name='contact', display="Contact Name"),
dict(name='contact.first_name', display="Contact First Name"),
dict(name='contact.groups', display="Contact Groups"),
dict(name='contact.language', display="Contact Language"),
dict(name='contact.name', display="Contact Name"),
dict(name='contact.tel', display="Contact Phone"),
dict(name='contact.tel_e164', display="Contact Phone - E164"),
dict(name='contact.uuid', display="Contact UUID"),
dict(name="date", display="Current Date and Time"),
dict(name="date.now", display="Current Date and Time"),
dict(name="date.today", display="Current Date"),
dict(name="date.tomorrow", display="Tomorrow's Date"),
dict(name="date.yesterday", display="Yesterday's Date")]
response = self.client.get(outbox_url)
# check our completions JSON and functions JSON
self.assertEquals(response.context['completions'], json.dumps(completions))
self.assertEquals(response.context['function_completions'], json.dumps(get_function_listing()))
# add some contact fields
field = ContactField.get_or_create(self.org, self.admin, 'cell', "Cell")
completions.append(dict(name="contact.%s" % str(field.key), display="Contact Field: Cell"))
field = ContactField.get_or_create(self.org, self.admin, 'sector', "Sector")
completions.append(dict(name="contact.%s" % str(field.key), display="Contact Field: Sector"))
response = self.client.get(outbox_url)
# contact fields are included at the end in alphabetical order
self.assertEquals(response.context['completions'], json.dumps(completions))
def test_create_outgoing(self):
tel_urn = "tel:250788382382"
tel_contact = Contact.get_or_create(self.org, self.user, urns=[tel_urn])
tel_urn_obj = tel_contact.urn_objects[tel_urn]
twitter_urn = "twitter:joe"
twitter_contact = Contact.get_or_create(self.org, self.user, urns=[twitter_urn])
twitter_urn_obj = twitter_contact.urn_objects[twitter_urn]
# check creating by URN string
msg = Msg.create_outgoing(self.org, self.admin, tel_urn, "Extra spaces to remove ")
self.assertEquals(msg.contact, tel_contact)
self.assertEquals(msg.contact_urn, tel_urn_obj)
self.assertEquals(msg.text, "Extra spaces to remove") # check message text is stripped
# check creating by URN string and specific channel
msg = Msg.create_outgoing(self.org, self.admin, tel_urn, "Hello 1", channel=self.channel)
self.assertEquals(msg.contact, tel_contact)
self.assertEquals(msg.contact_urn, tel_urn_obj)
# try creating by URN string and specific channel with different scheme
with self.assertRaises(UnreachableException):
Msg.create_outgoing(self.org, self.admin, twitter_urn, "Hello 1", channel=self.channel)
# check creating by URN object
msg = Msg.create_outgoing(self.org, self.admin, tel_urn_obj, "Hello 1")
self.assertEquals(msg.contact, tel_contact)
self.assertEquals(msg.contact_urn, tel_urn_obj)
# check creating by URN object and specific channel
msg = Msg.create_outgoing(self.org, self.admin, tel_urn_obj, "Hello 1", channel=self.channel)
self.assertEquals(msg.contact, tel_contact)
self.assertEquals(msg.contact_urn, tel_urn_obj)
# try creating by URN object and specific channel with different scheme
with self.assertRaises(UnreachableException):
Msg.create_outgoing(self.org, self.admin, twitter_urn_obj, "Hello 1", channel=self.channel)
# check creating by contact
msg = Msg.create_outgoing(self.org, self.admin, tel_contact, "Hello 1")
self.assertEquals(msg.contact, tel_contact)
self.assertEquals(msg.contact_urn, tel_urn_obj)
# check creating by contact and specific channel
msg = Msg.create_outgoing(self.org, self.admin, tel_contact, "Hello 1", channel=self.channel)
self.assertEquals(msg.contact, tel_contact)
self.assertEquals(msg.contact_urn, tel_urn_obj)
# try creating by contact and specific channel with different scheme
with self.assertRaises(UnreachableException):
Msg.create_outgoing(self.org, self.admin, twitter_contact, "Hello 1", channel=self.channel)
# Can't handle outgoing messages
with self.assertRaises(ValueError):
msg.handle()
# can't create outgoing messages without org or user
with self.assertRaises(ValueError):
Msg.create_outgoing(None, self.admin, "tel:250783835665", "Hello World")
with self.assertRaises(ValueError):
Msg.create_outgoing(self.org, None, "tel:250783835665", "Hello World")
# case where the channel number is amongst contact broadcasted to
# cannot sent more than 10 same message in period of 5 minutes
for number in range(0, 10):
Msg.create_outgoing(self.org, self.admin, "tel:" + self.channel.address, 'Infinite Loop')
# now that we have 10 same messages then,
must_return_none = Msg.create_outgoing(self.org, self.admin, "tel:" + self.channel.address, 'Infinite Loop')
self.assertIsNone(must_return_none)
def test_create_incoming(self):
Msg.create_incoming(self.channel, "tel:250788382382", "It's going well")
Msg.create_incoming(self.channel, "tel:250788382382", "My name is Frank")
msg = Msg.create_incoming(self.channel, "tel:250788382382", "Yes, 3.")
self.assertEqual(msg.text, "Yes, 3.")
self.assertEqual(unicode(msg), "Yes, 3.")
# assert there are 3 unread msgs for this org
self.assertEqual(Msg.get_unread_msg_count(self.admin), 3)
# second go shouldn't hit DB
with self.assertNumQueries(0):
self.assertEqual(Msg.get_unread_msg_count(self.admin), 3)
# Can't send incoming messages
with self.assertRaises(Exception):
msg.send()
# can't create outgoing messages against an unassigned channel
unassigned_channel = Channel.create(None, self.admin, None, 'A', None, secret="67890", gcm_id="456")
with self.assertRaises(Exception):
Msg.create_incoming(unassigned_channel, "tel:250788382382", "No dice")
# test blocked contacts are skipped from inbox and are not handled by flows
contact = self.create_contact("Blocked contact", "250728739305")
contact.is_blocked = True
contact.save()
ignored_msg = Msg.create_incoming(self.channel, contact.get_urn().urn, "My msg should be archived")
ignored_msg = Msg.all_messages.get(pk=ignored_msg.pk)
self.assertEqual(ignored_msg.visibility, Msg.VISIBILITY_ARCHIVED)
self.assertEqual(ignored_msg.status, HANDLED)
# hit the inbox page, that should reset our unread count
self.login(self.admin)
self.client.get(reverse('msgs.msg_inbox'))
self.assertEqual(Msg.get_unread_msg_count(self.admin), 0)
def test_empty(self):
broadcast = Broadcast.create(self.org, self.admin, "If a broadcast is sent and nobody receives it, does it still send?", [])
broadcast.send(True)
# should have no messages but marked as sent
self.assertEquals(0, broadcast.msgs.all().count())
self.assertEquals(SENT, broadcast.status)
def test_update_contacts(self):
broadcast = Broadcast.create(self.org, self.admin, "If a broadcast is sent and nobody receives it, does it still send?", [])
# update the contacts using contact ids
broadcast.update_contacts([self.joe.id])
broadcast.refresh_from_db()
self.assertEquals(1, broadcast.recipient_count)
# send it
broadcast.send()
# assert that recipient is set
self.assertEqual(broadcast.recipients.all().count(), 1)
self.assertEqual(broadcast.recipients.all()[0], self.joe.urns.all().first())
def test_outbox(self):
self.login(self.admin)
contact = Contact.get_or_create(self.channel.org, self.admin, name=None, urns=['tel:250788382382'])
broadcast1 = Broadcast.create(self.channel.org, self.admin, 'How is it going?', [contact])
# now send the broadcast so we have messages
broadcast1.send(trigger_send=False)
(msg1,) = tuple(Msg.all_messages.filter(broadcast=broadcast1))
response = self.client.get(reverse('msgs.msg_outbox'))
self.assertContains(response, "Outbox (1)")
self.assertEqual(set(response.context_data['object_list']), {msg1})
broadcast2 = Broadcast.create(self.channel.org, self.admin, 'kLab is an awesome place for @contact.name',
[self.kevin, self.joe_and_frank])
# now send the broadcast so we have messages
broadcast2.send(trigger_send=False)
msg4, msg3, msg2 = tuple(Msg.all_messages.filter(broadcast=broadcast2))
response = self.client.get(reverse('msgs.msg_outbox'))
self.assertContains(response, "Outbox (4)")
self.assertEqual(set(response.context_data['object_list']), {msg4, msg3, msg2, msg1})
response = self.client.get("%s?search=kevin" % reverse('msgs.msg_outbox'))
self.assertEqual(set(response.context_data['object_list']), {Msg.all_messages.get(contact=self.kevin)})
response = self.client.get("%s?search=joe" % reverse('msgs.msg_outbox'))
self.assertEqual(set(response.context_data['object_list']), {Msg.all_messages.get(contact=self.joe)})
response = self.client.get("%s?search=frank" % reverse('msgs.msg_outbox'))
self.assertEqual(set(response.context_data['object_list']), {Msg.all_messages.get(contact=self.frank)})
response = self.client.get("%s?search=just" % reverse('msgs.msg_outbox'))
self.assertEqual(set(response.context_data['object_list']), set())
response = self.client.get("%s?search=is" % reverse('msgs.msg_outbox'))
self.assertEqual(set(response.context_data['object_list']), {msg4, msg3, msg2, msg1})
# make sure variables that are replaced in text messages match as well
response = self.client.get("%s?search=durant" % reverse('msgs.msg_outbox'))
self.assertEqual(set(response.context_data['object_list']), {Msg.all_messages.get(contact=self.kevin)})
def do_msg_action(self, url, msgs, action, label=None, label_add=True):
post_data = dict()
post_data['action'] = action
post_data['objects'] = [m.id for m in msgs]
post_data['label'] = label.pk if label else None
post_data['add'] = label_add
return self.client.post(url, post_data, follow=True)
def test_inbox(self):
inbox_url = reverse('msgs.msg_inbox')
joe_tel = self.joe.get_urn(TEL_SCHEME).urn
msg1 = Msg.create_incoming(self.channel, joe_tel, "message number 1")
msg2 = Msg.create_incoming(self.channel, joe_tel, "message number 2")
msg3 = Msg.create_incoming(self.channel, joe_tel, "message number 3")
Msg.create_incoming(self.channel, joe_tel, "message number 4")
msg5 = Msg.create_incoming(self.channel, joe_tel, "message number 5")
msg6 = Msg.create_incoming(self.channel, joe_tel, "message number 6")
# msg6 is still pending
msg6.status = PENDING
msg6.msg_type = None
msg6.save()
# visit inbox page as a user not in the organization
self.login(self.non_org_user)
response = self.client.get(inbox_url)
self.assertEquals(302, response.status_code)
# visit inbox page as a manager of the organization
response = self.fetch_protected(inbox_url, self.admin)
self.assertEquals(response.context['object_list'].count(), 5)
self.assertEquals(response.context['folders'][0]['url'], '/msg/inbox/')
self.assertEquals(response.context['folders'][0]['count'], 5)
self.assertEquals(response.context['actions'], ['archive', 'label'])
# visit inbox page as administrator
response = self.fetch_protected(inbox_url, self.admin)
self.assertEquals(response.context['object_list'].count(), 5)
self.assertEquals(response.context['actions'], ['archive', 'label'])
# let's add some labels
folder = Label.get_or_create_folder(self.org, self.user, "folder")
label1 = Label.get_or_create(self.org, self.user, "label1", folder)
Label.get_or_create(self.org, self.user, "label2", folder)
label3 = Label.get_or_create(self.org, self.user, "label3")
# test labeling a messages
self.do_msg_action(inbox_url, [msg1, msg2], 'label', label1)
self.assertEqual(list(Msg.all_messages.filter(labels=label1)), [msg2, msg1])
# test removing a label
self.do_msg_action(inbox_url, [msg2], 'label', label1, label_add=False)
self.assertEqual(list(Msg.all_messages.filter(labels=label1)), [msg1])
# label more messages
self.do_msg_action(inbox_url, [msg1, msg2, msg3], 'label', label3)
self.assertEqual(list(Msg.all_messages.filter(labels=label1)), [msg1])
self.assertEqual(list(Msg.all_messages.filter(labels=label3)), [msg3, msg2, msg1])
# update our label name
response = self.client.get(reverse('msgs.label_update', args=[label1.pk]))
self.assertEquals(200, response.status_code)
self.assertTrue('folder' in response.context['form'].fields)
post_data = dict(name="Foo")
response = self.client.post(reverse('msgs.label_update', args=[label1.pk]), post_data)
self.assertEquals(302, response.status_code)
label1 = Label.label_objects.get(pk=label1.pk)
self.assertEquals("Foo", label1.name)
# test deleting the label
response = self.client.get(reverse('msgs.label_delete', args=[label1.pk]))
self.assertEquals(200, response.status_code)
response = self.client.post(reverse('msgs.label_delete', args=[label1.pk]))
self.assertEquals(302, response.status_code)
self.assertFalse(Label.label_objects.filter(pk=label1.id))
# shouldn't have a remove on the update page
# test archiving a msg
self.assertEqual(set(msg1.labels.all()), {label3})
post_data = dict(action='archive', objects=msg1.pk)
response = self.client.post(inbox_url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
# now one msg is archived
self.assertEqual(list(Msg.all_messages.filter(visibility=Msg.VISIBILITY_ARCHIVED)), [msg1])
# archiving doesn't remove labels
msg1 = Msg.all_messages.get(pk=msg1.pk)
self.assertEqual(set(msg1.labels.all()), {label3})
# visit the archived messages page
archive_url = reverse('msgs.msg_archived')
# visit archived page as a user not in the organization
self.login(self.non_org_user)
response = self.client.get(archive_url)
self.assertEquals(302, response.status_code)
# visit archived page as a manager of the organization
response = self.fetch_protected(archive_url, self.admin)
self.assertEquals(response.context['object_list'].count(), 1)
self.assertEquals(response.context['actions'], ['restore', 'label', 'delete'])
# check that the inbox does not contains archived messages
# visit inbox page as a user not in the organization
self.login(self.non_org_user)
response = self.client.get(inbox_url)
self.assertEquals(302, response.status_code)
# visit inbox page as an admin of the organization
response = self.fetch_protected(inbox_url, self.admin)
self.assertEquals(response.context['object_list'].count(), 4)
self.assertEquals(response.context['actions'], ['archive', 'label'])
# test restoring an archived message back to inbox
post_data = dict(action='restore', objects=[msg1.pk])
self.client.post(inbox_url, post_data, follow=True)
self.assertEquals(Msg.all_messages.filter(visibility=Msg.VISIBILITY_ARCHIVED).count(), 0)
# messages from test contact are not included in the inbox
test_contact = Contact.get_test_contact(self.admin)
Msg.create_incoming(self.channel, test_contact.get_urn().urn, 'Bla Blah')
response = self.client.get(inbox_url)
self.assertEqual(Msg.all_messages.all().count(), 7)
self.assertEqual(response.context['object_list'].count(), 5)
# archiving a message removes it from the inbox
Msg.apply_action_archive(self.user, [msg1])
response = self.client.get(inbox_url)
self.assertEqual(response.context['object_list'].count(), 4)
# and moves it to the Archived page
response = self.client.get(archive_url)
self.assertEqual(response.context['object_list'].count(), 1)
# deleting it removes it from the Archived page
response = self.client.post(archive_url, dict(action='delete', objects=[msg1.pk]), follow=True)
self.assertEqual(response.context['object_list'].count(), 0)
# now check inbox as viewer user
response = self.fetch_protected(inbox_url, self.user)
self.assertEqual(response.context['object_list'].count(), 4)
# check that viewer user cannot label messages
post_data = dict(action='label', objects=[msg5.pk], label=label1.pk, add=True)
self.client.post(inbox_url, post_data, follow=True)
self.assertEqual(msg5.labels.all().count(), 0)
# or archive messages
self.assertEqual(Msg.all_messages.get(pk=msg5.pk).visibility, Msg.VISIBILITY_VISIBLE)
post_data = dict(action='archive', objects=[msg5.pk])
self.client.post(inbox_url, post_data, follow=True)
self.assertEqual(Msg.all_messages.get(pk=msg5.pk).visibility, Msg.VISIBILITY_VISIBLE)
# search on inbox just on the message text
response = self.client.get("%s?search=message" % inbox_url)
self.assertEqual(len(response.context_data['object_list']), 4)
response = self.client.get("%s?search=5" % inbox_url)
self.assertEqual(len(response.context_data['object_list']), 1)
# can search on contact field
response = self.client.get("%s?search=joe" % inbox_url)
self.assertEqual(len(response.context_data['object_list']), 4)
def test_flows(self):
url = reverse('msgs.msg_flow')
msg1 = Msg.create_incoming(self.channel, self.joe.get_urn().urn, "test 1", msg_type='F')
# user not in org can't access
self.login(self.non_org_user)
self.assertLoginRedirect(self.client.get(url))
# org viewer can
self.login(self.admin)
response = self.client.get(url)
self.assertEquals(set(response.context['object_list']), {msg1})
self.assertEquals(response.context['actions'], ['label'])
def test_failed(self):
failed_url = reverse('msgs.msg_failed')
msg1 = Msg.create_outgoing(self.org, self.admin, self.joe, "message number 1")
msg1.status = 'F'
msg1.save()
# create a log for it
log = ChannelLog.objects.create(channel=msg1.channel, msg=msg1, is_error=True, description="Failed")
# create broadcast and fail the only message
broadcast = Broadcast.create(self.org, self.admin, "message number 2", [self.joe])
broadcast.send(trigger_send=False)
broadcast.get_messages().update(status='F')
broadcast.update()
msg2 = broadcast.get_messages()[0]
self.assertEquals(FAILED, broadcast.status)
# message without a broadcast
msg3 = Msg.create_outgoing(self.org, self.admin, self.joe, "message number 3")
msg3.status = 'F'
msg3.save()
# visit fail page as a user not in the organization
self.login(self.non_org_user)
response = self.client.get(failed_url)
self.assertEquals(302, response.status_code)
# visit inbox page as an administrator
response = self.fetch_protected(failed_url, self.admin)
self.assertEquals(response.context['object_list'].count(), 3)
self.assertEquals(response.context['actions'], ['resend'])
self.assertContains(response, reverse('channels.channellog_read', args=[log.id]))
# make the org anonymous
with AnonymousOrg(self.org):
response = self.fetch_protected(failed_url, self.admin)
self.assertNotContains(response, reverse('channels.channellog_read', args=[log.id]))
# let's resend some messages
self.client.post(failed_url, dict(action='resend', objects=msg2.pk), follow=True)
# check for the resent message and the new one being resent
self.assertEqual(set(Msg.all_messages.filter(status=RESENT)), {msg2})
self.assertEqual(Msg.all_messages.filter(status=PENDING).count(), 1)
# make sure there was a new outgoing message created that got attached to our broadcast
self.assertEquals(1, broadcast.get_messages().count())
resent_msg = broadcast.get_messages()[0]
self.assertNotEquals(msg2, resent_msg)
self.assertEquals(msg2.text, resent_msg.text)
self.assertEquals(msg2.contact, resent_msg.contact)
self.assertEquals(PENDING, resent_msg.status)
@patch('temba.utils.email.send_temba_email')
def test_message_export(self, mock_send_temba_email):
self.clear_storage()
self.login(self.admin)
# create some messages...
joe_urn = self.joe.get_urn(TEL_SCHEME).urn
msg1 = Msg.create_incoming(self.channel, joe_urn, "hello 1")
msg2 = Msg.create_incoming(self.channel, joe_urn, "hello 2")
msg3 = Msg.create_incoming(self.channel, joe_urn, "hello 3")
msg4 = Msg.create_incoming(None, None, "hello 4", org=self.org, contact=self.joe) # like a surveyor message
# inbound message with media attached, such as an ivr recording
msg5 = Msg.create_incoming(self.channel, joe_urn, "Media message", media='audio:http://rapidpro.io/audio/sound.mp3')
# outgoing message
msg6 = Msg.create_outgoing(self.org, self.admin, self.joe, "Hey out 6")
msg7 = Msg.create_outgoing(self.org, self.admin, self.joe, "Hey out 7")
msg8 = Msg.create_outgoing(self.org, self.admin, self.joe, "Hey out 8")
msg9 = Msg.create_outgoing(self.org, self.admin, self.joe, "Hey out 9")
# mark msg as sent
msg6.status = SENT
msg6.save()
# mark msg as delivered
msg7.status = DELIVERED
msg7.save()
# mark msg as errored
msg8.status = ERRORED
msg8.save()
# mark message as failed
msg9.status = FAILED
msg9.save()
self.assertTrue(msg5.is_media_type_audio())
self.assertEqual('http://rapidpro.io/audio/sound.mp3', msg5.get_media_path())
# label first message
label = Label.get_or_create(self.org, self.user, "label1")
label.toggle_label([msg1], add=True)
# archive last message
msg3.visibility = Msg.VISIBILITY_ARCHIVED
msg3.save()
# create a dummy export task so that we won't be able to export
blocking_export = ExportMessagesTask.objects.create(org=self.org, host='test',
created_by=self.admin, modified_by=self.admin)
response = self.client.post(reverse('msgs.msg_export'), follow=True)
self.assertContains(response, "already an export in progress")
# perform the export manually, assert how many queries
self.assertNumQueries(7, lambda: blocking_export.do_export())
self.client.post(reverse('msgs.msg_export'))
task = ExportMessagesTask.objects.all().order_by('-id').first()
filename = "%s/test_orgs/%d/message_exports/%s.xls" % (settings.MEDIA_ROOT, self.org.pk, task.uuid)
workbook = open_workbook(filename, 'rb')
sheet = workbook.sheets()[0]
self.assertEquals(sheet.nrows, 9) # msg3 not included as it's archived
self.assertExcelRow(sheet, 0, ["Date", "Contact", "Contact Type", "Name", "Contact UUID", "Direction",
"Text", "Labels", "Status"])
self.assertExcelRow(sheet, 1,
[msg9.created_on, "123", "tel", "Joe Blow", msg9.contact.uuid, "Outgoing",
"Hey out 9", "", "Failed Sending"], pytz.UTC)
self.assertExcelRow(sheet, 2,
[msg8.created_on, "123", "tel", "Joe Blow", msg8.contact.uuid, "Outgoing",
"Hey out 8", "", "Error Sending"], pytz.UTC)
self.assertExcelRow(sheet, 3,
[msg7.created_on, "123", "tel", "Joe Blow", msg7.contact.uuid, "Outgoing",
"Hey out 7", "", "Delivered"], pytz.UTC)
self.assertExcelRow(sheet, 4,
[msg6.created_on, "123", "tel", "Joe Blow", msg6.contact.uuid, "Outgoing",
"Hey out 6", "", "Sent"], pytz.UTC)
self.assertExcelRow(sheet, 5, [msg5.created_on, "123", "tel", "Joe Blow", msg5.contact.uuid, "Incoming",
"Media message", "", "Handled"], pytz.UTC)
self.assertExcelRow(sheet, 6, [msg4.created_on, "", "", "Joe Blow", msg4.contact.uuid, "Incoming",
"hello 4", "", "Handled"], pytz.UTC)
self.assertExcelRow(sheet, 7, [msg2.created_on, "123", "tel", "Joe Blow", msg2.contact.uuid, "Incoming",
"hello 2", "", "Handled"], pytz.UTC)
self.assertExcelRow(sheet, 8, [msg1.created_on, "123", "tel", "Joe Blow", msg1.contact.uuid, "Incoming",
"hello 1", "label1", "Handled"], pytz.UTC)
email_args = mock_send_temba_email.call_args[0] # all positional args
self.assertEqual(email_args[0], "Your messages export is ready")
self.assertIn('https://app.rapidpro.io/assets/download/message_export/%d/' % task.pk, email_args[1])
self.assertNotIn('{{', email_args[1])
self.assertIn('https://app.rapidpro.io/assets/download/message_export/%d/' % task.pk, email_args[2])
self.assertNotIn('{{', email_args[2])
ExportMessagesTask.objects.all().delete()
# visit the filter page
response = self.client.get(reverse('msgs.msg_filter', args=[label.pk]))
self.assertContains(response, "Export Data")
self.client.post("%s?label=%s" % (reverse('msgs.msg_export'), label.pk))
task = ExportMessagesTask.objects.get()
filename = "%s/test_orgs/%d/message_exports/%s.xls" % (settings.MEDIA_ROOT, self.org.pk, task.uuid)
workbook = open_workbook(filename, 'rb')
sheet = workbook.sheets()[0]
self.assertEquals(sheet.nrows, 2) # only header and msg1
self.assertExcelRow(sheet, 1, [msg1.created_on, "123", "tel", "Joe Blow", msg1.contact.uuid, "Incoming", "hello 1", "label1", "Handled"], pytz.UTC)
ExportMessagesTask.objects.all().delete()
# test as anon org to check that URNs don't end up in exports
with AnonymousOrg(self.org):
self.client.post(reverse('msgs.msg_export'))
task = ExportMessagesTask.objects.get()
filename = "%s/test_orgs/%d/message_exports/%s.xls" % (settings.MEDIA_ROOT, self.org.pk, task.uuid)
workbook = open_workbook(filename, 'rb')
sheet = workbook.sheets()[0]
self.assertEquals(sheet.nrows, 9)
self.assertExcelRow(sheet, 1, [msg9.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg9.contact.uuid,
"Outgoing", "Hey out 9", "", "Failed Sending"], pytz.UTC)
self.assertExcelRow(sheet, 2, [msg8.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg8.contact.uuid,
"Outgoing", "Hey out 8", "", "Error Sending"], pytz.UTC)
self.assertExcelRow(sheet, 3, [msg7.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg7.contact.uuid,
"Outgoing", "Hey out 7", "", "Delivered"], pytz.UTC)
self.assertExcelRow(sheet, 4, [msg6.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg6.contact.uuid,
"Outgoing", "Hey out 6", "", "Sent"], pytz.UTC)
self.assertExcelRow(sheet, 5, [msg5.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg5.contact.uuid,
"Incoming", "Media message", "", "Handled"], pytz.UTC)
self.assertExcelRow(sheet, 6, [msg4.created_on, "%010d" % self.joe.pk, "", "Joe Blow", msg4.contact.uuid,
"Incoming", "hello 4", "", "Handled"], pytz.UTC)
self.assertExcelRow(sheet, 7, [msg2.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg2.contact.uuid,
"Incoming", "hello 2", "", "Handled"], pytz.UTC)
self.assertExcelRow(sheet, 8, [msg1.created_on, "%010d" % self.joe.pk, "tel", "Joe Blow", msg1.contact.uuid,
"Incoming", "hello 1", "label1", "Handled"], pytz.UTC)
def assertHasClass(self, text, clazz):
self.assertTrue(text.find(clazz) >= 0)
def test_templatetags(self):
from .templatetags.sms import as_icon
msg = Msg.create_outgoing(self.org, self.admin, "tel:250788382382", "How is it going?")
now = timezone.now()
two_hours_ago = now - timedelta(hours=2)
self.assertHasClass(as_icon(msg), 'icon-bubble-dots-2 green')
msg.created_on = two_hours_ago
self.assertHasClass(as_icon(msg), 'icon-bubble-dots-2 green')
msg.status = 'S'
self.assertHasClass(as_icon(msg), 'icon-bubble-right green')
msg.status = 'D'
self.assertHasClass(as_icon(msg), 'icon-bubble-check green')
msg.status = 'E'
self.assertHasClass(as_icon(msg), 'icon-bubble-notification red')
msg.direction = 'I'
self.assertHasClass(as_icon(msg), 'icon-bubble-user primary')
msg.msg_type = 'V'
self.assertHasClass(as_icon(msg), 'icon-phone')
# default case is pending sent
self.assertHasClass(as_icon(None), 'icon-bubble-dots-2 green')
in_call = ChannelEvent.create(self.channel, self.joe.get_urn(TEL_SCHEME).urn,
ChannelEvent.TYPE_CALL_IN, timezone.now(), 5)
self.assertHasClass(as_icon(in_call), 'icon-call-incoming green')
in_miss = ChannelEvent.create(self.channel, self.joe.get_urn(TEL_SCHEME).urn,
ChannelEvent.TYPE_CALL_IN_MISSED, timezone.now(), 5)
self.assertHasClass(as_icon(in_miss), 'icon-call-incoming red')
out_call = ChannelEvent.create(self.channel, self.joe.get_urn(TEL_SCHEME).urn,
ChannelEvent.TYPE_CALL_OUT, timezone.now(), 5)
self.assertHasClass(as_icon(out_call), 'icon-call-outgoing green')
out_miss = ChannelEvent.create(self.channel, self.joe.get_urn(TEL_SCHEME).urn,
ChannelEvent.TYPE_CALL_OUT_MISSED, timezone.now(), 5)
self.assertHasClass(as_icon(out_miss), 'icon-call-outgoing red')
class MsgCRUDLTest(TembaTest):
def setUp(self):
super(MsgCRUDLTest, self).setUp()
self.joe = self.create_contact("Joe Blow", "+250788000001")
self.frank = self.create_contact("Frank Blow", "250788000002")
self.billy = self.create_contact("Billy Bob", twitter="billy_bob")
def test_filter(self):
# create some folders and labels
folder = Label.get_or_create_folder(self.org, self.user, "folder")
label1 = Label.get_or_create(self.org, self.user, "label1", folder)
label2 = Label.get_or_create(self.org, self.user, "label2", folder)
label3 = Label.get_or_create(self.org, self.user, "label3")
# create some messages
msg1 = self.create_msg(direction='I', msg_type='I', contact=self.joe, text="test1")
msg2 = self.create_msg(direction='I', msg_type='I', contact=self.frank, text="test2")
msg3 = self.create_msg(direction='I', msg_type='I', contact=self.billy, text="test3")
msg4 = self.create_msg(direction='I', msg_type='I', contact=self.joe, text="test4", visibility=Msg.VISIBILITY_ARCHIVED)
msg5 = self.create_msg(direction='I', msg_type='I', contact=self.joe, text="test5", visibility=Msg.VISIBILITY_DELETED)
msg6 = self.create_msg(direction='I', msg_type='F', contact=self.joe, text="flow test")
# apply the labels
label1.toggle_label([msg1, msg2], add=True)
label2.toggle_label([msg2, msg3], add=True)
label3.toggle_label([msg1, msg2, msg3, msg4, msg5, msg6], add=True)
# can't visit a filter page as a non-org user
self.login(self.non_org_user)
response = self.client.get(reverse('msgs.msg_filter', args=[label3.pk]))
self.assertLoginRedirect(response)
# can as org viewer user
self.login(self.user)
response = self.client.get(reverse('msgs.msg_filter', args=[label3.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['actions'], ['unlabel', 'label'])
self.assertNotContains(response, reverse('msgs.label_update', args=[label3.pk])) # can't update label
self.assertNotContains(response, reverse('msgs.label_delete', args=[label3.pk])) # can't delete label
# check that test and non-visible messages are excluded, and messages and ordered newest to oldest
self.assertEqual(list(response.context['object_list']), [msg6, msg3, msg2, msg1])
# check viewing a folder
response = self.client.get(reverse('msgs.msg_filter', args=[folder.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['actions'], ['unlabel', 'label'])
self.assertNotContains(response, reverse('msgs.label_update', args=[folder.pk])) # can't update folder
self.assertNotContains(response, reverse('msgs.label_delete', args=[folder.pk])) # can't delete folder
# messages from contained labels are rolled up without duplicates
self.assertEqual(list(response.context['object_list']), [msg3, msg2, msg1])
# search on folder by message text
response = self.client.get("%s?search=test2" % reverse('msgs.msg_filter', args=[folder.pk]))
self.assertEqual(set(response.context_data['object_list']), {msg2})
# search on label by contact name
response = self.client.get("%s?search=joe" % reverse('msgs.msg_filter', args=[label3.pk]))
self.assertEqual(set(response.context_data['object_list']), {msg1, msg6})
# check admin users see edit and delete options for labels and folders
self.login(self.admin)
response = self.client.get(reverse('msgs.msg_filter', args=[folder.pk]))
self.assertContains(response, reverse('msgs.label_update', args=[folder.pk]))
self.assertContains(response, reverse('msgs.label_delete', args=[folder.pk]))
response = self.client.get(reverse('msgs.msg_filter', args=[label1.pk]))
self.assertContains(response, reverse('msgs.label_update', args=[label1.pk]))
self.assertContains(response, reverse('msgs.label_delete', args=[label1.pk]))
class BroadcastTest(TembaTest):
def setUp(self):
super(BroadcastTest, self).setUp()
self.joe = self.create_contact("Joe Blow", "123")
self.frank = self.create_contact("Frank Blow", "321")
self.just_joe = self.create_group("Just Joe", [self.joe])
self.joe_and_frank = self.create_group("Joe and Frank", [self.joe, self.frank])
self.kevin = self.create_contact(name="Kevin Durant", number="987")
self.lucy = self.create_contact(name="Lucy M", twitter="lucy")
# a Twitter channel
self.twitter = Channel.create(self.org, self.user, None, 'TT')
def test_broadcast_batch(self):
broadcast = Broadcast.create(self.org, self.user, "Like a tweet", [self.joe_and_frank, self.kevin])
self.assertEquals(3, broadcast.recipient_count)
# change our broadcast size to 2
import temba.msgs.models as msgs_models
orig_batch_size = msgs_models.BATCH_SIZE
try:
# downsize our batches and send it (this tests other code paths)
msgs_models.BATCH_SIZE = 2
broadcast.send()
self.assertEquals(broadcast.get_message_count(), 3)
self.assertEqual(broadcast.recipients.all().count(), 3)
finally:
msgs_models.BATCH_SIZE = orig_batch_size
def test_broadcast_model(self):
def assertBroadcastStatus(sms, new_sms_status, broadcast_status):
sms.status = new_sms_status
sms.save()
sms.broadcast.update()
self.assertEquals(sms.broadcast.status, broadcast_status)
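# Added note: Broadcast.update() rolls its messages' statuses up into a single
# broadcast status; the assertions below exercise the "more than half errored"
# and "more than half failed" thresholds described in the inline comments.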
broadcast = Broadcast.create(self.org, self.user, "Like a tweet", [self.joe_and_frank, self.kevin, self.lucy])
self.assertEquals('I', broadcast.status)
self.assertEquals(4, broadcast.recipient_count)
# no recipients created yet, done when we send
self.assertEquals(0, broadcast.recipients.all().count())
broadcast.send(trigger_send=False)
self.assertEquals('Q', broadcast.status)
self.assertEquals(broadcast.get_message_count(), 4)
self.assertEqual(broadcast.recipients.all().count(), 4)
bcast_commands = broadcast.get_sync_commands(self.channel)
self.assertEquals(1, len(bcast_commands))
self.assertEquals(3, len(bcast_commands[0]['to']))
# set our single message as sent
broadcast.get_messages().update(status='S')
self.assertEquals(0, len(broadcast.get_sync_commands(self.channel)))
# back to Q
broadcast.get_messages().update(status='Q')
# after calling send, all messages are queued
self.assertEquals(broadcast.status, 'Q')
# test errored broadcast logic now that all sms status are queued
msgs = broadcast.get_messages()
assertBroadcastStatus(msgs[0], 'E', 'Q')
assertBroadcastStatus(msgs[1], 'E', 'Q')
assertBroadcastStatus(msgs[2], 'E', 'E') # now more than half are errored
assertBroadcastStatus(msgs[3], 'E', 'E')
# test failed broadcast logic now that all sms status are errored
assertBroadcastStatus(msgs[0], 'F', 'E')
assertBroadcastStatus(msgs[1], 'F', 'E')
assertBroadcastStatus(msgs[2], 'F', 'F') # now more than half are failed
assertBroadcastStatus(msgs[3], 'F', 'F')
# first make sure there are no failed messages
for msg in broadcast.get_messages():
msg.status = 'S'
msg.save()
# test queued broadcast logic
assertBroadcastStatus(broadcast.get_messages()[0], 'Q', 'Q')
# test sent broadcast logic
broadcast.get_messages().update(status='D')
assertBroadcastStatus(broadcast.get_messages()[0], 'S', 'S')
# test delivered broadcast logic
assertBroadcastStatus(broadcast.get_messages()[0], 'D', 'D')
self.assertEquals("Temba (%d)" % broadcast.id, str(broadcast))
def test_send(self):
# remove all channels first
for channel in Channel.objects.all():
channel.release(notify_mage=False)
send_url = reverse('msgs.broadcast_send')
self.login(self.admin)
# try with no channel
post_data = dict(text="some text", omnibox="c-%s" % self.joe.uuid)
response = self.client.post(send_url, post_data, follow=True)
self.assertContains(response, "You must add a phone number before sending messages", status_code=400)
# test when we are simulating
response = self.client.get(send_url + "?simulation=true")
self.assertEquals(['omnibox', 'text', 'schedule'], response.context['fields'])
test_contact = Contact.get_test_contact(self.admin)
post_data = dict(text="you simulator display this", omnibox="c-%s,c-%s,c-%s" % (self.joe.uuid, self.frank.uuid, test_contact.uuid))
self.client.post(send_url + "?simulation=true", post_data)
self.assertEquals(Broadcast.objects.all().count(), 1)
self.assertEquals(Broadcast.objects.all()[0].groups.all().count(), 0)
self.assertEquals(Broadcast.objects.all()[0].contacts.all().count(), 1)
self.assertEquals(Broadcast.objects.all()[0].contacts.all()[0], test_contact)
# delete this broadcast to keep future test right
Broadcast.objects.all()[0].delete()
# test when we have many channels
Channel.create(self.org, self.user, None, "A", secret="123456", gcm_id="1234")
Channel.create(self.org, self.user, None, "A", secret="12345", gcm_id="123")
Channel.create(self.org, self.user, None, "TT")
response = self.client.get(send_url)
self.assertEquals(['omnibox', 'text', 'schedule'], response.context['fields'])
post_data = dict(text="message #1", omnibox="g-%s,c-%s,c-%s" % (self.joe_and_frank.uuid, self.joe.uuid, self.lucy.uuid))
self.client.post(send_url, post_data, follow=True)
broadcast = Broadcast.objects.get(text="message #1")
self.assertEquals(1, broadcast.groups.count())
self.assertEquals(2, broadcast.contacts.count())
self.assertIsNotNone(Msg.all_messages.filter(contact=self.joe, text="message #1"))
self.assertIsNotNone(Msg.all_messages.filter(contact=self.frank, text="message #1"))
self.assertIsNotNone(Msg.all_messages.filter(contact=self.lucy, text="message #1"))
# test with one channel now
for channel in Channel.objects.all():
channel.release(notify_mage=False)
Channel.create(self.org, self.user, None, 'A', None, secret="12345", gcm_id="123")
response = self.client.get(send_url)
self.assertEquals(['omnibox', 'text', 'schedule'], response.context['fields'])
post_data = dict(text="message #2", omnibox='g-%s,c-%s' % (self.joe_and_frank.uuid, self.kevin.uuid))
self.client.post(send_url, post_data, follow=True)
broadcast = Broadcast.objects.get(text="message #2")
self.assertEquals(broadcast.groups.count(), 1)
self.assertEquals(broadcast.contacts.count(), 1)
# directly on user page
post_data = dict(text="contact send", from_contact=True, omnibox="c-%s" % self.kevin.uuid)
response = self.client.post(send_url, post_data)
self.assertRedirect(response, reverse('contacts.contact_read', args=[self.kevin.uuid]))
self.assertEquals(Broadcast.objects.all().count(), 3)
# test sending to an arbitrary user
post_data = dict(text="message content", omnibox='n-2065551212')
self.client.post(send_url, post_data, follow=True)
self.assertEquals(Broadcast.objects.all().count(), 4)
self.assertEquals(1, Contact.objects.filter(urns__path='2065551212').count())
# test missing senders
post_data = dict(text="message content")
response = self.client.post(send_url, post_data, follow=True)
self.assertIn("At least one recipient is required", response.content)
# Test AJAX sender
post_data = dict(text="message content", omnibox='', _format="json")
response = self.client.post(send_url, post_data, follow=True)
self.assertIn("At least one recipient is required", response.content)
self.assertEquals('application/json', response._headers.get('content-type')[1])
post_data = dict(text="this is a test message", omnibox="c-%s" % self.kevin.uuid, _format="json")
response = self.client.post(send_url, post_data, follow=True)
self.assertIn("success", response.content)
# send using our omnibox
post_data = dict(text="this is a test message", omnibox="c-%s,g-%s,n-911" % (self.kevin.pk, self.joe_and_frank.pk), _format="json")
response = self.client.post(send_url, post_data, follow=True)
self.assertIn("success", response.content)
def test_unreachable(self):
no_urns = Contact.get_or_create(self.org, self.admin, name="Ben Haggerty", urns=[])
tel_contact = self.create_contact("Ryan Lewis", number="+12067771234")
twitter_contact = self.create_contact("Lucy", twitter='lucy')
recipients = [no_urns, tel_contact, twitter_contact]
# send a broadcast to all (org has a tel and a twitter channel)
broadcast = Broadcast.create(self.org, self.admin, "Want to go thrift shopping?", recipients)
broadcast.send(True)
# should have only messages for Ryan and Lucy
msgs = broadcast.msgs.all()
self.assertEqual(len(msgs), 2)
self.assertEqual(sorted([m.contact.name for m in msgs]), ["Lucy", "Ryan Lewis"])
# send another broadcast to all and force use of the twitter channel
broadcast = Broadcast.create(self.org, self.admin, "Want to go thrift shopping?", recipients, channel=self.twitter)
broadcast.send(True)
# should have only one message created to Lucy
msgs = broadcast.msgs.all()
self.assertEqual(len(msgs), 1)
self.assertEqual(msgs[0].contact, twitter_contact)
# remove twitter relayer
self.twitter.release(trigger_sync=False, notify_mage=False)
# send another broadcast to all
broadcast = Broadcast.create(self.org, self.admin, "Want to go thrift shopping?", recipients)
broadcast.send(True)
# should have only one message created to Ryan
msgs = broadcast.msgs.all()
self.assertEqual(len(msgs), 1)
self.assertEqual(msgs[0].contact, tel_contact)
def test_message_parts(self):
contact = self.create_contact("Matt", "+12067778811")
sms = self.create_msg(contact=contact, text="Text", direction=OUTGOING)
self.assertEquals(["Text"], Msg.get_text_parts(sms.text))
sms.text = ""
self.assertEquals([""], Msg.get_text_parts(sms.text))
# 160 chars
sms.text = "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"
self.assertEquals(1, len(Msg.get_text_parts(sms.text)))
# 161 characters with space
sms.text = "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 1234567890"
parts = Msg.get_text_parts(sms.text)
self.assertEquals(2, len(parts))
self.assertEquals(150, len(parts[0]))
self.assertEquals(10, len(parts[1]))
# 161 characters without space
sms.text = "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901"
parts = Msg.get_text_parts(sms.text)
self.assertEquals(2, len(parts))
self.assertEquals(160, len(parts[0]))
self.assertEquals(1, len(parts[1]))
# 160 characters with max length 40
sms.text = "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"
parts = Msg.get_text_parts(sms.text, max_length=40)
self.assertEquals(4, len(parts))
self.assertEquals(40, len(parts[0]))
self.assertEquals(40, len(parts[1]))
self.assertEquals(40, len(parts[2]))
self.assertEquals(40, len(parts[3]))
def test_substitute_variables(self):
ContactField.get_or_create(self.org, self.admin, 'goats', "Goats", False, Value.TYPE_DECIMAL)
self.joe.set_field(self.user, 'goats', "3 ")
ContactField.get_or_create(self.org, self.admin, 'dob', "Date of birth", False, Value.TYPE_DATETIME)
self.joe.set_field(self.user, 'dob', "28/5/1981")
self.assertEquals(("Hello World", []), Msg.substitute_variables("Hello World", self.joe, dict()))
self.assertEquals(("Hello World Joe", []), Msg.substitute_variables("Hello World @contact.first_name", self.joe, dict()))
self.assertEquals(("Hello World Joe Blow", []), Msg.substitute_variables("Hello World @contact", self.joe, dict()))
self.assertEquals(("Hello World: Well", []), Msg.substitute_variables("Hello World: @flow.water_source", self.joe, dict(flow=dict(water_source="Well"))))
self.assertEquals(("Hello World: Well Boil: @flow.boil", ["Undefined variable: flow.boil"]), Msg.substitute_variables("Hello World: @flow.water_source Boil: @flow.boil", self.joe, dict(flow=dict(water_source="Well"))))
self.assertEquals(("Hello joe", []), Msg.substitute_variables("Hello @(LOWER(contact.first_name))", self.joe, dict()))
self.assertEquals(("Hello Joe", []), Msg.substitute_variables("Hello @(PROPER(LOWER(contact.first_name)))", self.joe, dict()))
self.assertEquals(("Hello Joe", []), Msg.substitute_variables("Hello @(first_word(contact))", self.joe, dict()))
self.assertEquals(("Hello Blow", []), Msg.substitute_variables("Hello @(Proper(remove_first_word(contact)))", self.joe, dict()))
self.assertEquals(("Hello Joe Blow", []), Msg.substitute_variables("Hello @(PROPER(contact))", self.joe, dict()))
self.assertEquals(("Hello JOE", []), Msg.substitute_variables("Hello @(UPPER(contact.first_name))", self.joe, dict()))
self.assertEquals(("Hello 3", []), Msg.substitute_variables("Hello @(contact.goats)", self.joe, dict()))
self.assertEquals(("Email is: foo@bar.com", []),
Msg.substitute_variables("Email is: @(remove_first_word(flow.sms))", self.joe, dict(flow=dict(sms="Join foo@bar.com"))))
self.assertEquals(("Email is: foo@@bar.com", []),
Msg.substitute_variables("Email is: @(remove_first_word(flow.sms))", self.joe, dict(flow=dict(sms="Join foo@@bar.com"))))
# check date variables
text, errors = Msg.substitute_variables("Today is @date.today", self.joe, dict())
self.assertEquals(errors, [])
self.assertRegexpMatches(text, "Today is \d\d-\d\d-\d\d\d\d")
text, errors = Msg.substitute_variables("Today is @date.now", self.joe, dict())
self.assertEquals(errors, [])
self.assertRegexpMatches(text, "Today is \d\d-\d\d-\d\d\d\d \d\d:\d\d")
text, errors = Msg.substitute_variables("Your DOB is @contact.dob", self.joe, dict())
self.assertEquals(errors, [])
# TODO clearly this is not ideal but unavoidable for now as we always add current time to parsed dates
self.assertRegexpMatches(text, "Your DOB is 28-05-1981 \d\d:\d\d")
# unicode tests
self.joe.name = u"شاملیدل عمومی"
self.joe.save()
self.assertEquals((u"شاملیدل", []), Msg.substitute_variables("@(first_word(contact))", self.joe, dict()))
self.assertEquals((u"عمومی", []), Msg.substitute_variables("@(proper(remove_first_word(contact)))", self.joe, dict()))
# credit card
self.joe.name = '1234567890123456'
self.joe.save()
self.assertEquals(("1 2 3 4 , 5 6 7 8 , 9 0 1 2 , 3 4 5 6", []), Msg.substitute_variables("@(read_digits(contact))", self.joe, dict()))
# phone number
self.joe.name = '123456789012'
self.joe.save()
self.assertEquals(("1 2 3 , 4 5 6 , 7 8 9 , 0 1 2", []), Msg.substitute_variables("@(read_digits(contact))", self.joe, dict()))
# triplets
self.joe.name = '123456'
self.joe.save()
self.assertEquals(("1 2 3 , 4 5 6", []), Msg.substitute_variables("@(read_digits(contact))", self.joe, dict()))
# soc security
self.joe.name = '123456789'
self.joe.save()
self.assertEquals(("1 2 3 , 4 5 , 6 7 8 9", []), Msg.substitute_variables("@(read_digits(contact))", self.joe, dict()))
# regular number, street address, etc
self.joe.name = '12345'
self.joe.save()
self.assertEquals(("1,2,3,4,5", []), Msg.substitute_variables("@(read_digits(contact))", self.joe, dict()))
# regular number, street address, etc
self.joe.name = '123'
self.joe.save()
self.assertEquals(("1,2,3", []), Msg.substitute_variables("@(read_digits(contact))", self.joe, dict()))
def test_message_context(self):
ContactField.get_or_create(self.org, self.admin, "superhero_name", "Superhero Name")
self.joe.send("keyword remainder-remainder", self.admin)
self.joe.set_field(self.user, 'superhero_name', 'batman')
self.joe.save()
msg = Msg.all_messages.get()
context = msg.build_message_context()
self.assertEqual(context['__default__'], "keyword remainder-remainder")
self.assertEqual(context['value'], "keyword remainder-remainder")
self.assertEqual(context['contact']['__default__'], "Joe Blow")
self.assertEqual(context['contact']['superhero_name'], "batman")
# time should be in org format and timezone
msg_time = datetime_to_str(msg.created_on, '%d-%m-%Y %H:%M', tz=pytz.timezone(self.org.timezone))
self.assertEqual(msg_time, context['time'])
def test_variables_substitution(self):
ContactField.get_or_create(self.org, self.admin, "sector", "sector")
ContactField.get_or_create(self.org, self.admin, "team", "team")
self.joe.set_field(self.user, "sector", "Kacyiru")
self.frank.set_field(self.user, "sector", "Remera")
self.kevin.set_field(self.user, "sector", "Kanombe")
self.joe.set_field(self.user, "team", "Amavubi")
self.kevin.set_field(self.user, "team", "Junior")
self.broadcast = Broadcast.create(self.org, self.user,
"Hi @contact.name, You live in @contact.sector and your team is @contact.team.",
[self.joe_and_frank, self.kevin])
self.broadcast.send(trigger_send=False)
# there should be three sync commands, one per contact, since each gets different substituted text
broadcast_groups = self.broadcast.get_sync_commands(self.channel)
self.assertEquals(3, len(broadcast_groups))
# a message is still created for Frank even though he is missing the team field; it is substituted as empty
self.assertEquals(Msg.all_messages.all().count(), 3)
sms_to_joe = Msg.all_messages.get(contact=self.joe)
sms_to_frank = Msg.all_messages.get(contact=self.frank)
sms_to_kevin = Msg.all_messages.get(contact=self.kevin)
self.assertEquals(sms_to_joe.text, 'Hi Joe Blow, You live in Kacyiru and your team is Amavubi.')
self.assertFalse(sms_to_joe.has_template_error)
self.assertEquals(sms_to_frank.text, 'Hi Frank Blow, You live in Remera and your team is .')
self.assertFalse(sms_to_frank.has_template_error)
self.assertEquals(sms_to_kevin.text, 'Hi Kevin Durant, You live in Kanombe and your team is Junior.')
self.assertFalse(sms_to_kevin.has_template_error)
def test_purge(self):
broadcast = Broadcast.create(self.org, self.user, "I think I'm going to purge",
[self.joe_and_frank, self.kevin, self.lucy])
broadcast.send(trigger_send=False)
broadcast.created_on = timezone.now() - timedelta(days=100)
broadcast.save()
purge_broadcasts_task()
broadcast.refresh_from_db()
self.assertTrue(broadcast.purged)
self.assertEquals(4, len(broadcast.msgs.filter(purged=True)))
class BroadcastCRUDLTest(TembaTest):
def setUp(self):
super(BroadcastCRUDLTest, self).setUp()
self.joe = Contact.get_or_create(self.org, self.user, name="Joe Blow", urns=["tel:123"])
self.frank = Contact.get_or_create(self.org, self.user, name="Frank Blow", urns=["tel:1234"])
def test_send(self):
url = reverse('msgs.broadcast_send')
# can't send if you're not logged in
response = self.client.post(url, dict(text="Test", omnibox="c-%s" % self.joe.uuid))
self.assertLoginRedirect(response)
# or just a viewer user
self.login(self.user)
response = self.client.post(url, dict(text="Test", omnibox="c-%s" % self.joe.uuid))
self.assertLoginRedirect(response)
# but editors can
self.login(self.editor)
just_joe = self.create_group("Just Joe")
just_joe.contacts.add(self.joe)
post_data = dict(omnibox="g-%s,c-%s,n-0780000001" % (just_joe.uuid, self.frank.uuid),
text="Hey Joe, where you goin' with that gun in your hand?")
response = self.client.post(url + '?_format=json', post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['status'], 'success')
# raw number means a new contact created
new_urn = ContactURN.objects.get(path='+250780000001')
Contact.objects.get(urns=new_urn)
broadcast = Broadcast.objects.get()
self.assertEqual(broadcast.text, "Hey Joe, where you goin' with that gun in your hand?")
self.assertEqual(set(broadcast.groups.all()), {just_joe})
self.assertEqual(set(broadcast.contacts.all()), {self.frank})
self.assertEqual(set(broadcast.urns.all()), {new_urn})
def test_update(self):
self.login(self.editor)
self.client.post(reverse('msgs.broadcast_send'), dict(omnibox="c-%s" % self.joe.uuid,
text="Lunch reminder", schedule=True))
broadcast = Broadcast.objects.get()
url = reverse('msgs.broadcast_update', args=[broadcast.pk])
response = self.client.get(url)
self.assertEqual(response.context['form'].fields.keys(), ['message', 'omnibox', 'loc'])
response = self.client.post(url, dict(message="Dinner reminder", omnibox="c-%s" % self.frank.uuid))
self.assertEqual(response.status_code, 302)
broadcast = Broadcast.objects.get()
self.assertEqual(broadcast.text, "Dinner reminder")
self.assertEqual(set(broadcast.contacts.all()), {self.frank})
def test_schedule_list(self):
url = reverse('msgs.broadcast_schedule_list')
# can't view if you're not logged in
response = self.client.get(url)
self.assertLoginRedirect(response)
self.login(self.editor)
# send some messages - one immediately, one scheduled
self.client.post(reverse('msgs.broadcast_send'), dict(omnibox="c-%s" % self.joe.uuid,
text="See you later"))
self.client.post(reverse('msgs.broadcast_send'), dict(omnibox="c-%s" % self.joe.uuid,
text="Lunch reminder", schedule=True))
scheduled = Broadcast.objects.exclude(schedule=None).first()
response = self.client.get(url)
self.assertEqual(set(response.context['object_list']), {scheduled})
def test_schedule_read(self):
self.login(self.editor)
self.client.post(reverse('msgs.broadcast_send'), dict(omnibox="c-%s" % self.joe.uuid,
text="Lunch reminder", schedule=True))
broadcast = Broadcast.objects.get()
# view with empty Send History
response = self.client.get(reverse('msgs.broadcast_schedule_read', args=[broadcast.pk]))
self.assertEqual(response.context['object'], broadcast)
self.assertEqual(response.context['object_list'].count(), 0)
broadcast.fire()
# view again with 1 item in Send History
response = self.client.get(reverse('msgs.broadcast_schedule_read', args=[broadcast.pk]))
self.assertEqual(response.context['object'], broadcast)
self.assertEqual(response.context['object_list'].count(), 1)
class LabelTest(TembaTest):
def setUp(self):
super(LabelTest, self).setUp()
self.joe = self.create_contact("Joe Blow", number="073835001")
self.frank = self.create_contact("Frank", number="073835002")
def test_get_or_create(self):
label1 = Label.get_or_create(self.org, self.user, "Spam")
self.assertEqual(label1.name, "Spam")
self.assertIsNone(label1.folder)
followup = Label.get_or_create_folder(self.org, self.user, "Follow up")
label2 = Label.get_or_create(self.org, self.user, "Complaints", followup)
self.assertEqual(label2.name, "Complaints")
self.assertEqual(label2.folder, followup)
# don't allow invalid name
self.assertRaises(ValueError, Label.get_or_create, self.org, self.user, "+Important")
def test_is_valid_name(self):
self.assertTrue(Label.is_valid_name('x'))
self.assertTrue(Label.is_valid_name('1'))
self.assertTrue(Label.is_valid_name('x' * 64))
self.assertFalse(Label.is_valid_name(' '))
self.assertFalse(Label.is_valid_name(' x'))
self.assertFalse(Label.is_valid_name('x '))
self.assertFalse(Label.is_valid_name('+x'))
self.assertFalse(Label.is_valid_name('@x'))
self.assertFalse(Label.is_valid_name('x' * 65))
def test_toggle_label(self):
label = Label.get_or_create(self.org, self.user, "Spam")
msg1 = self.create_msg(text="Message 1", contact=self.joe, direction='I')
msg2 = self.create_msg(text="Message 2", contact=self.joe, direction='I')
msg3 = self.create_msg(text="Message 3", contact=self.joe, direction='I')
self.assertEqual(label.get_visible_count(), 0)
label.toggle_label([msg1, msg2, msg3], add=True) # add label to 3 messages
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 3)
self.assertEqual(set(label.get_messages()), {msg1, msg2, msg3})
label.toggle_label([msg3], add=False) # remove label from a message
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 2)
self.assertEqual(set(label.get_messages()), {msg1, msg2})
msg2.archive() # won't remove label from msg, but msg no longer counts toward visible count
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 1)
self.assertEqual(set(label.get_messages()), {msg1, msg2})
msg2.restore() # msg back in visible count
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 2)
self.assertEqual(set(label.get_messages()), {msg1, msg2})
msg2.release() # releasing deletes the msg, so it is removed from the label and no longer visible
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 1)
self.assertEqual(set(label.get_messages()), {msg1})
msg3.archive()
label.toggle_label([msg3], add=True) # labelling an already archived message doesn't increment the count
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 1)
self.assertEqual(set(label.get_messages()), {msg1, msg3})
msg3.restore() # but then restoring that message will
label = Label.label_objects.get(pk=label.pk)
self.assertEqual(label.get_visible_count(), 2)
self.assertEqual(set(label.get_messages()), {msg1, msg3})
# can't label test messages
msg4 = self.create_msg(text="Message", contact=Contact.get_test_contact(self.user), direction='I')
self.assertRaises(ValueError, label.toggle_label, [msg4], add=True)
# can't label outgoing messages
msg5 = self.create_msg(text="Message", contact=self.joe, direction='O')
self.assertRaises(ValueError, label.toggle_label, [msg5], add=True)
# can't get a count of a folder
folder = Label.get_or_create_folder(self.org, self.user, "Folder")
self.assertRaises(ValueError, folder.get_visible_count)
def test_get_messages_and_hierarchy(self):
folder1 = Label.get_or_create_folder(self.org, self.user, "Sorted")
folder2 = Label.get_or_create_folder(self.org, self.user, "Todo")
label1 = Label.get_or_create(self.org, self.user, "Spam", folder1)
label2 = Label.get_or_create(self.org, self.user, "Social", folder1)
label3 = Label.get_or_create(self.org, self.user, "Other")
msg1 = self.create_msg(text="Message 1", contact=self.joe, direction='I')
msg2 = self.create_msg(text="Message 2", contact=self.joe, direction='I')
msg3 = self.create_msg(text="Message 3", contact=self.joe, direction='I')
label1.toggle_label([msg1, msg2], add=True)
label2.toggle_label([msg2, msg3], add=True)
label3.toggle_label([msg3], add=True)
self.assertEqual(set(folder1.get_messages()), {msg1, msg2, msg3})
self.assertEqual(set(folder2.get_messages()), set())
self.assertEqual(set(label1.get_messages()), {msg1, msg2})
self.assertEqual(set(label2.get_messages()), {msg2, msg3})
self.assertEqual(set(label3.get_messages()), {msg3})
with self.assertNumQueries(2):
hierarchy = Label.get_hierarchy(self.org)
self.assertEqual(list(hierarchy), [label3, folder1, folder2])
self.assertEqual(list(hierarchy[1].children.all()), [label2, label1])
def test_delete_folder(self):
folder1 = Label.get_or_create_folder(self.org, self.user, "Folder")
label1 = Label.get_or_create(self.org, self.user, "Spam", folder1)
label2 = Label.get_or_create(self.org, self.user, "Social", folder1)
label3 = Label.get_or_create(self.org, self.user, "Other")
msg1 = self.create_msg(text="Message 1", contact=self.joe, direction='I')
msg2 = self.create_msg(text="Message 2", contact=self.joe, direction='I')
msg3 = self.create_msg(text="Message 3", contact=self.joe, direction='I')
label1.toggle_label([msg1, msg2], add=True)
label2.toggle_label([msg1], add=True)
label3.toggle_label([msg3], add=True)
folder1.delete()
self.assertFalse(Label.all_objects.filter(pk=folder1.pk).exists())
# check that contained labels are also deleted
self.assertEqual(Label.all_objects.filter(pk__in=[label1.pk, label2.pk]).count(), 0)
self.assertEqual(set(Msg.all_messages.get(pk=msg1.pk).labels.all()), set())
self.assertEqual(set(Msg.all_messages.get(pk=msg2.pk).labels.all()), set())
self.assertEqual(set(Msg.all_messages.get(pk=msg3.pk).labels.all()), {label3})
label3.delete()
self.assertFalse(Label.all_objects.filter(pk=label3.pk).exists())
self.assertEqual(set(Msg.all_messages.get(pk=msg3.pk).labels.all()), set())
class LabelCRUDLTest(TembaTest):
def test_create_and_update(self):
create_label_url = reverse('msgs.label_create')
create_folder_url = reverse('msgs.label_create_folder')
self.login(self.admin)
# try to create label with invalid name
response = self.client.post(create_label_url, dict(name="+label_one"))
self.assertFormError(response, 'form', 'name', "Name must not be blank or begin with punctuation")
# try again with valid name
self.client.post(create_label_url, dict(name="label_one"), follow=True)
label_one = Label.label_objects.get()
self.assertEqual(label_one.name, "label_one")
self.assertIsNone(label_one.folder)
# check that we can't create another with same name
response = self.client.post(create_label_url, dict(name="label_one"))
self.assertFormError(response, 'form', 'name', "Name must be unique")
# create a folder
self.client.post(create_folder_url, dict(name="Folder"), follow=True)
folder = Label.folder_objects.get(name="Folder")
# and a label in it
self.client.post(create_label_url, dict(name="label_two", folder=folder.pk), follow=True)
label_two = Label.label_objects.get(name="label_two")
self.assertEqual(label_two.folder, folder)
# update label one
self.client.post(reverse('msgs.label_update', args=[label_one.pk]), dict(name="label_1"))
label_one = Label.label_objects.get(pk=label_one.pk)
self.assertEqual(label_one.name, "label_1")
self.assertIsNone(label_one.folder)
# try to update to invalid label name
response = self.client.post(reverse('msgs.label_update', args=[label_one.pk]), dict(name="+label_1"))
self.assertFormError(response, 'form', 'name', "Name must not be blank or begin with punctuation")
def test_label_delete(self):
label_one = Label.get_or_create(self.org, self.user, "label1")
delete_url = reverse('msgs.label_delete', args=[label_one.pk])
self.login(self.user)
response = self.client.get(delete_url)
self.assertEquals(response.status_code, 302)
self.login(self.admin)
response = self.client.get(delete_url)
self.assertEquals(response.status_code, 200)
def test_list(self):
folder = Label.get_or_create_folder(self.org, self.user, "Folder")
Label.get_or_create(self.org, self.user, "Spam", folder=folder)
Label.get_or_create(self.org, self.user, "Junk", folder=folder)
Label.get_or_create(self.org, self.user, "Important")
self.create_secondary_org()
Label.get_or_create(self.org2, self.admin2, "Other Org")
# viewers can't edit flows, so they don't have access to this JSON endpoint (that's the only place it's used)
self.login(self.user)
response = self.client.get(reverse('msgs.label_list'))
self.assertLoginRedirect(response)
# editors can though
self.login(self.editor)
response = self.client.get(reverse('msgs.label_list'))
results = json.loads(response.content)
# results should be A-Z and not include folders or labels from other orgs
self.assertEqual(len(results), 3)
self.assertEqual(results[0]['text'], "Important")
self.assertEqual(results[1]['text'], "Junk")
self.assertEqual(results[2]['text'], "Spam")
class ScheduleTest(TembaTest):
def tearDown(self):
from temba.channels import models as channel_models
channel_models.SEND_QUEUE_DEPTH = 500
channel_models.SEND_BATCH_SIZE = 100
Broadcast.BULK_THRESHOLD = 50
def test_batch(self):
Broadcast.BULK_THRESHOLD = 10
# broadcast out to 11 contacts to test our batching
contacts = []
for i in range(1, 12):
contacts.append(self.create_contact("Contact %d" % i, "+250788123%d" % i))
batch_group = self.create_group("Batch Group", contacts)
# create our broadcast
broadcast = Broadcast.create(self.org, self.admin, 'Many message but only 5 batches.', [batch_group])
self.channel.channel_type = 'EX'
self.channel.save()
# create our messages, but don't sync
broadcast.send(trigger_send=False)
# get one of our messages, should be at bulk priority since it was in a broadcast over our bulk threshold
sms = broadcast.get_messages()[0]
self.assertEqual(sms.priority, Msg.PRIORITY_BULK)
# we should now have 11 messages pending
self.assertEquals(11, Msg.all_messages.filter(channel=self.channel, status=PENDING).count())
# let's trigger a sending of the messages
self.org.trigger_send()
# we should now have 11 messages that have been sent (wired to the channel)
self.assertEquals(11, Msg.all_messages.filter(channel=self.channel, status=WIRED).count())
class ConsoleTest(TembaTest):
def setUp(self):
from temba.triggers.models import Trigger
super(ConsoleTest, self).setUp()
self.create_secondary_org()
# create a new console
self.console = MessageConsole(self.org, "tel:+250788123123")
# a few test contacts
self.john = self.create_contact("John Doe", "0788123123")
# create a flow and set "color" as its trigger
self.flow = self.create_flow()
Trigger.objects.create(flow=self.flow, keyword="color", created_by=self.admin, modified_by=self.admin, org=self.org)
def assertEchoed(self, needle, clear=True):
found = False
for line in self.console.echoed:
if line.find(needle) >= 0:
found = True
self.assertTrue(found, "Did not find '%s' in '%s'" % (needle, ", ".join(self.console.echoed)))
if clear:
self.console.clear_echoed()
def test_msg_console(self):
# make sure our org is properly set
self.assertEquals(self.console.org, self.org)
# try changing it with something empty
self.console.do_org("")
self.assertEchoed("Select org", clear=False)
self.assertEchoed("Temba")
# shouldn't have changed current org
self.assertEquals(self.console.org, self.org)
# try changing entirely
self.console.do_org("%d" % self.org2.id)
self.assertEchoed("You are now sending messages for Trileet Inc.")
self.assertEquals(self.console.org, self.org2)
self.assertEquals(self.console.contact.org, self.org2)
# back to temba
self.console.do_org("%d" % self.org.id)
self.assertEquals(self.console.org, self.org)
self.assertEquals(self.console.contact.org, self.org)
# contact help
self.console.do_contact("")
self.assertEchoed("Set contact by")
# switch our contact
self.console.do_contact("0788123123")
self.assertEchoed("You are now sending as John")
self.assertEquals(self.console.contact, self.john)
# send a message
self.console.default("Hello World")
self.assertEchoed("Hello World")
# make sure the message was created for our contact and handled
msg = Msg.all_messages.get()
self.assertEquals(msg.text, "Hello World")
self.assertEquals(msg.contact, self.john)
self.assertEquals(msg.status, HANDLED)
# now trigger a flow
self.console.default("Color")
self.assertEchoed("What is your favorite color?")
class BroadcastLanguageTest(TembaTest):
def setUp(self):
super(BroadcastLanguageTest, self).setUp()
self.francois = self.create_contact('Francois', '+12065551213')
self.francois.language = 'fre'
self.francois.save()
self.greg = self.create_contact('Greg', '+12065551212')
self.wilbert = self.create_contact('Wilbert', '+12065551214')
self.wilbert.language = 'fre'
self.wilbert.save()
def test_multiple_language_broadcast(self):
# set up our org to have a few different languages
eng = Language.create(self.org, self.admin, "English", 'eng')
Language.create(self.org, self.admin, "French", 'fre')
self.org.primary_language = eng
self.org.save()
eng_msg = "This is my message"
fre_msg = "Ceci est mon message"
# now create a broadcast with a couple contacts, one with an explicit language, the other not
bcast = Broadcast.create(self.org, self.admin, "This is my new message",
[self.francois, self.greg, self.wilbert],
language_dict=json.dumps(dict(eng=eng_msg, fre=fre_msg)))
bcast.send()
# assert the right language was used for each contact
self.assertEquals(fre_msg, Msg.all_messages.get(contact=self.francois).text)
self.assertEquals(eng_msg, Msg.all_messages.get(contact=self.greg).text)
self.assertEquals(fre_msg, Msg.all_messages.get(contact=self.wilbert).text)
class SystemLabelTest(TembaTest):
def test_get_counts(self):
self.assertEqual(SystemLabel.get_counts(self.org), {SystemLabel.TYPE_INBOX: 0, SystemLabel.TYPE_FLOWS: 0,
SystemLabel.TYPE_ARCHIVED: 0, SystemLabel.TYPE_OUTBOX: 0,
SystemLabel.TYPE_SENT: 0, SystemLabel.TYPE_FAILED: 0,
SystemLabel.TYPE_SCHEDULED: 0, SystemLabel.TYPE_CALLS: 0})
contact1 = self.create_contact("Bob", number="0783835001")
contact2 = self.create_contact("Jim", number="0783835002")
msg1 = Msg.create_incoming(self.channel, "tel:0783835001", text="Message 1")
Msg.create_incoming(self.channel, "tel:0783835001", text="Message 2")
msg3 = Msg.create_incoming(self.channel, "tel:0783835001", text="Message 3")
msg4 = Msg.create_incoming(self.channel, "tel:0783835001", text="Message 4")
call1 = ChannelEvent.create(self.channel, "tel:0783835001", ChannelEvent.TYPE_CALL_IN, timezone.now(), 10)
bcast1 = Broadcast.create(self.org, self.user, "Broadcast 1", [contact1, contact2])
Broadcast.create(self.org, self.user, "Broadcast 2", [contact1, contact2],
schedule=Schedule.create_schedule(timezone.now(), 'D', self.user))
# create a broadcast with a test contact to make sure they aren't included
test_bcast = Broadcast.create(self.org, self.user, "Test Broadcast", [Contact.get_test_contact(self.admin)])
# this will create some test outgoing messages as well
test_bcast.send()
self.assertEqual(SystemLabel.get_counts(self.org), {SystemLabel.TYPE_INBOX: 4, SystemLabel.TYPE_FLOWS: 0,
SystemLabel.TYPE_ARCHIVED: 0, SystemLabel.TYPE_OUTBOX: 0,
SystemLabel.TYPE_SENT: 0, SystemLabel.TYPE_FAILED: 0,
SystemLabel.TYPE_SCHEDULED: 1, SystemLabel.TYPE_CALLS: 1})
msg3.archive()
bcast1.send(status=QUEUED)
msg5, msg6 = tuple(Msg.all_messages.filter(broadcast=bcast1))
ChannelEvent.create(self.channel, "tel:0783835002", ChannelEvent.TYPE_CALL_IN, timezone.now(), 10)
Broadcast.create(self.org, self.user, "Broadcast 3", [contact1],
schedule=Schedule.create_schedule(timezone.now(), 'W', self.user))
self.assertEqual(SystemLabel.get_counts(self.org), {SystemLabel.TYPE_INBOX: 3, SystemLabel.TYPE_FLOWS: 0,
SystemLabel.TYPE_ARCHIVED: 1, SystemLabel.TYPE_OUTBOX: 2,
SystemLabel.TYPE_SENT: 0, SystemLabel.TYPE_FAILED: 0,
SystemLabel.TYPE_SCHEDULED: 2, SystemLabel.TYPE_CALLS: 2})
msg1.archive()
msg3.release() # deleting an archived msg
msg4.release() # deleting a visible msg
msg5.fail()
msg6.status_sent()
call1.release()
self.assertEqual(SystemLabel.get_counts(self.org), {SystemLabel.TYPE_INBOX: 1, SystemLabel.TYPE_FLOWS: 0,
SystemLabel.TYPE_ARCHIVED: 1, SystemLabel.TYPE_OUTBOX: 0,
SystemLabel.TYPE_SENT: 1, SystemLabel.TYPE_FAILED: 1,
SystemLabel.TYPE_SCHEDULED: 2, SystemLabel.TYPE_CALLS: 1})
msg1.restore()
msg3.release() # already released
msg5.fail() # already failed
msg6.status_delivered()
self.assertEqual(SystemLabel.get_counts(self.org), {SystemLabel.TYPE_INBOX: 2, SystemLabel.TYPE_FLOWS: 0,
SystemLabel.TYPE_ARCHIVED: 0, SystemLabel.TYPE_OUTBOX: 0,
SystemLabel.TYPE_SENT: 1, SystemLabel.TYPE_FAILED: 1,
SystemLabel.TYPE_SCHEDULED: 2, SystemLabel.TYPE_CALLS: 1})
msg5.resend()
self.assertTrue(SystemLabel.objects.all().count() > 8)
# squash our counts
squash_systemlabels()
self.assertEqual(SystemLabel.get_counts(self.org), {SystemLabel.TYPE_INBOX: 2, SystemLabel.TYPE_FLOWS: 0,
SystemLabel.TYPE_ARCHIVED: 0, SystemLabel.TYPE_OUTBOX: 1,
SystemLabel.TYPE_SENT: 1, SystemLabel.TYPE_FAILED: 0,
SystemLabel.TYPE_SCHEDULED: 2, SystemLabel.TYPE_CALLS: 1})
# we should only have one system label per type
self.assertEqual(SystemLabel.objects.all().count(), 8)
|
ewheeler/rapidpro
|
temba/msgs/tests.py
|
Python
|
agpl-3.0
| 88,157
|
[
"VisIt"
] |
3b6e566818bd105aa0fecba9f46a7ba1ad4d762c7259429dcd3c60257c8a4cf1
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
from functools import reduce
import numpy
from pyscf import lib
from pyscf.fci import cistring
from pyscf.fci import rdm
from pyscf.fci.addons import _unpack_nelec
librdm = lib.load_library('libfci')
######################################################
# Spin squared operator
######################################################
# S^2 = (S+ * S- + S- * S+)/2 + Sz * Sz
# S+ = \sum_i S_i+ ~ effective for all beta occupied orbitals.
# S- = \sum_i S_i- ~ effective for all alpha occupied orbitals.
# There are two cases for S+*S-
# 1) same electron \sum_i s_i+*s_i-, <CI|s_i+*s_i-|CI> gives
# <p|s+s-|q> \gammalpha_qp = trace(\gammalpha) = neleca
# 2) different electrons for \sum s_i+*s_j- (i\neq j, n*(n-1) terms)
# As a two-particle operator S+*S-
# = <ij|s+s-|kl>Gamma_{ik,jl} = <iajb|s+s-|kbla>Gamma_{iakb,jbla}
# = <ia|s+|kb><jb|s-|la>Gamma_{iakb,jbla}
# <CI|S+*S-|CI> = neleca + <ia|s+|kb><jb|s-|la>Gamma_{iakb,jbla}
#
# There are two cases for S-*S+
# 1) same electron \sum_i s_i-*s_i+
# <p|s-s+|q> \gammabeta_qp = trace(\gammabeta) = nelecb
# 2) different electrons
# = <ij|s-s+|kl>Gamma_{ik,jl} = <ibja|s-s+|kalb>Gamma_{ibka,jalb}
# = <ib|s-|ka><ja|s+|lb>Gamma_{ibka,jalb}
# <CI|S-*S+|CI> = nelecb + <ib|s-|ka><ja|s+|lb>Gamma_{ibka,jalb}
#
# Sz*Sz = Msz^2 = (neleca-nelecb)^2
# 1) same electron
# <p|ss|q>\gamma_qp = <p|q>\gamma_qp = (neleca+nelecb)/4
# 2) different electrons
# <ij|2s1s2|kl>Gamma_{ik,jl}/2
# =(<ia|ka><ja|la>Gamma_{iaka,jala} - <ia|ka><jb|lb>Gamma_{iaka,jblb}
# - <ib|kb><ja|la>Gamma_{ibkb,jala} + <ib|kb><jb|lb>Gamma_{ibkb,jblb})/4
# set aolst for local spin expectation value, which is defined as
# <CI|ao><ao|S^2|CI>
# For a complete list of AOs, I = \sum |ao><ao|, it becomes <CI|S^2|CI>
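# Added sanity check on the relations above (not part of the original derivation):
# for a single high-spin determinant in orthonormal orbitals, with every beta
# electron paired to an alpha one, <S^2> reduces to Sz*(Sz+1) with
# Sz = (neleca - nelecb)/2. E.g. the (neleca, nelecb) = (3, 1) determinant used
# in the __main__ block below gives <S^2> = 2 (S = 1, i.e. a triplet).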
def spin_square_general(dm1a, dm1b, dm2aa, dm2ab, dm2bb, mo_coeff, ovlp=1):
r'''General spin square operator.
.. math::
<CI|S_+*S_-|CI> &= n_\alpha + \delta_{ik}\delta_{jl}Gamma_{i\alpha k\beta ,j\beta l\alpha } \\
<CI|S_-*S_+|CI> &= n_\beta + \delta_{ik}\delta_{jl}Gamma_{i\beta k\alpha ,j\alpha l\beta } \\
<CI|S_z*S_z|CI> &= \delta_{ik}\delta_{jl}(Gamma_{i\alpha k\alpha ,j\alpha l\alpha }
- Gamma_{i\alpha k\alpha ,j\beta l\beta }
- Gamma_{i\beta k\beta ,j\alpha l\alpha}
+ Gamma_{i\beta k\beta ,j\beta l\beta})
+ (n_\alpha+n_\beta)/4
Given the overlap between non-degenerate alpha and beta orbitals, this
function can compute the expectation value of the spin square operator for
a UHF-FCI wavefunction.
'''
if isinstance(mo_coeff, numpy.ndarray) and mo_coeff.ndim == 2:
mo_coeff = (mo_coeff, mo_coeff)
# projected overlap matrix elements for partial trace
if isinstance(ovlp, numpy.ndarray):
ovlpaa = reduce(numpy.dot, (mo_coeff[0].T, ovlp, mo_coeff[0]))
ovlpbb = reduce(numpy.dot, (mo_coeff[1].T, ovlp, mo_coeff[1]))
ovlpab = reduce(numpy.dot, (mo_coeff[0].T, ovlp, mo_coeff[1]))
ovlpba = reduce(numpy.dot, (mo_coeff[1].T, ovlp, mo_coeff[0]))
else:
ovlpaa = numpy.dot(mo_coeff[0].T, mo_coeff[0])
ovlpbb = numpy.dot(mo_coeff[1].T, mo_coeff[1])
ovlpab = numpy.dot(mo_coeff[0].T, mo_coeff[1])
ovlpba = numpy.dot(mo_coeff[1].T, mo_coeff[0])
# if ovlp=1, ssz = (neleca-nelecb)**2 * .25
ssz =(numpy.einsum('ijkl,ij,kl->', dm2aa, ovlpaa, ovlpaa)
- numpy.einsum('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb)
+ numpy.einsum('ijkl,ij,kl->', dm2bb, ovlpbb, ovlpbb)
- numpy.einsum('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb)) * .25 \
+(numpy.einsum('ji,ij->', dm1a, ovlpaa)
+ numpy.einsum('ji,ij->', dm1b, ovlpbb)) *.25
dm2abba = -dm2ab.transpose(0,3,2,1) # alpha^+ beta^+ alpha beta
dm2baab = -dm2ab.transpose(2,1,0,3) # beta^+ alpha^+ beta alpha
ssxy =(numpy.einsum('ijkl,ij,kl->', dm2baab, ovlpba, ovlpab)
+ numpy.einsum('ijkl,ij,kl->', dm2abba, ovlpab, ovlpba)
+ numpy.einsum('ji,ij->', dm1a, ovlpaa)
+ numpy.einsum('ji,ij->', dm1b, ovlpbb)) * .5
ss = ssxy + ssz
s = numpy.sqrt(ss+.25) - .5
multip = s*2+1
return ss, multip
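# Minimal usage sketch for spin_square_general (this mirrors what spin_square()
# below does; it assumes spin-separated RDMs from direct_spin1.make_rdm12s):
#   (dm1a, dm1b), (dm2aa, dm2ab, dm2bb) = direct_spin1.make_rdm12s(fcivec, norb, nelec)
#   ss, multip = spin_square_general(dm1a, dm1b, dm2aa, dm2ab, dm2bb, (numpy.eye(norb),)*2)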
def spin_square(fcivec, norb, nelec, mo_coeff=None, ovlp=1):
__doc__ = spin_square_general.__doc__
from pyscf.fci import direct_spin1
if mo_coeff is None:
mo_coeff = (numpy.eye(norb),) * 2
(dm1a, dm1b), (dm2aa, dm2ab, dm2bb) = \
direct_spin1.make_rdm12s(fcivec, norb, nelec)
return spin_square_general(dm1a, dm1b, dm2aa, dm2ab, dm2bb, mo_coeff, ovlp)
def spin_square0(fcivec, norb, nelec):
'''Spin square for RHF-FCI CI wfn only (obtained from a spin-degenerate
Hamiltonian)'''
ci1 = contract_ss(fcivec, norb, nelec)
ss = numpy.einsum('ij,ij->', fcivec.reshape(ci1.shape), ci1)
s = numpy.sqrt(ss+.25) - .5
multip = s*2+1
return ss, multip
def local_spin(fcivec, norb, nelec, mo_coeff=None, ovlp=1, aolst=[]):
r'''Local spin expectation value, which is defined as
<CI|(local S^2)|CI>
The local S^2 operator only couples the orbitals specified in aolst. The
cross term which involves the interaction between the local part (in aolst)
and non-local part (not in aolst) is not included. As a result, the value
of local_spin is not additive. In other words, if local_spin is computed
twice with the complementary aolst in the two runs, the summation does not
equal the S^2 of the entire system.
For a complete list of AOs, the value of local_spin is equivalent to <CI|S^2|CI>
'''
if isinstance(ovlp, numpy.ndarray):
nao = ovlp.shape[0]
if len(aolst) == 0:
lstnot = []
else:
lstnot = [i for i in range(nao) if i not in aolst]
s = ovlp.copy()
s[lstnot] = 0
s[:,lstnot] = 0
else:
if len(aolst) == 0:
aolst = numpy.arange(norb)
s = numpy.zeros((norb,norb))
s[aolst,aolst] = 1
return spin_square(fcivec, norb, nelec, mo_coeff, s)
# for S+*S-
# dm(pq,rs) * [p(beta)^+ q(alpha) r(alpha)^+ s(beta)]
# size of intermediate determinants (norb,neleca+1;norb,nelecb-1)
def make_rdm2_baab(fcivec, norb, nelec):
from pyscf.fci import direct_spin1
dm2aa, dm2ab, dm2bb = direct_spin1.make_rdm12s(fcivec, norb, nelec)[1]
dm2baab = -dm2ab.transpose(2,1,0,3)
return dm2baab
# for S-*S+
# dm(pq,rs) * [q(alpha)^+ p(beta) s(beta)^+ r(alpha)]
# size of intermediate determinants (norb,neleca-1;norb,nelecb+1)
def make_rdm2_abba(fcivec, norb, nelec):
from pyscf.fci import direct_spin1
dm2aa, dm2ab, dm2bb = direct_spin1.make_rdm12s(fcivec, norb, nelec)[1]
dm2abba = -dm2ab.transpose(0,3,2,1)
return dm2abba
def contract_ss(fcivec, norb, nelec):
'''Contract spin square operator with FCI wavefunction :math:`S^2 |CI>`
'''
neleca, nelecb = _unpack_nelec(nelec)
na = cistring.num_strings(norb, neleca)
nb = cistring.num_strings(norb, nelecb)
fcivec = fcivec.reshape(na,nb)
def gen_map(fstr_index, nelec, des=True):
a_index = fstr_index(range(norb), nelec)
amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
if des:
for k, tab in enumerate(a_index):
amap[k,tab[:,1]] = tab[:,2:]
else:
for k, tab in enumerate(a_index):
amap[k,tab[:,0]] = tab[:,2:]
return amap
if neleca > 0:
ades = gen_map(cistring.gen_des_str_index, neleca)
else:
ades = None
if nelecb > 0:
bdes = gen_map(cistring.gen_des_str_index, nelecb)
else:
bdes = None
if neleca < norb:
acre = gen_map(cistring.gen_cre_str_index, neleca, False)
else:
acre = None
if nelecb < norb:
bcre = gen_map(cistring.gen_cre_str_index, nelecb, False)
else:
bcre = None
def trans(ci1, aindex, bindex, nea, neb):
if aindex is None or bindex is None:
return None
t1 = numpy.zeros((cistring.num_strings(norb,nea),
cistring.num_strings(norb,neb)))
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(fcivec, maska, maskb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
#: t1[addra.reshape(-1,1),addrb] += citmp
lib.takebak_2d(t1, citmp, addra, addrb)
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(t1, addra, addrb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
#: ci1[maska.reshape(-1,1), maskb] += citmp
lib.takebak_2d(ci1, citmp, maska, maskb)
ci1 = numpy.zeros((na,nb))
trans(ci1, ades, bcre, neleca-1, nelecb+1) # S+*S-
trans(ci1, acre, bdes, neleca+1, nelecb-1) # S-*S+
ci1 *= .5
ci1 += (neleca-nelecb)**2*.25*fcivec
return ci1
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
['H', ( 0.,-1. ,-2. )],
['H', ( 1.,-1.5 , 1. )],
]
mol.basis = {'H': 'sto-3g'}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
cis = fci.solver(mol)
cis.verbose = 5
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
e, ci0 = cis.kernel(h1e, eri, norb, nelec)
ss = spin_square(ci0, norb, nelec)
print(ss)
ss = spin_square0(ci0, norb, nelec)
print(ss)
ss = local_spin(ci0, norb, nelec, m.mo_coeff, m.get_ovlp(), range(5))
print('local spin for H1..H5 = 0.999', ss[0])
ci1 = numpy.zeros((4,4))
ci1[0,0] = 1
print(spin_square (ci1, 4, (3,1)))
print(spin_square0(ci1, 4, (3,1)))
print(numpy.einsum('ij,ij->', ci1, contract_ss(ci1, 4, (3,1))),
spin_square(ci1, 4, (3,1))[0])
|
gkc1000/pyscf
|
pyscf/fci/spin_op.py
|
Python
|
apache-2.0
| 11,490
|
[
"PySCF"
] |
a2c5e22a099e5ce19fc4e8e6d4ad243f7ff204ecc8a2e5527dc75ca2b532dda1
|
#!/usr/bin/env python
import vtk
def main():
# Create a graph
graph = vtk.vtkMutableDirectedGraph()
v1 = graph.AddVertex()
v2 = graph.AddVertex()
graph.AddEdge(v1,v2)
# Create an array for the vertex labels
vertexIDs = vtk.vtkIntArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue(0)
vertexIDs.InsertNextValue(1)
# Add the array to the graph
graph.GetVertexData().AddArray(vertexIDs)
graphLayoutView = vtk.vtkGraphLayoutView()
graphLayoutView.AddRepresentationFromInput(graph)
graphLayoutView.SetVertexLabelVisibility(1)
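# GetRepresentation() returns a generic vtkDataRepresentation; it is downcast to
# vtkRenderedGraphRepresentation below to reach the vertex label text property.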
rGraph = vtk.vtkRenderedGraphRepresentation.SafeDownCast(graphLayoutView.GetRepresentation())
rGraph.GetVertexLabelTextProperty().SetColor(1, 0, 0)
graphLayoutView.SetLayoutStrategyToSimple2D()
graphLayoutView.SetVertexLabelArrayName("VertexIDs")
graphLayoutView.ResetCamera()
graphLayoutView.Render()
graphLayoutView.GetInteractor().Start()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/Graphs/ColorVertexLabels.py
|
Python
|
apache-2.0
| 1,100
|
[
"VTK"
] |
96c982eaf5e0d58547545cf57d44ac2a436e08cca125a2479c29cdc0d05acb48
|
"""
Print out a hierarchical list of changes between two folders' CSV files.
Intended to be used for comparisons of release folders.
Only compares files that are found in both folders.
Sample call when comparing same-year releases:
```
RELEASE_DIR=/fs/ncanda-share/releases/internal/followup_5y
python compare_releases.py --cutoff 10 \
${RELEASE_DIR}/NCANDA_SNAPS_5Y_REDCAP_V01/summaries/redcap/ \
${RELEASE_DIR}/NCANDA_SNAPS_5Y_REDCAP_V02/summaries/redcap/
```
Sample call when comparing between-year releases:
```
RELEASE_DIR=/fs/ncanda-share/releases/internal
python compare_releases.py --cutoff 10 --exclude-visit followup_5y \
${RELEASE_DIR}/followup_4y/NCANDA_SNAPS_4Y_REDCAP_V02/summaries/redcap/ \
${RELEASE_DIR}/followup_5y/NCANDA_SNAPS_5Y_REDCAP_V02/summaries/redcap
```
(Note the `--exclude-visit followup_5y`, which makes the script omit any
differences on the provided year.)
"""
import argparse
import pandas as pd
import numpy as np
from pathlib import Path
from typing import Dict, List, Tuple, Union
def _parse_args(input_args: List = None):
"""
Handle CLI arguments
"""
parser = argparse.ArgumentParser(
description="Compare CSV contents of NCANDA releases")
parser.add_argument('first_folder', type=Path,
help="Older folder or file to compare")
parser.add_argument('second_folder', type=Path,
help="Newer folder or file to compare")
parser.add_argument('--index-keys', default=['subject', 'arm', 'visit'],
help="Foreign keys to compare the files on")
parser.add_argument('--exclude-visit', '-V',
help="Visits to exclude from the listing of results")
parser.add_argument('--exclude-files', '-F',
nargs='*', default=["locked_forms.csv"],
help="Any files in the folders to avoid processing")
parser.add_argument('--enumeration-cutoff', '--cutoff', '-c',
dest='cutoff', type=int,
help="Truncate results after this many values")
return parser.parse_args(input_args)
def collect_file_pairs(folder1, folder2, exclusions: List = []) -> List[Tuple]:
"""
Normalize path inputs into a list of path tuples
"""
if folder1.is_file():
assert folder2.is_file()
assert folder1.name == folder2.name
return [(folder1, folder2)]
else:
assert folder1.is_dir() and folder2.is_dir()
pairs: List[Tuple[str]] = []
for file in folder1.glob('*.csv'):
if file.name in exclusions or not file.is_file():
continue
file2 = folder2 / file.name
if file2.exists():
pairs.append((file, file2))
return sorted(pairs)
def prepare_index(df: pd.DataFrame, index_keys: List) -> pd.DataFrame:
"""
Ensure that df has a (Multi)Index consisting of index_keys
"""
if df.index.names == index_keys:
return df
elif len(df.index.names) == 1 and df.index.name is None:
return df.set_index(index_keys)
else:
return df.reset_index().set_index(index_keys)
def get_dataframe_differences(df1: pd.DataFrame, df2: pd.DataFrame,
index_keys: List) -> pd.DataFrame:
"""
Create a list of all variable differences between two dataframes.
Returns a DataFrame with a MultiIndex consisting of index_keys and
'colname', and two columns with changed values from each DataFrame.
"""
df1 = prepare_index(df1, index_keys)
df2 = prepare_index(df2, index_keys)
diff_columns = compare_columns(df1.columns, df2.columns)
shared_columns = df1.columns[~df1.columns.isin(diff_columns['removed'])]
df1_long = df1[shared_columns].stack().to_frame('df1')
df2_long = df2[shared_columns].stack().to_frame('df2')
comparison = df1_long.join(df2_long, how="outer")
comparison.index.rename('colname', level=-1, inplace=True)
differences = comparison.loc[comparison['df1'] != comparison['df2']]
return differences
def summarize_differences(difference_df: pd.DataFrame,
index_keys: List) -> Dict:
counts_by_index = difference_df.reset_index().groupby(index_keys).count()
summarized_changes = counts_by_index.apply(
lambda x: F"{x['df1']} changed, {x['df2']} new values", axis=1)
return _reshape_differences(summarized_changes)
def _reshape_differences(difference_series: pd.Series) -> Dict:
output_dict: Dict = {}
difference_dict = difference_series.to_dict()
for (subject, arm, visit), statement in difference_dict.items():
if visit not in output_dict:
output_dict[visit] = []
output_dict[visit].append(F"{subject}: {statement}")
return output_dict
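# Illustrative output of summarize_differences/_reshape_differences
# (hypothetical subjects and visits):
#   {'followup_4y': ['A-00001: 3 changed, 1 new values'],
#    'followup_5y': ['A-00002: 0 changed, 5 new values']}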
def dict_to_string(d: Dict[str, Union[Dict, str]], prefix: str = '',
enumeration_cutoff: int = None) -> str:
"""
Intended sample output:
Visit 1:
Subject (X added, Y removed)
Visit 2:
300 subjects with changes or additions
"""
output: str = ''
for key in sorted(d.keys()):
value = d[key]
output += f"\n{prefix}{key}: "
if type(value) == dict:
output += dict_to_string(value, prefix=prefix + '\t')
elif type(value) == list:
sub_prefix = F"\n{prefix}\t"
if enumeration_cutoff and len(value) > enumeration_cutoff:
output += sub_prefix + F"{len(value)} subjects with changes or additions"
else:
output += sub_prefix + sub_prefix.join(value)
else:
output += value
return output
def compare_columns(columns1: pd.Index, columns2: pd.Index) -> Dict[str, List]:
added = columns2.difference(columns1).tolist()
removed = columns1.difference(columns2).tolist()
return {'removed': removed, 'added': added}
def compared_columns_to_str(comparison_dict: Dict[str, List],
enumeration_cutoff: int = None) -> str:
outputs: List[str] = []
for action, variables in comparison_dict.items():
count = len(variables)
if enumeration_cutoff and count > enumeration_cutoff:
outputs.append(f"{count} {action}")
elif count > 0:
outputs.append(f"{action}: " + ", ".join(variables))
if len(outputs) > 0:
return " (Variable changes - {})".format("; ".join(outputs))
else:
return ""
def test_compare_columns():
df1 = pd.DataFrame({'a': [1, 2], 'b': [np.nan, 3]})
df2 = pd.DataFrame({'a': [np.nan, 3], 'c': ['x', 'y']})
assert compare_columns(df1.columns, df2.columns) == {'removed': ['b'],
'added': ['c']}
if __name__ == '__main__':
args = _parse_args()
release1 = args.first_folder
release2 = args.second_folder
enumeration_cutoff = args.cutoff
pairs = collect_file_pairs(release1, release2)
for pair in pairs:
df1 = pd.read_csv(pair[0], dtype=str)
df2 = pd.read_csv(pair[1], dtype=str)
if df1.empty or df2.empty:
continue
differences = get_dataframe_differences(df1, df2, args.index_keys)
if differences.empty:
continue
col_diffs = compare_columns(df1.columns, df2.columns)
col_diffs_str = compared_columns_to_str(col_diffs, enumeration_cutoff)
print(F"{pair[0].name}{col_diffs_str}:", end='')
try:
summary = summarize_differences(differences, args.index_keys)
if args.exclude_visit and args.exclude_visit in summary:
del summary[args.exclude_visit]
print(dict_to_string(summary, "\t", enumeration_cutoff))
except Exception: # we don't care what failed
print()
continue
|
sibis-platform/ncanda-data-integration
|
scripts/reporting/compare_releases.py
|
Python
|
bsd-3-clause
| 7,859
|
[
"VisIt"
] |
5a2836a97947ef7582d0dc2f840556821a3b2e71bd71cc1035a1b833514cb70d
|
"""
Contains class for gaussian process hyperparameter optimizations.
"""
import os
import logging
import tempfile
from typing import Dict, List, Optional, Tuple, Union
from deepchem.data import Dataset
from deepchem.metrics import Metric
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename
logger = logging.getLogger(__name__)
PARAM_DICT = Dict[str, Union[int, float]]
def compute_parameter_range(params_dict: PARAM_DICT,
search_range: Union[int, float, PARAM_DICT]
) -> Dict[str, Tuple[str, List[float]]]:
"""Convenience Function to compute parameter search space.
Parameters
----------
params_dict: Dict
Dictionary mapping strings to Ints/Floats. An explicit list of
parameters is computed with `search_range`. The optimization range
computed is specified in the documentation for `search_range`
below.
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
Returns
-------
param_range: Dict
Dictionary mapping hyperparameter names to tuples. Each tuple is
of form `(value_type, value_range)` where `value_type` is a string
that is either "int" or "cont" and `value_range` is a list of two
elements of the form `[low, hi]`. This format is expected by
pyGPGO which `GaussianProcessHyperparamOpt` uses to perform
optimization.
"""
# Range of optimization
param_range = {}
if isinstance(search_range, dict):
if sorted(params_dict.keys()) != sorted(search_range.keys()):
raise ValueError(
"If search_range is provided as a dictionary, it must have the same keys as params_dict."
)
elif (not isinstance(search_range, int)) and (not isinstance(
search_range, float)):
raise ValueError("search_range must be a dict or int or float.")
for hp, value in params_dict.items():
if isinstance(search_range, dict):
hp_search_range = search_range[hp]
else:
# We know from guard above that this is an int/float
hp_search_range = search_range
if isinstance(value, int):
value_range = [value // hp_search_range, value * hp_search_range]
param_range[hp] = ("int", value_range)
elif isinstance(value, float):
value_range = [value / hp_search_range, value * hp_search_range]
param_range[hp] = ("cont", value_range)
return param_range
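# Illustrative example (assumed values, not part of the original module):
# compute_parameter_range({"learning_rate": 0.001, "layer_num": 2}, 4) returns
# {"learning_rate": ("cont", [0.00025, 0.004]), "layer_num": ("int", [0, 8])};
# note that int parameters use floor division for the lower bound.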
class GaussianProcessHyperparamOpt(HyperparamOpt):
"""
  Gaussian Process Global Optimization (GPGO)
  This class uses Gaussian Process optimization to select
  hyperparameters. Under the hood it uses pyGPGO to optimize
  models. If you don't have pyGPGO installed, you won't be able to use
  this class.
  Note that `params_dict` has different semantics here than for
  `GridHyperparamOpt`: `params_dict[hp]` must be an int/float and is
  used as the center of a search range.
Example
-------
This example shows the type of constructor function expected.
>>> import sklearn
>>> import deepchem as dc
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(n_tasks=1, **p))
Here's a more sophisticated example that shows how to optimize only
some parameters of a model. In this case, we have some parameters we
want to optimize, and others which we don't. To handle this type of
search, we create a `model_builder` which hard codes some arguments
(in this case, `n_tasks` and `n_features` which are properties of a
dataset and not hyperparameters to search over.)
>>> def model_builder(**model_params):
... n_layers = model_params['layers']
... layer_width = model_params['width']
... dropout = model_params['dropout']
... return dc.models.MultitaskClassifier(
... n_tasks=5,
... n_features=100,
... layer_sizes=[layer_width]*n_layers,
... dropouts=dropout
... )
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder)
Note
----
This class requires pyGPGO to be installed.
"""
# NOTE: mypy prohibits changing the number of arguments
# FIXME: Signature of "hyperparam_search" incompatible with supertype "HyperparamOpt"
def hyperparam_search( # type: ignore[override]
self,
params_dict: PARAM_DICT,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
use_max: bool = True,
logdir: Optional[str] = None,
max_iter: int = 20,
search_range: Union[int, float, PARAM_DICT] = 4,
logfile: Optional[str] = None):
"""Perform hyperparameter search using a gaussian process.
Parameters
----------
params_dict: Dict
Maps hyperparameter names (strings) to possible parameter
values. The semantics of this list are different than for
`GridHyperparamOpt`. `params_dict[hp]` must map to an int/float,
which is used as the center of a search with radius
`search_range` since pyGPGO can only optimize numerical
hyperparameters.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation(optimization on valid scores)
metric: Metric
metric used for evaluation
use_max: bool, (default True)
Specifies whether to maximize or minimize `metric`.
maximization(True) or minimization(False)
logdir: str, optional, (default None)
The directory in which to store created models. If not set, will
use a temporary directory.
max_iter: int, (default 20)
number of optimization trials
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
    logfile: str, optional (default None)
      Name of logfile to write results to. If specified, this must be a
      valid file path. If not specified, results of the hyperparameter
      search will be written to `logdir/results.txt` (when `logdir` is set).
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.model.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
"""
try:
from pyGPGO.covfunc import matern32
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO
except ModuleNotFoundError:
raise ValueError("This class requires pyGPGO to be installed.")
# Specify logfile
log_file = None
if logfile:
log_file = logfile
elif logdir is not None:
# Make logdir if it doesn't exist.
if not os.path.exists(logdir):
os.makedirs(logdir, exist_ok=True)
log_file = os.path.join(logdir, "results.txt")
# setup range
param_range = compute_parameter_range(params_dict, search_range)
param_keys = list(param_range.keys())
# Stores all results
all_results = {}
# Store all model references so we don't have to reload
all_models = {}
# Stores all model locations
model_locations = {}
# Demarcating internal function for readability
def optimizing_function(**placeholders):
"""Private Optimizing function
Take in hyper parameter values and return valid set performances
Parameters
----------
placeholders: keyword arguments
Should be various hyperparameters as specified in `param_keys` above.
Returns:
--------
valid_scores: float
valid set performances
"""
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
# param values are always float in BO, so this line converts float to int
# see : https://github.com/josejimenezluna/pyGPGO/issues/10
hyper_parameters[hp] = int(placeholders[hp])
else:
hyper_parameters[hp] = float(placeholders[hp])
logger.info("Running hyperparameter set: %s" % str(hyper_parameters))
if log_file:
with open(log_file, 'w+') as f:
# Record hyperparameters
f.write("Parameters: %s" % str(hyper_parameters))
f.write('\n')
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
if logdir is not None:
filename = "model%s" % hp_str
model_dir = os.path.join(logdir, filename)
logger.info("model_dir is %s" % model_dir)
try:
os.makedirs(model_dir)
except OSError:
if not os.path.isdir(model_dir):
logger.info("Error creating model_dir, using tempfile directory")
model_dir = tempfile.mkdtemp()
else:
model_dir = tempfile.mkdtemp()
# Add it on to the information needed for the constructor
hyper_parameters["model_dir"] = model_dir
model = self.model_builder(**hyper_parameters)
model.fit(train_dataset)
try:
model.save()
# Some models autosave
except NotImplementedError:
pass
multitask_scores = model.evaluate(valid_dataset, [metric])
score = multitask_scores[metric.name]
if log_file:
with open(log_file, 'a') as f:
# Record performances
f.write("Score: %s" % str(score))
f.write('\n')
# Store all results
all_results[hp_str] = score
# Store reference to model
all_models[hp_str] = model
model_locations[hp_str] = model_dir
      # GPGO always maximizes the objective, so return the negated score
      # when the goal is to minimize the metric
if use_max:
return score
else:
return -score
# execute GPGO
cov = matern32()
gp = GaussianProcess(cov)
acq = Acquisition(mode='ExpectedImprovement')
gpgo = GPGO(gp, acq, optimizing_function, param_range)
logger.info("Max number of iteration: %i" % max_iter)
gpgo.run(max_iter=max_iter)
hp_opt, valid_performance_opt = gpgo.getResult()
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
hyper_parameters[hp] = int(hp_opt[hp])
else:
# FIXME: Incompatible types in assignment
hyper_parameters[hp] = float(hp_opt[hp]) # type: ignore
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
# Let's fetch the model with the best parameters
best_model = all_models[hp_str]
# Compare best model to default hyperparameters
if log_file:
with open(log_file, 'a') as f:
# Record hyperparameters
f.write("params_dict:")
f.write(str(params_dict))
f.write('\n')
    # Return the best model, its hyperparameters, and all recorded scores
return best_model, hyper_parameters, all_results
|
miaecle/deepchem
|
deepchem/hyper/gaussian_process.py
|
Python
|
mit
| 12,659
|
[
"Gaussian"
] |
0ab7ec2aac5b9a6176462a4e154aeeaabbbd98b955d34b3b0dfde42d9d5a05ce
|
from collections import defaultdict
from typing import Dict, List
from libcst import CSTVisitor, MetadataWrapper, Name, parse_module
from libcst.metadata import (
CodeRange,
PositionProvider,
QualifiedNameProvider,
ScopeProvider,
)
from pytest import mark
from breakfast.source import Source
from tests import make_source
class NameCollector(CSTVisitor): # type: ignore
METADATA_DEPENDENCIES = (PositionProvider, QualifiedNameProvider, ScopeProvider)
def __init__(self, line: int, column: int):
self.names: Dict[str, List[CodeRange]] = defaultdict(list)
self.found: List[CodeRange] = []
self.looking_for = (line, column)
def visit_Name(self, node: Name): # pylint: disable=invalid-name
metadata = self.get_metadata(QualifiedNameProvider, node)
code_range = self.get_metadata(PositionProvider, node)
for datum in metadata:
self.names[datum.name].append(code_range)
if self.looking_for == (code_range.start.line - 1, code_range.start.column):
self.found = self.names[datum.name]
return True
def rename(source: Source, *, row: int, column: int, new_name: str) -> str:
position = source.position(row=row, column=column)
parsed = parse_module("\n".join(source.lines))
wrapper = MetadataWrapper(parsed)
collector = NameCollector(position.row, position.column)
wrapper.visit(collector)
for code_range in collector.found:
start = source.position(code_range.start.line - 1, code_range.start.column)
end = source.position(code_range.end.line - 1, code_range.end.column)
source.modify_line(start, end, new_name)
return source.render()
def assert_renames(
old_source: str, new_source: str, *, row: int, column: int, new_name: str = "new"
) -> None:
source = make_source(old_source)
assert rename(source, row=row, column=column, new_name=new_name) == "\n".join(
make_source(new_source).lines
)
def test_distinguishes_local_variables_from_global():
assert_renames(
"""
def fun():
old = 12
old2 = 13
result = old + old2
del old
return result
old = 20
""",
"""
def fun():
new = 12
old2 = 13
result = new + old2
del new
return result
old = 20
""",
row=2,
column=4,
)
def test_finds_non_local_variable():
assert_renames(
"""
old = 12
def fun():
result = old + 1
return result
old = 20
""",
"""
new = 12
def fun():
result = new + 1
return result
new = 20
""",
row=1,
column=0,
)
def test_does_not_rename_random_attributes():
assert_renames(
"""
import os
path = os.path.dirname(__file__)
""",
"""
import os
new = os.path.dirname(__file__)
""",
row=3,
column=0,
)
@mark.xfail()
def test_finds_parameter():
assert_renames(
"""
def fun(old=1):
print(old)
old = 8
fun(old=old)
""",
"""
def fun(new=1):
print(new)
old = 8
fun(new=old)
""",
row=1,
column=8,
)
|
thisfred/breakfast
|
tests/test_libcst_attempt.py
|
Python
|
bsd-2-clause
| 3,443
|
[
"VisIt"
] |
2155b0fe05e3d211d7e9868f31c6235a58b4f070fee32cd0d038ea81b0e4bc14
|
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <dholth@stetson.edu> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
import struct
import math
import sys
import string
import pprint
PY2 = sys.version_info[0] == 2
text_type = None
if PY2:
string_types = basestring
text_type = unicode
else:
string_types = text_type = str
class impulse(object):
    def __bool__(self):
        return True
    # __nonzero__ is the Python 2 spelling of __bool__
    __nonzero__ = __bool__
    def __str__(self):
        return "Impulse"
    def __repr__(self):
        return "Impulse"
class null(object):
    def __bool__(self):
        return False
    __nonzero__ = __bool__
    def __str__(self):
        return "NULL"
    def __repr__(self):
        return "NULL"
def hexDump(data):
    """Useful utility; prints the string in hexadecimal"""
    for i in range(len(data)):
        byte = data[i]
        # indexing bytes gives ints on Python 3, while str gives 1-char strings
        if not isinstance(byte, int):
            byte = ord(byte)
        sys.stdout.write("%2x " % (byte))
        if (i+1) % 8 == 0:
            print(repr(data[i-7:i+1]))
    if(len(data) % 8 != 0):
        print(str.rjust("", 11), repr(data[i-len(data) % 8:i + 1]))
class OSCMessage:
"""Builds typetagged OSC messages."""
def __init__(self):
self.address = ""
self.clearData()
def setAddress(self, address):
self.address = address
def setMessage(self, message):
self.message = message
def setTypetags(self, typetags):
self.typetags = typetags
def clear(self):
self.address = ""
self.clearData()
def clearData(self):
self.typetags = b","
self.message = bytes()
def append(self, argument, typehint=None):
"""Appends data to the message,
updating the typetags based on
the argument's type.
If the argument is a blob (counted string)
pass in 'b' as typehint."""
if typehint == 'b':
binary = OSCBlob(argument)
else:
binary = OSCArgument(argument)
self.typetags = self.typetags + binary[0]
self.rawAppend(binary[1])
def rawAppend(self, data):
"""Appends raw data to the message. Use append()."""
self.message = self.message + data
def getBinary(self):
"""Returns the binary message (so far) with typetags."""
address = OSCArgument(self.address)[1]
typetags = OSCArgument(self.typetags)[1]
return address + typetags + self.message
def __repr__(self):
return self.getBinary()
def readTrue(data):
return (True, data)
def readFalse(data):
return (False, data)
def readImpulse(data):
return (impulse(), data)
def readNull(data):
return (null(), data)
def readString(data):
if isinstance(data, str):
        length = data.find('\0')  # str.find works on both Python 2 and 3; string.find() was removed in Python 3
else:
length = data.find(bytes("\0", 'ascii'))
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def readBlob(data):
try:
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
rest = data[nextData:]
blob = data[4:length+4]
return (blob, rest)
except struct.error:
print("Error: too few bytes for blob", data, len(data))
return ("", data)
def readInt(data):
try:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
except struct.error:
print("Error: too few bytes for int", data, len(data))
return (0, data)
def readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer."""
try:
big = struct.unpack(">q", data[0:8])[0]
rest = data[8:]
return (big, rest)
except struct.error:
print("Error: too few bytes for long", data, len(data))
return (0, data)
def readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit double float."""
try:
number = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (number, rest)
except struct.error:
print("Error: too few bytes for double", data, len(data))
return (0, data)
def readFloat(data):
try:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
except struct.error:
print("Error: too few bytes for float", data, len(data))
return (0, data)
def OSCBlob(next):
"""Convert a string into an OSC Blob,
returning a (typetag, data) tuple."""
if type(next) == type(b""):
length = len(next)
padded = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (padded), length, next)
tag = b'b'
else:
tag = b''
binary = b''
return (tag, binary)
def OSCArgument(data):
"""Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple."""
if isinstance(data, bytearray):
length = len(data)
padded = math.ceil((len(data)) / 4.0) * 4
binary = struct.pack(b">i%ds" % (padded), length, str(data))
tag = b'b'
    elif isinstance(data, string_types) or isinstance(data, bytes):
        # struct's 's' format requires bytes on Python 3, so encode text first
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        OSCstringLength = math.ceil((len(data)+1) / 4.0) * 4
        binary = struct.pack(b">%ds" % (OSCstringLength), data)
        tag = b"s"
elif isinstance(data, bool):
binary = b""
if data:
tag = b"T"
else:
tag = b"F"
elif isinstance(data, float):
binary = struct.pack(b">f", data)
tag = b"f"
elif isinstance(data, int):
binary = struct.pack(b">i", data)
tag = b"i"
elif isinstance(data, impulse):
binary = b""
tag = b"I"
elif isinstance(data, null):
binary = b""
tag = b"N"
else:
binary = b""
tag = b""
return (tag, binary)
def parseArgs(args):
"""Given a list of strings, produces a list
where those strings have been parsed (where
possible) as floats or integers."""
parsed = []
for arg in args:
print(arg)
arg = arg.strip()
interpretation = None
try:
interpretation = float(arg)
            if arg.find(".") == -1:  # string.find() was removed in Python 3
interpretation = int(interpretation)
except:
# Oh - it was a string.
interpretation = arg
pass
parsed.append(interpretation)
return parsed
def decodeOSC(data):
"""Converts a typetagged OSC message to a Python list."""
try:
table = { ord(b"i") : readInt,
ord(b"f") : readFloat,
ord(b"s") : readString,
ord(b"b") : readBlob,
ord(b"d") : readDouble,
ord(b"t") : readLong,
ord(b"T") : readTrue,
ord(b"F") : readFalse,
ord(b"I") : readImpulse,
ord(b"N") : readNull
}
decoded = []
address, rest = readString(data)
typetags = b""
if address == "#bundle":
time, rest = readLong(rest)
#decoded.append(address)
#decoded.append(time)
while len(rest)>0:
length, rest = readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest) > 0:
typetags, rest = readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags[0] == ord(b","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
print("Oops, typetag lacks the magic ,")
except Exception as e:
print("exception: %s" % (e))
return decoded
class CallbackManager:
"""This utility class maps OSC addresses to callables.
The CallbackManager calls its callbacks with a list
of decoded OSC arguments, including the address and
the typetags as the first two arguments."""
def __init__(self):
self.callbacks = {}
self.add(self.unbundler, "#bundle")
def handle(self, data, source = None):
"""Given OSC data, tries to call the callback with the
right address."""
decoded = decodeOSC(data)
self.dispatch(decoded, source)
def dispatch(self, message, source = None):
"""Sends decoded OSC data to an appropriate calback"""
if not message or len(message) == 0: # ignore empty messages
return
if type(message[0]) == list :
# smells like nested messages
for msg in message :
self.dispatch(msg, source)
elif type(message[0]) == str or type(message[0]) == bytes:
# got a single message
try:
address = message[0]
if address in self.callbacks:
callbackfunction = self.callbacks[address]
                elif 'default' in self.callbacks:  # dict.has_key() was removed in Python 3
callbackfunction = self.callbacks['default']
else:
print('address %s not found ' % address)
return
callbackfunction(message, source)
return
except IndexError as e:
import traceback
print('OSC callback %s caused an error: %s' % (address, e))
traceback.print_exc()
print('---------------------')
raise
else:
raise ValueError("OSC message not recognized", message)
def add(self, callback, name):
"""Adds a callback to our set of callbacks,
or removes the callback with name if callback
is None."""
if callback == None:
del self.callbacks[name]
else:
self.callbacks[name] = callback
    def unbundler(self, messages, source=None):
"""Dispatch the messages in a decoded bundle."""
# first two elements are #bundle and the time tag, rest are messages.
for message in messages[2:]:
self.dispatch(message)
if __name__ == "__main__":
hexDump("Welcome to the OSC testing program.")
print()
message = OSCMessage()
message.setAddress("/foo/play")
message.append(44)
message.append(11)
message.append(4.5)
message.append("the white cliffs of dover")
hexDump(message.getBinary())
print("Making and unmaking a message..")
strings = OSCMessage()
strings.append("Mary had a little lamb")
strings.append("its fleece was white as snow")
strings.append("and everywhere that Mary went,")
strings.append("the lamb was sure to go.")
strings.append(14.5)
strings.append(14.5)
strings.append(-400)
raw = strings.getBinary()
hexDump(raw)
print("Retrieving arguments...")
data = raw
for i in range(6):
text, data = readString(data)
print(text)
number, data = readFloat(data)
print(number)
number, data = readFloat(data)
print(number)
number, data = readInt(data)
print(number)
hexDump(raw)
print(decodeOSC(raw))
print(decodeOSC(message.getBinary()))
print("Testing Blob types.")
blob = OSCMessage()
blob.append("","b")
blob.append("b","b")
blob.append("bl","b")
blob.append("blo","b")
blob.append("blob","b")
blob.append("blobs","b")
blob.append(42)
hexDump(blob.getBinary())
print(decodeOSC(blob.getBinary()))
def printingCallback(*stuff):
sys.stdout.write("Got: ")
for i in stuff:
sys.stdout.write(str(i) + " ")
sys.stdout.write("\n")
print("Testing the callback manager.")
c = CallbackManager()
c.add(printingCallback, "/print")
c.handle(message.getBinary())
message.setAddress("/print")
c.handle(message.getBinary())
print1 = OSCMessage()
print1.setAddress("/print")
print1.append("Hey man, that's cool.".encode('utf-8'))
print1.append(42)
print1.append(3.1415926)
c.handle(print1.getBinary())
bundle = OSCMessage()
bundle.setAddress("")
bundle.append("#bundle".encode('utf-8'))
bundle.append(0)
bundle.append(0)
bundle.append(print1.getBinary(), 'b')
bundle.append(print1.getBinary(), 'b')
bundlebinary = bundle.message
print("sending a bundle to the callback manager")
c.handle(bundlebinary)
|
inclement/Pyonic-interpreter
|
pyonic/osc/OSC.py
|
Python
|
gpl-3.0
| 13,428
|
[
"VisIt"
] |
9c2f15d363e33b0743128fc2884eb6a5f29b80a06a1d14938131f0b9135bcfc0
|
"""
Parse wheel filenames
``wheel-filename`` lets you verify `wheel
<https://www.python.org/dev/peps/pep-0427/>`_ filenames and parse them into
their component fields.
This package adheres strictly to the relevant PEPs, with the following
exceptions:
- Unlike other filename components, version components may contain the
characters ``!`` and ``+`` for full PEP 440 support.
- Version components may be any sequence of the relevant set of characters;
they are not verified for PEP 440 compliance.
- The ``.whl`` file extension is matched case-insensitively.
Visit <https://github.com/jwodder/wheel-filename> for more information.
"""
__version__ = "1.3.0"
__author__ = "John Thorvald Wodder II"
__author_email__ = "wheel-filename@varonathe.org"
__license__ = "MIT"
__url__ = "https://github.com/jwodder/wheel-filename"
__all__ = [
"InvalidFilenameError",
"ParsedWheelFilename",
"parse_wheel_filename",
]
import os
import os.path
import re
from typing import Iterable, List, NamedTuple, Optional, Union
# These patterns are interpreted with re.UNICODE in effect; note that \d is a
# subset of \w, so [\w\d] is equivalent to \w here.
PYTHON_TAG_RGX = r"[\w\d]+"
ABI_TAG_RGX = r"[\w\d]+"
PLATFORM_TAG_RGX = r"[\w\d]+"
WHEEL_FILENAME_CRGX = re.compile(
r"(?P<project>[A-Za-z0-9](?:[A-Za-z0-9._]*[A-Za-z0-9])?)"
r"-(?P<version>[A-Za-z0-9_.!+]+)"
r"(?:-(?P<build>[0-9][\w\d.]*))?"
r"-(?P<python_tags>{0}(?:\.{0})*)"
r"-(?P<abi_tags>{1}(?:\.{1})*)"
r"-(?P<platform_tags>{2}(?:\.{2})*)"
r"\.[Ww][Hh][Ll]".format(PYTHON_TAG_RGX, ABI_TAG_RGX, PLATFORM_TAG_RGX)
)
class ParsedWheelFilename(NamedTuple):
project: str
version: str
build: Optional[str]
python_tags: List[str]
abi_tags: List[str]
platform_tags: List[str]
def __str__(self) -> str:
if self.build:
fmt = "{0.project}-{0.version}-{0.build}-{1}-{2}-{3}.whl"
else:
fmt = "{0.project}-{0.version}-{1}-{2}-{3}.whl"
return fmt.format(
self,
".".join(self.python_tags),
".".join(self.abi_tags),
".".join(self.platform_tags),
)
def tag_triples(self) -> Iterable[str]:
"""
Returns a generator of all simple tag triples formed from the tags in
the filename
"""
for py in self.python_tags:
for abi in self.abi_tags:
for plat in self.platform_tags:
yield "-".join([py, abi, plat])
def parse_wheel_filename(
filename: Union[str, bytes, "os.PathLike[str]", "os.PathLike[bytes]"]
) -> ParsedWheelFilename:
"""
Parse a wheel filename into its components
:param path filename: a wheel path or filename
:rtype: ParsedWheelFilename
:raises InvalidFilenameError: if the filename is invalid
"""
basename = os.path.basename(os.fsdecode(filename))
m = WHEEL_FILENAME_CRGX.fullmatch(basename)
if not m:
raise InvalidFilenameError(basename)
return ParsedWheelFilename(
project=m.group("project"),
version=m.group("version"),
build=m.group("build"),
python_tags=m.group("python_tags").split("."),
abi_tags=m.group("abi_tags").split("."),
platform_tags=m.group("platform_tags").split("."),
)
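# Illustrative example (not part of the original module):
# parse_wheel_filename("pip-20.0.2-py2.py3-none-any.whl") returns
# ParsedWheelFilename(project='pip', version='20.0.2', build=None,
#                     python_tags=['py2', 'py3'], abi_tags=['none'],
#                     platform_tags=['any'])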
class InvalidFilenameError(ValueError):
"""Raised when an invalid wheel filename is encountered"""
filename: str
def __init__(self, filename: str) -> None:
#: The invalid filename
self.filename = filename
def __str__(self) -> str:
return "Invalid wheel filename: " + repr(self.filename)
|
Dallinger/Dallinger
|
dallinger/docker/wheel_filename.py
|
Python
|
mit
| 3,657
|
[
"VisIt"
] |
3260298762839c4695a58646ff5ebef1c532a7ccb7a5cbfd43a3b7df7dc51d8e
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.chronos.forecaster.base_forecaster import BasePytorchForecaster
from zoo.chronos.forecaster.utils import set_pytorch_seed
from zoo.chronos.model.VanillaLSTM_pytorch import VanillaLSTMPytorch
from zoo.chronos.model.VanillaLSTM_pytorch import model_creator, optimizer_creator, loss_creator
class LSTMForecaster(BasePytorchForecaster):
"""
Example:
>>> #The dataset is split into x_train, x_val, x_test, y_train, y_val, y_test
>>> forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
...)
>>> forecaster.fit((x_train, y_train))
>>> forecaster.to_local() # if you set distributed=True
>>> test_pred = forecaster.predict(x_test)
>>> test_eval = forecaster.evaluate((x_test, y_test))
>>> forecaster.save({ckpt_name})
>>> forecaster.restore({ckpt_name})
"""
def __init__(self,
past_seq_len,
input_feature_num,
output_feature_num,
hidden_dim=32,
layer_num=1,
dropout=0.1,
optimizer="Adam",
loss="mse",
lr=0.001,
metrics=["mse"],
seed=None,
distributed=False,
workers_per_node=1,
distributed_backend="torch_distributed"):
"""
Build a LSTM Forecast Model.
:param past_seq_len: Specify the history time steps (i.e. lookback).
:param input_feature_num: Specify the feature dimension.
:param output_feature_num: Specify the output dimension.
:param hidden_dim: int or list, Specify the hidden dim of each lstm layer.
The value defaults to 32.
:param layer_num: Specify the number of lstm layer to be used. The value
defaults to 1.
        :param dropout: float or list, Specify the dropout probability
               (i.e. the probability that a neuron's output is dropped).
               This value defaults to 0.1.
:param optimizer: Specify the optimizer used for training. This value
defaults to "Adam".
:param loss: Specify the loss function used for training. This value
defaults to "mse". You can choose from "mse", "mae" and
"huber_loss".
:param lr: Specify the learning rate. This value defaults to 0.001.
        :param metrics: A list containing metrics for evaluating the quality of
               forecasting. You may only choose from "mse" and "mae" for a
               distributed forecaster. You may choose from "mse", "me", "mae",
               "rmse", "msle", "r2", "mpe", "mape", "mspe", "smape", "mdape"
               and "smdape" for a non-distributed forecaster.
:param seed: int, random seed for training. This value defaults to None.
:param distributed: bool, if init the forecaster in a distributed
fashion. If True, the internal model will use an Orca Estimator.
If False, the internal model will use a pytorch model. The value
defaults to False.
:param workers_per_node: int, the number of worker you want to use.
The value defaults to 1. The param is only effective when
distributed is set to True.
:param distributed_backend: str, select from "torch_distributed" or
"horovod". The value defaults to "torch_distributed".
"""
# config setting
self.data_config = {
"past_seq_len": past_seq_len,
"future_seq_len": 1, # lstm model only supports 1 step prediction
"input_feature_num": input_feature_num,
"output_feature_num": output_feature_num
}
self.config = {
"lr": lr,
"loss": loss,
"hidden_dim": hidden_dim,
"layer_num": layer_num,
"optim": optimizer,
"dropout": dropout
}
# model creator settings
self.local_model = VanillaLSTMPytorch
self.model_creator = model_creator
self.optimizer_creator = optimizer_creator
self.loss_creator = loss_creator
# distributed settings
self.distributed = distributed
self.distributed_backend = distributed_backend
self.workers_per_node = workers_per_node
# other settings
self.lr = lr
self.metrics = metrics
self.seed = seed
super().__init__()
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/chronos/forecaster/lstm_forecaster.py
|
Python
|
apache-2.0
| 5,235
|
[
"NEURON",
"ORCA"
] |
457648b0079f837e0b2bfc0ac0df7fc20d308e9a014b7b2d6637886a603a51be
|
# -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
"""Common docstring snippets for signal.
"""
ONE_AXIS_PARAMETER = \
""": :py:class:`int`, :py:class:`str`, or :py:class:`~hyperspy.axes.DataAxis`
The axis can be passed directly, or specified using the index of
the axis in the Signal's `axes_manager` or the axis name."""
MANY_AXIS_PARAMETER = \
""": :py:class:`int`, :py:class:`str`, :py:class:`~hyperspy.axes.DataAxis`, tuple (of DataAxis) or :py:data:`None`
Either one on its own, or many axes in a tuple can be passed. In
both cases the axes can be passed directly, or specified using the
index in `axes_manager` or the name of the axis. Any duplicates are
removed. If ``None``, the operation is performed over all navigation
axes (default)."""
OUT_ARG = \
"""out : :py:class:`~hyperspy.signal.BaseSignal` (or subclasses) or :py:data:`None`
If ``None``, a new Signal is created with the result of the
operation and returned (default). If a Signal is passed,
it is used to receive the output of the operation, and nothing is
returned."""
NAN_FUNC = \
"""Identical to :py:meth:`~hyperspy.signal.BaseSignal.{0}`, except ignores
missing (NaN) values. See that method's documentation for details.
"""
OPTIMIZE_ARG = \
"""optimize : bool
If ``True``, the location of the data in memory is optimised for the
fastest iteration over the navigation axes. This operation can
cause a peak of memory usage and requires considerable processing
times for large datasets and/or low specification hardware.
See the :ref:`signal.transpose` section of the HyperSpy user guide
for more information. When operating on lazy signals, if ``True``,
the chunks are optimised for the new axes configuration."""
RECHUNK_ARG = \
"""rechunk: bool
Only has effect when operating on lazy signal. If ``True`` (default),
the data may be automatically rechunked before performing this
operation."""
SHOW_PROGRESSBAR_ARG = \
"""show_progressbar : None or bool
If ``True``, display a progress bar. If ``None``, the default from
the preferences settings is used."""
PARALLEL_ARG = \
"""parallel : None or bool
If ``True``, perform computation in parallel using multithreading. If
``None``, the default from the preferences settings is used. The number
of threads is controlled by the ``max_workers`` argument."""
LAZY_OUTPUT_ARG = \
"""lazy_output : None or bool
If ``True``, the output will be returned as a lazy signal. This means
the calculation itself will be delayed until either compute() is used,
or the signal is stored as a file.
If ``False``, the output will be returned as a non-lazy signal, this
means the outputs will be calculated directly, and loaded into memory.
If ``None`` the output will be lazy if the input signal is lazy, and
non-lazy if the input signal is non-lazy."""
MAX_WORKERS_ARG = \
"""max_workers : None or int
Maximum number of threads used when ``parallel=True``. If None, defaults
to ``min(32, os.cpu_count())``."""
CLUSTER_SIGNALS_ARG = \
"""signal : {"mean", "sum", "centroid"}, optional
If "mean" or "sum" return the mean signal or sum respectively
over each cluster. If "centroid", returns the signals closest
to the centroid."""
HISTOGRAM_BIN_ARGS = \
"""bins : int or sequence of scalars or str, default "fd"
If `bins` is an int, it defines the number of equal-width
bins in the given range. If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
'knuth'
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
'blocks'
Determination of optimal adaptive-width histogram bins using
the Bayesian Blocks algorithm.
"""
HISTOGRAM_MAX_BIN_ARGS = \
"""max_num_bins : int, default 250
When estimating the bins using one of the str methods, the
number of bins is capped by this number to avoid a MemoryError
being raised by :py:func:`numpy.histogram`."""
SIGNAL_MASK_ARG = \
"""signal_mask: bool array
Restricts the operation to the signal locations not marked
as True (masked)."""
NAVIGATION_MASK_ARG = \
"""navigation_mask: bool array
Restricts the operation to the navigation locations not marked
as True (masked)."""
|
jat255/hyperspy
|
hyperspy/docstrings/signal.py
|
Python
|
gpl-3.0
| 7,478
|
[
"Gaussian"
] |
f3d88b59a840fce8c62ee9f0a3f7b5b2df808df56c14bdccbde6ab517be6aa63
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import time
from analysis.cpa import base
from analysis.storegraph import storegraph
import language.python.ast as ast
import language.python.annotations as annotations
from PADS.StrongConnectivity import StronglyConnectedComponents
from . database import structure
from . database import tupleset
from . database import mapping
from . database import lattice
from analysis.astcollector import getOps
contextSchema = structure.WildcardSchema()
operationSchema = structure.TypeSchema((ast.Expression, ast.Statement))
codeSchema = structure.CallbackSchema(lambda code: code.isCode())
def wrapOpContext(schema):
schema = mapping.MappingSchema(contextSchema, schema)
schema = mapping.MappingSchema(operationSchema, schema)
schema = mapping.MappingSchema(codeSchema, schema)
return schema
def wrapCodeContext(schema):
schema = mapping.MappingSchema(contextSchema, schema)
schema = mapping.MappingSchema(codeSchema, schema)
return schema
opDataflowSchema = wrapOpContext(lattice.setUnionSchema)
invokesStruct = structure.StructureSchema(
('code', codeSchema),
('context', contextSchema)
)
invokesSchema = wrapOpContext(tupleset.TupleSetSchema(invokesStruct))
invokeSourcesStruct = structure.StructureSchema(
('code', codeSchema),
('operation', operationSchema),
('context', contextSchema)
)
invokeSourcesSchema = wrapCodeContext(tupleset.TupleSetSchema(invokeSourcesStruct))
def invertInvokes(invokes):
invokeSources = invokeSourcesSchema.instance()
for code, ops in invokes:
assert code.isCode(), type(code)
for op, contexts in ops:
for context, invs in contexts:
for dstCode, dstContext in invs:
invokeSources[dstCode][dstContext].add(code, op, context)
return invokeSources
def filteredSCC(G):
o = []
for g in StronglyConnectedComponents(G):
if len(g) > 1:
o.append(g)
return o
class ObjectInfo(object):
def __init__(self, obj):
self.obj = obj
self.refersTo = set()
self.referedFrom = set()
self.localReference = set()
self.heldByClosure = set()
# Reasonable defaults
self.globallyVisible = obj.xtype.isExisting()
self.externallyVisible = obj.xtype.isExternal()
def isReachableFrom(self, refs):
return bool(self.heldByClosure.intersection(refs))
def leaks(self):
return self.globallyVisible or self.externallyVisible
def updateHeldBy(self, newHeld):
assert not self.leaks(), self.obj
diff = newHeld-self.heldByClosure
if diff:
self.heldByClosure.update(diff)
return True
else:
return False
class ReadModifyAnalysis(object):
def __init__(self, liveCode, invokeSources):
self.invokeSources = invokeSources
self.contextReads = collections.defaultdict(set)
self.contextModifies = collections.defaultdict(set)
self.collectDB(liveCode)
def handleModifies(self, code, op, modifies):
if modifies[0]:
for cindex, context in enumerate(code.annotation.contexts):
slots = modifies[1][cindex]
if op is not None: self.opModifyDB[code][op].merge(context, slots)
self.contextModifies[(code, context)].update(slots)
self.allModifies.update(slots)
def handleReads(self, code, op, reads):
if reads[0]:
for cindex, context in enumerate(code.annotation.contexts):
slots = reads[1][cindex]
filtered = set([slot for slot in slots if slot in self.allModifies])
if op is not None: self.opReadDB[code][op].merge(context, filtered)
self.contextReads[(code, context)].update(filtered)
self.allReads.update(slots)
def handleAllocates(self, code, op, allocates):
if allocates[0]:
for cindex, context in enumerate(code.annotation.contexts):
self.allocations[(code, context)].update(allocates[1][cindex])
def collectDB(self, liveCode):
self.allReads = set()
self.allModifies = set()
self.opReadDB = opDataflowSchema.instance()
self.opModifyDB = opDataflowSchema.instance()
self.allocations = collections.defaultdict(set)
# Copy modifies
for code in liveCode:
self.handleModifies(code, None, code.annotation.codeModifies)
ops, lcls = getOps(code)
for op in ops:
self.handleModifies(code, op, op.annotation.opModifies)
# Copy reads
for code in liveCode:
self.handleReads(code, None, code.annotation.codeReads)
self.handleAllocates(code, None, code.annotation.codeAllocates)
ops, lcls = getOps(code)
for op in ops:
self.handleReads(code, op, op.annotation.opReads)
self.handleAllocates(code, op, op.annotation.opAllocates)
def process(self, killed):
self.killed = killed
self.processReads()
self.processModifies()
def processReads(self):
self.dirty = set()
for (code, context), values in self.contextReads.iteritems():
if values: self.dirty.add((code, context))
while self.dirty:
current = self.dirty.pop()
self.processContextReads(current)
def processContextReads(self, current):
currentF, currentC = current
for prev in self.invokeSources[currentF][currentC]:
prevF, prevO, prevC = prev
prevRead = self.opReadDB[prevF][prevO]
killed = self.killed[(prevF, prevO, prevC)][(currentF, currentC)]
            # Propagate reads
filtered = set([value for value in self.contextReads[(currentF, currentC)] if value.object not in killed])
current = prevRead[prevC]
diff = filtered-current if current else filtered
if diff:
self.contextReads[(prevF, prevC)].update(diff)
prevRead.merge(prevC, diff)
self.dirty.add((prevF, prevC))
def processModifies(self):
self.dirty = set()
for (code, context), values in self.contextModifies.iteritems():
if values: self.dirty.add((code, context))
while self.dirty:
current = self.dirty.pop()
self.processContextModifies(current)
def processContextModifies(self, current):
currentF, currentC = current
for prev in self.invokeSources[currentF][currentC]:
prevF, prevO, prevC = prev
prevMod = self.opModifyDB[prevF][prevO]
killed = self.killed[(prevF, prevO, prevC)][(currentF, currentC)]
            # Propagate modifies
filtered = set([value for value in self.contextModifies[(currentF, currentC)] if value.object not in killed])
#diff = filtered-self.opModifies[prev]
current = prevMod[prevC]
diff = filtered-current if current else filtered
if diff:
self.contextModifies[(prevF, prevC)].update(diff)
prevMod.merge(prevC, diff)
self.dirty.add((prevF, prevC))
class DFSSearcher(object):
def __init__(self):
self._stack = []
self._touched = set()
def enqueue(self, *children):
for child in children:
if child not in self._touched:
self._touched.add(child)
self._stack.append(child)
def process(self):
while self._stack:
current = self._stack.pop()
self.visit(current)
class ObjectSearcher(DFSSearcher):
def __init__(self, la):
DFSSearcher.__init__(self)
self.la = la
def visit(self, obj):
objInfo = self.la.getObjectInfo(obj)
for slot in obj:
for next in slot:
nextInfo = self.la.getObjectInfo(next)
objInfo.refersTo.add(nextInfo)
nextInfo.referedFrom.add(objInfo)
self.enqueue(next)
class LifetimeAnalysis(object):
def __init__(self):
self.heapReferedToByHeap = collections.defaultdict(set)
self.heapReferedToByCode = collections.defaultdict(set)
self.codeRefersToHeap = collections.defaultdict(set)
self.objects = {}
self.globallyVisible = set()
self.externallyVisible = set()
def getObjectInfo(self, obj):
assert isinstance(obj, storegraph.ObjectNode), type(obj)
if obj not in self.objects:
info = ObjectInfo(obj)
self.objects[obj] = info
else:
info = self.objects[obj]
return info
def findGloballyVisible(self):
# Globally visible
active = set()
for info in self.objects.itervalues():
if info.globallyVisible:
active.add(info)
self.globallyVisible.add(info.obj)
while active:
current = active.pop()
for ref in current.refersTo:
if not ref.globallyVisible:
ref.globallyVisible = True
active.add(ref)
self.globallyVisible.add(ref.obj)
def findExternallyVisible(self):
# Externally visible
active = set()
for info in self.objects.itervalues():
if info.externallyVisible:
active.add(info)
self.externallyVisible.add(info.obj)
while active:
current = active.pop()
for ref in current.refersTo:
if not ref.externallyVisible:
ref.externallyVisible = True
active.add(ref)
self.externallyVisible.add(ref.obj)
def propagateVisibility(self):
self.findGloballyVisible()
self.findExternallyVisible()
self.escapes = self.globallyVisible.union(self.externallyVisible)
# Annotate the objects
for info in self.objects.itervalues():
info.obj.leaks = info.leaks()
def objEscapes(self, obj):
assert not isinstance(obj, ObjectInfo), obj
return obj in self.escapes
def propagateHeld(self):
dirty = set()
for obj, info in self.objects.iteritems():
if not self.objEscapes(obj):
if info.updateHeldBy(info.referedFrom):
for dst in info.refersTo:
if not self.objEscapes(dst.obj): dirty.add(dst)
while dirty:
current = dirty.pop()
assert not self.objEscapes(current.obj), current.obj
# Find the new heldby
newHeld = set()
for prev in current.referedFrom:
newHeld.update(prev.heldByClosure)
if current.updateHeldBy(newHeld):
# Mark as dirty
for dst in current.refersTo:
if not self.objEscapes(dst.obj): dirty.add(dst)
#self.displayHistogram()
def displayHistogram(self):
# Display a histogram of the number of live heap objects
# that may hold (directly or indirectly) a given live heap object.
hist = collections.defaultdict(lambda:0)
for obj, info in self.objects.iteritems():
if not obj in self.escapes:
if len(info.heldByClosure) >= 4:
print obj
for other in info.heldByClosure:
print '\t', other.obj
print
hist[len(info.heldByClosure)] += 1
else:
hist[-1] += 1
keys = sorted(hist.iterkeys())
for key in keys:
print key, hist[key]
def inferScope(self):
# Figure out how far back on the stack the object may propagate
self.live = collections.defaultdict(set)
self.killed = collections.defaultdict(lambda: collections.defaultdict(set))
        # Seed the initial dirty set
self.dirty = set()
for (code, context), objs in self.rm.allocations.iteritems():
noescape = objs-self.escapes
self.live[(code, context)].update(noescape)
self.dirty.update(self.invokeSources[code][context])
while self.dirty:
current = self.dirty.pop()
self.processScope(current)
self.convertKills()
def convertKills(self):
# Convert kills on edges to kills on nodes.
self.contextKilled = collections.defaultdict(set)
for dstF, contexts in self.invokeSources:
for dstC, srcs in contexts:
if not srcs: continue
killedAll = None
for srcF, srcO, srcC in srcs:
newKilled = self.killed[(srcF, srcO, srcC)][(dstF, dstC)]
if killedAll is None:
killedAll = newKilled
else:
killedAll = killedAll.intersection(newKilled)
if killedAll:
self.contextKilled[(dstF, dstC)].update(killedAll)
for code, context in self.entries:
self.contextKilled[(code, context)].update(self.live[(code, context)])
def processScope(self, current):
currentF, currentO, currentC = current
assert currentF.isCode(), type(currentF)
operationSchema.validate(currentO)
newLive = set()
live = self.live
for dstF, dstC in self.invokes[currentF][currentO][currentC]:
for dstLive in live[(dstF, dstC)]:
if dstLive in live[(currentF, currentC)]:
continue
if dstLive in newLive:
continue
refs = self.codeRefersToHeap[(currentF, currentC)]
refinfos = [self.getObjectInfo(ref) for ref in refs]
# Could the object stay live?
if dstLive in refs:
# Directly held
newLive.add(dstLive)
elif self.getObjectInfo(dstLive).isReachableFrom(refinfos):
# Indirectly held
newLive.add(dstLive)
else:
# The object will never propagate along this invocation
self.killed[(currentF, currentO, currentC)][(dstF, dstC)].add(dstLive)
if newLive:
            # Propagate dirty
live[(currentF, currentC)].update(newLive)
self.dirty.update(self.invokeSources[currentF][currentC])
def gatherInvokes(self, liveCode, entryContexts):
invokesDB = invokesSchema.instance()
self.entries = set()
for code in liveCode:
for context in code.annotation.contexts:
if context in entryContexts:
self.entries.add((code, context))
assert code.isCode(), type(code)
ops, lcls = getOps(code)
for op in ops:
invokes = op.annotation.invokes
if invokes is not None:
for cindex, context in enumerate(code.annotation.contexts):
opInvokes = invokes[1][cindex]
for dstF, dstC in opInvokes:
assert dstF.isCode(), type(dstF)
invokesDB[code][op][context].add(dstF, dstC)
for lcl in lcls:
refs = lcl.annotation.references
if refs is None:
continue
for cindex, context in enumerate(code.annotation.contexts):
for ref in refs[1][cindex]:
obj = self.getObjectInfo(ref)
obj.localReference.add(code)
self.codeRefersToHeap[(code, context)].add(ref)
self.invokes = invokesDB
self.invokeSources = invertInvokes(invokesDB)
def markVisible(self, lcl, cindex):
if lcl is not None:
refs = lcl.annotation.references[1][cindex]
for ref in refs:
obj = self.getObjectInfo(ref)
obj.externallyVisible = True
def gatherSlots(self, liveCode, entryContexts):
searcher = ObjectSearcher(self)
for code in liveCode:
callee = code.codeParameters()
ops, lcls = getOps(code)
for lcl in lcls:
for ref in lcl.annotation.references[0]:
searcher.enqueue(ref)
# Mark the return parameters for external contexts as visible.
for cindex, context in enumerate(code.annotation.contexts):
if context in entryContexts:
for param in callee.returnparams:
self.markVisible(param, cindex)
searcher.process()
def process(self, compiler, prgm):
with compiler.console.scope('solve'):
entryContexts = prgm.interface.entryContexts()
self.gatherSlots(prgm.liveCode, entryContexts)
self.gatherInvokes(prgm.liveCode, entryContexts)
self.propagateVisibility()
self.propagateHeld()
self.rm = ReadModifyAnalysis(prgm.liveCode, self.invokeSources)
self.inferScope()
self.rm.process(self.killed)
with compiler.console.scope('annotate'):
self.createDB(compiler, prgm)
del self.rm
def createDB(self, compiler, prgm):
self.annotationCount = 0
self.annotationCache = {}
readDB = self.rm.opReadDB
modifyDB = self.rm.opModifyDB
self.allocations = self.rm.allocations
for code in prgm.liveCode:
# Annotate the code
live = []
killed = []
for cindex, context in enumerate(code.annotation.contexts):
key = (code, context)
live.append(annotations.annotationSet(self.live[key]))
killed.append(annotations.annotationSet(self.contextKilled[key]))
code.rewriteAnnotation(live=annotations.makeContextualAnnotation(live),
killed=annotations.makeContextualAnnotation(killed))
# Annotate the ops
ops, lcls = getOps(code)
for op in ops:
# TODO is this a good HACK?
# if not op.annotation.invokes[0]: continue
reads = readDB[code][op]
modifies = modifyDB[code][op]
rout = []
mout = []
aout = []
for cindex, context in enumerate(code.annotation.contexts):
# HACK if an operation directly reads a field, but it is never modified
# it still must appear in the reads annotation so cloning behaves correctly!
reads.merge(context, op.annotation.opReads[1][cindex])
creads = reads[context]
creads = annotations.annotationSet(creads) if creads else ()
rout.append(creads)
cmod = modifies[context]
cmod = annotations.annotationSet(cmod) if cmod else ()
mout.append(cmod)
kills = self.killed[(code, op, context)]
calloc = set()
for dstCode, dstContext in op.annotation.invokes[1][cindex]:
live = self.live[(dstCode, dstContext)]
killed = kills[(dstCode, dstContext)]
calloc.update(live-killed)
calloc.update(op.annotation.opAllocates[1][cindex])
aout.append(annotations.annotationSet(calloc))
opReads = annotations.makeContextualAnnotation(rout)
opModifies = annotations.makeContextualAnnotation(mout)
opAllocates = annotations.makeContextualAnnotation(aout)
opReads = self.annotationCache.setdefault(opReads, opReads)
opModifies = self.annotationCache.setdefault(opModifies, opModifies)
opAllocates = self.annotationCache.setdefault(opAllocates, opAllocates)
self.annotationCount += 3
op.rewriteAnnotation(reads=opReads, modifies=opModifies, allocates=opAllocates)
compiler.console.output("Annotation compression %f - %d" % (float(len(self.annotationCache))/max(self.annotationCount, 1), self.annotationCount))
del self.annotationCache
del self.annotationCount
def evaluate(compiler, prgm):
with compiler.console.scope('lifetime analysis'):
la = LifetimeAnalysis().process(compiler, prgm)
|
ncbray/pystream
|
bin/analysis/lifetimeanalysis/__init__.py
|
Python
|
apache-2.0
| 17,698
|
[
"VisIt"
] |
1e17713602c290f5c87bd67fbf380ab784769ba7cf0265e16ba5c801ed9b5af6
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import unittest
from Bio.PopGen import GenePop
from Bio.PopGen.GenePop import FileParser
class RecordTest(unittest.TestCase):
def test_record_basic(self):
"""Basic test on Record
"""
r = GenePop.Record()
assert isinstance(r.marker_len, int)
assert isinstance(r.comment_line, str)
assert isinstance(r.loci_list, list)
assert isinstance(r.populations, list)
class ParserTest(unittest.TestCase):
def setUp(self):
files = ["c2line.gen", "c3line.gen", "c2space.gen", "c3space.gen",
"haplo3.gen", "haplo2.gen"]
self.handles = []
for filename in files:
self.handles.append(open(os.path.join("PopGen", filename)))
self.pops_indivs = [
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5])
]
self.num_loci = [3, 3, 3, 3, 3, 3]
self.marker_len = [2, 3, 2, 3, 3, 2]
self.pop_names = ["4", "b3", "5"]
def tearDown(self):
for handle in self.handles:
handle.close()
def test_record_parser(self):
"""Basic operation of the Record Parser.
"""
for index in range(len(self.handles)):
handle = self.handles[index]
rec = GenePop.read(handle)
assert isinstance(rec, GenePop.Record)
assert len(rec.loci_list) == self.num_loci[index]
assert rec.marker_len == self.marker_len[index]
assert len(rec.populations) == self.pops_indivs[index][0]
assert rec.pop_list == self.pop_names
for i in range(self.pops_indivs[index][0]):
assert len(rec.populations[i]) == \
self.pops_indivs[index][1][i]
def test_wrong_file_parser(self):
"""Testing the ability to deal with wrongly formatted files
"""
f = open(os.path.join("PopGen", "fdist1"))
try:
rec = GenePop.read(f)
            self.fail("Should have raised exception")
except ValueError:
pass
f.close()
class FileParserTest(unittest.TestCase):
def setUp(self):
self.files = [os.path.join("PopGen", x) for x in
["c2line.gen", "c3line.gen", "c2space.gen",
"c3space.gen", "haplo3.gen", "haplo2.gen"]]
self.pops_indivs = [
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5])
]
self.num_loci = [3, 3, 3, 3, 3, 3]
def test_file_record_parser(self):
"""Basic operation of the File Record Parser.
"""
for index in range(len(self.files)):
fname = self.files[index]
rec = FileParser.read(fname)
assert isinstance(rec, FileParser.FileRecord)
assert len(rec.loci_list) == self.num_loci[index]
for skip in range(self.pops_indivs[index][0]):
if rec.skip_population() is False:
raise Error("Not enough populations")
if rec.skip_population() is True:
raise Error("Too much populations")
            for i in range(self.pops_indivs[index][0]):
                # FileParser's FileRecord reads populations from disk on demand, so
                # the in-memory population size check below is skipped here.
                continue
                assert len(rec.populations[i]) == \
                    self.pops_indivs[index][1][i]
rec._handle.close() # TODO - Needs a proper fix
def test_wrong_file_parser(self):
"""Testing the ability to deal with wrongly formatted files
"""
f = open(os.path.join("PopGen", "fdist1"))
try:
rec = GenePop.read(f)
raise Error("Should have raised exception")
except ValueError:
pass
f.close()
class UtilsTest(unittest.TestCase):
def setUp(self):
# All files have to have at least 3 loci and 2 pops
files = ["c2line.gen"]
self.handles = []
for filename in files:
self.handles.append(open(os.path.join("PopGen", filename)))
def tearDown(self):
for handle in self.handles:
handle.close()
def test_utils(self):
"""Basic operation of GenePop Utils.
"""
for index in range(len(self.handles)):
handle = self.handles[index]
rec = GenePop.read(handle)
initial_pops = len(rec.populations)
initial_loci = len(rec.loci_list)
first_loci = rec.loci_list[0]
rec.remove_population(0)
assert len(rec.populations) == initial_pops - 1
rec.remove_locus_by_name(first_loci)
assert len(rec.loci_list) == initial_loci - 1
assert rec.loci_list[0] != first_loci
rec.remove_locus_by_position(0)
assert len(rec.loci_list) == initial_loci - 2
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_PopGen_GenePop_nodepend.py
|
Python
|
gpl-2.0
| 5,263
|
[
"Biopython"
] |
2cb6c3f17f9cac3a14480ec864211f83796d18123bf258f7940bd77f41880455
|
#!/usr/bin/python
import getopt
import sys
from Bio import SeqIO
import time
import os
import shutil
import pandas
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
__author__ = "Andriy Sheremet"
#Helper functions definitions
def genome_shredder(input_dct, shear_val):
shredded = {}
for key, value in input_dct.items():
#print input_dct[i].seq
#print i
dic_name = key
rec_name = value.name
for j in range(0, len(str(value.seq)), int(shear_val)):
# print j
record = str(value.seq)[0+j:int(shear_val)+j]
shredded[dic_name+"_"+str(j)] = SeqRecord(Seq(record),rec_name+"_"+str(j),'','')
#record = SeqRecord(input_ref_records[i].seq[0+i:int(shear_val)+i],input_ref_records[i].name+"_%i"%i,"","")
return shredded
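# Illustrative sketch (hypothetical helper, never called by this script): a minimal,
# assumed example of genome_shredder() slicing one 10 bp record into 4 bp windows.
# Keys take the form "<dict_key>_<offset>".
def _genome_shredder_example():
    demo = {"contig1": SeqRecord(Seq("ACGTACGTAC"), "contig1", "contig1", "")}
    pieces = genome_shredder(demo, 4)
    # Expected keys: contig1_0 ("ACGT"), contig1_4 ("ACGT"), contig1_8 ("AC")
    return sorted(pieces.keys())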
def parse_contigs_ind(f_name):
"""
Returns sequences index from the input files(s)
remember to close index object after use
"""
handle = open(f_name, "rU")
record_dict = SeqIO.index(f_name,"fasta")
handle.close()
return record_dict
#returning specific sequences and overal list
def retrive_sequence(contig_lst, rec_dic):
"""
Returns list of sequence elements from dictionary/index of SeqIO objects specific to the contig_lst parameter
"""
contig_seqs = list()
#record_dict = rec_dic
#handle.close()
for contig in contig_lst:
contig_seqs.append(str(rec_dic[contig].seq))#fixing BiopythonDeprecationWarning
return contig_seqs
def filter_seq_dict(key_lst, rec_dic):
"""
Returns filtered dictionary element from rec_dic according to sequence names passed in key_lst
"""
return { key: rec_dic[key] for key in key_lst }
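# Illustrative sketch (hypothetical helper, never called by this script): how the
# indexing and retrieval helpers above are intended to compose. The file name and
# contig ids below are assumptions made purely for demonstration.
def _retrieval_example():
    rec_dic = parse_contigs_ind("example_contigs.fasta")  # assumed FASTA input
    wanted = ["contig_1", "contig_2"]                      # assumed sequence ids
    seqs = retrive_sequence(wanted, rec_dic)               # plain nucleotide strings
    subset = filter_seq_dict(wanted, rec_dic)              # id -> SeqRecord subset
    rec_dic.close()                                        # index objects must be closed
    return seqs, subset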
def unique_scaffold_topEval(dataframe):
#returns pandas series object
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[11]<scaffolds[row[1]][11]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def unique_scaffold_topBits(dataframe):
#returns pandas series object
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[12]>scaffolds[row[1]][12]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
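# Illustrative sketch (hypothetical helper, never called by this script): for each
# query id ('quid'), unique_scaffold_topBits() keeps only the hit with the highest
# bit score; unique_scaffold_topEval() would instead keep the lowest e-value.
def _top_bits_example():
    cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo',
            'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
    hits = pandas.DataFrame([
        ['q1', 's1', 99.0, 100, 0, 0, 1, 100, 1, 100, 1e-30, 180.0],
        ['q1', 's2', 97.0, 100, 3, 0, 1, 100, 1, 100, 1e-25, 150.0],
        ['q2', 's1', 95.0, 80, 4, 0, 1, 80, 1, 80, 1e-20, 120.0],
    ], columns=cols)
    best = unique_scaffold_topBits(hits)
    return best  # two rows: the 180.0-bit hit for q1 and the single q2 hit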
def close_ind_lst(ind_lst):
"""
Closes index objects supplied in input parameter list
"""
for index in ind_lst:
index.close()
def usage():
    print "\nUsage: "+sys.argv[0]+" -r <reference(s)> -m <metagenome(s)> [-n <project_name>] [-e <e_value>] [-a <alignment_length[%]>] [-i <identity>] [-s <shear_size>] [-f <format(s)>]"
    print "Example: "+sys.argv[0]+" -r reference.fasta -m metagenome.fasta -n project1 -e 1e-5 -a 50% -i 95\n"
def main(argv):
#default parameters
mg_lst = []
ref_lst = []
e_val = 1e-5
alen = 50.0
alen_percent = True
alen_bp = False
iden = 95.0
name= "output"
fmt_lst = ["fasta"]
supported_formats =["fasta", "csv"]
iterations = 1
alen_increment = 5.0
iden_increment = 0.0
blast_db_Dir = ""
results_Dir = ""
input_files_Dir = ""
ref_out_0 = ""
blasted_lst = []
continue_from_previous = False #poorly supported, just keeping the directories
skip_blasting = False
debugging = False
sheared = False
shear_val = None
try:
opts, args = getopt.getopt(argv, "r:m:n:e:a:i:s:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","shear=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
# elif opt in ("--recover_after_failure"):
# recover_after_failure = True
# print "Recover after failure:", recover_after_failure
elif opt in ("--continue_from_previous"):
continue_from_previous = True
if debugging:
print "Continue after failure:", continue_from_previous
elif opt in ("--debugging"):
debugging = True
if debugging:
print "Debugging messages:", debugging
elif opt in ("-r", "--reference"):
if arg:
ref_lst=arg.split(',')
#infiles = arg
if debugging:
print "Reference file(s)", ref_lst
elif opt in ("-m", "--metagenome"):
if arg:
mg_lst=arg.split(',')
#infiles = arg
if debugging:
print "Metagenome file(s)", mg_lst
elif opt in ("-f", "--format"):
if arg:
fmt_lst=arg.split(',')
#infiles = arg
if debugging:
print "Output format(s)", fmt_lst
elif opt in ("-n", "--name"):
if arg.strip():
name = arg
if debugging:
print "Project name", name
elif opt in ("-e", "--e_value"):
try:
e_val = float(arg)
except:
print "\nERROR: Please enter numerical value as -e parameter (default: 1e-5)"
usage()
sys.exit(1)
if debugging:
print "E value", e_val
elif opt in ("-a", "--alignment_length"):
if arg.strip()[-1]=="%":
alen_bp = False
alen_percent = True
else:
alen_bp = True
alen_percent = False
try:
alen = float(arg.split("%")[0])
except:
print "\nERROR: Please enter a numerical value as -a parameter (default: 50.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", alen
elif opt in ("-i", "--identity"):
try:
iden = float(arg)
except:
print "\nERROR: Please enter a numerical value as -i parameter (default: 95.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("-s", "--shear"):
sheared = True
try:
shear_val = int(arg)
except:
print "\nERROR: Please enter an integer value as -s parameter"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("--iterations"):
try:
iterations = int(arg)
except:
print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)"
if debugging:
print "Iterations: ", iterations
elif opt in ("--alen_increment"):
try:
alen_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment
if debugging:
print "Alignment length increment: ", alen_increment
elif opt in ("--iden_increment"):
try:
iden_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment
if debugging:
print "Alignment length increment: ", iden_increment
elif opt in ("--skip_blasting"):
skip_blasting = True
if debugging:
print "Blasting step omitted; Using previous blast output."
for ref_file in [x for x in ref_lst if x]:
try:
#
with open(ref_file, "rU") as hand_ref:
pass
except:
print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist"
usage()
sys.exit(1)
for mg_file in [x for x in mg_lst if x]:
try:
#
with open(mg_file, "rU") as hand_mg:
pass
except:
print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist"
usage()
sys.exit(1)
for fmt in [x for x in fmt_lst if x]:
if fmt not in supported_formats:
print "\nWARNING: Output format [",fmt,"] is not supported"
print "\tUse -h(--help) option for the list of supported formats"
fmt_lst=["fasta"]
print "\tUsing default output format: ", fmt_lst[0]
project_dir = name
if not continue_from_previous:
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
try:
os.mkdir(project_dir)
except OSError:
print "ERROR: Cannot create project directory: " + name
raise
print "\n\t Initial Parameters:"
print "\nProject Name: ", name,'\n'
print "Project Directory: ", os.path.abspath(name),'\n'
print "Reference File(s): ", ref_lst,'\n'
print "Metagenome File(s): ", mg_lst,'\n'
print "E Value: ", e_val, "\n"
if alen_percent:
print "Alignment Length: "+str(alen)+'%\n'
if alen_bp:
print "Alignment Length: "+str(alen)+'bp\n'
print "Sequence Identity: "+str(iden)+'%\n'
print "Output Format(s):", fmt_lst,'\n'
if iterations > 1:
print "Iterations: ", iterations, '\n'
print "Alignment Length Increment: ", alen_increment, '\n'
print "Sequence identity Increment: ", iden_increment, '\n'
#Initializing directories
blast_db_Dir = name+"/blast_db"
if not continue_from_previous:
if os.path.exists(blast_db_Dir):
shutil.rmtree(blast_db_Dir)
try:
os.mkdir(blast_db_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + blast_db_Dir
raise
results_Dir = name+"/results"
if not continue_from_previous:
if os.path.exists(results_Dir):
shutil.rmtree(results_Dir)
try:
os.mkdir(results_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + results_Dir
raise
input_files_Dir = name+"/input_files"
if not continue_from_previous:
if os.path.exists(input_files_Dir):
shutil.rmtree(input_files_Dir)
try:
os.mkdir(input_files_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + input_files_Dir
raise
# Writing raw reference files into a specific input filename
input_ref_records = {}
for reference in ref_lst:
ref_records_ind = parse_contigs_ind(reference)
#ref_records = dict(ref_records_ind)
input_ref_records.update(ref_records_ind)
ref_records_ind.close()
#input_ref_records.update(ref_records)
ref_out_0 = input_files_Dir+"/reference0.fna"
    if sheared and bool(shear_val):
with open(ref_out_0, "w") as handle:
SeqIO.write(genome_shredder(input_ref_records, shear_val).values(), handle, "fasta")
#NO NEED TO CLOSE with statement will automatically close the file
else:
with open(ref_out_0, "w") as handle:
SeqIO.write(input_ref_records.values(), handle, "fasta")
# Making BLAST databases
#output fname from before used as input for blast database creation
input_ref_0 = ref_out_0
title_db = name+"_db"#add iteration functionality
outfile_db = blast_db_Dir+"/iteration"+str(iterations)+"/"+name+"_db"#change into for loop
os.system("makeblastdb -in "+input_ref_0+" -dbtype nucl -title "+title_db+" -out "+outfile_db+" -parse_seqids")
# BLASTing query contigs
if not skip_blasting:
print "\nBLASTing query file(s):"
for i in range(len(mg_lst)):
database = outfile_db # adjust for iterations
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
start = time.time()
os_string = 'blastn -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8"
#print os_string
os.system(os_string)
print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds."
else:
for i in range(len(mg_lst)):
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
# Parsing BLAST outputs
blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
recruited_mg=[]
for i in range(len(mg_lst)):
df = pandas.read_csv(blasted_lst[i] ,sep="\t", header=None)
df.columns=blast_cols
recruited_mg.append(df)
# print len(recruited_mg[0])
# print len(recruited_mg[1])
#creating all_records entry
#! Remember to close index objects after they are no longer needed
#! Use helper function close_ind_lst()
all_records = []
all_input_recs = parse_contigs_ind(ref_out_0)
# _ = 0
# for key, value in all_input_recs.items():
# _ +=1
# if _ < 20:
# print key, len(value)
print "\nIndexing metagenome file(s):"
for i in range(len(mg_lst)):
start = time.time()
all_records.append(parse_contigs_ind(mg_lst[i]))
print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds."
# Transforming data
for i in range(len(mg_lst)):
#cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe])
recruited_mg[i]=unique_scaffold_topBits(recruited_mg[i])
contig_list = recruited_mg[i]['quid'].tolist()
recruited_mg[i]['Seq_nt']=retrive_sequence(contig_list, all_records[i])
recruited_mg[i]['Seq_size']=recruited_mg[i]['Seq_nt'].apply(lambda x: len(x))
recruited_mg[i]['Ref_size']=recruited_mg[i]['suid'].apply(lambda x: len(all_input_recs[str(x)]))
#recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/min(recruited_mg[i]['Seq_size'].apply(lambda y: y),recruited_mg[i]['Ref_size'].apply(lambda z: z))
#df.loc[:, ['B0', 'B1', 'B2']].min(axis=1)
recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i].loc[:,["Seq_size", "Ref_size"]].min(axis=1)
recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0
recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Seq_size','Seq_nt']]
# Here would go statistics functions and producing plots
#
#
#
#
#
# Quality filtering before outputting
if alen_percent:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)]
if alen_bp:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)]
# print len(recruited_mg[0])
# print len(recruited_mg[1])
# Batch export to outfmt (csv and/or multiple FASTA)
alen_str = ""
iden_str = "_iden_"+str(iden)+"%"
if alen_percent:
alen_str = "_alen_"+str(alen)+"%"
if alen_bp:
alen_str = "_alen_"+str(alen)+"bp"
if iterations > 1:
prefix=results_Dir+"/"+name+"_iter_e_"+str(e_val)+iden_str+alen_str+"_recruited_mg_"
else:
prefix=results_Dir+"/"+name+"_e_"+str(e_val)+iden_str+alen_str+"_recruited_mg_"
print "\nWriting files:"
for i in range(len(mg_lst)):
records= []
# try:
# os.remove(outfile1)
# except OSError:
# pass
if "csv" in fmt_lst:
outfile1 = prefix+str(i)+".csv"
recruited_mg[i].to_csv(outfile1, sep='\t')
print str(len(recruited_mg[i]))+" sequences written to "+outfile1
if "fasta" in fmt_lst:
ids = recruited_mg[i]['quid'].tolist()
#if len(ids)==len(sequences):
for j in range(len(ids)):
records.append(all_records[i][ids[j]])
outfile2 = prefix+str(i)+".fasta"
with open(outfile2, "w") as output_handle:
SeqIO.write(records, output_handle, "fasta")
print str(len(ids))+" sequences written to "+outfile2
close_ind_lst(all_records)
close_ind_lst([all_input_recs])
#all_records[i].close()# keep open if multiple iterations
#recruited_mg_1 = pandas.read_csv(out_name1 ,sep="\t", header=None)
#recruited_mg_1.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg_2 = pandas.read_csv(out_name2 ,sep="\t", header=None)
#recruited_mg_2.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg = [recruited_mg_1, recruited_mg_2]
# blast_db_Dir = ""
# results_Dir = ""
# input_files_Dir = ""
# parsed = SeqIO.parse(handle, "fasta")
#
# records = list()
#
#
# total = 0
# processed = 0
# for record in parsed:
# total += 1
# #print(record.id), len(record.seq)
# if len(record.seq) >= length:
# processed += 1
# records.append(record)
# handle.close()
#
# print "%d sequences found"%(total)
#
# try:
# output_handle = open(outfile, "w")
# SeqIO.write(records, output_handle, "fasta")
# output_handle.close()
# print "%d sequences written"%(processed)
# except:
# print "ERROR: Illegal output filename"
# sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
|
nyirock/mg_blast_wrapper
|
mg_blast_wrapper_v1.9.py
|
Python
|
mit
| 19,159
|
[
"BLAST"
] |
9030b404dfc9c86afae072a21fa7bfe0c6eb03c16812b45641fce01b46aed497
|
#!/usr/bin/env python
import sys
import gd_util
from Population import Population
################################################################################
if len(sys.argv) != 7:
gd_util.die('Usage')
input, input_type, ind_arg, p1_input, p2_input, output = sys.argv[1:]
p_total = Population()
p_total.from_wrapped_dict(ind_arg)
p1 = Population()
p1.from_population_file(p1_input)
if not p_total.is_superset(p1):
gd_util.die('There is an individual in the first population that is not in the SNP table')
p2 = Population()
p2.from_population_file(p2_input)
if not p_total.is_superset(p2):
gd_util.die('There is an individual in the second population that is not in the SNP table')
################################################################################
prog = 'offspring_heterozygosity'
args = [ prog ]
args.append(input) # a Galaxy SNP table
for tag in p1.tag_list():
column, name = tag.split(':')
if input_type == 'gd_genotype':
column = int(column) - 2
tag = '{0}:{1}:{2}'.format(column, 0, name)
args.append(tag)
for tag in p2.tag_list():
column, name = tag.split(':')
if input_type == 'gd_genotype':
column = int(column) - 2
tag = '{0}:{1}:{2}'.format(column, 1, name)
args.append(tag)
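# Illustrative sketch (hypothetical helper, not used by this tool): how a population
# tag of the assumed form "column:name" is rewritten into the "column:pop_index:name"
# form appended to args above; gd_genotype columns are shifted down by two.
def _format_tag_example(tag, pop_index, table_type):
    column, name = tag.split(':')
    if table_type == 'gd_genotype':
        column = int(column) - 2
    return '{0}:{1}:{2}'.format(column, pop_index, name)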
with open(output, 'w') as fh:
gd_util.run_program(prog, args, stdout=fh)
################################################################################
sys.exit(0)
|
gigascience/galaxy-genome-diversity
|
tools/offspring_heterozygosity/offspring_heterozygosity.py
|
Python
|
gpl-3.0
| 1,458
|
[
"Galaxy"
] |
4ad9eb380253940c011c17489227e8c3a4663404a548018af354e88858676642
|
"""
Dashboard view and supporting methods
"""
import datetime
import logging
from collections import defaultdict
from completion.exceptions import UnavailableCompletionData
from completion.utilities import get_key_to_last_completed_course_block
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from edx_django_utils import monitoring as monitoring_utils
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from six import text_type, iteritems
import track.views
from bulk_email.models import BulkEmailFlag, Optout # pylint: disable=import-error
from course_modes.models import CourseMode
from courseware.access import has_access
from edxmako.shortcuts import render_to_response, render_to_string
from entitlements.models import CourseEntitlement
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.catalog.utils import (
get_programs,
get_pseudo_session_for_entitlement,
get_visible_sessions_for_entitlement
)
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import ProgramDataExtender, ProgramProgressMeter
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.util.maintenance_banner import add_maintenance_banner
from openedx.core.djangoapps.waffle_utils import WaffleFlag, WaffleFlagNamespace
from openedx.core.djangoapps.user_api.accounts.utils import is_secondary_email_feature_enabled_for_user
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.enterprise_support.api import get_dashboard_consent_notification
from openedx.features.enterprise_support.utils import is_enterprise_learner
from openedx.features.journals.api import journals_enabled
from shoppingcart.api import order_history
from shoppingcart.models import CourseRegistrationCode, DonationConfiguration
from openedx.core.djangoapps.user_authn.cookies import set_logged_in_cookies
from student.helpers import cert_info, check_verify_status_by_course
from student.models import (
AccountRecovery,
CourseEnrollment,
CourseEnrollmentAttribute,
DashboardConfiguration,
UserProfile
)
from util.milestones_helpers import get_pre_requisite_courses_not_completed
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.student")
def get_org_black_and_whitelist_for_site():
"""
Returns the org blacklist and whitelist for the current site.
Returns:
(org_whitelist, org_blacklist): A tuple of lists of orgs that serve as
either a blacklist or a whitelist of orgs for the current site. The
whitelist takes precedence, and the blacklist is used if the
whitelist is None.
"""
# Default blacklist is empty.
org_blacklist = None
# Whitelist the orgs configured for the current site. Each site outside
# of edx.org has a list of orgs associated with its configuration.
org_whitelist = configuration_helpers.get_current_site_orgs()
if not org_whitelist:
# If there is no whitelist, the blacklist will include all orgs that
# have been configured for any other sites. This applies to edx.org,
# where it is easier to blacklist all other orgs.
org_blacklist = configuration_helpers.get_all_orgs()
return org_whitelist, org_blacklist
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
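# Illustrative sketch (hypothetical helper, not called anywhere in this module): the
# recency rule above reduces to "active and created after now minus the configured
# delta in seconds"; `enrollments` is assumed to be any iterable of objects exposing
# `is_active` and `created`.
def _recent_enrollment_filter_sketch(enrollments, seconds):
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds)
    return [enrollment for enrollment in enrollments if enrollment.is_active and enrollment.created > cutoff]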
def _allow_donation(course_modes, course_id, enrollment):
"""
Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
if course_id not in course_modes:
flat_unexpired_modes = {
text_type(course_id): [mode for mode in modes]
for course_id, modes in iteritems(course_modes)
}
flat_all_modes = {
text_type(course_id): [mode.slug for mode in modes]
for course_id, modes in iteritems(CourseMode.all_modes_for_courses([course_id]))
}
log.error(
u'Can not find `%s` in course modes.`%s`. All modes: `%s`',
course_id,
flat_unexpired_modes,
flat_all_modes
)
donations_enabled = configuration_helpers.get_value(
'ENABLE_DONATIONS',
DonationConfiguration.current().enabled
)
return (
donations_enabled and
enrollment.mode in course_modes[course_id] and
course_modes[course_id][enrollment.mode].min_price == 0
)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enrollments_count = len(recently_enrolled_courses)
course_name_separator = ', '
        # If exactly two courses were recently enrolled, join their names with 'and'.
if enrollments_count == 2:
course_name_separator = _(' and ')
course_names = course_name_separator.join(
[enrollment.course_overview.display_name for enrollment in recently_enrolled_courses]
)
allow_donations = any(
_allow_donation(course_modes, enrollment.course_overview.id, enrollment)
for enrollment in recently_enrolled_courses
)
platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{
'course_names': course_names,
'enrollments_count': enrollments_count,
'allow_donations': allow_donations,
'platform_name': platform_name,
'course_id': recently_enrolled_courses[0].course_overview.id if enrollments_count == 1 else None
}
)
def get_course_enrollments(user, org_whitelist, org_blacklist):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_whitelist (list[str]): If not None, ONLY courses of these orgs will be returned.
org_blacklist (list[str]): Courses of these orgs will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user_with_overviews_preload(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# Filter out anything that is not in the whitelist.
if org_whitelist and course_overview.location.org not in org_whitelist:
continue
# Conversely, filter out any enrollments in the blacklist.
elif org_blacklist and course_overview.location.org in org_blacklist:
continue
# Else, include the enrollment.
else:
yield enrollment
def get_filtered_course_entitlements(user, org_whitelist, org_blacklist):
"""
Given a user, return a filtered set of his or her course entitlements.
Arguments:
user (User): the user in question.
org_whitelist (list[str]): If not None, ONLY entitlements of these orgs will be returned.
org_blacklist (list[str]): CourseEntitlements of these orgs will be excluded.
Returns:
generator[CourseEntitlement]: a sequence of entitlements to be displayed
on the user's dashboard.
"""
course_entitlement_available_sessions = {}
unfulfilled_entitlement_pseudo_sessions = {}
course_entitlements = list(CourseEntitlement.get_active_entitlements_for_user(user))
filtered_entitlements = []
pseudo_session = None
course_run_key = None
for course_entitlement in course_entitlements:
course_entitlement.update_expired_at()
available_runs = get_visible_sessions_for_entitlement(course_entitlement)
if not course_entitlement.enrollment_course_run:
# Unfulfilled entitlements need a mock session for metadata
pseudo_session = get_pseudo_session_for_entitlement(course_entitlement)
unfulfilled_entitlement_pseudo_sessions[str(course_entitlement.uuid)] = pseudo_session
# Check the org of the Course and filter out entitlements that are not available.
if course_entitlement.enrollment_course_run:
course_run_key = course_entitlement.enrollment_course_run.course_id
elif available_runs:
course_run_key = CourseKey.from_string(available_runs[0]['key'])
elif pseudo_session:
course_run_key = CourseKey.from_string(pseudo_session['key'])
if course_run_key:
# If there is no course_run_key at this point we will be unable to determine if it should be shown.
# Therefore it should be excluded by default.
if org_whitelist and course_run_key.org not in org_whitelist:
continue
elif org_blacklist and course_run_key.org in org_blacklist:
continue
course_entitlement_available_sessions[str(course_entitlement.uuid)] = available_runs
filtered_entitlements.append(course_entitlement)
return filtered_entitlements, course_entitlement_available_sessions, unfulfilled_entitlement_pseudo_sessions
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
We would like to compute some more information from the given course modes
and the user's current enrollment
Returns the given information:
- whether to show the course upsell information
- numbers of days until they can't upsell anymore
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
mode_info['verified_sku'] = modes['verified'].sku
mode_info['verified_bulk_sku'] = modes['verified'].bulk_sku
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""
Checking if registration is blocked or not.
"""
blocked = False
for redeemed_registration in redeemed_registration_codes:
        # Registration codes may be generated via a bulk purchase scenario;
        # only invoice-generated registration codes need to be checked here,
        # to confirm whether their invoice is still valid.
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key,
)
track.views.server_track(
request,
"change-email1-settings",
{"receive_emails": "no", "course": text_type(course_key)},
page='dashboard',
)
break
return blocked
def get_verification_error_reasons_for_display(verification_error_codes):
"""
Returns the display text for the given verification error codes.
"""
verification_errors = []
verification_error_map = {
'photos_mismatched': _('Photos are mismatched'),
'id_image_missing_name': _('Name missing from ID photo'),
'id_image_missing': _('ID photo not provided'),
'id_invalid': _('ID is invalid'),
'user_image_not_clear': _('Learner photo is blurry'),
'name_mismatch': _('Name on ID does not match name on account'),
'user_image_missing': _('Learner photo not provided'),
'id_image_not_clear': _('ID photo is blurry'),
}
for error in verification_error_codes:
error_text = verification_error_map.get(error)
if error_text:
verification_errors.append(error_text)
return verification_errors
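# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# unrecognized error codes are silently dropped by the mapping above, so a mix of
# real and made-up codes only yields display text for the known ones.
def _verification_error_display_sketch():
    # 'not_a_real_code' is invented for this sketch and maps to nothing.
    return get_verification_error_reasons_for_display(['id_invalid', 'not_a_real_code'])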
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
A credit course is a course for which a user can purchased
college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(text_type(eligibility["course_key"]))
providers_names = get_credit_provider_display_names(course_key)
status = {
"course_key": text_type(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": make_providers_strings(providers_names),
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
        # We retrieve the provider's ID from the "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
def _get_urls_for_resume_buttons(user, enrollments):
    """
    Checks whether a user has made progress in any of a list of enrollments.
    """
resume_button_urls = []
for enrollment in enrollments:
try:
block_key = get_key_to_last_completed_course_block(user, enrollment.course_id)
url_to_block = reverse(
'jump_to',
kwargs={'course_id': enrollment.course_id, 'location': block_key}
)
except UnavailableCompletionData:
url_to_block = ''
resume_button_urls.append(url_to_block)
return resume_button_urls
@login_required
@ensure_csrf_cookie
@add_maintenance_banner
def student_dashboard(request):
"""
Provides the LMS dashboard view
TODO: This is lms specific and does not belong in common code.
Arguments:
request: The request object.
Returns:
The dashboard response.
"""
user = request.user
if not UserProfile.objects.filter(user=user).exists():
return redirect(reverse('account_settings'))
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
enable_verified_certificates = configuration_helpers.get_value(
'ENABLE_VERIFIED_CERTIFICATES',
settings.FEATURES.get('ENABLE_VERIFIED_CERTIFICATES')
)
display_course_modes_on_dashboard = configuration_helpers.get_value(
'DISPLAY_COURSE_MODES_ON_DASHBOARD',
settings.FEATURES.get('DISPLAY_COURSE_MODES_ON_DASHBOARD', True)
)
activation_email_support_link = configuration_helpers.get_value(
'ACTIVATION_EMAIL_SUPPORT_LINK', settings.ACTIVATION_EMAIL_SUPPORT_LINK
) or settings.SUPPORT_SITE_LINK
hide_dashboard_courses_until_activated = configuration_helpers.get_value(
'HIDE_DASHBOARD_COURSES_UNTIL_ACTIVATED',
settings.FEATURES.get('HIDE_DASHBOARD_COURSES_UNTIL_ACTIVATED', False)
)
empty_dashboard_message = configuration_helpers.get_value(
'EMPTY_DASHBOARD_MESSAGE', None
)
# Get the org whitelist or the org blacklist for the current site
site_org_whitelist, site_org_blacklist = get_org_black_and_whitelist_for_site()
course_enrollments = list(get_course_enrollments(user, site_org_whitelist, site_org_blacklist))
# Get the entitlements for the user and a mapping to all available sessions for that entitlement
# If an entitlement has no available sessions, pass through a mock course overview object
(course_entitlements,
course_entitlement_available_sessions,
unfulfilled_entitlement_pseudo_sessions) = get_filtered_course_entitlements(
user,
site_org_whitelist,
site_org_blacklist
)
# Record how many courses there are so that we can get a better
# understanding of usage patterns on prod.
monitoring_utils.accumulate('num_courses', len(course_enrollments))
# Sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in iteritems(unexpired_course_modes)
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
# Display activation message
activate_account_message = ''
if not user.is_active:
activate_account_message = Text(_(
"Check your {email_start}{email}{email_end} inbox for an account activation link from {platform_name}. "
"If you need help, contact {link_start}{platform_name} Support{link_end}."
)).format(
platform_name=platform_name,
email_start=HTML("<strong>"),
email_end=HTML("</strong>"),
email=user.email,
link_start=HTML("<a target='_blank' href='{activation_email_support_link}'>").format(
activation_email_support_link=activation_email_support_link,
),
link_end=HTML("</a>"),
)
enterprise_message = get_dashboard_consent_notification(request, user, course_enrollments)
recovery_email_message = recovery_email_activation_message = None
if is_secondary_email_feature_enabled_for_user(user=user):
try:
account_recovery_obj = AccountRecovery.objects.get(user=user)
except AccountRecovery.DoesNotExist:
recovery_email_message = Text(
_(
"Add a recovery email to retain access when single-sign on is not available. "
"Go to {link_start}your Account Settings{link_end}.")
).format(
link_start=HTML("<a target='_blank' href='{account_setting_page}'>").format(
account_setting_page=reverse('account_settings'),
),
link_end=HTML("</a>")
)
else:
if not account_recovery_obj.is_active:
recovery_email_activation_message = Text(
_(
"Recovery email is not activated yet. "
"Kindly visit your email and follow the instructions to activate it."
)
)
# Disable lookup of Enterprise consent_required_course due to ENT-727
# Will re-enable after fixing WL-1315
consent_required_courses = set()
enterprise_customer_name = None
# Account activation message
account_activation_messages = [
message for message in messages.get_messages(request) if 'account-activation' in message.tags
]
# Global staff can see what courses encountered an error on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that encountered an error on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = {
enrollment.course_id: has_access(request.user, 'load', enrollment.course_overview)
for enrollment in course_enrollments
}
# Find programs associated with course runs being displayed. This information
# is passed in the template context to allow rendering of program-related
# information on the dashboard.
meter = ProgramProgressMeter(request.site, user, enrollments=course_enrollments)
ecommerce_service = EcommerceService()
inverted_programs = meter.invert_programs()
urls, programs_data = {}, {}
bundles_on_dashboard_flag = WaffleFlag(WaffleFlagNamespace(name=u'student.experiments'), u'bundles_on_dashboard')
# TODO: Delete this code and the relevant HTML code after testing LEARNER-3072 is complete
if bundles_on_dashboard_flag.is_enabled() and inverted_programs and inverted_programs.items():
if len(course_enrollments) < 4:
for program in inverted_programs.values():
try:
program_uuid = program[0]['uuid']
program_data = get_programs(request.site, uuid=program_uuid)
program_data = ProgramDataExtender(program_data, request.user).extend()
skus = program_data.get('skus')
checkout_page_url = ecommerce_service.get_checkout_page_url(*skus)
program_data['completeProgramURL'] = checkout_page_url + '&bundle=' + program_data.get('uuid')
programs_data[program_uuid] = program_data
except: # pylint: disable=bare-except
pass
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview)
for enrollment in course_enrollments
}
# only show email settings for Mongo course and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
BulkEmailFlag.feature_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status = IDVerificationService.user_status(user)
verification_errors = get_verification_error_reasons_for_display(verification_status['error'])
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(
user,
course_org_filter=site_org_whitelist,
org_filter_out_set=site_org_blacklist
)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
elif 'course_closed' in request.GET:
redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
date=request.GET['course_closed']
)
elif 'access_response_error' in request.GET:
# This can be populated in a generalized way with fields from access response errors
redirect_message = request.GET['access_response_error']
else:
redirect_message = ''
valid_verification_statuses = ['approved', 'must_reverify', 'pending', 'expired']
display_sidebar_on_dashboard = (len(order_history_list) or
(verification_status['status'] in valid_verification_statuses and
verification_status['should_display']))
# Filter out any course enrollment course cards that are associated with fulfilled entitlements
for entitlement in [e for e in course_entitlements if e.enrollment_course_run is not None]:
course_enrollments = [
enr for enr in course_enrollments if entitlement.enrollment_course_run.course_id != enr.course_id
]
context = {
'urls': urls,
'programs_data': programs_data,
'enterprise_message': enterprise_message,
'consent_required_courses': consent_required_courses,
'enterprise_customer_name': enterprise_customer_name,
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'account_activation_messages': account_activation_messages,
'activate_account_message': activate_account_message,
'course_enrollments': course_enrollments,
'course_entitlements': course_entitlements,
'course_entitlement_available_sessions': course_entitlement_available_sessions,
'unfulfilled_entitlement_pseudo_sessions': unfulfilled_entitlement_pseudo_sessions,
'course_optouts': course_optouts,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_display': verification_status['should_display'],
'verification_status': verification_status['status'],
'verification_status_by_course': verify_status_by_course,
'verification_errors': verification_errors,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse('logout'),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'inverted_programs': inverted_programs,
'show_program_listing': ProgramsApiConfig.is_enabled(),
'show_journal_listing': journals_enabled(), # TODO: Dashboard Plugin required
'show_dashboard_tabs': True,
'disable_courseware_js': True,
'display_course_modes_on_dashboard': enable_verified_certificates and display_course_modes_on_dashboard,
'display_sidebar_on_dashboard': display_sidebar_on_dashboard,
'display_sidebar_account_activation_message': not(user.is_active or hide_dashboard_courses_until_activated),
'display_dashboard_courses': (user.is_active or not hide_dashboard_courses_until_activated),
'empty_dashboard_message': empty_dashboard_message,
'recovery_email_message': recovery_email_message,
'recovery_email_activation_message': recovery_email_activation_message,
}
if ecommerce_service.is_enabled(request.user):
context.update({
'use_ecommerce_payment_flow': True,
'ecommerce_payment_page': ecommerce_service.payment_page_url(),
})
# Gather urls for course card resume buttons.
resume_button_urls = ['' for entitlement in course_entitlements]
for url in _get_urls_for_resume_buttons(user, course_enrollments):
resume_button_urls.append(url)
# There must be enough urls for dashboard.html. Template creates course
# cards for "enrollments + entitlements".
context.update({
'resume_button_urls': resume_button_urls
})
response = render_to_response('dashboard.html', context)
set_logged_in_cookies(request, response, user)
return response
|
a-parhom/edx-platform
|
common/djangoapps/student/views/dashboard.py
|
Python
|
agpl-3.0
| 37,336
|
[
"VisIt"
] |
c92eb87647fe21fab9fec24710e464f9fb14aa282bf025fc5a829ec2e207def3
|
# -*- coding: utf-8 -*-
"""
End-to-end tests related to the cohort management on the LMS Instructor Dashboard
"""
from datetime import datetime
from pytz import UTC, utc
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from .helpers import CohortTestMixin
from ..helpers import UniqueCourseTest, EventsTestMixin, create_user_partition_json
from xmodule.partitions.partitions import Group
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage, DataDownloadPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
import os
import unicodecsv
import uuid
@attr('shard_6')
class CohortConfigurationTest(EventsTestMixin, UniqueCourseTest, CohortTestMixin):
"""
Tests for cohort management on the LMS Instructor Dashboard
"""
def setUp(self):
"""
Set up a cohorted course
"""
super(CohortConfigurationTest, self).setUp()
# create course with cohorts
self.manual_cohort_name = "ManualCohort1"
self.auto_cohort_name = "AutoCohort1"
self.course_fixture = CourseFixture(**self.course_info).install()
self.setup_cohort_config(self.course_fixture, auto_cohort_groups=[self.auto_cohort_name])
self.manual_cohort_id = self.add_manual_cohort(self.course_fixture, self.manual_cohort_name)
# create a non-instructor who will be registered for the course and in the manual cohort.
self.student_name, self.student_email = self._generate_unique_user_data()
self.student_id = AutoAuthPage(
self.browser, username=self.student_name, email=self.student_email,
course_id=self.course_id, staff=False
).visit().get_user_id()
self.add_user_to_cohort(self.course_fixture, self.student_name, self.manual_cohort_id)
# create a second student user
self.other_student_name, self.other_student_email = self._generate_unique_user_data()
self.other_student_id = AutoAuthPage(
self.browser, username=self.other_student_name, email=self.other_student_email,
course_id=self.course_id, staff=False
).visit().get_user_id()
# login as an instructor
self.instructor_name, self.instructor_email = self._generate_unique_user_data()
self.instructor_id = AutoAuthPage(
self.browser, username=self.instructor_name, email=self.instructor_email,
course_id=self.course_id, staff=True
).visit().get_user_id()
# go to the membership page on the instructor dashboard
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
self.cohort_management_page = self.instructor_dashboard_page.select_cohort_management()
def verify_cohort_description(self, cohort_name, expected_description):
"""
Selects the cohort with the given name and verifies the expected description is presented.
"""
self.cohort_management_page.select_cohort(cohort_name)
self.assertEquals(self.cohort_management_page.get_selected_cohort(), cohort_name)
self.assertIn(expected_description, self.cohort_management_page.get_cohort_group_setup())
def test_cohort_description(self):
"""
Scenario: the cohort configuration management in the instructor dashboard specifies whether
students are automatically or manually assigned to specific cohorts.
Given I have a course with a manual cohort and an automatic cohort defined
When I view the manual cohort in the instructor dashboard
There is text specifying that students are only added to the cohort manually
And when I view the automatic cohort in the instructor dashboard
There is text specifying that students are automatically added to the cohort
"""
self.verify_cohort_description(
self.manual_cohort_name,
'Students are added to this cohort only when you provide '
'their email addresses or usernames on this page',
)
self.verify_cohort_description(
self.auto_cohort_name,
'Students are added to this cohort automatically',
)
def test_no_content_groups(self):
"""
Scenario: if the course has no content groups defined (user_partitions of type cohort),
the settings in the cohort management tab reflect this
Given I have a course with a cohort defined but no content groups
When I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to a content group
And there is text stating that no content groups are defined
And I cannot select the radio button to enable content group association
And there is a link I can select to open Group settings in Studio
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.assertIsNone(self.cohort_management_page.get_cohort_associated_content_group())
self.assertEqual(
"Warning:\nNo content groups exist. Create a content group",
self.cohort_management_page.get_cohort_related_content_group_message()
)
self.assertFalse(self.cohort_management_page.select_content_group_radio_button())
self.cohort_management_page.select_studio_group_settings()
group_settings_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_settings_page.wait_for_page()
def test_add_students_to_cohort_success(self):
"""
Scenario: When students are added to a cohort, the appropriate notification is shown.
Given I have a course with two cohorts
And there is a user in one cohort
And there is a user in neither cohort
When I add the two users to the cohort that initially had no users
Then there are 2 users in total in the cohort
And I get a notification that 2 users have been added to the cohort
And I get a notification that 1 user was moved from the other cohort
And the user input field is empty
And appropriate events have been emitted
"""
start_time = datetime.now(UTC)
self.cohort_management_page.select_cohort(self.auto_cohort_name)
self.assertEqual(0, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.student_name, self.instructor_name])
# Wait for the number of users in the cohort to change, indicating that the add operation is complete.
EmptyPromise(
lambda: 2 == self.cohort_management_page.get_selected_cohort_count(), 'Waiting for added students'
).fulfill()
confirmation_messages = self.cohort_management_page.get_cohort_confirmation_messages()
self.assertEqual(
[
"2 students have been added to this cohort",
"1 student was removed from " + self.manual_cohort_name
],
confirmation_messages
)
self.assertEqual("", self.cohort_management_page.get_cohort_student_input_field_value())
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.instructor_id), int(self.student_id)]},
"event.cohort_name": self.auto_cohort_name,
}).count(),
2
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_removed",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_add_requested",
"time": {"$gt": start_time},
"event.user_id": int(self.instructor_id),
"event.cohort_name": self.auto_cohort_name,
"event.previous_cohort_name": None,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_add_requested",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.auto_cohort_name,
"event.previous_cohort_name": self.manual_cohort_name,
}).count(),
1
)
def test_add_students_to_cohort_failure(self):
"""
Scenario: When errors occur when adding students to a cohort, the appropriate notification is shown.
Given I have a course with a cohort and a user already in it
When I add the user already in a cohort to that same cohort
And I add a non-existing user to that cohort
Then there is no change in the number of students in the cohort
And I get a notification that one user was already in the cohort
And I get a notification that one user is unknown
And the user input field still contains the incorrect email addresses
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.student_name, "unknown_user"])
# Wait for notification messages to appear, indicating that the add operation is complete.
EmptyPromise(
lambda: 2 == len(self.cohort_management_page.get_cohort_confirmation_messages()), 'Waiting for notification'
).fulfill()
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.assertEqual(
[
"0 students have been added to this cohort",
"1 student was already in the cohort"
],
self.cohort_management_page.get_cohort_confirmation_messages()
)
self.assertEqual(
[
"There was an error when trying to add students:",
"Unknown user: unknown_user"
],
self.cohort_management_page.get_cohort_error_messages()
)
self.assertEqual(
self.student_name + ",unknown_user,",
self.cohort_management_page.get_cohort_student_input_field_value()
)
def _verify_cohort_settings(
self,
cohort_name,
assignment_type=None,
new_cohort_name=None,
new_assignment_type=None,
verify_updated=False
):
"""
Create a new cohort and verify the new and existing settings.
"""
start_time = datetime.now(UTC)
self.assertFalse(cohort_name in self.cohort_management_page.get_cohorts())
self.cohort_management_page.add_cohort(cohort_name, assignment_type=assignment_type)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: cohort_name == self.cohort_management_page.get_selected_cohort(), "Waiting for new cohort to appear"
).fulfill()
self.assertEqual(0, self.cohort_management_page.get_selected_cohort_count())
# After adding the cohort, it should automatically be selected and its
# assignment_type should be "manual" as this is the default assignment type
_assignment_type = assignment_type or 'manual'
msg = "Waiting for currently selected cohort assignment type"
EmptyPromise(
lambda: _assignment_type == self.cohort_management_page.get_cohort_associated_assignment_type(), msg
).fulfill()
# Go back to Manage Students Tab
self.cohort_management_page.select_manage_settings()
self.cohort_management_page.add_students_to_selected_cohort([self.instructor_name])
# Wait for the number of users in the cohort to change, indicating that the add operation is complete.
EmptyPromise(
lambda: 1 == self.cohort_management_page.get_selected_cohort_count(), 'Waiting for student to be added'
).fulfill()
self.assertFalse(self.cohort_management_page.is_assignment_settings_disabled)
self.assertEqual('', self.cohort_management_page.assignment_settings_message)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.created",
"time": {"$gt": start_time},
"event.cohort_name": cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.creation_requested",
"time": {"$gt": start_time},
"event.cohort_name": cohort_name,
}).count(),
1
)
if verify_updated:
self.cohort_management_page.select_cohort(cohort_name)
self.cohort_management_page.select_cohort_settings()
self.cohort_management_page.set_cohort_name(new_cohort_name)
self.cohort_management_page.set_assignment_type(new_assignment_type)
self.cohort_management_page.save_cohort_settings()
# If cohort name is empty, then we should get/see an error message.
if not new_cohort_name:
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages(type='error')
self.assertEqual(
["The cohort cannot be saved", "You must specify a name for the cohort"],
confirmation_messages
)
else:
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
self.assertEqual(["Saved cohort"], confirmation_messages)
self.assertEqual(new_cohort_name, self.cohort_management_page.cohort_name_in_header)
self.assertTrue(new_cohort_name in self.cohort_management_page.get_cohorts())
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.assertEqual(
new_assignment_type,
self.cohort_management_page.get_cohort_associated_assignment_type()
)
def _create_csv_file(self, filename, csv_text_as_lists):
"""
Create a csv file with the provided list of lists.
:param filename: this is the name that will be used for the csv file. Its location will
be under the test upload data directory
        :param csv_text_as_lists: the contents of the csv file in the form of a list of lists
"""
filename = self.instructor_dashboard_page.get_asset_path(filename)
with open(filename, 'w+') as csv_file:
writer = unicodecsv.writer(csv_file)
for line in csv_text_as_lists:
writer.writerow(line)
self.addCleanup(os.remove, filename)
def _generate_unique_user_data(self):
"""
Produce unique username and e-mail.
"""
unique_username = 'user' + str(uuid.uuid4().hex)[:12]
unique_email = unique_username + "@example.com"
return unique_username, unique_email
def test_add_new_cohort(self):
"""
Scenario: A new manual cohort can be created, and a student assigned to it.
Given I have a course with a user in the course
When I add a new manual cohort to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
        And the assignment type of the displayed cohort is "manual" because this is the default
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
self._verify_cohort_settings(cohort_name=cohort_name, assignment_type=None)
def test_add_new_cohort_with_manual_assignment_type(self):
"""
Scenario: A new cohort with manual assignment type can be created, and a student assigned to it.
Given I have a course with a user in the course
When I add a new manual cohort with manual assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "manual"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
self._verify_cohort_settings(cohort_name=cohort_name, assignment_type='manual')
def test_add_new_cohort_with_random_assignment_type(self):
"""
Scenario: A new cohort with random assignment type can be created, and a student assigned to it.
Given I have a course with a user in the course
When I add a new manual cohort with random assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "random"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
self._verify_cohort_settings(cohort_name=cohort_name, assignment_type='random')
def test_update_existing_cohort_settings(self):
"""
Scenario: Update existing cohort settings(cohort name, assignment type)
Given I have a course with a user in the course
When I add a new cohort with random assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "random"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
        Then I select the cohort (that was just created) from the existing cohorts
        Then I change its name and set its assignment type to "manual"
        Then I save the settings
And cohort with new name is present in cohorts dropdown list
And cohort assignment type should be "manual"
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
new_cohort_name = '{old}__NEW'.format(old=cohort_name)
self._verify_cohort_settings(
cohort_name=cohort_name,
assignment_type='random',
new_cohort_name=new_cohort_name,
new_assignment_type='manual',
verify_updated=True
)
def test_update_existing_cohort_settings_with_empty_cohort_name(self):
"""
Scenario: Update existing cohort settings(cohort name, assignment type).
Given I have a course with a user in the course
When I add a new cohort with random assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "random"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
Then I select a cohort from existing cohorts
        Then I set its name to an empty string and its assignment type to "manual"
And I click on Save button
Then I should see an error message
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
new_cohort_name = ''
self._verify_cohort_settings(
cohort_name=cohort_name,
assignment_type='random',
new_cohort_name=new_cohort_name,
new_assignment_type='manual',
verify_updated=True
)
def test_default_cohort_assignment_settings(self):
"""
Scenario: Cohort assignment settings are disabled for default cohort.
Given I have a course with a user in the course
And I have added a manual cohort
And I have added a random cohort
When I select the random cohort
Then cohort assignment settings are disabled
"""
self.cohort_management_page.select_cohort("AutoCohort1")
self.cohort_management_page.select_cohort_settings()
self.assertTrue(self.cohort_management_page.is_assignment_settings_disabled)
message = "There must be one cohort to which students can automatically be assigned."
self.assertEqual(message, self.cohort_management_page.assignment_settings_message)
def test_cohort_enable_disable(self):
"""
Scenario: Cohort Enable/Disable checkbox related functionality is working as intended.
Given I have a cohorted course with a user.
And I can see the `Enable Cohorts` checkbox is checked.
And cohort management controls are visible.
When I uncheck the `Enable Cohorts` checkbox.
Then cohort management controls are not visible.
And When I reload the page.
Then I can see the `Enable Cohorts` checkbox is unchecked.
And cohort management controls are not visible.
"""
self.assertTrue(self.cohort_management_page.is_cohorted)
self.assertTrue(self.cohort_management_page.cohort_management_controls_visible())
self.cohort_management_page.is_cohorted = False
self.assertFalse(self.cohort_management_page.cohort_management_controls_visible())
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.assertFalse(self.cohort_management_page.is_cohorted)
self.assertFalse(self.cohort_management_page.cohort_management_controls_visible())
def test_link_to_data_download(self):
"""
Scenario: a link is present from the cohort configuration in
the instructor dashboard to the Data Download section.
Given I have a course with a cohort defined
When I view the cohort in the LMS instructor dashboard
There is a link to take me to the Data Download section of the Instructor Dashboard.
"""
self.cohort_management_page.select_data_download()
data_download_page = DataDownloadPage(self.browser)
data_download_page.wait_for_page()
def test_cohort_by_csv_both_columns(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using both emails and usernames.
Given I have a course with two cohorts defined
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to cohorts via both usernames and emails
Then I can download a file with results
And appropriate events have been emitted
"""
csv_contents = [
['username', 'email', 'ignored_column', 'cohort'],
[self.instructor_name, '', 'June', 'ManualCohort1'],
['', self.student_email, 'Spring', 'AutoCohort1'],
[self.other_student_name, '', 'Fall', 'ManualCohort1'],
]
filename = "cohort_csv_both_columns_1.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename)
def test_cohort_by_csv_only_email(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using only emails.
Given I have a course with two cohorts defined
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to cohorts via only emails
Then I can download a file with results
And appropriate events have been emitted
"""
csv_contents = [
['email', 'cohort'],
[self.instructor_email, 'ManualCohort1'],
[self.student_email, 'AutoCohort1'],
[self.other_student_email, 'ManualCohort1'],
]
filename = "cohort_csv_emails_only.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename)
def test_cohort_by_csv_only_username(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using only usernames.
Given I have a course with two cohorts defined
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to cohorts via only usernames
Then I can download a file with results
And appropriate events have been emitted
"""
csv_contents = [
['username', 'cohort'],
[self.instructor_name, 'ManualCohort1'],
[self.student_name, 'AutoCohort1'],
[self.other_student_name, 'ManualCohort1'],
]
filename = "cohort_users_only_username1.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename)
def test_cohort_by_csv_unicode(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using both emails and usernames.
Given I have a course with two cohorts defined
And I add another cohort with a unicode name
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to the unicode cohort via both usernames and emails
Then I can download a file with results
TODO: refactor events verification to handle this scenario. Events verification assumes movements
between other cohorts (manual and auto).
"""
unicode_hello_in_korean = u'안녕하세요'
self._verify_cohort_settings(cohort_name=unicode_hello_in_korean, assignment_type=None)
csv_contents = [
['username', 'email', 'cohort'],
[self.instructor_name, '', unicode_hello_in_korean],
['', self.student_email, unicode_hello_in_korean],
[self.other_student_name, '', unicode_hello_in_korean]
]
filename = "cohort_unicode_name.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename, skip_events=True)
def _verify_csv_upload_acceptable_file(self, filename, skip_events=None):
"""
Helper method to verify cohort assignments after a successful CSV upload.
When skip_events is specified, no assertions are made on events.
"""
start_time = datetime.now(UTC)
self.cohort_management_page.upload_cohort_file(filename)
self._verify_cohort_by_csv_notification(
"Your file '{}' has been uploaded. Allow a few minutes for processing.".format(filename)
)
if not skip_events:
# student_user is moved from manual cohort to auto cohort
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.student_id)]},
"event.cohort_name": self.auto_cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_removed",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
# instructor_user (previously unassigned) is added to manual cohort
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.instructor_id)]},
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
# other_student_user (previously unassigned) is added to manual cohort
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.other_student_id)]},
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
# Verify the results can be downloaded.
data_download = self.instructor_dashboard_page.select_data_download()
data_download.wait_for_available_report()
report = data_download.get_available_reports_for_download()[0]
base_file_name = "cohort_results_"
self.assertIn("{}_{}".format(
'_'.join([self.course_info['org'], self.course_info['number'], self.course_info['run']]), base_file_name
), report)
report_datetime = datetime.strptime(
report[report.index(base_file_name) + len(base_file_name):-len(".csv")],
"%Y-%m-%d-%H%M"
)
self.assertLessEqual(start_time.replace(second=0, microsecond=0), utc.localize(report_datetime))
def test_cohort_by_csv_wrong_file_type(self):
"""
Scenario: if the instructor uploads a non-csv file, an error message is presented.
Given I have a course with cohorting enabled
When I go to the cohort management section of the instructor dashboard
And I upload a file without the CSV extension
Then I get an error message stating that the file must have a CSV extension
"""
self.cohort_management_page.upload_cohort_file("image.jpg")
self._verify_cohort_by_csv_notification("The file must end with the extension '.csv'.")
def test_cohort_by_csv_missing_cohort(self):
"""
Scenario: if the instructor uploads a csv file with no cohort column, an error message is presented.
Given I have a course with cohorting enabled
When I go to the cohort management section of the instructor dashboard
And I upload a CSV file that is missing the cohort column
Then I get an error message stating that the file must have a cohort column
"""
self.cohort_management_page.upload_cohort_file("cohort_users_missing_cohort_column.csv")
self._verify_cohort_by_csv_notification("The file must contain a 'cohort' column containing cohort names.")
def test_cohort_by_csv_missing_user(self):
"""
Scenario: if the instructor uploads a csv file with no username or email column, an error message is presented.
Given I have a course with cohorting enabled
When I go to the cohort management section of the instructor dashboard
And I upload a CSV file that is missing both the username and email columns
Then I get an error message stating that the file must have either a username or email column
"""
self.cohort_management_page.upload_cohort_file("cohort_users_missing_user_columns.csv")
self._verify_cohort_by_csv_notification(
"The file must contain a 'username' column, an 'email' column, or both."
)
def _verify_cohort_by_csv_notification(self, expected_message):
"""
Helper method to check the CSV file upload notification message.
"""
# Wait for notification message to appear, indicating file has been uploaded.
EmptyPromise(
lambda: 1 == len(self.cohort_management_page.get_csv_messages()), 'Waiting for notification'
).fulfill()
messages = self.cohort_management_page.get_csv_messages()
self.assertEquals(expected_message, messages[0])
@attr('shard_6')
class CohortDiscussionTopicsTest(UniqueCourseTest, CohortTestMixin):
"""
Tests for cohorting the inline and course-wide discussion topics.
"""
def setUp(self):
"""
        Set up discussion topics.
"""
super(CohortDiscussionTopicsTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid.uuid4().hex)
self.course_fixture = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
)
)
)
)
).install()
# create course with single cohort and two content groups (user_partition of type "cohort")
self.cohort_name = "OnlyCohort"
self.setup_cohort_config(self.course_fixture)
self.cohort_id = self.add_manual_cohort(self.course_fixture, self.cohort_name)
# login as an instructor
self.instructor_name = "instructor_user"
self.instructor_id = AutoAuthPage(
self.browser, username=self.instructor_name, email="instructor_user@example.com",
course_id=self.course_id, staff=True
).visit().get_user_id()
# go to the membership page on the instructor dashboard
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
self.cohort_management_page = self.instructor_dashboard_page.select_cohort_management()
self.cohort_management_page.wait_for_page()
self.course_wide_key = 'course-wide'
self.inline_key = 'inline'
def cohort_discussion_topics_are_visible(self):
"""
Assert that discussion topics are visible with appropriate content.
"""
self.cohort_management_page.toggles_showing_of_discussion_topics()
self.assertTrue(self.cohort_management_page.discussion_topics_visible())
self.assertEqual(
"Course-Wide Discussion Topics",
self.cohort_management_page.cohort_discussion_heading_is_visible(self.course_wide_key)
)
self.assertTrue(self.cohort_management_page.is_save_button_disabled(self.course_wide_key))
self.assertEqual(
"Content-Specific Discussion Topics",
self.cohort_management_page.cohort_discussion_heading_is_visible(self.inline_key)
)
self.assertTrue(self.cohort_management_page.is_save_button_disabled(self.inline_key))
def save_and_verify_discussion_topics(self, key):
"""
        Saves the discussion topics and then verifies the changes.
"""
# click on the inline save button.
self.cohort_management_page.save_discussion_topics(key)
# verifies that changes saved successfully.
confirmation_message = self.cohort_management_page.get_cohort_discussions_message(key=key)
self.assertEqual("Your changes have been saved.", confirmation_message)
# save button disabled again.
self.assertTrue(self.cohort_management_page.is_save_button_disabled(key))
def reload_page(self):
"""
Refresh the page.
"""
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.instructor_dashboard_page.select_cohort_management()
self.cohort_management_page.wait_for_page()
self.cohort_discussion_topics_are_visible()
def verify_discussion_topics_after_reload(self, key, cohorted_topics):
"""
Verifies the changed topics.
"""
self.reload_page()
self.assertEqual(self.cohort_management_page.get_cohorted_topics_count(key), cohorted_topics)
def test_cohort_course_wide_discussion_topic(self):
"""
Scenario: cohort a course-wide discussion topic.
Given I have a course with a cohort defined,
        And a course-wide discussion topic with a disabled Save button.
        When I click on the course-wide discussion topic
        Then I see the enabled Save button
        When I click on the Save button
        Then I see a success message
When I reload the page
Then I see the discussion topic selected
"""
self.cohort_discussion_topics_are_visible()
cohorted_topics_before = self.cohort_management_page.get_cohorted_topics_count(self.course_wide_key)
self.cohort_management_page.select_discussion_topic(self.course_wide_key)
self.assertFalse(self.cohort_management_page.is_save_button_disabled(self.course_wide_key))
self.save_and_verify_discussion_topics(key=self.course_wide_key)
cohorted_topics_after = self.cohort_management_page.get_cohorted_topics_count(self.course_wide_key)
self.assertNotEqual(cohorted_topics_before, cohorted_topics_after)
self.verify_discussion_topics_after_reload(self.course_wide_key, cohorted_topics_after)
def test_always_cohort_inline_topic_enabled(self):
"""
Scenario: Select the always_cohort_inline_topics radio button
Given I have a course with a cohort defined,
        And an inline discussion topic with a disabled Save button.
        When I click on always_cohort_inline_topics
        Then I see the enabled Save button
        And I see the inline discussion topics disabled
When I reload the page
Then I see the option enabled
"""
self.cohort_discussion_topics_are_visible()
# enable always inline discussion topics.
self.cohort_management_page.select_always_inline_discussion()
self.assertTrue(self.cohort_management_page.inline_discussion_topics_disabled())
self.reload_page()
self.assertIsNotNone(self.cohort_management_page.always_inline_discussion_selected())
def test_cohort_some_inline_topics_enabled(self):
"""
Scenario: Select the cohort_some_inline_topics radio button
Given I have a course with a cohort defined,
        And an inline discussion topic with a disabled Save button.
        When I click on cohort_some_inline_topics
        Then I see the enabled Save button
        And I see the inline discussion topics enabled
When I reload the page
Then I see the option enabled
"""
self.cohort_discussion_topics_are_visible()
# enable some inline discussion topic radio button.
self.cohort_management_page.select_cohort_some_inline_discussion()
# I see that save button is enabled
self.assertFalse(self.cohort_management_page.is_save_button_disabled(self.inline_key))
# I see that inline discussion topics are enabled
self.assertFalse(self.cohort_management_page.inline_discussion_topics_disabled())
self.reload_page()
self.assertIsNotNone(self.cohort_management_page.cohort_some_inline_discussion_selected())
def test_cohort_inline_discussion_topic(self):
"""
Scenario: cohort inline discussion topic.
Given I have a course with a cohort defined,
        And an inline discussion topic with a disabled Save button.
        When I click on cohort_some_inline_discussion_topics
        Then I see the enabled Save button
        And when I click on an inline discussion topic
        Then I see the enabled Save button
        And when I click the Save button
        Then I see a success message
When I reload the page
Then I see the discussion topic selected
"""
self.cohort_discussion_topics_are_visible()
# select some inline discussion topics radio button.
self.cohort_management_page.select_cohort_some_inline_discussion()
cohorted_topics_before = self.cohort_management_page.get_cohorted_topics_count(self.inline_key)
# check the discussion topic.
self.cohort_management_page.select_discussion_topic(self.inline_key)
# Save button enabled.
self.assertFalse(self.cohort_management_page.is_save_button_disabled(self.inline_key))
# verifies that changes saved successfully.
self.save_and_verify_discussion_topics(key=self.inline_key)
cohorted_topics_after = self.cohort_management_page.get_cohorted_topics_count(self.inline_key)
self.assertNotEqual(cohorted_topics_before, cohorted_topics_after)
self.verify_discussion_topics_after_reload(self.inline_key, cohorted_topics_after)
def test_verify_that_selecting_the_final_child_selects_category(self):
"""
Scenario: Category should be selected on selecting final child.
Given I have a course with a cohort defined,
        And an inline discussion with a disabled Save button.
        When I click on the child topics
        Then I see the enabled Save button
        And I see the parent category checked.
"""
self.cohort_discussion_topics_are_visible()
# enable some inline discussion topics.
self.cohort_management_page.select_cohort_some_inline_discussion()
# category should not be selected.
self.assertFalse(self.cohort_management_page.is_category_selected())
# check the discussion topic.
self.cohort_management_page.select_discussion_topic(self.inline_key)
# verify that category is selected.
self.assertTrue(self.cohort_management_page.is_category_selected())
def test_verify_that_deselecting_the_final_child_deselects_category(self):
"""
Scenario: Category should be deselected on deselecting final child.
Given I have a course with a cohort defined,
        And an inline discussion with a disabled Save button.
        When I click on the final child topics
        Then I see the enabled Save button
        And I see the parent category deselected.
"""
self.cohort_discussion_topics_are_visible()
# enable some inline discussion topics.
self.cohort_management_page.select_cohort_some_inline_discussion()
# category should not be selected.
self.assertFalse(self.cohort_management_page.is_category_selected())
# check the discussion topic.
self.cohort_management_page.select_discussion_topic(self.inline_key)
# verify that category is selected.
self.assertTrue(self.cohort_management_page.is_category_selected())
# un-check the discussion topic.
self.cohort_management_page.select_discussion_topic(self.inline_key)
# category should not be selected.
self.assertFalse(self.cohort_management_page.is_category_selected())
def test_verify_that_correct_subset_of_category_being_selected_after_save(self):
"""
        Scenario: The correct subset of a category remains selected after save.
        Given I have a course with a cohort defined,
        And an inline discussion with a disabled Save button.
        When I click on the child topics
        Then I see the enabled Save button
        When I select a subset of the category
        And I click on the Save button
        Then I see a success message with
        the same sub-category still selected
"""
self.cohort_discussion_topics_are_visible()
# enable some inline discussion topics.
self.cohort_management_page.select_cohort_some_inline_discussion()
# category should not be selected.
self.assertFalse(self.cohort_management_page.is_category_selected())
cohorted_topics_after = self.cohort_management_page.get_cohorted_topics_count(self.inline_key)
# verifies that changes saved successfully.
self.save_and_verify_discussion_topics(key=self.inline_key)
# verify changes after reload.
self.verify_discussion_topics_after_reload(self.inline_key, cohorted_topics_after)
@attr('shard_6')
class CohortContentGroupAssociationTest(UniqueCourseTest, CohortTestMixin):
"""
Tests for linking between content groups and cohort in the instructor dashboard.
"""
def setUp(self):
"""
Set up a cohorted course with a user_partition of scheme "cohort".
"""
super(CohortContentGroupAssociationTest, self).setUp()
# create course with single cohort and two content groups (user_partition of type "cohort")
self.cohort_name = "OnlyCohort"
self.course_fixture = CourseFixture(**self.course_info).install()
self.setup_cohort_config(self.course_fixture)
self.cohort_id = self.add_manual_cohort(self.course_fixture, self.cohort_name)
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Apples, Bananas',
'Content Group Partition',
[Group("0", 'Apples'), Group("1", 'Bananas')],
scheme="cohort"
)
],
},
})
# login as an instructor
self.instructor_name = "instructor_user"
self.instructor_id = AutoAuthPage(
self.browser, username=self.instructor_name, email="instructor_user@example.com",
course_id=self.course_id, staff=True
).visit().get_user_id()
# go to the membership page on the instructor dashboard
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
self.cohort_management_page = self.instructor_dashboard_page.select_cohort_management()
def test_no_content_group_linked(self):
"""
Scenario: In a course with content groups, cohorts are initially not linked to a content group
Given I have a course with a cohort defined and content groups defined
When I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to a content group
And there is no text stating that content groups are undefined
And the content groups are listed in the selector
"""
self.cohort_management_page.select_cohort(self.cohort_name)
self.assertIsNone(self.cohort_management_page.get_cohort_associated_content_group())
self.assertIsNone(self.cohort_management_page.get_cohort_related_content_group_message())
self.assertEquals(["Apples", "Bananas"], self.cohort_management_page.get_all_content_groups())
def test_link_to_content_group(self):
"""
Scenario: In a course with content groups, cohorts can be linked to content groups
Given I have a course with a cohort defined and content groups defined
When I view the cohort in the instructor dashboard and select settings
And I link the cohort to one of the content groups and save
Then there is a notification that my cohort has been saved
And when I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the cohort is still linked to the content group
"""
self._link_cohort_to_content_group(self.cohort_name, "Bananas")
self.assertEqual("Bananas", self.cohort_management_page.get_cohort_associated_content_group())
def test_unlink_from_content_group(self):
"""
Scenario: In a course with content groups, cohorts can be unlinked from content groups
Given I have a course with a cohort defined and content groups defined
When I view the cohort in the instructor dashboard and select settings
And I link the cohort to one of the content groups and save
Then there is a notification that my cohort has been saved
And I reload the page
And I view the cohort in the instructor dashboard and select settings
And I unlink the cohort from any content group and save
Then there is a notification that my cohort has been saved
And when I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to any content group
"""
self._link_cohort_to_content_group(self.cohort_name, "Bananas")
self.cohort_management_page.set_cohort_associated_content_group(None)
self._verify_settings_saved_and_reload(self.cohort_name)
self.assertEqual(None, self.cohort_management_page.get_cohort_associated_content_group())
def test_create_new_cohort_linked_to_content_group(self):
"""
Scenario: In a course with content groups, a new cohort can be linked to a content group
at time of creation.
Given I have a course with a cohort defined and content groups defined
When I create a new cohort and link it to a content group
Then when I select settings I see that the cohort is linked to the content group
And when I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the cohort is still linked to the content group
"""
new_cohort = "correctly linked cohort"
self._create_new_cohort_linked_to_content_group(new_cohort, "Apples")
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.cohort_management_page.select_cohort(new_cohort)
self.assertEqual("Apples", self.cohort_management_page.get_cohort_associated_content_group())
def test_missing_content_group(self):
"""
Scenario: In a course with content groups, if a cohort is associated with a content group that no longer
exists, a warning message is shown
Given I have a course with a cohort defined and content groups defined
When I create a new cohort and link it to a content group
And I delete that content group from the course
And I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the settings display a message that the content group no longer exists
And when I select a different content group and save
Then the error message goes away
"""
new_cohort = "linked to missing content group"
self._create_new_cohort_linked_to_content_group(new_cohort, "Apples")
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Apples, Bananas',
'Content Group Partition',
[Group("2", 'Pears'), Group("1", 'Bananas')],
scheme="cohort"
)
],
},
})
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.cohort_management_page.select_cohort(new_cohort)
self.assertEqual("Deleted Content Group", self.cohort_management_page.get_cohort_associated_content_group())
self.assertEquals(
["Bananas", "Pears", "Deleted Content Group"],
self.cohort_management_page.get_all_content_groups()
)
self.assertEqual(
"Warning:\nThe previously selected content group was deleted. Select another content group.",
self.cohort_management_page.get_cohort_related_content_group_message()
)
self.cohort_management_page.set_cohort_associated_content_group("Pears")
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
self.assertEqual(["Saved cohort"], confirmation_messages)
self.assertIsNone(self.cohort_management_page.get_cohort_related_content_group_message())
self.assertEquals(["Bananas", "Pears"], self.cohort_management_page.get_all_content_groups())
def _create_new_cohort_linked_to_content_group(self, new_cohort, cohort_group):
"""
Creates a new cohort linked to a content group.
"""
self.cohort_management_page.add_cohort(new_cohort, content_group=cohort_group)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: new_cohort == self.cohort_management_page.get_selected_cohort(), "Waiting for new cohort to appear"
).fulfill()
self.assertEqual(cohort_group, self.cohort_management_page.get_cohort_associated_content_group())
def _link_cohort_to_content_group(self, cohort_name, content_group):
"""
Links a cohort to a content group. Saves the changes and verifies the cohort updated properly.
Then refreshes the page and selects the cohort.
"""
self.cohort_management_page.select_cohort(cohort_name)
self.cohort_management_page.set_cohort_associated_content_group(content_group)
self._verify_settings_saved_and_reload(cohort_name)
def _verify_settings_saved_and_reload(self, cohort_name):
"""
Verifies the confirmation message indicating that a cohort's settings have been updated.
Then refreshes the page and selects the cohort.
"""
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
self.assertEqual(["Saved cohort"], confirmation_messages)
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.cohort_management_page.select_cohort(cohort_name)
|
simbs/edx-platform
|
common/test/acceptance/tests/discussion/test_cohort_management.py
|
Python
|
agpl-3.0
| 54,312
|
[
"VisIt"
] |
6e3a8b6a2c38955b860cdaa8b0bc4f2f7f9eb5c682d0c3d95e92f205b37d0235
|
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Classes and tools for storing and handling parsed data"""
import numpy
class ccData(object):
"""Stores data extracted by cclib parsers
Description of cclib attributes:
aonames -- atomic orbital names (list of strings)
aooverlaps -- atomic orbital overlap matrix (array[2])
atombasis -- indices of atomic orbitals on each atom (list of lists)
atomcharges -- atomic partial charges (dict of arrays[1])
atomcoords -- atom coordinates (array[3], angstroms)
atommasses -- atom masses (array[1], daltons)
atomnos -- atomic numbers (array[1])
atomspins -- atomic spin densities (dict of arrays[1])
charge -- net charge of the system (integer)
ccenergies -- molecular energies with Coupled-Cluster corrections (array[2], eV)
coreelectrons -- number of core electrons in atom pseudopotentials (array[1])
enthalpy -- sum of electronic and thermal enthalpies (float, hartree/particle)
entropy -- entropy (float, hartree/particle)
etenergies -- energies of electronic transitions (array[1], 1/cm)
etoscs -- oscillator strengths of electronic transitions (array[1])
etrotats -- rotatory strengths of electronic transitions (array[1], ??)
etsecs -- singly-excited configurations for electronic transitions (list of lists)
etsyms -- symmetries of electronic transitions (list of string)
freeenergy -- sum of electronic and thermal free energies (float, hartree/particle)
fonames -- fragment orbital names (list of strings)
fooverlaps -- fragment orbital overlap matrix (array[2])
fragnames -- names of fragments (list of strings)
frags -- indices of atoms in a fragment (list of lists)
gbasis -- coefficients and exponents of Gaussian basis functions (PyQuante format)
geotargets -- targets for convergence of geometry optimization (array[1])
        geovalues -- current values for convergence of geometry optimization (array[1])
grads -- current values of forces (gradients) in geometry optimization (array[3])
hessian -- elements of the force constant matrix (array[1])
homos -- molecular orbital indices of HOMO(s) (array[1])
mocoeffs -- molecular orbital coefficients (list of arrays[2])
moenergies -- molecular orbital energies (list of arrays[1], eV)
moments -- molecular multipole moments (list of arrays[], a.u.)
mosyms -- orbital symmetries (list of lists)
        mpenergies -- molecular electronic energies with Møller-Plesset corrections (array[2], eV)
mult -- multiplicity of the system (integer)
natom -- number of atoms (integer)
nbasis -- number of basis functions (integer)
        nbo_occupancy -- occupancy of natural bond orbitals (NBO)
nmo -- number of molecular orbitals (integer)
nocoeffs -- natural orbital coefficients (array[2])
optdone -- flags whether an optimization has converged (Boolean)
scancoords -- geometries of each scan step (array[3], angstroms)
scanenergies -- energies of potential energy surface (list)
        scannames -- names of variables scanned (list of strings)
scanparm -- values of parameters in potential energy surface (list of tuples)
scfenergies -- molecular electronic energies after SCF (Hartree-Fock, DFT) (array[1], eV)
scftargets -- targets for convergence of the SCF (array[2])
scfvalues -- current values for convergence of the SCF (list of arrays[2])
        temperature -- temperature used for thermochemistry (float, kelvin)
vibanharms -- vibrational anharmonicity constants (array[2], 1/cm)
vibdisps -- cartesian displacement vectors (array[3], delta angstrom)
vibfreqs -- vibrational frequencies (array[1], 1/cm)
vibirs -- IR intensities (array[1], km/mol)
vibramans -- Raman intensities (array[1], A^4/Da)
vibsyms -- symmetries of vibrations (list of strings)
(1) The term 'array' refers to a numpy array
(2) The number of dimensions of an array is given in square brackets
(3) Python indexes arrays/lists starting at zero, so if homos==[10], then
the 11th molecular orbital is the HOMO
"""
# CJS added some attributes; basis_descript, nscans, sopt, run_error
# The expected types for all supported attributes.
_attrtypes = {
"aonames": list,
"aooverlaps": numpy.ndarray,
"atombasis": list,
"atomcharges": dict,
"atomcoords": numpy.ndarray,
"atommasses": numpy.ndarray,
"atomnos": numpy.ndarray,
"atomspins": dict,
"basis_descript": str,
"ccenergies": numpy.ndarray,
"charge": int,
"coreelectrons": numpy.ndarray,
"enthalpy": float,
"entropy": float,
"etenergies": numpy.ndarray,
"etoscs": numpy.ndarray,
"etrotats": numpy.ndarray,
"etsecs": list,
"etsyms": list,
"freeenergy": float,
"fonames": list,
"fooverlaps": numpy.ndarray,
"fragnames": list,
"frags": list,
'gbasis': list,
"geotargets": numpy.ndarray,
"geovalues": numpy.ndarray,
"grads": numpy.ndarray,
"hessian": numpy.ndarray,
"homos": numpy.ndarray,
"mocoeffs": list,
"moenergies": list,
"moments": list,
"mosyms": list,
"mpenergies": numpy.ndarray,
"mult": int,
"natom": int,
"nbasis": int,
"nbo_occupancy": list,
"nmo": int,
"nocoeffs": numpy.ndarray,
"nscans": int,
"optdone": bool,
"run_error": bool,
"scancoords": numpy.ndarray,
"scanenergies": list,
"scannames": list,
"scanparm": list,
"scfenergies": numpy.ndarray,
"scftargets": numpy.ndarray,
"scfvalues": list,
"sopt": list,
"temperature": float,
"vibanharms": numpy.ndarray,
"vibdisps": numpy.ndarray,
"vibfreqs": numpy.ndarray,
"vibirs": numpy.ndarray,
"vibramans": numpy.ndarray,
"vibsyms": list,
"zeropt_energy": float,
}
# The name of all attributes can be generated from the dictionary above.
_attrlist = sorted(_attrtypes.keys())
# Arrays are double precision by default, but these will be integer arrays.
_intarrays = ['atomnos', 'coreelectrons', 'homos']
# Attributes that should be lists of arrays (double precision).
_listsofarrays = ['mocoeffs', 'moenergies', 'moments', 'scfvalues']
# Attributes that should be dictionaries of arrays (double precision).
_dictsofarrays = ["atomcharges", "atomspins"]
def __init__(self, attributes={}):
"""Initialize the cclibData object.
Normally called in the parse() method of a Logfile subclass.
Inputs:
attributes - optional dictionary of attributes to load as data
"""
if attributes:
self.setattributes(attributes)
def listify(self):
"""Converts all attributes that are arrays or lists/dicts of arrays to lists."""
attrlist = [k for k in self._attrlist if hasattr(self, k)]
for k in attrlist:
v = self._attrtypes[k]
if v == numpy.ndarray:
setattr(self, k, getattr(self, k).tolist())
elif v == list and k in self._listsofarrays:
setattr(self, k, [x.tolist() for x in getattr(self, k)])
elif v == dict and k in self._dictsofarrays:
items = getattr(self, k).iteritems()
pairs = [(key, val.tolist()) for key, val in items]
setattr(self, k, dict(pairs))
def arrayify(self):
"""Converts appropriate attributes to arrays or lists/dicts of arrays."""
attrlist = [k for k in self._attrlist if hasattr(self, k)]
for k in attrlist:
v = self._attrtypes[k]
precision = 'd'
if k in self._intarrays:
precision = 'i'
if v == numpy.ndarray:
setattr(self, k, numpy.array(getattr(self, k), precision))
elif v == list and k in self._listsofarrays:
setattr(self, k, [numpy.array(x, precision) for x in getattr(self, k)])
elif v == dict and k in self._dictsofarrays:
items = getattr(self, k).items()
pairs = [(key, numpy.array(val, precision)) for key, val in items]
setattr(self, k, dict(pairs))
def getattributes(self, tolists=False):
"""Returns a dictionary of existing data attributes.
Inputs:
tolists - flag to convert attributes to lists where applicable
"""
if tolists:
self.listify()
attributes = {}
for attr in self._attrlist:
if hasattr(self, attr):
attributes[attr] = getattr(self, attr)
if tolists:
self.arrayify()
return attributes
def setattributes(self, attributes):
"""Sets data attributes given in a dictionary.
Inputs:
attributes - dictionary of attributes to set
Outputs:
invalid - list of attributes names that were not set, which
means they are not specified in self._attrlist
"""
if type(attributes) is not dict:
raise TypeError("attributes must be in a dictionary")
valid = [a for a in attributes if a in self._attrlist]
invalid = [a for a in attributes if a not in self._attrlist]
for attr in valid:
setattr(self, attr, attributes[attr])
self.arrayify()
self.typecheck()
return invalid
def typecheck(self):
"""Check the types of all attributes.
If an attribute does not match the expected type, then attempt to
convert; if that fails, only then raise a TypeError.
"""
self.arrayify()
for attr in [a for a in self._attrlist if hasattr(self, a)]:
val = getattr(self, attr)
if type(val) == self._attrtypes[attr]:
continue
try:
val = self._attrtypes[attr](val)
except ValueError:
args = (attr, type(val), self._attrtypes[attr])
raise TypeError("attribute %s is %s instead of %s and could not be converted" % args)
class ccData_optdone_bool(ccData):
"""This is the version of ccData where optdone is a Boolean."""
def __init__(self, *args, **kwargs):
super(ccData_optdone_bool, self).__init__(*args, **kwargs)
self._attrtypes['optdone'] = bool
def setattributes(self, *args, **kwargs):
invalid = super(ccData_optdone_bool, self).setattributes(*args, **kwargs)
# Reduce optdone to a Boolean, because it will be parsed as a list. If this list has any element,
# it means that there was an optimized structure and optdone should be True.
if hasattr(self, 'optdone'):
self.optdone = len(self.optdone) > 0
|
chrisjsewell/PyGauss
|
pygauss/cclib_patch/parser/data.py
|
Python
|
gpl-3.0
| 12,144
|
[
"Gaussian",
"cclib"
] |
a94a3495d3885f91602169be994d376dd6ecb3f55ad004efc2743cd5a179b75f
|
#!/usr/bin/python
import subprocess
import re
import argparse
# stripper() below uses a FASTA reader that is not imported in the original file;
# the import is assumed to live in the pynoncode package (module path assumed).
from pynoncode import fastq_fasta_parser
def stripper(fasta):
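    # Added comment (behaviour inferred from the code below): trims untemplated
    # 3' additions from each FASTA sequence, checking the suffixes CCA, CCAC,
    # CCACC and CCACCA in turn; a later (longer) match overrides an earlier one.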
result = {}
with open(fasta) as f:
for name, seq in fastq_fasta_parser.read_fasta(f):
bases = list(seq)
end1 = bases[-3:]
end1 = ''.join(end1)
if end1 == "CCA":
tmpseq = bases[:-3]
seq = ''.join(tmpseq)
end2 = bases[-4:]
end2 = ''.join(end2)
if end2 == "CCAC":
tmpseq = bases[:-4]
seq = ''.join(tmpseq)
end3 = bases[-5:]
end3 = ''.join(end3)
if end3 == "CCACC":
tmpseq = bases[:-5]
seq = ''.join(tmpseq)
end4 = bases[-6:]
end4 = ''.join(end4)
if end4 == "CCACCA":
tmpseq = bases[:-6]
seq = ''.join(tmpseq)
result[name] = seq
return result
def strip_ends(paired):
if paired == True:
output1 = open("clipped_1.fa", "w")
output2 = open("clipped_2.fa", "w")
data1 = stripper("unclipped_multi_unmapped_1.fa")
data2 = stripper("unclipped_multi_unmapped_2.fa")
for key in sorted(data1.keys()):
output1.write("{}\n{}\n".format(key, data1[key])),
for key in sorted(data2.keys()):
output2.write("{}\n{}\n".format(key, data2[key])),
else:
data1 = stripper("unclipped_multi_unmapped.fa")
output1 = open("clipped_fasta.fa", "w")
for key in sorted(data1.keys()):
output1.write("{}\n{}\n".format(key, data1[key])),
def paired_bowtie(index, clipped=False):
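    # Added comment (inferred from the commands below): two-pass paired-end
    # alignment with bowtie -- pass 1 keeps uniquely mapping pairs (-m 1) and
    # writes unmapped pairs to FASTA; pass 2 realigns those pairs allowing
    # multi-mapping (-k 10 -m 500).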
if clipped==False:
sam1_o = open("unclipped_unique.sam", "wb")
report1_o = open("unclipped_unique_report.txt", "wb")
sam2_o = open("unclipped_multimap.sam", "wb")
report2_o = open("unclipped_multi_report.txt", "wb")
uniq = "bowtie --best -f -m 1 -v 2 --sam --un unclipped_unique_unmapped.fa {0} -1 original_fasta_1.fa -2 original_fasta_2.fa".format(index)
multi= "bowtie --best -k 10 -f -m 500 -v 2 --sam --un unclipped_multi_unmapped.fa {0} -1 unclipped_unique_unmapped_1.fa -2 unclipped_unique_unmapped_2.fa".format(index)
p = subprocess.Popen(uniq.split(), stdout = sam1_o, stderr=report1_o)
p.communicate()
p = subprocess.Popen(multi.split(), stdout = sam2_o, stderr=report2_o)
p.communicate()
else:
sam1_o = open("clipped_unique.sam", "wb")
report1_o = open("clipped_unique_report.txt", "wb")
sam2_o = open("clipped_multimap.sam", "wb")
report2_o = open("clipped_multimap_report.txt", "wb")
uniq = "bowtie --best -f -m 1 -v 2 --sam --un clipped_unmapped.fa {0} -1 clipped_1.fa -2 clipped_2.fa".format(index)
multi= "bowtie --best -k 10 -f -m 500 -v 2 --sam {0} -1 clipped_unmapped_1.fa -2 clipped_unmapped_2.fa".format(index)
p = subprocess.Popen(uniq.split(), stdout = sam1_o, stderr=report1_o)
p.communicate()
p = subprocess.Popen(multi.split(), stdout = sam2_o, stderr=report2_o)
p.communicate()
def single_bowtie(index, clipped=False):
if clipped==False:
sam1_o = open("unclipped_unique.sam", "wb")
report1_o = open("unclipped_unique_report.txt", "wb")
sam2_o = open("unclipped_multimap.sam", "wb")
report2_o = open("unclipped_multi_report.txt", "wb")
uniq = "bowtie --best -f -m 1 -v 2 --sam --un unclipped_unique_unmapped.fa {0} original_fasta.fa".format(index)
multi= "bowtie --best -k 10 -f -m 500 -v 2 --sam --un unclipped_multi_unmapped.fa {0} unclipped_unique_unmapped.fa".format(index)
p = subprocess.Popen(uniq.split(), stdout = sam1_o, stderr=report1_o)
p.communicate()
p = subprocess.Popen(multi.split(), stdout = sam2_o, stderr=report2_o)
p.communicate()
else:
sam1_o = open("clipped_unique.sam", "wb")
report1_o = open("clipped_unique_report.txt", "wb")
sam2_o = open("clipped_multimap.sam", "wb")
report2_o = open("clipped_multimap_report.txt", "wb")
uniq = "bowtie --best -f -m 1 -v 2 --sam --un clipped_unique_unmapped.fa {0} clipped_fasta.fa".format(index)
multi= "bowtie --best -k 10 -f -m 500 -v 2 --sam {0} clipped_unique_unmapped.fa".format(index)
p = subprocess.Popen(uniq.split(), stdout = sam1_o, stderr=report1_o)
p.communicate()
p = subprocess.Popen(multi.split(), stdout = sam2_o, stderr=report2_o)
p.communicate()
def grep_unique(samfile):
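    # Added comment (behaviour inferred from the code below): splits a SAM file
    # into *.unique.sam and *.multi.sam, using the XS:i: tag in field 13 as the
    # multi-mapping hint and keeping only reads whose FLAG value is one of the
    # expected paired-end alignment flags (83/99/147/163 and 81/97/145/161).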
out = re.sub(".sam", ".unique.sam", samfile)
out2 = re.sub(".sam", ".multi.sam", samfile)
output= open(out, "w")
output2= open(out2, "w")
with open(samfile) as f:
for line in f:
line = line.rstrip()
word = line.split("\t")
if line.startswith("@"):
output.write("{}\n".format(line)),
output2.write("{}\n".format(line)),
continue
if len(word) > 12:
m = re.match("XS:i:", word[12])
if m:
if int(word[1]) == 147 or int(word[1]) == 83 or int(word[1]) == 99 or int(word[1]) == 163 or int(word[1]) == 81 or int(word[1]) == 97 or int(word[1]) == 145 or int(word[1]) == 161:
output2.write("{}\n".format(line)),
else:
if int(word[1]) == 147 or int(word[1]) == 83 or int(word[1]) == 99 or int(word[1]) == 163 or int(word[1]) == 81 or int(word[1]) == 97 or int(word[1]) == 145 or int(word[1]) == 161:
output.write("{}\n".format(line)),
def grep_single_unique(samfile):
out = re.sub(".sam", ".unique.sam", samfile)
out2 = re.sub(".sam", ".multi.sam", samfile)
output= open(out, "w")
output2= open(out2, "w")
with open(samfile) as f:
for line in f:
line = line.rstrip()
word = line.split("\t")
if line.startswith("@"):
output.write("{}\n".format(line)),
output2.write("{}\n".format(line)),
continue
if len(word) > 12:
m = re.match("XS:i:", word[12])
if m:
if int(word[1]) == 0 or int(word[1]) == 16:
output2.write("{}\n".format(line)),
else:
if int(word[1]) == 0 or int(word[1]) == 16:
output.write("{}\n".format(line)),
def paired_bowtie2(index, clipped=False):
if clipped==False:
report1_o = open("unclipped_unique_report.txt", "wb")
uniq = "bowtie2 -k 10 -N 1 -f -p 12 --no-mixed --no-discordant --un-conc unmapped_round1.fa -x {0} -1 fasta_1.fa -2 fasta_2.fa -S tmp.sam".format(index)
p = subprocess.Popen(uniq.split(), stderr=report1_o)
p.communicate()
grep_unique("tmp.sam")
subprocess.call(["mv", "tmp.unique.sam", "bowtie2.uc.unique.sam"])
subprocess.call(["mv", "tmp.multi.sam", "bowtie2.uc.multi.sam"])
else:
report1_o = open("clipped_unique_report.txt", "wb")
uniq = "bowtie2 -k 10 -N 1 -f -p 12 --no-mixed --no-discordant --un-conc unmapped_round2.fa -x {0} -1 clipped_1.fa -2 clipped_2.fa -S tmp.sam".format(index)
p = subprocess.Popen(uniq.split(), stderr=report1_o)
p.communicate()
grep_unique("tmp.sam")
subprocess.call(["mv", "tmp.unique.sam", "bowtie2.c.unique.sam"])
subprocess.call(["mv", "tmp.multi.sam", "bowtie2.c.multi.sam"])
def single_bowtie2(index, clipped=False):
if clipped==False:
report1_o = open("unclipped_unique_report.txt", "wb")
uniq = "bowtie2 -k 10 -N 1 -f -p 12 --un unmapped_round1.fa -x {0} -U fasta.fa -S tmp.sam".format(index)
p = subprocess.Popen(uniq.split(), stderr=report1_o)
p.communicate()
grep_single_unique("tmp.sam")
subprocess.call(["mv", "tmp.unique.sam", "bowtie2.uc.unique.sam"])
subprocess.call(["mv", "tmp.multi.sam", "bowtie2.uc.multi.sam"])
else:
report1_o = open("clipped_unique_report.txt", "wb")
uniq = "bowtie2 -k 10 -N 1 -f -p 12 --un unmapped_round2.fa -x {0} -U clipped_fasta.fa -S tmp.sam".format(index)
p = subprocess.Popen(uniq.split(), stderr=report1_o)
p.communicate()
grep_single_unique("tmp.sam")
subprocess.call(["mv", "tmp.unique.sam", "bowtie2.c.unique.sam"])
subprocess.call(["mv", "tmp.multi.sam", "bowtie2.c.multi.sam"])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Runs bowtie\n')
parser.add_argument('-p','--paired', help='Options True/False, is sample paired end?', required=False)
parser.add_argument('-i','--index', help='Bowtie index', required=True)
parser.add_argument('-c','--clipped', help='Options True/False, has sample been clipped?', required=False)
args = vars(parser.parse_args())
index = args["index"]
if args["paired"] == True:
if args["clipped"] == True:
paired_bowtie(index, True)
else:
paired_bowtie(index, False)
else:
if args["clipped"] == True:
single_bowtie(index, True)
else:
single_bowtie(index, False)
|
pdl30/pynoncode
|
pynoncode/run_bowtie.py
|
Python
|
gpl-2.0
| 8,057
|
[
"Bowtie"
] |
2d28936a18fafa4d6ef82632c7c19ffc22ecbab33c3c7772c4737861deacb85f
|
"""Tests of the makeStr.py module."""
import unittest as ut
import os
import pytest
class TestMakeStructures(ut.TestCase):
"""Tests of the _make_structures subroutine."""
def _compare_files(self,file1,file2):
out1 = []
out2 = []
with open(file1,"r") as o1:
for line in o1:
out1.append(line.strip().split())
with open(file2,"r") as o2:
for line in o2:
out2.append(line.strip().split())
self.assertEqual(out1,out2)
def test_str1(self):
from makeStr import _make_structures
from os import system
args = {"structures":[1],
"debug":False,
"examples":False,
"displace":0.0,
"input":"test_files/struct_enum.out_quaternary",
"mink":True,
"species":None,
"verbose":None,
"outfile":"vasp.{}",
"rattle":0.0,
"remove_zeros":False,
"config":"f",
"mapping":None
}
_make_structures(args)
self._compare_files("vasp.{}".format(args["structures"][0]),
"test_files/quaternary_vasp.1")
system("rm vasp*")
def test_str2(self):
from makeStr import _make_structures
from os import system
args = {"structures":[11],
"debug":False,
"examples":False,
"displace":0.0,
"input":"test_files/struct_enum.out_quaternary",
"mink":True,
"species":["Al","Ni","Ti","Cu"],
"verbose":None,
"outfile":"vasp.{}",
"rattle":0.0,
"remove_zeros":False,
"config":"f",
"mapping":None
}
_make_structures(args)
self._compare_files("vasp.{}".format(args["structures"][0]),
"test_files/quaternary_vasp.2")
system("rm vasp*")
def test_str3(self):
from makeStr import _make_structures
from os import system
args = {"structures":[11],
"debug":False,
"examples":False,
"displace":0.0,
"input":"test_files/struct_enum.out_quaternary",
"mink":True,
"species":["Al","Ni","Ti","Cu"],
"verbose":None,
"outfile":"vasp.{}",
"rattle":0.0,
"remove_zeros":True,
"config":"f",
"mapping":None
}
_make_structures(args)
self._compare_files("vasp.{}".format(args["structures"][0]),
"test_files/quaternary_vasp.3")
system("rm vasp*")
def test_str4(self):
from makeStr import _make_structures
from os import system
args = {"structures":[1,2,3,4,5,6,7,8,9,10,11],
"debug":False,
"examples":False,
"displace":0.0,
"input":"test_files/struct_enum.out_quaternary",
"mink":True,
"species":["Al","Ni","Ti","Cu"],
"verbose":None,
"outfile":"train.cfg",
"rattle":0.0,
"remove_zeros":False,
"config":"t",
"mapping":None
}
_make_structures(args)
self._compare_files("train.cfg","test_files/quaternary.cfg.1")
system("rm train.cfg")
|
msg-byu/enumlib
|
aux_src/test_makeStr.py
|
Python
|
mit
| 3,625
|
[
"VASP"
] |
b799811170ed90cd38aac91a24c3e2d5fa1a838a01621a7316935106c612dcab
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" StateVerifier returns an action that executes state verification (sv).
Stateverifier is kind of like a Guidance, except:
- it only has two methods: getAction and markExecuted
(no setTestModel, addRequirement, etc.)
- if there are no state verifications to execute,
getAction() returns None.
(In that case some other Guidance must decide what action to execute.)
"""
def _is_start_sv(action):
return 'start_sv' in str(action)
def _is_end_sv(action):
return 'end_sv' in str(action)
class StateVerifier:
def __init__(self):
self._currSv = None # the sv we're currently verifying
self._svsChecked = set() # the svs we've already checked in this state
def getAction(self,current_state):
""" Returns an action that executes a state verification.
This only executes an sv once during one visit to a state.
If no new sv can be executed from the given state, returns None
-> some (other) guidance should choose the action.
"""
acts = [t.getAction() for t in current_state.getOutTransitions()]
if self._currSv is not None:
# on a verification loop
if len(acts) != 1:
# TBD: dunno if verification loop could branch out...
# in such case, let some other guidance decide what to do
return None
return acts[0]
else:
# not on a verification loop, see if there are sv's to check
svs = [a for a in acts if _is_start_sv(a) and
a not in self._svsChecked]
return svs[0] if svs else None
def markExecuted(self,transition):
""" Must be called after every transition execution."""
if _is_start_sv(transition.getAction()):
# verification loop started
self._currSv = transition.getAction()
elif _is_end_sv(transition.getAction()):
# verification loop ended
self._svsChecked.add(self._currSv)
self._currSv = None
elif self._currSv is None:
# executed a transition outside a verification loop
self._svsChecked.clear()
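# Hedged usage sketch (not part of the original library): drive StateVerifier with
# minimal stand-ins for the model's state and transition objects. _FakeState,
# _FakeTransition and the action strings are invented for illustration.
class _FakeTransition:
    def __init__(self, action):
        self._action = action
    def getAction(self):
        return self._action

class _FakeState:
    def __init__(self, transitions):
        self._transitions = transitions
    def getOutTransitions(self):
        return self._transitions

def _example_state_verifier():
    sv = StateVerifier()
    state = _FakeState([_FakeTransition("start_sv:check_title"),
                        _FakeTransition("kw_PressKey")])
    return sv.getAction(state)  # returns the not-yet-checked "start_sv..." action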
|
tema-tut/tema-tg
|
TemaLib/tema/guidance/stateverifier.py
|
Python
|
mit
| 3,362
|
[
"VisIt"
] |
ec3110ee32e2a08b2cc7368496ddb7a69a178ca5f9bac0d264f958f7f021bb49
|
"""
Geckoboard decorators.
"""
import base64
from types import ListType, TupleType
from xml.dom.minidom import Document
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from django.utils.datastructures import SortedDict
from django.utils.decorators import available_attrs
from django.utils import simplejson
TEXT_NONE = 0
TEXT_INFO = 2
TEXT_WARN = 1
class WidgetDecorator(object):
"""
Geckoboard widget decorator.
The decorated view must return a data structure suitable for
serialization to XML or JSON for Geckoboard. See the Geckoboard
API docs or the source of extending classes for details.
If the ``GECKOBOARD_API_KEY`` setting is used, the request must
contain the correct API key, or a 403 Forbidden response is
returned.
"""
def __new__(cls, *args, **kwargs):
obj = object.__new__(cls)
obj.data = kwargs
try:
return obj(args[0])
except IndexError:
return obj
def __call__(self, view_func):
def _wrapped_view(request, *args, **kwargs):
if not _is_api_key_correct(request):
return HttpResponseForbidden("Geckoboard API key incorrect")
view_result = view_func(request, *args, **kwargs)
data = self._convert_view_result(view_result)
try:
self.data.update(data)
except ValueError:
self.data = data
content, content_type = _render(request, self.data)
return HttpResponse(content, content_type=content_type)
wrapper = wraps(view_func, assigned=available_attrs(view_func))
return csrf_exempt(wrapper(_wrapped_view))
def _convert_view_result(self, data):
# Extending classes do view result mangling here.
return data
widget = WidgetDecorator
class NumberWidgetDecorator(WidgetDecorator):
"""
Geckoboard Number widget decorator.
The decorated view must return a tuple `(current, [previous])`, where
`current` is the current value and `previous` is the previous value
    of the measured quantity.
"""
def _convert_view_result(self, result):
if not isinstance(result, (tuple, list)):
result = [result]
result = list(result)
for k, v in enumerate(result):
result[k] = v if isinstance(v, dict) else {'value': v}
return {'item': result}
number_widget = NumberWidgetDecorator
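# Hedged usage sketch (not part of the original module): a view decorated with
# number_widget returns (current, [previous]); _convert_view_result above wraps
# that into Geckoboard's {'item': [...]} payload. The view name and the numbers
# are invented for illustration.
@number_widget
def _example_number_view(request):
    return (42, 37)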
class RAGWidgetDecorator(WidgetDecorator):
"""
Geckoboard Red-Amber-Green (RAG) widget decorator.
The decorated view must return a tuple with three tuples `(value,
[text])`. The `value` parameters are the numbers shown in red,
amber and green (in that order). The `text` parameters are optional
and will be displayed next to the respective values in the
dashboard.
"""
def _convert_view_result(self, result):
items = []
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = SortedDict()
if elem[0] is None:
item['value'] = ''
else:
item['value'] = elem[0]
if len(elem) > 1:
item['text'] = elem[1]
items.append(item)
return {'item': items}
rag_widget = RAGWidgetDecorator
class TextWidgetDecorator(WidgetDecorator):
"""
Geckoboard Text widget decorator.
The decorated view must return a list of tuples `(message, [type])`.
The `message` parameters are strings that will be shown in the
widget. The `type` parameters are optional and tell Geckoboard how
to annotate the messages. Use ``TEXT_INFO`` for informational
    messages, ``TEXT_WARN`` for warnings and ``TEXT_NONE`` for plain
text (the default).
"""
def _convert_view_result(self, result):
items = []
if not isinstance(result, (tuple, list)):
result = [result]
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = SortedDict()
item['text'] = elem[0]
if len(elem) > 1 and elem[1] is not None:
item['type'] = elem[1]
else:
item['type'] = TEXT_NONE
items.append(item)
return {'item': items}
text_widget = TextWidgetDecorator
class PieChartWidgetDecorator(WidgetDecorator):
"""
Geckoboard Pie chart decorator.
The decorated view must return a list of tuples `(value, label,
color)`. The color parameter is a string 'RRGGBB[TT]' representing
red, green, blue and optionally transparency.
"""
def _convert_view_result(self, result):
items = []
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = SortedDict()
item['value'] = elem[0]
if len(elem) > 1:
item['label'] = elem[1]
if len(elem) > 2:
item['colour'] = elem[2]
items.append(item)
return {'item': items}
pie_chart = PieChartWidgetDecorator
class LineChartWidgetDecorator(WidgetDecorator):
"""
Geckoboard Line chart decorator.
The decorated view must return a tuple `(values, x_axis, y_axis,
[color])`. The `values` parameter is a list of data points. The
`x-axis` parameter is a label string or a list of strings, that will
be placed on the X-axis. The `y-axis` parameter works similarly for
the Y-axis. If there are more than one axis label, they are placed
evenly along the axis. The optional `color` parameter is a string
``'RRGGBB[TT]'`` representing red, green, blue and optionally
transparency.
"""
def _convert_view_result(self, result):
data = SortedDict()
data['item'] = list(result[0])
data['settings'] = SortedDict()
if len(result) > 1:
x_axis = result[1]
if x_axis is None:
x_axis = ''
if not isinstance(x_axis, (tuple, list)):
x_axis = [x_axis]
data['settings']['axisx'] = x_axis
if len(result) > 2:
y_axis = result[2]
if y_axis is None:
y_axis = ''
if not isinstance(y_axis, (tuple, list)):
y_axis = [y_axis]
data['settings']['axisy'] = y_axis
if len(result) > 3:
data['settings']['colour'] = result[3]
return data
line_chart = LineChartWidgetDecorator
class GeckOMeterWidgetDecorator(WidgetDecorator):
"""
Geckoboard Geck-O-Meter decorator.
The decorated view must return a tuple `(value, min, max)`. The
`value` parameter represents the current value. The `min` and `max`
parameters represent the minimum and maximum value respectively.
They are either a value, or a tuple `(value, text)`. If used, the
`text` parameter will be displayed next to the minimum or maximum
value.
"""
def _convert_view_result(self, result):
value, min, max = result
data = SortedDict()
data['item'] = value
data['max'] = SortedDict()
data['min'] = SortedDict()
if not isinstance(max, (tuple, list)):
max = [max]
data['max']['value'] = max[0]
if len(max) > 1:
data['max']['text'] = max[1]
if not isinstance(min, (tuple, list)):
min = [min]
data['min']['value'] = min[0]
if len(min) > 1:
data['min']['text'] = min[1]
return data
geck_o_meter = GeckOMeterWidgetDecorator
class FunnelWidgetDecorator(WidgetDecorator):
"""
Geckoboard Funnel decorator.
The decorated view must return a dictionary with at least an `items`
entry: `{'items': [(100, '100 %'), (50, '50 %')]}`.
Optional keys are:
type: 'standard' (default) or 'reverse'. Determines the
order of the colours.
percentage: 'show' (default) or 'hide'. Determines whether or
not the percentage value is shown.
sort: `False` (default) or `True`. Sort the entries by
value or not.
"""
def _convert_view_result(self, result):
data = SortedDict()
items = result.get('items', [])
# sort the items in order if so desired
if result.get('sort'):
items.sort(reverse=True)
data["item"] = [dict(zip(("value","label"), item)) for item in items]
data["type"] = result.get('type', 'standard')
data["percentage"] = result.get('percentage','show')
return data
funnel = FunnelWidgetDecorator
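# Hedged usage sketch (not part of the original module): a funnel view returns a
# dict whose 'items' list holds (value, label) pairs; the decorator above reshapes
# it into Geckoboard's funnel payload. The view name and numbers are invented.
@funnel
def _example_funnel_view(request):
    return {'items': [(100, 'Visited'), (40, 'Signed up'), (12, 'Paid')],
            'sort': True}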
class BulletWidgetDecorator(WidgetDecorator):
"""
See http://support.geckoboard.com/entries/274940-custom-chart-widget-type-definitions
for more information.
The decorated method must return a dictionary containing these keys:
Required keys:
label: Main label, eg. "Revenue 2011 YTD".
axis_points: Points on the axis, eg. [0, 200, 400, 600, 800, 1000].
current: Current value range, eg. 500 or [100, 500]. A singleton
500 is internally converted to [0, 500].
comparative: Comparative value, eg. 600.
Optional keys:
orientation: One of 'horizontal' or 'vertical'. Defaults to horizontal.
sublabel: Appears below main label.
red: Red start and end, eg. [0,100]. Defaults are calculated
from axis_points.
amber: Amber start and end, eg. [0,100]. Defaults are calculated
from axis_points.
green: Green start and end, eg. [0,100]. Defaults are calculated
from axis_points.
projected: Projected value range, eg. 900 or [100, 900]. A singleton
900 is internally converted to [0, 900].
auto_scale: If true then values will be scaled down if they
do not fit into Geckoboard's UI, eg. a value of 1100
is represented as 1.1. If scaling takes place the sublabel
is suffixed with that information. Default is true.
"""
def _convert_view_result(self, result):
# Check required keys. We do not do type checking since this level of
# competence is assumed.
for key in ('label', 'axis_points', 'current', 'comparative'):
if not result.has_key(key):
raise RuntimeError, "Key %s is required" % key
# Handle singleton current and projected
current = result['current']
projected = result.get('projected', None)
if not isinstance(current, (ListType, TupleType)):
current = [0, current]
if (projected is not None) and not isinstance(projected, (ListType,
TupleType)):
projected = [0, projected]
# If red, amber and green are not *all* supplied calculate defaults
axis_points = result['axis_points']
red = result.get('red', None)
amber = result.get('amber', None)
green = result.get('green', None)
if (red is None) or (amber is None) or (green is None):
if axis_points:
max_point = max(axis_points)
min_point = min(axis_points)
third = (max_point - min_point) / 3
red = (min_point, min_point + third - 1)
amber = (min_point + third, max_point - third - 1)
green = (max_point - third, max_point)
else:
red = amber = green = (0, 0)
# Scan axis points for largest value and scale to avoid overflow in
# Geckoboard's UI.
auto_scale = result.get('auto_scale', True)
if auto_scale and axis_points:
scale_label_map = {1000000000: 'billions', 1000000: 'millions',
1000: 'thousands'}
scale = 1
value = max(axis_points)
for n in (1000000000, 1000000, 1000):
if value >= n:
scale = n
break
# Little fixedpoint helper.
# todo: use a fixedpoint library
def scaler(value, scale):
return float('%.2f' % (value*1.0 / scale))
# Apply scale to all values
if scale > 1:
axis_points = [scaler(v, scale) for v in axis_points]
current = (scaler(current[0], scale), scaler(current[1], scale))
if projected is not None:
projected = (scaler(projected[0], scale),
scaler(projected[1], scale))
red = (scaler(red[0], scale), scaler(red[1], scale))
amber = (scaler(amber[0], scale), scaler(amber[1], scale))
green = (scaler(green[0], scale), scaler(green[1], scale))
result['comparative'] = scaler(result['comparative'], scale)
# Suffix sublabel
sublabel = result.get('sublabel', '')
if sublabel:
result['sublabel'] = '%s (%s)' % \
(sublabel, scale_label_map[scale])
else:
result['sublabel'] = scale_label_map[scale].capitalize()
# Assemble structure
data = dict(
orientation=result.get('orientation', 'horizontal'),
item=dict(
label=result['label'],
axis=dict(point=axis_points),
range=dict(
red=dict(start=red[0], end=red[1]),
amber=dict(start=amber[0], end=amber[1]),
green=dict(start=green[0], end=green[1])
),
measure=dict(current=dict(start=current[0], end=current[1])),
comparative=dict(point=result['comparative'])
)
)
# Add optional items
if result.has_key('sublabel'):
data['item']['sublabel'] = result['sublabel']
if projected is not None:
data['item']['measure']['projected'] = dict(start=projected[0],
end=projected[1])
return data
bullet = BulletWidgetDecorator
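# Hedged usage sketch (not part of the original module): a bullet view returns the
# dictionary described in the docstring above; only the four required keys are
# given here, so the red/amber/green ranges and scaling fall back to the computed
# defaults. The view name and numbers are invented.
@bullet
def _example_bullet_view(request):
    return {'label': 'Revenue 2011 YTD',
            'axis_points': [0, 200, 400, 600, 800, 1000],
            'current': 500,
            'comparative': 600}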
def _is_api_key_correct(request):
"""Return whether the Geckoboard API key on the request is correct."""
api_key = getattr(settings, 'GECKOBOARD_API_KEY', None)
if api_key is None:
return True
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
request_key = base64.b64decode(auth[1]).split(':')[0]
return request_key == api_key
return False
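# Hedged helper (not part of the original module): Geckoboard authenticates by
# sending the API key as the username of an HTTP Basic Authorization header with
# an empty password, which is the form _is_api_key_correct above parses.
def _example_basic_auth_header(api_key):
    return "Basic " + base64.b64encode("%s:" % api_key)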
def _render(request, data):
"""Render the data to Geckoboard based on the format request parameter."""
format = request.POST.get('format', '')
if not format:
format = request.GET.get('format', '')
if format == '2':
return _render_json(data)
else:
return _render_xml(data)
def _render_json(data):
return simplejson.dumps(data), 'application/json'
def _render_xml(data):
doc = Document()
root = doc.createElement('root')
doc.appendChild(root)
_build_xml(doc, root, data)
return doc.toxml(), 'application/xml'
def _build_xml(doc, parent, data):
if isinstance(data, (tuple, list)):
_build_list_xml(doc, parent, data)
elif isinstance(data, dict):
_build_dict_xml(doc, parent, data)
else:
_build_str_xml(doc, parent, data)
def _build_str_xml(doc, parent, data):
parent.appendChild(doc.createTextNode(unicode(data)))
def _build_list_xml(doc, parent, data):
for item in data:
_build_xml(doc, parent, item)
def _build_dict_xml(doc, parent, data):
for tag, item in data.items():
if isinstance(item, (list, tuple)):
for subitem in item:
elem = doc.createElement(tag)
_build_xml(doc, elem, subitem)
parent.appendChild(elem)
else:
elem = doc.createElement(tag)
_build_xml(doc, elem, item)
parent.appendChild(elem)
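# Hedged example (illustrative only): _build_xml maps nested dicts and lists onto
# XML elements, so the payload below renders roughly as
# <root><item><value>1</value></item><item><value>2</value></item></root>.
def _example_render_xml():
    content, content_type = _render_xml({'item': [{'value': 1}, {'value': 2}]})
    return content, content_type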
class GeckoboardException(Exception):
"""
Represents an error with the Geckoboard decorators.
"""
|
geckoboard/django-geckoboard
|
django_geckoboard/decorators.py
|
Python
|
mit
| 16,565
|
[
"Amber"
] |
e0c8bb9fd3f41e62f9de14e4f09481a1cf6de5131f17dfa27f7490338f8dddb7
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
#
#
# Authors: Lester Carballo Pérez <lestcape@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
######################################################
#installNewVersion redefine this to the new installer.
#######################################################
from __future__ import print_function
import sys, os, shutil, tarfile, argparse, stat
from threading import Thread
from gi.repository import Gtk, Gdk, GObject, GLib, Pango
import urllib.request
'''Important Constants'''
PROGRAM_NAME = "cinnamon-installer"
SELF_NAME = "Updater"
VERSION_FILE = "ver"
EXTENSION = ".tar.gz"
VERSION_URL = "https://raw.githubusercontent.com/lestcape/Cinnamon-Installer/master/ver"
WEB_SITE_URL = "https://github.com/lestcape/Cinnamon-Installer"
LAST_VERSION_URL = "https://github.com/lestcape/Cinnamon-Installer/archive/"
TEMP = "/tmp/"
ABS_PATH = os.path.abspath(__file__)
DIR_PATH = os.path.dirname(os.path.dirname(ABS_PATH)) + "/"
INSTALL_DIR = os.path.expanduser("~") + "/.local/share/" + PROGRAM_NAME + "/"
class MainApp():
"""Graphical updater for update Cinnamon Installer directly from github"""
def __init__(self, currentVersion, fileD):
self.currentVersion = currentVersion
print("Current version: " + self.currentVersion)
self._fileD = fileD
self.interface = Gtk.Builder()
self.interface.set_translation_domain(PROGRAM_NAME)
self.interface.add_from_file(DIR_PATH + "gui/mainUpdater.ui")
self._mainWindow = self.interface.get_object("mainWindow")
self._appNameLabel = self.interface.get_object("appNameLabel")
self._statusLabel = self.interface.get_object("statusLabel")
self.progressBar = self.interface.get_object("progressBar")
self._mainWindow.connect("destroy", self.closeWindows)
self.loop = GObject.MainLoop()
self.newVersion = 0.0
def show(self):
self._mainWindow.show_all()
self._mainWindow.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.WATCH))
self.refresh()
def tryUpdaterGUI(self):
# question dialog
self._statusLabel.set_text("Starting")
if float(self.currentVersion) == 0.0:
question_title = "Do you like to install <i>Cinnamon Installer</i>?"
question_description = "This is a requiere tools for install package on " + \
"<i>Configurable Menu</i>. \n" + \
"Note that your linux distribution might not be supported.\n" + \
"If you wish to contribute, please visit: " + \
"<a href='" + WEB_SITE_URL + "'>" + \
"Cinnamon Installer</a>."
response = self._question_dialog(question_title, question_description)
if response == Gtk.ResponseType.YES:
self.forceUpdaterGUI()
else:
result = []
self.show()
print("running GUI")
thread = Thread(target = self._checkNewVersionGUI, args=(result,))
thread.start()
self.loop.run()
self._handdledErrors(result)
if self._isUpdateNeeded():
self._statusLabel.set_text("Update needed")
question_title = "You have the oldest version <i>Cinnamon Installer "+ self.currentVersion +"</i>.\n" + \
"Do you like to update to the lasted version <i>Cinnamon Installer "+ self.newVersion +"</i>?"
question_description = "This is a requiere tools for install package on " + \
"<i>Configurable Menu</i>. \n" + \
"Note that your linux distribution might not be supported.\n" + \
"If you wish to contribute, please visit: " + \
"<a href='" + WEB_SITE_URL + "'>" + \
"Cinnamon Installer</a>."
response = self._question_dialog(question_title, question_description)
if response == Gtk.ResponseType.YES:
self.forceUpdaterGUI()
else:
self._statusLabel.set_text("Not necessary update")
question_title = "You have the latest version of <i>Cinnamon Installer "+ self.currentVersion +"</i>...\n" + \
"Do you want a reinstallation anyway?"
question_description = "Note that your linux distribution might not be supported.\n" + \
"If you wish to contribute, please visit: " + \
"<a href='" + WEB_SITE_URL + "'>" + \
"Cinnamon Installer</a>."
response = self._question_dialog(question_title, question_description)
if response == Gtk.ResponseType.YES:
self.forceUpdaterGUI()
print("Stop")
def forceUpdaterGUI(self):
self._statusLabel.set_text("Starting")
result = []
self.show()
self._fileD.readFile(VERSION_URL, self.chunk_report, VERSION_FILE)
self._isUpdateNeeded()
thread = Thread(target = self._performUpdaterGUI, args=(result,))
thread.start()
self.loop.run()
self._handdledErrors(result)
def uninstallGUI(self):
self._statusLabel.set_text("Starting")
        self.progressBar.set_fraction(0.1)
result = []
self.show()
thread = Thread(target = self._performUninstall, args=(result,))
thread.start()
self.loop.run()
def _checkNewVersionGUI(self, outList):
self._statusLabel.set_text("Checking for a new version")
try:
self._fileD.readFile(VERSION_URL, self.chunk_report, VERSION_FILE)
outList.append(0)
except urllib.error.URLError:
outList.append(1)
except IOError:
outList.append(2)
print("Fail to download")
self.closeWindows(None)
def _performUpdaterGUI(self, outList):
try:
self.progressBar.set_fraction(0)
urlD = LAST_VERSION_URL + self.newVersion + EXTENSION
self._fileD.readFile(urlD, self.chunk_report, "out" + EXTENSION)
self.installNewVersion()
self.setPermissionToExecute()
outList.append(0)
except urllib.error.URLError:
outList.append(1)
except IOError:
outList.append(2)
self.closeWindows(None)
def _performUninstall(self, outList):
        self.progressBar.set_fraction(0.5)
if os.path.exists(INSTALL_DIR):
shutil.rmtree(INSTALL_DIR, onerror=self._del_rw)
outList.append(0)
        self.progressBar.set_fraction(0.8)
self.closeWindows(None)
def installNewVersion(self):
self.progressBar.set_fraction(0)
self._statusLabel.set_text("Installing new version")
if os.path.exists(INSTALL_DIR):
shutil.rmtree(INSTALL_DIR, onerror=self._del_rw)
if os.path.exists(INSTALL_DIR):
return False
print("Old version removed")
if os.path.exists(TEMP + PROGRAM_NAME + "-" + self.newVersion):
shutil.rmtree(TEMP + PROGRAM_NAME + "-" + self.newVersion, onerror=self._del_rw)
self.progressBar.set_fraction(0.2)
tar = tarfile.open(TEMP + self._fileD.fileName)
tar.extractall(TEMP)
tar.close()
self.progressBar.set_fraction(0.3)
print("Uncompress " + TEMP + PROGRAM_NAME + "-" + self.newVersion)
root_src_dir = TEMP + PROGRAM_NAME + "-" + self.newVersion
root_target_dir = INSTALL_DIR
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_target_dir)
print("Create: " + dst_dir)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(src_file, dst_dir)
self.progressBar.set_fraction(0.6)
if os.path.exists(TEMP + self._fileD.fileName):
os.remove(TEMP + self._fileD.fileName)
if os.path.exists(TEMP + PROGRAM_NAME + "-" + self.newVersion):
shutil.rmtree(TEMP + PROGRAM_NAME + "-" + self.newVersion, onerror=self._del_rw)
self.progressBar.set_fraction(0.8)
print("New version installed")
return True
def setPermissionToExecute(self):
self.progressBar.set_fraction(1)
self._statusLabel.set_text("Finalized and validated")
stO = os.stat(INSTALL_DIR + PROGRAM_NAME + ".py")
stS = os.stat(INSTALL_DIR + SELF_NAME + ".py")
os.chmod(INSTALL_DIR + PROGRAM_NAME + ".py", stO.st_mode | stat.S_IEXEC)
os.chmod(INSTALL_DIR + SELF_NAME + ".py", stS.st_mode | stat.S_IEXEC)
print("Set permission to execute : " + INSTALL_DIR + PROGRAM_NAME)
print("Set permission to execute : " + INSTALL_DIR + SELF_NAME)
    def _del_rw(self, func, path, exc_info):
        # shutil.rmtree calls onerror(function, path, excinfo) on failure
        print("Error removing: " + str(path))
def _handdledErrors(self, result):
if result[0] == 0:
print("All finished with good result")
if result[0] == 1:
self._statusLabel.set_text("Found errors")
title = "Can not be perform the installation."
message = "Appear that you don't have internet connection...\n" + \
"Please try later."
self._custom_dialog(Gtk.MessageType.ERROR, title, message)
elif result[0] == 2:
self._statusLabel.set_text("Found errors")
title = "Can not be perform the installation."
message = "Appear that you don't have permission to write files on " + TEMP + "\n"\
"Please try later."
self._custom_dialog(Gtk.MessageType.ERROR, title, message)
def _on_clicked_cancelButton(self, button, transaction):
self._fileD.cancel()
self.loop.quit()
def closeWindows(self, windows):
self.loop.quit()
def refresh(self, force_update = False):
while Gtk.events_pending():
Gtk.main_iteration()
while Gtk.events_pending():
Gtk.main_iteration()
#Refresh(force_update)
def chunk_report(self, bytes_so_far, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
if total_size != -1:
if percent > 100:
self.progressBar.pulse()
else:
self.progressBar.set_fraction(percent / 100.0)
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
else:
self.progressBar.pulse()
sys.stdout.write("Downloaded %d of %s bytes (%s)%%\r" %
(bytes_so_far, "Unknow", "Unknow"))
if bytes_so_far >= total_size:
sys.stdout.write("\n")
def _isUpdateNeeded(self):
self.newVersion = self.readVersionFromFile(TEMP + VERSION_FILE)
self._fileD.deleteFile(TEMP + VERSION_FILE)
print("New version: " + self.newVersion)
if float(self.newVersion) > float(self.currentVersion):
return True
return False
def _custom_dialog(self, dialog_type, title, message):
'''
This is a generic Gtk Message Dialog function.
dialog_type = this is a Gtk type.
'''
dialog = Gtk.MessageDialog(self._mainWindow, 0, dialog_type,
Gtk.ButtonsType.OK, "")
dialog.set_markup("<b>%s</b>" % title)
dialog.format_secondary_markup(message)
dialog.run()
dialog.destroy()
def _question_dialog(self, title, message):
'''
This is a generic Gtk Message Dialog function
for questions.
'''
dialog = Gtk.MessageDialog(self._mainWindow, 0, Gtk.MessageType.QUESTION,
Gtk.ButtonsType.YES_NO, "")
dialog.set_markup("<b>%s</b>" % title)
dialog.format_secondary_markup(message)
response = dialog.run()
dialog.destroy()
return response
def readVersionFromFile(self, path):
try:
if os.path.isfile(path):
infile = open(path, "r")
result = infile.readline().rstrip("\r\n")
float(result) #Test info
return result
except Exception:
pass
return "0.0"
class Download():
def __init__(self):
self.executeAction = True
self.fileName = ""
        self.total_size = -1
def readFile(self, url, report=None, defaultFileName=""):
try:
self.fileName = defaultFileName
response = urllib.request.urlopen(url);
#if report is None:
# report = self.chunk_report
self.initializeRequest(response)
            self.chunk_read(response, report_hook=report)
except Exception:
e = sys.exc_info()[1]
print(str(e))
def chunk_report(self, bytes_so_far, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
if total_size != -1:
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
else:
sys.stdout.write("Downloaded %d of %s bytes (%s)%%\r" %
(bytes_so_far, "Unknow", "Unknow"))
if bytes_so_far >= total_size:
sys.stdout.write("\n")
    def cancel(self):
        # Stop the chunked download loop in chunk_read.
        self.executeAction = False
def initializeRequest(self, response):
self.total_size = -1
if "Content-Length" in response.info():
self.total_size = response.info()["Content-Length"].strip()
self.total_size = int(self.total_size)
self.bytes_so_far = 0
try:
if "Content-Disposition" in response.info():
realFileName = response.info()["Content-Disposition"].split("filename=")[1]
if realFileName[0] == '"' or realFileName[0] == "'":
realFileName = realFileName[1:-1]
if realFileName is not None:
self.fileName = realFileName
except Exception:
pass
self.deleteFile(TEMP + self.fileName)
def chunk_read(self, response, chunk_size=8192, report_hook=None):
while self.executeAction:
chunk = response.read(chunk_size)
if not chunk:
break
self.bytes_so_far += len(chunk)
self.saveData(chunk)
if report_hook:
report_hook(self.bytes_so_far, self.total_size)
self.executeAction = True
return self.bytes_so_far
def saveData(self, data):
if (len(data) > 0):
f = open(TEMP + self.fileName, "ab")
f.write(data)
f.close()
def deleteFile(self, path):
try:
if os.path.isfile(path):
os.remove(path)
#print("Clean file:" + path)
except Exception:
pass
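# Hedged usage sketch (not part of the original tool): Download.readFile streams a
# URL into TEMP in 8 KiB chunks and calls report(bytes_so_far, total_size) after
# each chunk. VERSION_URL, VERSION_FILE and TEMP are the module's own constants;
# the helper name is invented.
def _example_download_version_file():
    d = Download()
    d.readFile(VERSION_URL, report=d.chunk_report, defaultFileName=VERSION_FILE)
    return TEMP + d.fileName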
class Updater:
def __init__(self):
self.currentVersion = self.readVersionFromFile(INSTALL_DIR + VERSION_FILE)
self.newVersion = 0;
self._fileD = Download()
#print("Current version: " + self.currentVersion)
def checkNewVersionGUI(self):
self.mainW = MainApp(self.currentVersion, self._fileD)
self.mainW.tryUpdaterGUI();
self.executeTest()
def checkNewVersionSilent(self):
try:
self._fileD.readFile(VERSION_URL, None, VERSION_FILE)
if self.isUpdateNeeded():
print("update")
print("New version:" + str(self.newVersion))
else:
print("ready")
except Exception:
print("internet")
pass
def forceUpdaterGUI(self):
self.mainW = MainApp(self.currentVersion, self._fileD)
self.mainW.forceUpdaterGUI();
self.executeTest()
def uninstallGUI(self):
self.mainW = MainApp(self.currentVersion, self._fileD)
self.mainW.uninstallGUI();
def uninstallSilent(self):
if os.path.exists(INSTALL_DIR):
shutil.rmtree(INSTALL_DIR, onerror=self._del_rw)
def executeTest(self):
print("Run test package")
os.system("python3 " + INSTALL_DIR + PROGRAM_NAME + ".py --qtest package")
def isUpdateNeeded(self):
self.newVersion = self.readVersionFromFile(TEMP + VERSION_FILE)
self._fileD.deleteFile(TEMP + VERSION_FILE)
if float(self.newVersion) > float(self.currentVersion):
return True
return False
def readVersionFromFile(self, path):
try:
if os.path.isfile(path):
infile = open(path, "r")
result = infile.readline().rstrip("\r\n")
float(result) #Test info
return result
except Exception:
pass
return "0.0"
    def _del_rw(self, func, path, exc_info):
        # shutil.rmtree calls onerror(function, path, excinfo) on failure
        print("Error removing: " + str(path))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process the updater options.")
group_action = parser.add_mutually_exclusive_group(required=True)
group_action.add_argument("--qupdate", nargs="?", action="store", type=str, help="Query for update Cinnamon Installer[silent/gui/forced/test]")
group_action.add_argument("--uninstall", nargs="?", action="store", type=str, help="Uninstall Cinnamon Installer[silent/gui]")
args = parser.parse_args()
updater = Updater()
if args.qupdate:
if args.qupdate == "silent":
updater.checkNewVersionSilent()
elif args.qupdate == "gui":
updater.checkNewVersionGUI()
elif args.qupdate == "forced":
updater.forceUpdaterGUI()
elif args.qupdate == "test":
updater.executeTest()
elif args.uninstall:
if args.uninstall == "gui":
updater.uninstallGUI()
elif args.uninstall == "silent":
updater.uninstallSilent()
|
lestcape/Cinnamon-Installer
|
tools/updater.py
|
Python
|
gpl-3.0
| 19,160
|
[
"VisIt"
] |
ace784c714295793dbb60d3a6f91b6a01fcb5278d87a53045a8e4fd640bc857f
|
"""Gradient descent
"""
import numpy as np
from frankenstein.tools.perf_utils import TIMER
from pyscf.lib import logger
# Assumed import: the NR line search below calls scopt_linsrc.scalar_search_armijo
from scipy.optimize import linesearch as scopt_linsrc
""" Helper functions
"""
def get_gHp_fd(get_grad, p, order=1, eps=1.E-4):
""" Compute gradient-Hessian product using finite difference
Inps:
get_grad (callable):
grad(p) --> gradient given a direction p
p (np.ndarray):
initial gradient
order (int, default=1):
            order 1 --> forward FD (err ~ O(eps))
order 2 --> central FD (err ~ O(eps^2))
eps (float, default=1.E-4):
strength of perturbation
"""
p_f = get_grad(eps*p)
if order == 1:
return 2. * (p_f-p) / eps
elif order == 2:
p_b = get_grad(-eps*p)
return (p_f-p_b) / eps
else:
raise ValueError("Invalid order (must be 1 or 2)!")
# Newton-raphson (for debug)
class NR:
def __init__(self, mf, eps=1.E-3, fd=2):
self.verbose = mf.verbose
self.stdout = mf.stdout
self.comment = ""
self.eps = eps
self.fd = fd
try:
stdout = mf.stdout
except:
stdout = None
self.timer = TIMER(4, stdout=stdout)
self.iteration = 0
def next_step(self, mf):
f = mf.get_value_gdm()
g = mf.get_grad_gdm()
# build fd hessian
def dphi(i, eps):
mf.back_to_origin()
mf.ov = np.zeros([mf.ov_size])
mf.ov[i] = eps
mf.update_all()
mf.ov[i] = 0.
return mf.get_grad_gdm()
self.timer.start(0)
mf.save_new_origin()
H = np.zeros([mf.ov_size]*2)
for i in range(mf.ov_size):
if self.fd == 1:
H[i] = (dphi(i,self.eps) - g) / self.eps
elif self.fd == 2:
H[i] = (dphi(i,self.eps) - dphi(i,-self.eps)) / (2.*self.eps)
else:
raise ValueError("fd must be 1 or 2.")
mf.back_to_origin()
self.timer.stop(0)
# get raw NR step
self.timer.start(1)
lbd = 1.E-5
du = -np.linalg.solve(H+lbd*np.eye(H.shape[1]), g)
self.timer.stop(1)
# line search
fc = [0]
def phi(alp):
fc[0] += 1
mf.back_to_origin()
mf.ov = alp * mf.regularize_step_gdm(du)
mf.update_all(skip_grad=True)
return mf.get_value_gdm()
self.timer.start(2)
mf.save_new_origin()
fold = f
dphi0 = g @ du
alp, fnew = scopt_linsrc.scalar_search_armijo(
phi, fold, dphi0, c1=1.E-4, alpha0=1.)
self.timer.stop(2)
fc = fc[0]
if alp is None:
raise RuntimeError("Line search failed.")
if fc == 1:
self.comment = "NR"
else:
self.comment = "LnSr (%d,%.2f)"%(fc,alp)
self.timer.start(3)
mf.update_gdm()
self.timer.stop(3)
self.iteration += 1
def report_timing(self):
self.timer.report(tnames=["hess", "linsolve", "linsrch", "grad"])
# Direct minimization (for debug)
class DM:
def __init__(self, mf, bounds=[-1,0], method="bf", plot=False):
if method == "bf":
self.alps = np.arange(*bounds, 0.05)
elif method == "interpolate":
self.amin = min(bounds)
self.amax = max(bounds)
self.ninter = 5
self.neval = 100
else:
raise ValueError("Unknown method '%s'." % method)
self.method = method
self.plot = plot
self.verbose = mf.verbose
self.stdout = mf.stdout
self.comment = ""
try:
stdout = mf.stdout
except:
stdout = None
self.timer = TIMER(2, stdout=stdout)
self.iteration = 0
def next_step(self, mf):
from scipy import interpolate as itplt
from matplotlib import pyplot as plt
g = mf.get_grad_gdm()
def phi(alp):
mf.back_to_origin()
mf.ov = alp * g
mf.update_all(skip_grad=True)
mf.ov = np.zeros(mf.ov_size)
return mf.get_value_gdm()
mf.save_new_origin()
E0 = mf.get_value_gdm()
self.timer.start(0)
if self.method == "bf":
alps = self.alps
Es = np.asarray([phi(alp) for alp in alps]) - E0
elif self.method == "interpolate":
amin = self.amin
amax = self.amax
err_g = np.mean(g**2)**0.5
if err_g > 1.E-3:
xs = np.linspace(amin, amax, self.ninter)
ys = np.asarray([phi(x) for x in xs])
xyrep = itplt.splrep(xs, ys)
fp = lambda x: itplt.splev(x, xyrep)
else:
xs = np.linspace(amin, amax, 3)
ys = np.asarray([phi(x) for x in xs])
p = np.polyfit(xs, ys, 2)
fp = np.poly1d(p)
alps = np.linspace(amin, amax, self.neval)
Es = fp(alps)
idmin = np.argmin(Es)
alp = alps[idmin]
E = Es[idmin]
self.timer.stop(0)
if self.plot:
plt.plot(alps, Es, "-")
if self.method == "interpolate": plt.plot(xs, ys, "o")
plt.plot(alp, E, "rx")
plt.show()
self.comment = "alp = % .2f" % alp
self.timer.start(1)
mf.back_to_origin()
mf.ov = alp * g
mf.update_all()
self.timer.stop(1)
self.iteration += 1
def report_timing(self):
self.timer.report(["lnsrch", "update me"])
# Direct inversion of iterative subspace (DIIS)
from pyscf.lib.diis import DIIS as pyDIIS
class DIIS:
def __init__(self, mf, ndiis=50, diis_start=1):
self.adiis = pyDIIS()
self.adiis.space = ndiis
self.adiis.min_space = diis_start
self.iteration = 0
self.comment = ""
try:
stdout = mf.stdout
except:
stdout = None
self.timer = TIMER(4, stdout=stdout)
def next_step(self, mf):
self.iteration += 1
self.timer.start(0)
f = mf.get_fock_diis()
ferr = mf.get_err_diis()
self.timer.stop(0)
self.timer.start(1)
f = self.adiis.update(f, ferr)
self.timer.stop(1)
self.timer.start(2)
if hasattr(mf, "mom_start"):
mom = self.iteration >= mf.mom_start
else:
mom = False
comment = mf.update_diis(f, mom=mom)
self.timer.stop(2)
self.timer.start(3)
mf.update_all()
self.timer.stop(3)
self.comment = "DIIS" if self.iteration > 0 else "Roothaan"
self.comment += " %s" % comment
def report_timing(self):
self.timer.report(tnames=["diis prep", "diis extrap", "roothaan",
"fock build"])
|
hongzhouye/frankenstein
|
sgscf/sgopt.py
|
Python
|
bsd-3-clause
| 6,925
|
[
"PySCF"
] |
51732579a092d5c6cceca4062fd91336486d282e089eb3fac80b6db2a2592d15
|
"""Known matrices related to physics"""
from __future__ import print_function, division
from sympy import Matrix, I, pi, sqrt
from sympy.functions import exp
from sympy.core.compatibility import xrange
def msigma(i):
r"""Returns a Pauli matrix `\sigma_i` with `i=1,2,3`
References
==========
.. [1] http://en.wikipedia.org/wiki/Pauli_matrices
Examples
========
>>> from sympy.physics.matrices import msigma
>>> msigma(1)
Matrix([
[0, 1],
[1, 0]])
"""
if i == 1:
mat = ( (
(0, 1),
(1, 0)
) )
elif i == 2:
mat = ( (
(0, -I),
(I, 0)
) )
elif i == 3:
mat = ( (
(1, 0),
(0, -1)
) )
else:
raise IndexError("Invalid Pauli index")
return Matrix(mat)
def pat_matrix(m, dx, dy, dz):
"""Returns the Parallel Axis Theorem matrix to translate the inertia
matrix a distance of `(dx, dy, dz)` for a body of mass m.
Examples
========
To translate a body having a mass of 2 units a distance of 1 unit along
the `x`-axis we get:
>>> from sympy.physics.matrices import pat_matrix
>>> pat_matrix(2, 1, 0, 0)
Matrix([
[0, 0, 0],
[0, 2, 0],
[0, 0, 2]])
"""
dxdy = -dx*dy
dydz = -dy*dz
dzdx = -dz*dx
dxdx = dx**2
dydy = dy**2
dzdz = dz**2
mat = ((dydy + dzdz, dxdy, dzdx),
(dxdy, dxdx + dzdz, dydz),
(dzdx, dydz, dydy + dxdx))
return m*Matrix(mat)
def mgamma(mu, lower=False):
r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
(Dirac) representation.
If you want `\gamma_\mu`, use ``gamma(mu, True)``.
We use a convention:
`\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`
`\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_matrices
Examples
========
>>> from sympy.physics.matrices import mgamma
>>> mgamma(1)
Matrix([
[ 0, 0, 0, 1],
[ 0, 0, 1, 0],
[ 0, -1, 0, 0],
[-1, 0, 0, 0]])
"""
if not mu in [0, 1, 2, 3, 5]:
raise IndexError("Invalid Dirac index")
if mu == 0:
mat = (
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1)
)
elif mu == 1:
mat = (
(0, 0, 0, 1),
(0, 0, 1, 0),
(0, -1, 0, 0),
(-1, 0, 0, 0)
)
elif mu == 2:
mat = (
(0, 0, 0, -I),
(0, 0, I, 0),
(0, I, 0, 0),
(-I, 0, 0, 0)
)
elif mu == 3:
mat = (
(0, 0, 1, 0),
(0, 0, 0, -1),
(-1, 0, 0, 0),
(0, 1, 0, 0)
)
elif mu == 5:
mat = (
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 0, 0, 0),
(0, 1, 0, 0)
)
m = Matrix(mat)
if lower:
if mu in [1, 2, 3, 5]:
m = -m
return m
#Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field
#Theory
minkowski_tensor = Matrix( (
(1, 0, 0, 0),
(0, -1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1)
))
def mdft(n):
r"""
Returns an expression of a discrete Fourier transform as a matrix multiplication.
It is an n X n matrix.
References
==========
.. [1] https://en.wikipedia.org/wiki/DFT_matrix
Examples
========
>>> from sympy.physics.matrices import mdft
>>> mdft(3)
Matrix([
[sqrt(3)/3, sqrt(3)/3, sqrt(3)/3],
[sqrt(3)/3, sqrt(3)*exp(-2*I*pi/3)/3, sqrt(3)*exp(-4*I*pi/3)/3],
[sqrt(3)/3, sqrt(3)*exp(-4*I*pi/3)/3, sqrt(3)*exp(-8*I*pi/3)/3]])
"""
mat = [[None for x in xrange(n)] for y in xrange(n)]
base = exp(-2*pi*I/n)
mat[0] = [1]*n
for i in range(n):
mat[i][0] = 1
for i in xrange(1, n):
for j in xrange(i, n):
mat[i][j] = mat[j][i] = base**(i*j)
return (1/sqrt(n))*Matrix(mat)
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/physics/matrices.py
|
Python
|
mit
| 4,145
|
[
"DIRAC"
] |
b0cdff8aeea9221b5a6504dfbda3964308e57ef6a0c3b350baefaa20aaac2c2b
|
"""URL opener.
Copyright 2004-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import os, urllib2, bisect, httplib, types, tempfile
from _request import Request
import _response
import _rfc3986
import _sockettimeout
import _urllib2_fork
from _util import isstringlike
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
set
except NameError:
import sets
set = sets.Set # @ReservedAssignment
open_file = open
class ContentTooShortError(urllib2.URLError):
def __init__(self, reason, result):
urllib2.URLError.__init__(self, reason)
self.result = result
def set_request_attr(req, name, value, default):
try:
getattr(req, name)
except AttributeError:
setattr(req, name, default)
if value is not default:
setattr(req, name, value)
class OpenerDirector(_urllib2_fork.OpenerDirector):
def __init__(self):
_urllib2_fork.OpenerDirector.__init__(self)
# really none of these are (sanely) public -- the lack of initial
# underscore on some is just due to following urllib2
self.process_response = {}
self.process_request = {}
self._any_request = {}
self._any_response = {}
self._handler_index_valid = True
self._tempfiles = []
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
if handler in self.handlers:
return
# XXX why does self.handlers need to be sorted?
bisect.insort(self.handlers, handler)
handler.add_parent(self)
self._handler_index_valid = False
def _maybe_reindex_handlers(self):
if self._handler_index_valid:
return
handle_error = {}
handle_open = {}
process_request = {}
process_response = {}
any_request = set()
any_response = set()
unwanted = []
for handler in self.handlers:
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
if meth == "any_request":
any_request.add(handler)
added = True
continue
elif meth == "any_response":
any_response.add(handler)
added = True
continue
ii = meth.find("_")
scheme = meth[:ii]
condition = meth[ii + 1:]
if condition.startswith("error"):
jj = meth[ii + 1:].find("_") + ii + 1
kind = meth[jj + 1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = handle_error.setdefault(scheme, {})
elif condition == "open":
kind = scheme
lookup = handle_open
elif condition == "request":
kind = scheme
lookup = process_request
elif condition == "response":
kind = scheme
lookup = process_response
else:
continue
lookup.setdefault(kind, set()).add(handler)
added = True
if not added:
unwanted.append(handler)
for handler in unwanted:
self.handlers.remove(handler)
# sort indexed methods
# XXX could be cleaned up
for lookup in [process_request, process_response]:
for scheme, handlers in lookup.iteritems():
lookup[scheme] = handlers
for scheme, lookup in handle_error.iteritems():
for code, handlers in lookup.iteritems():
handlers = list(handlers)
handlers.sort()
lookup[code] = handlers
for scheme, handlers in handle_open.iteritems():
handlers = list(handlers)
handlers.sort()
handle_open[scheme] = handlers
# cache the indexes
self.handle_error = handle_error
self.handle_open = handle_open
self.process_request = process_request
self.process_response = process_response
self._any_request = any_request
self._any_response = any_response
def _request(self, url_or_req, data, visit,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
if isstringlike(url_or_req):
req = Request(url_or_req, data, visit=visit, timeout=timeout)
else:
# already a mechanize.Request instance
req = url_or_req
if data is not None:
req.add_data(data)
# XXX yuck
set_request_attr(req, "visit", visit, None)
set_request_attr(req, "timeout", timeout,
_sockettimeout._GLOBAL_DEFAULT_TIMEOUT)
return req
def open(self, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
req = self._request(fullurl, data, None, timeout)
req_scheme = req.get_type()
self._maybe_reindex_handlers()
# pre-process request
# XXX should we allow a Processor to change the URL scheme
# of the request?
request_processors = set(self.process_request.get(req_scheme, []))
request_processors.update(self._any_request)
request_processors = list(request_processors)
request_processors.sort()
for processor in request_processors:
for meth_name in ["any_request", req_scheme + "_request"]:
meth = getattr(processor, meth_name, None)
if meth:
req = meth(req)
# In Python >= 2.4, .open() supports processors already, so we must
# call ._open() instead.
urlopen = _urllib2_fork.OpenerDirector._open
response = urlopen(self, req, data)
# post-process response
response_processors = set(self.process_response.get(req_scheme, []))
response_processors.update(self._any_response)
response_processors = list(response_processors)
response_processors.sort()
for processor in response_processors:
for meth_name in ["any_response", req_scheme + "_response"]:
meth = getattr(processor, meth_name, None)
if meth:
response = meth(req, response)
return response
def error(self, proto, *args):
if proto in ['http', 'https']:
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is not different than http @ReservedAssignment
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error # @ReservedAssignment
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = apply(self._call_chain, args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return apply(self._call_chain, args)
BLOCK_SIZE = 1024 * 8
def retrieve(self, fullurl, filename=None, reporthook=None, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT,
open=open_file): # @ReservedAssignment
"""Returns (filename, headers).
For remote objects, the default filename will refer to a temporary
file. Temporary files are removed when the OpenerDirector.close()
method is called.
For file: URLs, at present the returned filename is None. This may
change in future.
If the actual number of bytes read is less than indicated by the
Content-Length header, raises ContentTooShortError (a URLError
subclass). The exception's .result attribute contains the (filename,
headers) that would have been returned.
"""
req = self._request(fullurl, data, False, timeout)
scheme = req.get_type()
fp = self.open(req)
try:
headers = fp.info()
if filename is None and scheme == 'file':
# XXX req.get_selector() seems broken here, return None,
# pending sanity :-/
return None, headers
# return urllib.url2pathname(req.get_selector()), headers
if filename:
tfp = open(filename, 'wb')
else:
path = _rfc3986.urlsplit(req.get_full_url())[2]
suffix = os.path.splitext(path)[1]
fd, filename = tempfile.mkstemp(suffix)
self._tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
try:
result = filename, headers
bs = self.BLOCK_SIZE
size = -1
read = 0
blocknum = 0
if reporthook:
if "content-length" in headers:
size = int(headers["Content-Length"])
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if block == "":
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
finally:
tfp.close()
finally:
fp.close()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: "
"got only %i out of %i bytes" % (read, size),
result
)
return result
def close(self):
_urllib2_fork.OpenerDirector.close(self)
# make it very obvious this object is no longer supposed to be used
self.open = self.error = self.retrieve = self.add_handler = None
if self._tempfiles:
for filename in self._tempfiles:
try:
os.unlink(filename)
except OSError:
pass
del self._tempfiles[:]
def wrapped_open(urlopen, process_response_object, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
success = True
try:
response = urlopen(fullurl, data, timeout)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
if response is not None:
response = process_response_object(response)
if not success:
raise response
return response
class ResponseProcessingOpener(OpenerDirector):
def open(self, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
def bound_open(fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return OpenerDirector.open(self, fullurl, data, timeout)
return wrapped_open(
bound_open, self.process_response_object, fullurl, data, timeout)
def process_response_object(self, response):
return response
class SeekableResponseOpener(ResponseProcessingOpener):
def process_response_object(self, response):
return _response.seek_wrapped_response(response)
def isclass(obj):
return isinstance(obj, (types.ClassType, type))
class OpenerFactory:
"""This class's interface is quite likely to change."""
default_classes = [
# handlers
_urllib2_fork.ProxyHandler,
_urllib2_fork.UnknownHandler,
_urllib2_fork.HTTPHandler,
_urllib2_fork.HTTPDefaultErrorHandler,
_urllib2_fork.HTTPRedirectHandler,
_urllib2_fork.FTPHandler,
_urllib2_fork.FileHandler,
# processors
_urllib2_fork.HTTPCookieProcessor,
_urllib2_fork.HTTPErrorProcessor,
]
if hasattr(httplib, 'HTTPS'):
default_classes.append(_urllib2_fork.HTTPSHandler)
handlers = []
replacement_handlers = []
def __init__(self, klass=OpenerDirector):
self.klass = klass
def build_opener(self, *handlers):
"""Create an opener object from a list of handlers and processors.
The opener will use several default handlers and processors, including
support for HTTP and FTP.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
opener = self.klass()
default_classes = list(self.default_classes)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
build_opener = OpenerFactory().build_opener
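# Hedged usage sketch (not part of the original module): build_opener() assembles
# an OpenerDirector from the default handler classes listed above; any handler
# passed in that subclasses a default replaces it. The URL below is a placeholder.
def _example_build_opener(url="http://example.com/"):
    opener = build_opener()
    response = opener.open(url)
    try:
        return response.read()
    finally:
        response.close()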
_opener = None
urlopen_lock = _threading.Lock() # @UndefinedVariable
def urlopen(url, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
global _opener
if _opener is None:
urlopen_lock.acquire()
try:
if _opener is None:
_opener = build_opener()
finally:
urlopen_lock.release()
return _opener.open(url, data, timeout)
def urlretrieve(url, filename=None, reporthook=None, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
global _opener
if _opener is None:
urlopen_lock.acquire()
try:
if _opener is None:
_opener = build_opener()
finally:
urlopen_lock.release()
return _opener.retrieve(url, filename, reporthook, data, timeout)
def install_opener(opener):
global _opener
_opener = opener
|
ppizarror/korektor
|
bin/mechanize/_opener.py
|
Python
|
gpl-2.0
| 14,892
|
[
"VisIt"
] |
5c8bb57e574084d92cef74b2d1eca7e6759280310ca9c98a3081d5c925cffad0
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 05 18:55:55 2010
Last changed on : July 2016 by Xueshi
@author: Jonas Neergaard-Nielsen
Tools for calculation of Gaussian beam propagation using ABCD matrix formalism.
All lengths are in units of mm.
"""
import numpy as np
from numpy import pi, conj
def wR2q(w, R, lam, n=1):
"""
q = wR2q(w, R, n=1)
--------------
Get the q-parameter from a given spot size and radius of curvature.
n is the medium's refractive index.
"""
return 1/(1/R - 1j * lam/n / (pi * w**2))
def w02q(w0, lam, n=1 ):
"""
    q = w02q(w0, lam, n=1)
------------
Get the q-parameter at a waist point from the waist size.
n is the medium's refractive index.
"""
return 1j * pi * w0**2 / (lam/n)
def q2w(q, lam, n=1):
"""
    w = q2w(q, lam, n=1)
----------
Get the spot size from a given q-parameter.
n is the medium's refractive index.
"""
return np.sqrt(-lam/n / (pi * np.imag(1 / q)))
def q2R(q):
"""
    R = q2R(q)
    ----------
    Get the beam radius of curvature from a given q-parameter.
"""
return 1/ np.real(1 / q)
def q2w0(q, lam, n=1):
"""
    w0 = q2w0(q, lam, n=1)
------------
Get the waist size from a given q-parameter.
n is the medium's refractive index.
"""
return np.sqrt(np.imag(q) * lam/n / pi)
def q2div(q, lam, n=1):
"""
    div = q2div(q, lam, n=1)
--------------
Get the far-field beam divergence for a given q-parameter.
n is the medium's refractive index.
"""
return lam/n / (pi * q2w0(q, lam))
def qABCD(q, M):
"""
q1 = qABCD(q0, M)
-----------------
Transform the q-parameter according to the ABCD matrix M.
"""
M = np.array(M)
return (M[0, 0] * q + M[0, 1]) / (M[1, 0] * q + M[1, 1])
def qreverse(q):
"""
q1 = qreverse(q)
----------------
q-parameter transformation when changing propagation direction.
"""
return -conj(q)
def qpropagate(zini, qini, elements, z):
"""
qout = qpropagate(zini, qini, elements, z)
------------------------------------------
Propagate the q-parameter through an optical system.
zini, qini : location and value of a known q-parameter of the beam
(qini must be given for forward propagation of the beam)
elements : list of [z-location, ABCD matrix] descriptions of the
optical elements
z : location to calculate output q-parameter (if z < zini, the
output q-parameter will still be for forward propagation)
"""
elements = elements[:]
elements.sort()
zt = zini
qt = qini
if z >= zini:
elements.reverse()
while elements:
el = elements.pop()
if zt <= el[0] <= z:
qt += el[0] - zt
qt = qABCD(qt, el[1])
zt = el[0]
qt += z - zt
else:
qt = qreverse(qt)
while elements:
el = elements.pop()
if z <= el[0] <= zt:
qt += zt - el[0]
qt = qABCD(qt, el[1])
zt = el[0]
qt += zt - z
qt = qreverse(qt)
return qt
# =============
# ABCD matrices
#
def Mprop(d):
"""
M = Mprop(d)
------------
ABCD matrix for free space propagation of distance d.
"""
return np.matrix([[1, d], [0, 1]])
def Minterface(n0, n1, R=np.inf):
"""
M = Minterface(n0, n1, R='inf')
----------------------
ABCD matrix for the refraction at an interface (with radius of curvature R)
from a medium with refractive index n0 to a medium with refractive index n1.
If no R is given, R=infinite i.e. flat surface is assumed.
R>0 means convex interface.
"""
return np.matrix([[1, 0], [(n0-n1)/(R*n1), n0/n1]])
def Mlens(f):
"""
M = Mlens(f)
------------
ABCD matrix for a thin lens of focal length f.
"""
return np.matrix([[1, 0], [-1/f, 1]])
def Mmirror(R):
"""
M = Mmirror(R)
--------------
ABCD matrix for a curved mirror with radius of curvature R.
Concave mirrors have R<0, convex have R>0.
"""
return np.matrix([[1, 0], [2/R, 1]])
# ########################
# default initializations
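# ------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module).
# Assumptions: the wavelength, waist size and lens placement below are arbitrary
# example values; all lengths are in mm as elsewhere in this file. The __main__
# guard keeps the example from running on import.
if __name__ == "__main__":
    lam = 1064e-6                         # wavelength in mm (example value)
    q0 = w02q(0.5, lam)                   # q-parameter at a 0.5 mm input waist
    elements = [[200.0, Mlens(100.0)]]    # thin f = 100 mm lens at z = 200 mm
    q_out = qpropagate(0.0, q0, elements, 400.0)
    print("spot size at z = 400 mm: %.3f mm" % q2w(q_out, lam))
    print("waist size after the lens: %.3f mm" % q2w0(q_out, lam))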
|
XueshiGuo/Gau-modematching
|
modematching/abcd.py
|
Python
|
mit
| 4,366
|
[
"Gaussian"
] |
8de78a7fb991ccf7b132ed81d017ba4156c30ba9f96c3bea13c72d308baea180
|
import sys
from os.path import isfile
from collections import defaultdict
import numpy as np
from Bio import AlignIO, SeqIO
from . import config as ttconf
from . import MissingDataError
from .seq_utils import seq2array, guess_alphabet, alphabets
string_types = [str] if sys.version_info[0]==3 else [str, unicode]
def simple_logger(*args, **kwargs):
print(args)
class SequenceData(object):
"""docstring for SeqData
Attributes
----------
additional_constant_sites : int
length of the sequence without variation not included in the alignment
aln : dict
sequences, either sparse of full
ambiguous : byte
character signifying missing data
compress : bool
compress the alignment
compressed_alignment : dict
dictionary mapping sequence names to compressed sequences
compressed_to_full_sequence_map : dict
for each compressed position, contain a list of positions in the full alignment
fill_overhangs : bool
treat gaps at either end of sequence as missing data
full_length : int
length of the sequence
full_to_compressed_sequence_map : np.array
a map of each position in the full sequence to the compressed sequence
inferred_const_sites : list
list of positions that are constant but differ from the reference, or contain ambiguous characters
is_sparse : bool
        whether the representation of the alignment is sparse (dict) or full (array)
likely_alphabet : str
        simple guess as to whether the sequence alignment is nucleotides or amino acids
logger : callable
        function writing log messages
multiplicity : np.array
specifies for each column of the compressed alignment how often this pattern occurs
nonref_positions : list
positions where at least one sequence differs from the reference
ref : np.array
reference sequence (stored as np.array(dtype="S"))
seq_multiplicity : dict
store the multiplicity of sequence, for example read count in a deep sequencing experiment
sequence_names : list
list of all sequences in a fixed order
word_length : int
length of state (typically 1 A,C,G,T, but could be 3 for codons)
"""
def __init__(self, aln, ref=None, logger=None, convert_upper=True,
sequence_length=None, compress=True, word_length=1, sequence_type=None,
fill_overhangs=True, seq_multiplicity=None, ambiguous=None, **kwargs):
"""construct an sequence data object
Parameters
----------
aln : Bio.Align.MultipleSeqAlignment, str
alignment or file name
ref : Seq, str
sequence or file name
logger : callable, optional
logging function
convert_upper : bool, optional
convert all sequences to upper case, default true
sequence_length : None, optional
length of the sequence, only necessary when no alignment or ref is given
compress : bool, optional
compress identical alignment columns into one
word_length : int
length of state (typically 1 A,C,G,T, but could be 3 for codons)
fill_overhangs : bool
treat gaps at either end of sequence as missing data
seq_multiplicity : dict
store the multiplicity of sequence, for example read count in a deep sequencing experiment
ambiguous : byte
character signifying missing data
**kwargs
Description
"""
self.logger = logger if logger else simple_logger
self._aln = None
self._ref = None
self.likely_alphabet = None
self.compressed_to_full_sequence_map = None
self._multiplicity = None
self.is_sparse = None
self.convert_upper = convert_upper
self.compress = compress
        self.seq_multiplicity = seq_multiplicity or {} # possibly a dict mapping sequences to their read count/sample count
self.additional_constant_sites = kwargs['additional_constant_sites'] if 'additional_constant_sites' in kwargs else 0
# if not specified, this will be set as the alignment_length or reference length
self._full_length = None
self.full_length = sequence_length
self._compressed_length = None
self.word_length = word_length
self.fill_overhangs = fill_overhangs
self.ambiguous = ambiguous
self.sequence_type = sequence_type
self.ref = ref
self.aln = aln
@property
def aln(self):
"""
The multiple sequence alignment currently used by the TreeAnc
:setter: Takes in alignment as MultipleSeqAlignment, str, or dict/defaultdict \
and attaches sequences to tree nodes.
:getter: Returns alignment as MultipleSeqAlignment or dict/defaultdict
"""
return self._aln
@aln.setter
def aln(self,in_aln):
"""
Reads in the alignment (from a dict, MultipleSeqAlignment, or file,
as necessary), sets tree-related parameters, and attaches sequences
to the tree nodes.
Parameters
----------
in_aln : MultipleSeqAlignment, str, dict/defaultdict
The alignment to be read in
"""
# load alignment from file if necessary
from Bio.Align import MultipleSeqAlignment
self._aln, self.is_sparse = None, None
if in_aln is None:
return
elif type(in_aln) in [defaultdict, dict]: #if input is sparse (i.e. from VCF)
self._aln = in_aln
self.is_sparse = True
elif type(in_aln) in string_types and isfile(in_aln):
if any([in_aln.lower().endswith(x) for x in ['.vcf', '.vcf.gz']]) and (self.ref is not None):
from .vcf_utils import read_vcf
compress_seq = read_vcf(in_aln)
in_aln = compress_seq['sequences']
else:
for fmt in ['fasta', 'phylip-relaxed', 'nexus']:
try:
in_aln=AlignIO.read(in_aln, fmt)
except:
continue
if type(in_aln) is MultipleSeqAlignment:
# check whether the alignment is consistent with a nucleotide alignment.
self._aln = {}
for s in in_aln:
if s.id==s.name:
tmp_name = s.id
elif '<unknown' in s.id: # use s.name if id is BioPython default (previous behavior)
tmp_name = s.name
elif '<unknown' in s.name: # use s.id if s.name is BioPython default (change relative to previous, but what we want)
tmp_name = s.id
else:
tmp_name = s.name # otherwise use s.name (previous behavior)
self._aln[tmp_name] = seq2array(s, convert_upper=self.convert_upper,
fill_overhangs=self.fill_overhangs, ambiguous=self.ambiguous)
self.check_alphabet(list(self._aln.values()))
self.is_sparse = False
self.logger("SequenceData: loaded alignment.",1)
elif type(in_aln) in [dict, defaultdict]:
self.logger("SequenceData: loaded sparse/vcf alignment.",1)
self.check_alphabet([self.ref])
self.is_sparse = True
self._aln = in_aln
else:
raise MissingDataError("SequenceData: loading alignment failed... " + str(in_aln))
if self.full_length:
if self.is_sparse:
if self.full_length!=len(self.ref):
self.logger("SequenceData.aln: specified sequence length doesn't match reference length, ignoring sequence length.", 1, warn=True)
self._full_length = len(self.ref)
else:
if self.full_length < in_aln.get_alignment_length():
raise AttributeError("SequenceData.aln: specified sequence length is smaller than alignment length!")
elif self.full_length > in_aln.get_alignment_length():
self.logger("SequenceData.aln: specified sequence length doesn't match alignment length. Treating difference as constant sites.", 2, warn=True)
self.additional_constant_sites = max(0, self.full_length - in_aln.get_alignment_length())
else:
if self.is_sparse:
self.full_length = len(self.ref)
else:
self.full_length = in_aln.get_alignment_length()
self.sequence_names = list(self.aln.keys())
self.make_compressed_alignment()
@property
def full_length(self):
"""length of the uncompressed sequence
"""
return self._full_length
@full_length.setter
def full_length(self,L):
"""set the length of the uncompressed sequence. its inverse 'one_mutation'
is frequently used as a general length scale. This can't be changed once
it is set.
Parameters
----------
L : int
length of the sequence alignment
"""
if (not hasattr(self, '_full_length')) or self._full_length is None:
if L:
self._full_length = int(L)
else:
self.logger("Alignment: one_mutation and sequence length can only be specified once!",1)
@property
def compressed_length(self):
return self._compressed_length
@property
def ref(self):
"""
:setter: Sets the string reference sequence
:getter: Returns the string reference sequence
"""
return self._ref
@ref.setter
def ref(self, in_ref):
"""
Parameters
----------
in_ref : file name, str, Bio.Seq.Seq, Bio.SeqRecord.SeqRecord
            reference sequence will be read and stored as a byte array
"""
read_from_file=False
if in_ref and isfile(in_ref):
for fmt in ['fasta', 'genbank']:
try:
in_ref = SeqIO.read(in_ref, fmt)
self.logger("SequenceData: loaded reference sequence as %s format"%fmt,1)
read_from_file=True
break
except:
continue
if not read_from_file:
                raise TypeError('SequenceData.ref: reference sequence file %s could not be parsed, fasta and genbank formats are supported.'%in_ref)
if in_ref:
self._ref = seq2array(in_ref, fill_overhangs=False, word_length=self.word_length)
self.full_length = self._ref.shape[0]
self.compressed_to_full_sequence_map = None
self._multiplicity = None
def multiplicity(self, mask=None):
if mask is None:
return self._multiplicity
else:
return self._multiplicity*mask
def check_alphabet(self, seqs):
self.likely_alphabet = guess_alphabet(seqs)
if self.sequence_type:
if self.likely_alphabet!=self.sequence_type:
if self.sequence_type=='nuc':
self.logger("POSSIBLE ERROR: This does not look like a nucleotide alignment!", 0, warn=True)
elif self.sequence_type=='aa':
self.logger("POSSIBLE ERROR: This looks like a nucleotide alignment, you indicated amino acids!", 0, warn=True)
if self.ambiguous is None:
self.ambiguous = 'N' if self.likely_alphabet=='nuc' else 'X'
def make_compressed_alignment(self):
"""
Create the compressed alignment from the full sequences. This method counts
the multiplicity for each column of the alignment ('alignment pattern'), and
creates the compressed alignment, where only the unique patterns are present.
The maps from full sequence to compressed sequence and back are also stored to allow
compressing and expanding the sequences.
Notes
-----
full_to_compressed_sequence_map : (array)
Map to reduce a sequence
compressed_to_full_sequence_map : (dict)
Map to restore sequence from compressed alignment
multiplicity : (array)
Numpy array, which stores the pattern multiplicity for each position of the compressed alignment.
compressed_alignment : (2D numpy array)
The compressed alignment. Shape is (N x L'), where N is number of
sequences, L' - number of unique alignment patterns
"""
if not self.compress: #
self._multiplicity = np.ones(self.full_length, dtype=float)
self.full_to_compressed_sequence_map = np.arange(self.full_length)
self.compressed_to_full_sequence_map = {p:np.array([p]) for p in np.arange(self.full_length)}
self._compressed_length = self._full_length
self.compressed_alignment = self._aln
return ttconf.SUCCESS
self.logger("SeqData: making compressed alignment...", 1)
        # bind positions in the full-length sequence to those of the compressed sequence
self.full_to_compressed_sequence_map = np.zeros(self.full_length, dtype=int)
# bind position in compressed sequence to the array of positions in full length sequence
self.compressed_to_full_sequence_map = {}
        # if the alignment is sparse, don't iterate over all invariable sites;
        # instead pre-load alignment_patterns with the locations of constant sites
        # and only iterate over the variable sites.
if self.is_sparse:
from .vcf_utils import process_sparse_alignment
tmp = process_sparse_alignment(self.aln, self.ref, self.ambiguous)
compressed_aln_transpose = tmp["constant_columns"]
alignment_patterns = tmp["constant_patterns"]
variable_positions = tmp["variable_positions"]
self.inferred_const_sites = tmp["constant_up_to_ambiguous"]
self.nonref_positions = tmp["nonref_positions"]
else: # transpose real alignment, for ease of iteration
alignment_patterns = {}
compressed_aln_transpose = []
aln_transpose = np.array([self.aln[k] for k in self.sequence_names]).T
variable_positions = np.arange(aln_transpose.shape[0])
for pi in variable_positions:
if self.is_sparse:
pattern = np.array([self.aln[k][pi] if pi in self.aln[k] else self.ref[pi]
for k in self.sequence_names])
else:
# pylint: disable=unsubscriptable-object
pattern = np.copy(aln_transpose[pi])
# if the column contains only one state and ambiguous nucleotides, replace
# those with the state in other strains right away
unique_letters = list(np.unique(pattern))
if len(unique_letters)==2 and self.ambiguous in unique_letters:
other = [c for c in unique_letters if c!=self.ambiguous][0]
#also replace in original pattern!
pattern[pattern == self.ambiguous] = other
unique_letters = [other]
str_pattern = "".join(pattern.astype('U'))
# if there is a mutation in this column, give it its private pattern
# this is required when sampling mutations from reconstructed profiles.
# otherwise, all mutations corresponding to the same pattern will be coupled.
# FIXME: this could be done more efficiently
if len(unique_letters)>1:
str_pattern += '_%d'%pi
# if the pattern is not yet seen,
if str_pattern not in alignment_patterns:
# bind the index in the compressed aln, index in sequence to the pattern string
alignment_patterns[str_pattern] = (len(compressed_aln_transpose), [pi])
# append this pattern to the compressed alignment
compressed_aln_transpose.append(pattern)
else:
# if the pattern is already seen, append the position in the real
# sequence to the compressed aln<->sequence_pos_indexes map
alignment_patterns[str_pattern][1].append(pi)
        # add constant alignment columns not in the alignment. We don't know where they
# are, so just add them to the end. First, determine sequence composition.
if self.additional_constant_sites:
character_counts = {c:np.sum(aln_transpose==c) for c in alphabets[self.likely_alphabet+'_nogap']
if c not in [self.ambiguous, '-']}
total = np.sum(list(character_counts.values()))
additional_columns_per_character = [(c,int(np.round(self.additional_constant_sites*n/total)))
for c, n in character_counts.items()]
columns_left = self.additional_constant_sites
pi = np.max(variable_positions)+1
for c,n in additional_columns_per_character:
            if c==additional_columns_per_character[-1][0]: # make sure all additions add up to the correct total despite rounding
n = columns_left
str_pattern = c*len(self.sequence_names)
pos_list = list(range(pi, pi+n))
if n:
if str_pattern in alignment_patterns:
alignment_patterns[str_pattern][1].extend(pos_list)
else:
alignment_patterns[str_pattern] = (len(compressed_aln_transpose), pos_list)
compressed_aln_transpose.append(np.array(list(str_pattern)))
pi += n
columns_left -= n
# count how many times each column is repeated in the real alignment
self._multiplicity = np.zeros(len(alignment_patterns))
for p, pos in alignment_patterns.values():
self._multiplicity[p]=len(pos)
# create the compressed alignment as a dictionary linking names to sequences
tmp_compressed_alignment = np.array(compressed_aln_transpose).T
# pylint: disable=unsubscriptable-object
self.compressed_alignment = {k: tmp_compressed_alignment[i]
for i,k in enumerate(self.sequence_names)}
# create map to compress a sequence
for p, pos in alignment_patterns.values():
self.full_to_compressed_sequence_map[np.array(pos)]=p
        # create a map to reconstruct the full sequence from the compressed sequence
for p, val in alignment_patterns.items():
self.compressed_to_full_sequence_map[val[0]]=np.array(val[1], dtype=int)
self.logger("SequenceData: constructed compressed alignment...", 1)
self._compressed_length = len(self._multiplicity)
return ttconf.SUCCESS
def full_to_sparse_sequence(self, sequence):
"""turn a sequence into a dictionary of differences from a reference sequence
Parameters
----------
sequence : str, numpy.ndarray
sequence to convert
Returns
-------
dict
dictionary of difference from reference
"""
if self.ref is None:
raise TypeError("SequenceData: sparse sequences can only be constructed when a reference sequence is defined")
if type(sequence) is not np.ndarray:
aseq = seq2array(sequence, fill_overhangs=False)
else:
aseq = sequence
differences = np.where(self.ref!=aseq)[0]
return {p:aseq[p] for p in differences}
def compressed_to_sparse_sequence(self, sequence):
"""turn a compressed sequence into a list of difference from a reference
Parameters
----------
sequence : numpy.ndarray
compressed sequence stored as array
Returns
-------
dict
dictionary of difference from reference
"""
if self.ref is None:
raise TypeError("SequenceData: sparse sequences can only be constructed when a reference sequence is defined")
sparse_seq = {}
compressed_nonref_positions = self.full_to_compressed_sequence_map[self.nonref_positions]
compressed_nonref_values = sequence[compressed_nonref_positions]
mismatches = (compressed_nonref_values != self.ref[self.nonref_positions])
return dict(zip(self.nonref_positions[mismatches], compressed_nonref_values[mismatches]))
def compressed_to_full_sequence(self, sequence, include_additional_constant_sites=False, as_string=False):
"""expand a compressed sequence
Parameters
----------
sequence : np.ndarray
compressed sequence
include_additional_constant_sites : bool, optional
add sites assumed constant
as_string : bool, optional
return a string instead of an array
Returns
-------
array,str
expanded sequence
"""
if include_additional_constant_sites:
L = self.full_length
else:
L = self.full_length - self.additional_constant_sites
tmp_seq = sequence[self.full_to_compressed_sequence_map[:L]]
if as_string:
return "".join(tmp_seq.astype('U'))
else:
return tmp_seq
def differences(self, seq1, seq2, seq1_compressed=True, seq2_compressed=True, mask=None):
diffs = []
if self.is_sparse:
if seq1_compressed: seq1 = self.compressed_to_sparse_sequence(seq1)
if seq2_compressed: seq2 = self.compressed_to_sparse_sequence(seq2)
for pos in set(seq1.keys()).union(seq2.keys()):
ref_state = self.ref[pos]
s1 = seq1.get(pos, ref_state)
s2 = seq2.get(pos, ref_state)
if s1!=s2:
diffs.append((s1,pos,s2))
else:
if seq1_compressed: seq1 = self.compressed_to_full_sequence(seq1)
if seq2_compressed: seq2 = self.compressed_to_full_sequence(seq2)
if mask is None:
diff_pos = np.where(seq1 != seq2)[0]
else:
diff_pos = np.where((seq1 != seq2)&(mask>0))[0]
for pos in diff_pos:
diffs.append((seq1[pos], pos, seq2[pos]))
return sorted(diffs, key=lambda x:x[1])
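if __name__ == "__main__":
    # Toy illustration (an addition, not part of treetime): the core idea behind
    # make_compressed_alignment above is that identical alignment columns are stored
    # only once, together with a multiplicity per unique column pattern. The real
    # method additionally keeps variable columns private (suffix '_%d') and handles
    # sparse/VCF input; this sketch only shows the basic column collapsing.
    toy_aln = {"seq1": "ACACGT", "seq2": "ACACGA", "seq3": "ACACGT"}
    names = sorted(toy_aln)
    columns = np.array([list(toy_aln[k]) for k in names]).T    # one row per alignment column
    patterns = {}             # column string -> (index in compressed aln, positions)
    compressed_columns = []
    for pos, col in enumerate(columns):
        key = "".join(col)
        if key not in patterns:
            patterns[key] = (len(compressed_columns), [pos])
            compressed_columns.append(col)
        else:
            patterns[key][1].append(pos)
    multiplicity = np.zeros(len(compressed_columns), dtype=int)
    for idx, positions in patterns.values():
        multiplicity[idx] = len(positions)
    compressed = np.array(compressed_columns).T                # rows follow `names`
    print({k: "".join(row) for k, row in zip(names, compressed)})
    print(multiplicity)       # occurrences of each compressed column in the full alignment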
|
neherlab/treetime
|
treetime/sequence_data.py
|
Python
|
mit
| 22,596
|
[
"Biopython"
] |
a432846b9504686255852220e5ef8b3771fc1f6ed32775f46437058ed6b2fe9d
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of commonly used architectures and reconstruction losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import gin.tf
@gin.configurable("encoder", whitelist=["num_latent", "encoder_fn"])
def make_gaussian_encoder(input_tensor,
is_training=True,
num_latent=gin.REQUIRED,
encoder_fn=gin.REQUIRED):
"""Gin wrapper to create and apply a Gaussian encoder configurable with gin.
This is a separate function so that several different models (such as
BetaVAE and FactorVAE) can call this function while the gin binding always
stays 'encoder.(...)'. This makes it easier to configure models and parse
the results files.
Args:
input_tensor: Tensor with image that should be encoded.
is_training: Boolean that indicates whether we are training (usually
required for batch normalization).
num_latent: Integer with dimensionality of latent space.
    encoder_fn: Function that takes the arguments (input_tensor,
num_latent, is_training) and returns the tuple (means, log_vars) with the
encoder means and log variances.
Returns:
Tuple (means, log_vars) with the encoder means and log variances.
"""
with tf.variable_scope("encoder"):
return encoder_fn(
input_tensor=input_tensor,
num_latent=num_latent,
is_training=is_training)
@gin.configurable("decoder", whitelist=["decoder_fn"])
def make_decoder(latent_tensor,
output_shape,
is_training=True,
decoder_fn=gin.REQUIRED):
"""Gin wrapper to create and apply a decoder configurable with gin.
This is a separate function so that several different models (such as
BetaVAE and FactorVAE) can call this function while the gin binding always
stays 'decoder.(...)'. This makes it easier to configure models and parse
the results files.
Args:
latent_tensor: Tensor latent space embeddings to decode from.
output_shape: Tuple with the output shape of the observations to be
generated.
is_training: Boolean that indicates whether we are training (usually
required for batch normalization).
    decoder_fn: Function that takes the arguments (input_tensor,
output_shape, is_training) and returns the decoded observations.
Returns:
Tensor of decoded observations.
"""
with tf.variable_scope("decoder"):
return decoder_fn(
latent_tensor=latent_tensor,
output_shape=output_shape,
is_training=is_training)
@gin.configurable("discriminator", whitelist=["discriminator_fn"])
def make_discriminator(input_tensor,
is_training=False,
discriminator_fn=gin.REQUIRED):
"""Gin wrapper to create and apply a discriminator configurable with gin.
This is a separate function so that several different models (such as
FactorVAE) can potentially call this function while the gin binding always
stays 'discriminator.(...)'. This makes it easier to configure models and
parse the results files.
Args:
input_tensor: Tensor on which the discriminator operates.
is_training: Boolean that indicates whether we are training (usually
required for batch normalization).
    discriminator_fn: Function that takes the arguments
(input_tensor, is_training) and returns tuple of (logits, clipped_probs).
Returns:
Tuple of (logits, clipped_probs) tensors.
"""
with tf.variable_scope("discriminator"):
logits, probs = discriminator_fn(input_tensor, is_training=is_training)
clipped = tf.clip_by_value(probs, 1e-6, 1 - 1e-6)
return logits, clipped
@gin.configurable("fc_encoder", whitelist=[])
def fc_encoder(input_tensor, num_latent, is_training=True):
"""Fully connected encoder used in beta-VAE paper for the dSprites data.
Based on row 1 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual
Concepts with a Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl).
Args:
input_tensor: Input tensor of shape (batch_size, 64, 64, num_channels) to
build encoder on.
num_latent: Number of latent variables to output.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
means: Output tensor of shape (batch_size, num_latent) with latent variable
means.
log_var: Output tensor of shape (batch_size, num_latent) with latent
variable log variances.
"""
del is_training
flattened = tf.layers.flatten(input_tensor)
e1 = tf.layers.dense(flattened, 1200, activation=tf.nn.relu, name="e1")
e2 = tf.layers.dense(e1, 1200, activation=tf.nn.relu, name="e2")
means = tf.layers.dense(e2, num_latent, activation=None)
log_var = tf.layers.dense(e2, num_latent, activation=None)
return means, log_var
@gin.configurable("conv_encoder", whitelist=[])
def conv_encoder(input_tensor, num_latent, is_training=True):
"""Convolutional encoder used in beta-VAE paper for the chairs data.
Based on row 3 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual
Concepts with a Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl)
Args:
input_tensor: Input tensor of shape (batch_size, 64, 64, num_channels) to
build encoder on.
num_latent: Number of latent variables to output.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
means: Output tensor of shape (batch_size, num_latent) with latent variable
means.
log_var: Output tensor of shape (batch_size, num_latent) with latent
variable log variances.
"""
del is_training
e1 = tf.layers.conv2d(
inputs=input_tensor,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
name="e1",
)
e2 = tf.layers.conv2d(
inputs=e1,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
name="e2",
)
e3 = tf.layers.conv2d(
inputs=e2,
filters=64,
kernel_size=2,
strides=2,
activation=tf.nn.relu,
padding="same",
name="e3",
)
e4 = tf.layers.conv2d(
inputs=e3,
filters=64,
kernel_size=2,
strides=2,
activation=tf.nn.relu,
padding="same",
name="e4",
)
flat_e4 = tf.layers.flatten(e4)
e5 = tf.layers.dense(flat_e4, 256, activation=tf.nn.relu, name="e5")
means = tf.layers.dense(e5, num_latent, activation=None, name="means")
log_var = tf.layers.dense(e5, num_latent, activation=None, name="log_var")
return means, log_var
@gin.configurable("fc_decoder", whitelist=[])
def fc_decoder(latent_tensor, output_shape, is_training=True):
"""Fully connected encoder used in beta-VAE paper for the dSprites data.
Based on row 1 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual
Concepts with a Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl)
Args:
latent_tensor: Input tensor to connect decoder to.
output_shape: Shape of the data.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
Output tensor of shape (None, 64, 64, num_channels) with the [0,1] pixel
intensities.
"""
del is_training
d1 = tf.layers.dense(latent_tensor, 1200, activation=tf.nn.tanh)
d2 = tf.layers.dense(d1, 1200, activation=tf.nn.tanh)
d3 = tf.layers.dense(d2, 1200, activation=tf.nn.tanh)
d4 = tf.layers.dense(d3, np.prod(output_shape))
return tf.reshape(d4, shape=[-1] + output_shape)
@gin.configurable("deconv_decoder", whitelist=[])
def deconv_decoder(latent_tensor, output_shape, is_training=True):
"""Convolutional decoder used in beta-VAE paper for the chairs data.
Based on row 3 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual
Concepts with a Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl)
Args:
latent_tensor: Input tensor of shape (batch_size,) to connect decoder to.
output_shape: Shape of the data.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
Output tensor of shape (batch_size, 64, 64, num_channels) with the [0,1]
pixel intensities.
"""
del is_training
d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)
d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)
d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])
d3 = tf.layers.conv2d_transpose(
inputs=d2_reshaped,
filters=64,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d4 = tf.layers.conv2d_transpose(
inputs=d3,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d5 = tf.layers.conv2d_transpose(
inputs=d4,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d6 = tf.layers.conv2d_transpose(
inputs=d5,
filters=output_shape[2],
kernel_size=4,
strides=2,
padding="same",
)
return tf.reshape(d6, [-1] + output_shape)
@gin.configurable("fc_discriminator", whitelist=[])
def fc_discriminator(input_tensor, is_training=True):
"""Fully connected discriminator used in FactorVAE paper for all datasets.
Based on Appendix A page 11 "Disentangling by Factorizing"
(https://arxiv.org/pdf/1802.05983.pdf)
Args:
input_tensor: Input tensor of shape (None, num_latents) to build
discriminator on.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
logits: Output tensor of shape (batch_size, 2) with logits from
discriminator.
probs: Output tensor of shape (batch_size, 2) with probabilities from
discriminator.
"""
del is_training
flattened = tf.layers.flatten(input_tensor)
d1 = tf.layers.dense(flattened, 1000, activation=tf.nn.leaky_relu, name="d1")
d2 = tf.layers.dense(d1, 1000, activation=tf.nn.leaky_relu, name="d2")
d3 = tf.layers.dense(d2, 1000, activation=tf.nn.leaky_relu, name="d3")
d4 = tf.layers.dense(d3, 1000, activation=tf.nn.leaky_relu, name="d4")
d5 = tf.layers.dense(d4, 1000, activation=tf.nn.leaky_relu, name="d5")
d6 = tf.layers.dense(d5, 1000, activation=tf.nn.leaky_relu, name="d6")
logits = tf.layers.dense(d6, 2, activation=None, name="logits")
probs = tf.nn.softmax(logits)
return logits, probs
@gin.configurable("test_encoder", whitelist=["num_latent"])
def test_encoder(input_tensor, num_latent, is_training):
"""Simple encoder for testing.
Args:
input_tensor: Input tensor of shape (batch_size, 64, 64, num_channels) to
build encoder on.
num_latent: Number of latent variables to output.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
means: Output tensor of shape (batch_size, num_latent) with latent variable
means.
log_var: Output tensor of shape (batch_size, num_latent) with latent
variable log variances.
"""
del is_training
flattened = tf.layers.flatten(input_tensor)
means = tf.layers.dense(flattened, num_latent, activation=None, name="e1")
log_var = tf.layers.dense(flattened, num_latent, activation=None, name="e2")
return means, log_var
@gin.configurable("test_decoder", whitelist=[])
def test_decoder(latent_tensor, output_shape, is_training=False):
"""Simple decoder for testing.
Args:
latent_tensor: Input tensor to connect decoder to.
output_shape: Output shape.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
Output tensor of shape (batch_size, 64, 64, num_channels) with the [0,1]
pixel intensities.
"""
del is_training
output = tf.layers.dense(latent_tensor, np.prod(output_shape), name="d1")
return tf.reshape(output, shape=[-1] + output_shape)
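if __name__ == "__main__":
    # Hypothetical wiring sketch (an addition, not library code): bind the gin
    # parameters that the wrappers above declare as gin.REQUIRED, then build an
    # encoder/decoder pair on a placeholder batch of 64x64 single-channel images.
    # The binding strings and shapes are illustrative assumptions; in the library
    # these values normally come from .gin configuration files.
    tf.disable_v2_behavior()
    gin.parse_config([
        "encoder.num_latent = 10",
        "encoder.encoder_fn = @conv_encoder",
        "decoder.decoder_fn = @deconv_decoder",
    ])
    images = tf.placeholder(tf.float32, shape=[None, 64, 64, 1])
    means, log_var = make_gaussian_encoder(images, is_training=True)
    reconstructions = make_decoder(means, output_shape=[64, 64, 1], is_training=True)
    print(means.shape, reconstructions.shape)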
|
google-research/disentanglement_lib
|
disentanglement_lib/methods/shared/architectures.py
|
Python
|
apache-2.0
| 12,624
|
[
"Gaussian"
] |
5e3c72f31e12326039522122772e14a7642beab03bac013d01afc77490db8182
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK
from DIRAC.AccountingSystem.Client.Types.WMSHistory import WMSHistory
from DIRAC.AccountingSystem.private.Plotters.BaseReporter import BaseReporter
class WMSHistoryPlotter(BaseReporter):
_typeName = "WMSHistory"
_typeKeyFields = [dF[0] for dF in WMSHistory().definitionKeyFields]
def _translateGrouping(self, grouping):
if grouping == "Country":
sqlRepr = 'upper( substring( %s, locate( ".", %s, length( %s ) - 4 ) + 1 ) )'
return (sqlRepr, ["Site", "Site", "Site"], sqlRepr)
elif grouping == "Grid":
return ('substring_index( %s, ".", 1 )', ["Site"])
else:
return ("%s", [grouping])
def _reportNumberOfJobs(self, reportRequest):
selectFields = (
self._getSelectStringForGrouping(reportRequest["groupingFields"]) + ", %s, %s, SUM(%s/%s)",
reportRequest["groupingFields"][1] + ["startTime", "bucketLength", "Jobs", "entriesInBucket"],
)
retVal = self._getTimedData(
reportRequest["startTime"],
reportRequest["endTime"],
selectFields,
reportRequest["condDict"],
reportRequest["groupingFields"],
{"convertToGranularity": "average", "checkNone": True},
)
if not retVal["OK"]:
return retVal
dataDict, granularity = retVal["Value"]
self.stripDataField(dataDict, 0)
return S_OK({"data": dataDict, "granularity": granularity})
def _plotNumberOfJobs(self, reportRequest, plotInfo, filename):
metadata = {
"title": "Jobs by %s" % reportRequest["grouping"],
"starttime": reportRequest["startTime"],
"endtime": reportRequest["endTime"],
"span": plotInfo["granularity"],
"skipEdgeColor": True,
"ylabel": "jobs",
}
plotInfo["data"] = self._fillWithZero(
plotInfo["granularity"], reportRequest["startTime"], reportRequest["endTime"], plotInfo["data"]
)
return self._generateStackedLinePlot(filename, plotInfo["data"], metadata)
def _reportNumberOfReschedules(self, reportRequest):
selectFields = (
self._getSelectStringForGrouping(reportRequest["groupingFields"]) + ", %s, %s, SUM(%s/%s)",
reportRequest["groupingFields"][1] + ["startTime", "bucketLength", "Reschedules", "entriesInBucket"],
)
retVal = self._getTimedData(
reportRequest["startTime"],
reportRequest["endTime"],
selectFields,
reportRequest["condDict"],
reportRequest["groupingFields"],
{"convertToGranularity": "average", "checkNone": True},
)
if not retVal["OK"]:
return retVal
dataDict, granularity = retVal["Value"]
self.stripDataField(dataDict, 0)
return S_OK({"data": dataDict, "granularity": granularity})
def _plotNumberOfReschedules(self, reportRequest, plotInfo, filename):
metadata = {
"title": "Reschedules by %s" % reportRequest["grouping"],
"starttime": reportRequest["startTime"],
"endtime": reportRequest["endTime"],
"span": plotInfo["granularity"],
"skipEdgeColor": True,
"ylabel": "reschedules",
}
plotInfo["data"] = self._fillWithZero(
plotInfo["granularity"], reportRequest["startTime"], reportRequest["endTime"], plotInfo["data"]
)
return self._generateStackedLinePlot(filename, plotInfo["data"], metadata)
def _reportAverageNumberOfJobs(self, reportRequest):
selectFields = (
self._getSelectStringForGrouping(reportRequest["groupingFields"]) + ", SUM(%s/%s)",
reportRequest["groupingFields"][1] + ["Jobs", "entriesInBucket"],
)
retVal = self._getSummaryData(
reportRequest["startTime"],
reportRequest["endTime"],
selectFields,
reportRequest["condDict"],
reportRequest["groupingFields"],
{},
)
if not retVal["OK"]:
return retVal
dataDict = retVal["Value"]
bins = self._getBins(self._typeName, reportRequest["startTime"], reportRequest["endTime"])
numBins = len(bins)
for key in dataDict:
dataDict[key] = float(dataDict[key] / numBins)
return S_OK({"data": dataDict})
def _plotAverageNumberOfJobs(self, reportRequest, plotInfo, filename):
metadata = {
"title": "Average Number of Jobs by %s" % reportRequest["grouping"],
"ylabel": "Jobs",
"starttime": reportRequest["startTime"],
"endtime": reportRequest["endTime"],
}
return self._generatePiePlot(filename, plotInfo["data"], metadata)
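# Illustrative sketch (an addition, not DIRAC code): plain-Python equivalents of
# the SQL grouping expressions returned by _translateGrouping above, for a typical
# DIRAC site name such as "LCG.CERN.ch". The real code uses MySQL's
# substring_index/locate; these helpers only approximate the same grouping keys
# for well-formed site names.
def _grid_of_site(site):
    # substring_index(Site, '.', 1): everything before the first dot
    return site.split(".", 1)[0]
def _country_of_site(site):
    # upper(substring(Site, locate('.', Site, length(Site) - 4) + 1)):
    # the short suffix after the last dot, upper-cased
    return site.rsplit(".", 1)[-1].upper()
# _grid_of_site("LCG.CERN.ch")    -> "LCG"
# _country_of_site("LCG.CERN.ch") -> "CH"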
|
ic-hep/DIRAC
|
src/DIRAC/AccountingSystem/private/Plotters/WMSHistoryPlotter.py
|
Python
|
gpl-3.0
| 4,970
|
[
"DIRAC"
] |
21a088fe616f3214e5bd859694cadd181368a67b9b73be50e129cb426b63336e
|
from __future__ import division
import numpy as np
from astropy.coordinates.distances import Distance
import matplotlib.pyplot as plt
import pyfits
import db
from string import *
from astroML.plotting import hist
from geom import getIncl
def simple_plot(x, y, vel, filename):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, aspect='equal',autoscale_on=False, xlim=[-25,25], ylim=[-25,25])
cb = ax.scatter(x, y, c=vel, edgecolor="none", vmin=-120, vmax=120)
plt.colorbar(cb)
ax.axhline(c='k')
ax.axvline(c='k')
plt.savefig(filename)
def get_GAMA_incl(sami_id):
sami_id = str(sami_id)
GAMA_file = 'db/metadata/'+sami_id+"/"+sami_id+"_GAMA_metadata.fits.gz"
ell = pyfits.getdata(GAMA_file, extname='SERSICCATALL', header=False)['GAL_ELLIP_R'][0]
ba = 1-ell
incl = getIncl(ba)
return incl
def plot_hist(x, filename):
fig = plt.figure(figsize=(10, 10))
hist(x, bins='scott')
plt.savefig(filename)
plt.close()
def get_SAMI_data(sami_id):
r50 = db.dbUtils.getFromDB('R_e', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0]
W50 = db.dbUtils.getFromDB('W50', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0]
W50_err = db.dbUtils.getFromDB('W50_err', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0]
return r50, W50, W50_err
def get_SAMI_coords(sami_id):
ra = db.dbUtils.getFromDB('ra', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0]
dec = db.dbUtils.getFromDB('dec', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0]
return ra, dec
def get_delta_z(sami_id):
z = db.dbUtils.getFromDB('z', 'db/SAMI.sqlite', 'SAMI_Master ', ' where sami_id = '+ str(sami_id))[0]
alfalfa_id = db.dbUtils.getFromDB('ALFALFA_id', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0]
alfalfa_z = db.dbUtils.getFromDB('V_P', 'db/SAMI.sqlite', 'ALFALFA ', ' where object ='+"'"+lstrip(str(alfalfa_id))+"'")/300000
return round(np.abs(float(z) - float(alfalfa_z)), 6)
def get_delta_coords(sami_id):
ra, dec = get_SAMI_coords(sami_id)
alfalfa_id = db.dbUtils.getFromDB('ALFALFA_id', 'db/SAMI.sqlite', 'ALFALFA_Xmatch ', ' where sami_id = '+ str(sami_id))[0]
print 'where object ='+"'"+lstrip(str(alfalfa_id))+"'"
alfalfa_ra = 15*db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where object ='+"'"+lstrip(str(alfalfa_id))+"'")
alfalfa_dec = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where object ='+"'"+lstrip(str(alfalfa_id))+"'")
return (round(np.abs(float(alfalfa_ra) - float(ra)), 6), round(np.abs(float(alfalfa_dec) - float(dec)), 6))
def get_ALFALFA_W50(ra, dec):
alfalfa_ra = db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA')
alfalfa_dec = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA')
#print np.round(alfalfa_ra*15, 1), np.round(alfalfa_dec, 1)
W50 = db.dbUtils.getFromDB('W50', 'db/SAMI.sqlite', 'ALFALFA ', ' where round(15*raopt, 1) = '+ str(np.round(ra, 1))+' and round(decopt, 1) = '+str(np.round(dec, 1)))[0]
return W50
def get_stellar_velfield(filename):
print filename
all_vel = pyfits.getdata(filename, extname='VEL', header=False)
all_vel_err = pyfits.getdata(filename, extname='VEL_ERR', header=False)
#good = np.where(all_vel_err) < 100
#vel = all_vel[good]
#vel_err = all_vel_err[good]
print all_vel
all_vel = np.ma.masked_invalid(all_vel)
all_vel_err = np.ma.masked_invalid(all_vel_err)
mask = np.where(all_vel_err < 300)
print mask
#getting indices, i.e. y and x:
ind = np.column_stack(mask) - 25
x, y = np.asarray(zip(*ind))
vel_err = all_vel_err.filled()[mask]
vel = all_vel.filled()[mask]
#print 'HI', HI_linewidth
return x, y, vel, vel_err
def get_gas_velfield(filename):
print filename
all_vel = pyfits.getdata(filename, extname='V', header=False)[1]
all_vel_err = pyfits.getdata(filename, extname='V_ERR', header=False)[1]
#good = np.where(all_vel_err) < 100
#vel = all_vel[good]
#vel_err = all_vel_err[good]
all_vel = np.ma.masked_invalid(all_vel)
all_vel_err = np.ma.masked_invalid(all_vel_err)
mask = np.where(all_vel_err < 20)
#getting indices, i.e. y and x:
ind = np.column_stack(mask) - 25
x, y = np.asarray(zip(*ind))
vel_err = all_vel_err.filled()[mask]
vel = all_vel.filled()[mask]
#print 'HI', HI_linewidth
return x, y, vel, vel_err
def angular2physical(arcsec, z): #return physical effective diameter of the galaxy in kpc
return (np.radians(arcsec/3600) *Distance(z=z).kpc / (1 + z)**2)
def sqlify(arr):
strings = ''
for i in arr:
if type(i) == type(tuple()):
i = i[0]
strings = strings+","+'"'+strip(str(i))+'"'
strings = '('+strings[1:]+')'
return strings
def convert_pc_to_meters(pc):
return pc*3.0857*10e16
def decodeU(query_output):
output = []
for u in query_output:
u = str(u)
output.append(u)
return output
def get_ALFALFA_data():
ra = db.dbUtils.getFromDB('ra', 'db/SAMI.sqlite', 'SAMI_Master ')
dec = db.dbUtils.getFromDB('dec', 'db/SAMI.sqlite', 'SAMI_Master ')
SAMI_all_ids = db.dbUtils.getFromDB('sami_id', 'db/SAMI.sqlite', 'SAMI_Master ')
raopt = db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA')
decopt = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA')
#print np.round(alfalfa_ra*15, 1), np.round(alfalfa_dec, 1)
ALFALFA_ids = []
sami_ids = []
for sami_ra, sami_dec, sami_id in zip(ra, dec, SAMI_all_ids):
obj = (db.dbUtils.getFromDB('Object', 'db/SAMI.sqlite', 'ALFALFA ', ' where round(15*raopt, 1) = '+ str(np.round(sami_ra, 1))+' and round(decopt, 1) = '+str(np.round(sami_dec, 1))))
if len(obj) == 1:
obj = decodeU(obj)[0]
ALFALFA_ids.append(obj)
sami_ids.append(sami_id)
W50 = db.dbUtils.getFromDB('W50', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
W50_err = db.dbUtils.getFromDB('Werr', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
int_flux = db.dbUtils.getFromDB('sintmap', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
ra = db.dbUtils.getFromDB('raopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
dec = db.dbUtils.getFromDB('decopt', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
SN = db.dbUtils.getFromDB('SN', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
rms = db.dbUtils.getFromDB('rms', 'db/SAMI.sqlite', 'ALFALFA ', ' where Object in'+sqlify(ALFALFA_ids))
f = open('db/ALFALFA_Xmatch.csv', 'a')
    for i, s in enumerate(W50): #not all galaxies have flux measurements
print i, s
f.write(str(sami_ids[i])+", "+ str(ALFALFA_ids[i])+", "+str(ra[i])+", "+str(dec[i])+", "+str(W50[i])+", "+str(W50_err[i])+", "+str(int_flux[i])+", "+str(SN[i])+", "+str(rms[i])+"\n")
f.close()
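if __name__ == "__main__":
    # Illustrative usage sketch (an addition): converting an angular size to a
    # physical size with angular2physical above. The redshift and angular size are
    # arbitrary example values, and the result depends on astropy's default cosmology.
    size_kpc = angular2physical(5.0, 0.05)   # a ~5 arcsec galaxy at z = 0.05
    print("5 arcsec at z = 0.05 corresponds to roughly %.1f kpc" % size_kpc)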
|
astrolitterbox/SAMI
|
utils.py
|
Python
|
gpl-2.0
| 6,843
|
[
"Galaxy"
] |
5c66dbfcc19e2360f2a7255772de2308306a508717f56d51d1ed0eb50f6a9113
|
"""
Tests for geography support in PostGIS
"""
import os
from django.contrib.gis.db import models
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.measure import D
from django.db import NotSupportedError, connection
from django.db.models.functions import Cast
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from ..utils import FuncTestMixin
from .models import City, County, Zipcode
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
if not connection.ops.postgis:
self.skipTest('This is a PostGIS-specific test.')
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
with self.assertRaises(ValueError):
City.objects.filter(point__within=z.poly).count()
# `@` operator not available.
with self.assertRaises(ValueError):
City.objects.filter(point__contained=z.poly).count()
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
with self.assertRaises(ValueError):
City.objects.get(point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {
'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
class GeographyFunctionTests(FuncTestMixin, TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("supports_extent_aggr")
def test_cast_aggregate(self):
"""
Cast a geography to a geometry field for an aggregate function that
expects a geometry input.
"""
if not connection.features.supports_geography:
self.skipTest("This test needs geography support")
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
res = City.objects.filter(
name__in=('Houston', 'Dallas')
).aggregate(extent=models.Extent(Cast('point', models.PointField())))
for val, exp in zip(res['extent'], expected):
self.assertAlmostEqual(exp, val, 4)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
if connection.ops.oracle:
ref_dists = [0, 4899.68, 8081.30, 9115.15]
elif connection.ops.spatialite:
# SpatiaLite returns non-zero distance for polygons and points
# covered by that polygon.
ref_dists = [326.61, 4899.68, 8081.30, 9115.15]
else:
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(
distance=Distance('poly', htown.point),
distance2=Distance(htown.point, 'poly'),
)
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
if connection.ops.postgis:
# PostGIS casts geography to geometry when distance2 is calculated.
ref_dists = [0, 4899.68, 8081.30, 9115.15]
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance2.m, ref, 2)
if not connection.ops.spatialite:
# Distance function combined with a lookup.
hzip = Zipcode.objects.get(code='77002')
self.assertEqual(qs.get(distance__lte=0), hzip)
@skipUnlessDBFeature("has_Area_function", "supports_area_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
# Round to the nearest thousand as possible values (depending on
# the database and geolib) include 5439084, 5439100, 5439101.
rounded_value = z.area.sq_m
rounded_value -= z.area.sq_m % 1000
self.assertEqual(rounded_value, 5439000)
@skipUnlessDBFeature("has_Area_function")
@skipIfDBFeature("supports_area_geodetic")
def test_geodetic_area_raises_if_not_supported(self):
with self.assertRaisesMessage(NotSupportedError, 'Area on geodetic coordinate systems not supported.'):
Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
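# Hypothetical sketch (an addition, not the actual gis_tests.geogapp.models
# definitions): the City/Zipcode models exercised above are backed by geography
# columns, which is what makes the geodetic, meter-based distance lookups in
# GeographyFunctionTests possible. A minimal model of that kind could look like:
#
#     from django.contrib.gis.db import models
#
#     class Landmark(models.Model):
#         name = models.CharField(max_length=50)
#         point = models.PointField(geography=True)   # stored as a PostGIS geography column
#
#     # geodetic distance filter, e.g. everything within 100 km of a point:
#     # Landmark.objects.filter(point__dwithin=(some_point, D(km=100)))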
|
elena/django
|
tests/gis_tests/geogapp/tests.py
|
Python
|
bsd-3-clause
| 6,929
|
[
"VisIt"
] |
610aa00bd2d077bd79a42131d6902e9cea13c2198b4691d6814eb3057fbbb795
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1 import (
BinauthzManagementServiceV1Beta1AsyncClient,
)
from google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1 import (
BinauthzManagementServiceV1Beta1Client,
)
from google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1 import (
pagers,
)
from google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1 import (
transports,
)
from google.cloud.binaryauthorization_v1beta1.types import resources
from google.cloud.binaryauthorization_v1beta1.types import service
from google.oauth2 import service_account
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert (
BinauthzManagementServiceV1Beta1Client._get_default_mtls_endpoint(None) is None
)
assert (
BinauthzManagementServiceV1Beta1Client._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
BinauthzManagementServiceV1Beta1Client._get_default_mtls_endpoint(
api_mtls_endpoint
)
== api_mtls_endpoint
)
assert (
BinauthzManagementServiceV1Beta1Client._get_default_mtls_endpoint(
sandbox_endpoint
)
== sandbox_mtls_endpoint
)
assert (
BinauthzManagementServiceV1Beta1Client._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
BinauthzManagementServiceV1Beta1Client._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[
BinauthzManagementServiceV1Beta1Client,
BinauthzManagementServiceV1Beta1AsyncClient,
],
)
def test_binauthz_management_service_v1_beta1_client_from_service_account_info(
client_class,
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "binaryauthorization.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.BinauthzManagementServiceV1Beta1GrpcTransport, "grpc"),
(
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_binauthz_management_service_v1_beta1_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[
BinauthzManagementServiceV1Beta1Client,
BinauthzManagementServiceV1Beta1AsyncClient,
],
)
def test_binauthz_management_service_v1_beta1_client_from_service_account_file(
client_class,
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "binaryauthorization.googleapis.com:443"
def test_binauthz_management_service_v1_beta1_client_get_transport_class():
transport = BinauthzManagementServiceV1Beta1Client.get_transport_class()
available_transports = [
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
]
assert transport in available_transports
transport = BinauthzManagementServiceV1Beta1Client.get_transport_class("grpc")
assert transport == transports.BinauthzManagementServiceV1Beta1GrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
"grpc",
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
BinauthzManagementServiceV1Beta1Client,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BinauthzManagementServiceV1Beta1Client),
)
@mock.patch.object(
BinauthzManagementServiceV1Beta1AsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BinauthzManagementServiceV1Beta1AsyncClient),
)
def test_binauthz_management_service_v1_beta1_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
BinauthzManagementServiceV1Beta1Client, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
BinauthzManagementServiceV1Beta1Client, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
"grpc",
"true",
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
"grpc",
"false",
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
BinauthzManagementServiceV1Beta1Client,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BinauthzManagementServiceV1Beta1Client),
)
@mock.patch.object(
BinauthzManagementServiceV1Beta1AsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BinauthzManagementServiceV1Beta1AsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_binauthz_management_service_v1_beta1_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
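
# Rough decision table for the "auto" mTLS endpoint behavior exercised above
# (a summary of the cases covered by this test, not generator output):
#
#   GOOGLE_API_USE_CLIENT_CERTIFICATE   client cert available    resulting host
#   "false" (or unset)                  (any)                    DEFAULT_ENDPOINT
#   "true"                              yes (option or ADC)      DEFAULT_MTLS_ENDPOINT
#   "true"                              no                       DEFAULT_ENDPOINT
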
@pytest.mark.parametrize(
"client_class",
[
BinauthzManagementServiceV1Beta1Client,
BinauthzManagementServiceV1Beta1AsyncClient,
],
)
@mock.patch.object(
BinauthzManagementServiceV1Beta1Client,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BinauthzManagementServiceV1Beta1Client),
)
@mock.patch.object(
BinauthzManagementServiceV1Beta1AsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(BinauthzManagementServiceV1Beta1AsyncClient),
)
def test_binauthz_management_service_v1_beta1_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
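
# A minimal usage sketch of the classmethod exercised above (comment only; the
# result depends on the environment variables the test patches):
#
#     api_endpoint, cert_source = (
#         BinauthzManagementServiceV1Beta1Client.get_mtls_endpoint_and_cert_source()
#     )
#     # api_endpoint is either DEFAULT_ENDPOINT or DEFAULT_MTLS_ENDPOINT, and
#     # cert_source is typically a callable returning the client cert/key, or None.
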
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
"grpc",
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_binauthz_management_service_v1_beta1_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
"grpc",
grpc_helpers,
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_binauthz_management_service_v1_beta1_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_binauthz_management_service_v1_beta1_client_client_options_from_dict():
with mock.patch(
"google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1.transports.BinauthzManagementServiceV1Beta1GrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = BinauthzManagementServiceV1Beta1Client(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
"grpc",
grpc_helpers,
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_binauthz_management_service_v1_beta1_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Test that the credentials loaded from the file are saved and used as the channel credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"binaryauthorization.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="binaryauthorization.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
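
# Note: the create_channel assertion above verifies that the credentials loaded
# from the credentials file (not the ADC credentials) are what reach
# grpc_helpers.create_channel, along with the service's default scope and host.
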
@pytest.mark.parametrize("request_type", [service.GetPolicyRequest, dict,])
def test_get_policy(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Policy(
name="name_value",
description="description_value",
global_policy_evaluation_mode=resources.Policy.GlobalPolicyEvaluationMode.ENABLE,
)
response = client.get_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.GetPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Policy)
assert response.name == "name_value"
assert response.description == "description_value"
assert (
response.global_policy_evaluation_mode
== resources.Policy.GlobalPolicyEvaluationMode.ENABLE
)
def test_get_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
client.get_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.GetPolicyRequest()
@pytest.mark.asyncio
async def test_get_policy_async(
transport: str = "grpc_asyncio", request_type=service.GetPolicyRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Policy(
name="name_value",
description="description_value",
global_policy_evaluation_mode=resources.Policy.GlobalPolicyEvaluationMode.ENABLE,
)
)
response = await client.get_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.GetPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Policy)
assert response.name == "name_value"
assert response.description == "description_value"
assert (
response.global_policy_evaluation_mode
== resources.Policy.GlobalPolicyEvaluationMode.ENABLE
)
@pytest.mark.asyncio
async def test_get_policy_async_from_dict():
await test_get_policy_async(request_type=dict)
def test_get_policy_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
call.return_value = resources.Policy()
client.get_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_policy_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Policy())
await client.get_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_policy_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Policy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_policy(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_policy_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_policy(
service.GetPolicyRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_policy_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_policy), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Policy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_policy(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_policy_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_policy(
service.GetPolicyRequest(), name="name_value",
)
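
# The *_flattened tests above cover the two supported calling conventions; a
# hedged usage sketch (comment only, the resource name is a placeholder):
#
#     # Request-object form:
#     client.get_policy(service.GetPolicyRequest(name="projects/my-project/policy"))
#     # Flattened-keyword form (mutually exclusive with passing a request object):
#     client.get_policy(name="projects/my-project/policy")
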
@pytest.mark.parametrize("request_type", [service.UpdatePolicyRequest, dict,])
def test_update_policy(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Policy(
name="name_value",
description="description_value",
global_policy_evaluation_mode=resources.Policy.GlobalPolicyEvaluationMode.ENABLE,
)
response = client.update_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.UpdatePolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Policy)
assert response.name == "name_value"
assert response.description == "description_value"
assert (
response.global_policy_evaluation_mode
== resources.Policy.GlobalPolicyEvaluationMode.ENABLE
)
def test_update_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
client.update_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.UpdatePolicyRequest()
@pytest.mark.asyncio
async def test_update_policy_async(
transport: str = "grpc_asyncio", request_type=service.UpdatePolicyRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Policy(
name="name_value",
description="description_value",
global_policy_evaluation_mode=resources.Policy.GlobalPolicyEvaluationMode.ENABLE,
)
)
response = await client.update_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.UpdatePolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Policy)
assert response.name == "name_value"
assert response.description == "description_value"
assert (
response.global_policy_evaluation_mode
== resources.Policy.GlobalPolicyEvaluationMode.ENABLE
)
@pytest.mark.asyncio
async def test_update_policy_async_from_dict():
await test_update_policy_async(request_type=dict)
def test_update_policy_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdatePolicyRequest()
request.policy.name = "policy.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
call.return_value = resources.Policy()
client.update_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_policy_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdatePolicyRequest()
request.policy.name = "policy.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Policy())
await client.update_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"]
def test_update_policy_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Policy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_policy(policy=resources.Policy(name="name_value"),)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].policy
mock_val = resources.Policy(name="name_value")
assert arg == mock_val
def test_update_policy_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_policy(
service.UpdatePolicyRequest(), policy=resources.Policy(name="name_value"),
)
@pytest.mark.asyncio
async def test_update_policy_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_policy), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Policy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_policy(
policy=resources.Policy(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].policy
mock_val = resources.Policy(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_policy_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_policy(
service.UpdatePolicyRequest(), policy=resources.Policy(name="name_value"),
)
@pytest.mark.parametrize("request_type", [service.CreateAttestorRequest, dict,])
def test_create_attestor(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Attestor(
name="name_value",
description="description_value",
user_owned_drydock_note=resources.UserOwnedDrydockNote(
note_reference="note_reference_value"
),
)
response = client.create_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.CreateAttestorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Attestor)
assert response.name == "name_value"
assert response.description == "description_value"
def test_create_attestor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
client.create_attestor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.CreateAttestorRequest()
@pytest.mark.asyncio
async def test_create_attestor_async(
transport: str = "grpc_asyncio", request_type=service.CreateAttestorRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Attestor(name="name_value", description="description_value",)
)
response = await client.create_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.CreateAttestorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Attestor)
assert response.name == "name_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_attestor_async_from_dict():
await test_create_attestor_async(request_type=dict)
def test_create_attestor_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateAttestorRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
call.return_value = resources.Attestor()
client.create_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_attestor_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.CreateAttestorRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Attestor())
await client.create_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_attestor_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Attestor()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_attestor(
parent="parent_value",
attestor_id="attestor_id_value",
attestor=resources.Attestor(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].attestor_id
mock_val = "attestor_id_value"
assert arg == mock_val
arg = args[0].attestor
mock_val = resources.Attestor(name="name_value")
assert arg == mock_val
def test_create_attestor_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_attestor(
service.CreateAttestorRequest(),
parent="parent_value",
attestor_id="attestor_id_value",
attestor=resources.Attestor(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_attestor_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Attestor())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_attestor(
parent="parent_value",
attestor_id="attestor_id_value",
attestor=resources.Attestor(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].attestor_id
mock_val = "attestor_id_value"
assert arg == mock_val
arg = args[0].attestor
mock_val = resources.Attestor(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_attestor_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_attestor(
service.CreateAttestorRequest(),
parent="parent_value",
attestor_id="attestor_id_value",
attestor=resources.Attestor(name="name_value"),
)
@pytest.mark.parametrize("request_type", [service.GetAttestorRequest, dict,])
def test_get_attestor(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Attestor(
name="name_value",
description="description_value",
user_owned_drydock_note=resources.UserOwnedDrydockNote(
note_reference="note_reference_value"
),
)
response = client.get_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.GetAttestorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Attestor)
assert response.name == "name_value"
assert response.description == "description_value"
def test_get_attestor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
client.get_attestor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.GetAttestorRequest()
@pytest.mark.asyncio
async def test_get_attestor_async(
transport: str = "grpc_asyncio", request_type=service.GetAttestorRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Attestor(name="name_value", description="description_value",)
)
response = await client.get_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.GetAttestorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Attestor)
assert response.name == "name_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_attestor_async_from_dict():
await test_get_attestor_async(request_type=dict)
def test_get_attestor_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetAttestorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
call.return_value = resources.Attestor()
client.get_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_attestor_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.GetAttestorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Attestor())
await client.get_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_attestor_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Attestor()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_attestor(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_attestor_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_attestor(
service.GetAttestorRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_attestor_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Attestor())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_attestor(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_attestor_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_attestor(
service.GetAttestorRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [service.UpdateAttestorRequest, dict,])
def test_update_attestor(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Attestor(
name="name_value",
description="description_value",
user_owned_drydock_note=resources.UserOwnedDrydockNote(
note_reference="note_reference_value"
),
)
response = client.update_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.UpdateAttestorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Attestor)
assert response.name == "name_value"
assert response.description == "description_value"
def test_update_attestor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
client.update_attestor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.UpdateAttestorRequest()
@pytest.mark.asyncio
async def test_update_attestor_async(
transport: str = "grpc_asyncio", request_type=service.UpdateAttestorRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Attestor(name="name_value", description="description_value",)
)
response = await client.update_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.UpdateAttestorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Attestor)
assert response.name == "name_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_attestor_async_from_dict():
await test_update_attestor_async(request_type=dict)
def test_update_attestor_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateAttestorRequest()
request.attestor.name = "attestor.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
call.return_value = resources.Attestor()
client.update_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "attestor.name=attestor.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_attestor_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateAttestorRequest()
request.attestor.name = "attestor.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Attestor())
await client.update_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "attestor.name=attestor.name/value",) in kw[
"metadata"
]
def test_update_attestor_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Attestor()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_attestor(attestor=resources.Attestor(name="name_value"),)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].attestor
mock_val = resources.Attestor(name="name_value")
assert arg == mock_val
def test_update_attestor_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_attestor(
service.UpdateAttestorRequest(),
attestor=resources.Attestor(name="name_value"),
)
@pytest.mark.asyncio
async def test_update_attestor_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Attestor())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_attestor(
attestor=resources.Attestor(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].attestor
mock_val = resources.Attestor(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_attestor_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_attestor(
service.UpdateAttestorRequest(),
attestor=resources.Attestor(name="name_value"),
)
@pytest.mark.parametrize("request_type", [service.ListAttestorsRequest, dict,])
def test_list_attestors(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListAttestorsResponse(
next_page_token="next_page_token_value",
)
response = client.list_attestors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.ListAttestorsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAttestorsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_attestors_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
client.list_attestors()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.ListAttestorsRequest()
@pytest.mark.asyncio
async def test_list_attestors_async(
transport: str = "grpc_asyncio", request_type=service.ListAttestorsRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListAttestorsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_attestors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.ListAttestorsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAttestorsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_attestors_async_from_dict():
await test_list_attestors_async(request_type=dict)
def test_list_attestors_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListAttestorsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
call.return_value = service.ListAttestorsResponse()
client.list_attestors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_attestors_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.ListAttestorsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListAttestorsResponse()
)
await client.list_attestors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_attestors_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListAttestorsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_attestors(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_attestors_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_attestors(
service.ListAttestorsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_attestors_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            service.ListAttestorsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_attestors(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_attestors_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_attestors(
service.ListAttestorsRequest(), parent="parent_value",
)
def test_list_attestors_pager(transport_name: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListAttestorsResponse(
attestors=[
resources.Attestor(),
resources.Attestor(),
resources.Attestor(),
],
next_page_token="abc",
),
service.ListAttestorsResponse(attestors=[], next_page_token="def",),
service.ListAttestorsResponse(
attestors=[resources.Attestor(),], next_page_token="ghi",
),
service.ListAttestorsResponse(
attestors=[resources.Attestor(), resources.Attestor(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_attestors(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, resources.Attestor) for i in results)
def test_list_attestors_pages(transport_name: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_attestors), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListAttestorsResponse(
attestors=[
resources.Attestor(),
resources.Attestor(),
resources.Attestor(),
],
next_page_token="abc",
),
service.ListAttestorsResponse(attestors=[], next_page_token="def",),
service.ListAttestorsResponse(
attestors=[resources.Attestor(),], next_page_token="ghi",
),
service.ListAttestorsResponse(
attestors=[resources.Attestor(), resources.Attestor(),],
),
RuntimeError,
)
pages = list(client.list_attestors(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_attestors_async_pager():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_attestors), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListAttestorsResponse(
attestors=[
resources.Attestor(),
resources.Attestor(),
resources.Attestor(),
],
next_page_token="abc",
),
service.ListAttestorsResponse(attestors=[], next_page_token="def",),
service.ListAttestorsResponse(
attestors=[resources.Attestor(),], next_page_token="ghi",
),
service.ListAttestorsResponse(
attestors=[resources.Attestor(), resources.Attestor(),],
),
RuntimeError,
)
async_pager = await client.list_attestors(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, resources.Attestor) for i in responses)
@pytest.mark.asyncio
async def test_list_attestors_async_pages():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_attestors), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListAttestorsResponse(
attestors=[
resources.Attestor(),
resources.Attestor(),
resources.Attestor(),
],
next_page_token="abc",
),
service.ListAttestorsResponse(attestors=[], next_page_token="def",),
service.ListAttestorsResponse(
attestors=[resources.Attestor(),], next_page_token="ghi",
),
service.ListAttestorsResponse(
attestors=[resources.Attestor(), resources.Attestor(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_attestors(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [service.DeleteAttestorRequest, dict,])
def test_delete_attestor(request_type, transport: str = "grpc"):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.DeleteAttestorRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_attestor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
client.delete_attestor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.DeleteAttestorRequest()
@pytest.mark.asyncio
async def test_delete_attestor_async(
transport: str = "grpc_asyncio", request_type=service.DeleteAttestorRequest
):
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.DeleteAttestorRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_attestor_async_from_dict():
await test_delete_attestor_async(request_type=dict)
def test_delete_attestor_field_headers():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteAttestorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
call.return_value = None
client.delete_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_attestor_field_headers_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteAttestorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_attestor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_attestor_flattened():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_attestor(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_attestor_flattened_error():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_attestor(
service.DeleteAttestorRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_attestor_flattened_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_attestor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_attestor(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_attestor_flattened_error_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_attestor(
service.DeleteAttestorRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BinauthzManagementServiceV1Beta1Client(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = BinauthzManagementServiceV1Beta1Client(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = BinauthzManagementServiceV1Beta1Client(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BinauthzManagementServiceV1Beta1Client(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = BinauthzManagementServiceV1Beta1Client(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.BinauthzManagementServiceV1Beta1GrpcTransport,
)
def test_binauthz_management_service_v1_beta1_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.BinauthzManagementServiceV1Beta1Transport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_binauthz_management_service_v1_beta1_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1.transports.BinauthzManagementServiceV1Beta1Transport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.BinauthzManagementServiceV1Beta1Transport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"get_policy",
"update_policy",
"create_attestor",
"get_attestor",
"update_attestor",
"list_attestors",
"delete_attestor",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_binauthz_management_service_v1_beta1_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1.transports.BinauthzManagementServiceV1Beta1Transport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.BinauthzManagementServiceV1Beta1Transport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_binauthz_management_service_v1_beta1_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.binaryauthorization_v1beta1.services.binauthz_management_service_v1_beta1.transports.BinauthzManagementServiceV1Beta1Transport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.BinauthzManagementServiceV1Beta1Transport()
adc.assert_called_once()
def test_binauthz_management_service_v1_beta1_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
BinauthzManagementServiceV1Beta1Client()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
],
)
def test_binauthz_management_service_v1_beta1_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.BinauthzManagementServiceV1Beta1GrpcTransport, grpc_helpers),
(
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
grpc_helpers_async,
),
],
)
def test_binauthz_management_service_v1_beta1_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"binaryauthorization.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="binaryauthorization.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
],
)
def test_binauthz_management_service_v1_beta1_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_binauthz_management_service_v1_beta1_host_no_port():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="binaryauthorization.googleapis.com"
),
)
assert client.transport._host == "binaryauthorization.googleapis.com:443"
def test_binauthz_management_service_v1_beta1_host_with_port():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="binaryauthorization.googleapis.com:8000"
),
)
assert client.transport._host == "binaryauthorization.googleapis.com:8000"
def test_binauthz_management_service_v1_beta1_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.BinauthzManagementServiceV1Beta1GrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_binauthz_management_service_v1_beta1_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
],
)
def test_binauthz_management_service_v1_beta1_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
],
)
def test_binauthz_management_service_v1_beta1_transport_channel_mtls_with_adc(
transport_class,
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_attestor_path():
project = "squid"
attestor = "clam"
expected = "projects/{project}/attestors/{attestor}".format(
project=project, attestor=attestor,
)
actual = BinauthzManagementServiceV1Beta1Client.attestor_path(project, attestor)
assert expected == actual
def test_parse_attestor_path():
expected = {
"project": "whelk",
"attestor": "octopus",
}
path = BinauthzManagementServiceV1Beta1Client.attestor_path(**expected)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_attestor_path(path)
assert expected == actual
def test_policy_path():
project = "oyster"
expected = "projects/{project}/policy".format(project=project,)
actual = BinauthzManagementServiceV1Beta1Client.policy_path(project)
assert expected == actual
def test_parse_policy_path():
expected = {
"project": "nudibranch",
}
path = BinauthzManagementServiceV1Beta1Client.policy_path(**expected)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_policy_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = BinauthzManagementServiceV1Beta1Client.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = BinauthzManagementServiceV1Beta1Client.common_billing_account_path(
**expected
)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_common_billing_account_path(
path
)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = BinauthzManagementServiceV1Beta1Client.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = BinauthzManagementServiceV1Beta1Client.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = BinauthzManagementServiceV1Beta1Client.common_organization_path(
organization
)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = BinauthzManagementServiceV1Beta1Client.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = BinauthzManagementServiceV1Beta1Client.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = BinauthzManagementServiceV1Beta1Client.common_project_path(**expected)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = BinauthzManagementServiceV1Beta1Client.common_location_path(
project, location
)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = BinauthzManagementServiceV1Beta1Client.common_location_path(**expected)
# Check that the path construction is reversible.
actual = BinauthzManagementServiceV1Beta1Client.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.BinauthzManagementServiceV1Beta1Transport, "_prep_wrapped_messages"
) as prep:
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.BinauthzManagementServiceV1Beta1Transport, "_prep_wrapped_messages"
) as prep:
transport_class = BinauthzManagementServiceV1Beta1Client.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = BinauthzManagementServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = BinauthzManagementServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
BinauthzManagementServiceV1Beta1Client,
transports.BinauthzManagementServiceV1Beta1GrpcTransport,
),
(
BinauthzManagementServiceV1Beta1AsyncClient,
transports.BinauthzManagementServiceV1Beta1GrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-binary-authorization
|
tests/unit/gapic/binaryauthorization_v1beta1/test_binauthz_management_service_v1_beta1.py
|
Python
|
apache-2.0
| 116,500
|
[
"Octopus"
] |
d52f974fc2ac997e28ccee1ca96b070a68d1bf49cafaada959f8fb1de15cdb3b
|
import sqlite3
import pysam
def initialize_database(db):
"""Set up the schema for SQLite3 handle *db*.
"""
db.execute("""
create table sample_group (
id integer primary key,
label text unique,
is_control boolean
)""")
db.execute("""
create table samples (
id integer primary key,
sample_group integer references sample_group(id),
filename text,
n_reads integer
)""")
db.execute("""
create table transcripts (
id integer primary key,
label text,
length integer
)
""")
db.execute("""
create table leftsites (
sample integer references samples(id),
transcript integer references transcripts(id),
position integer not null,
n integer not null default 0,
primary key (sample,transcript,position)
)
""")
db.execute("""
create table multiplicities (
id integer primary key,
sample integer references leftsites(sample),
n integer
)
""")
db.execute("""
create table multiplicity_entries (
id integer primary key,
transcript integer references leftsites(transcript),
position integer references leftsites(position),
multiplicity integer references multiplicities(id)
)
""")
db.execute("""
create table inferences (
id integer primary key,
group1 integer references sample_group(id),
group2 integer references sample_group(id),
unique (group1,group2),
check (group1 <= group2)
)
""")
db.execute("""
create table posterior_samples (
inference integer references inferences(id),
transcript integer references transcripts(id),
variable text not null,
sample integer not null,
value float not null,
primary key (inference,transcript,variable,sample)
)
""")
db.commit()
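# --- Illustrative sketch (added, not part of the original module) ---
# A minimal smoke test of the schema above, assuming nothing beyond the
# standard library: open an in-memory database, create the tables and check
# that a couple of them exist.
def _schema_smoke_test():
    conn = sqlite3.connect(":memory:")
    initialize_database(conn)
    tables = [row[0] for row in conn.execute(
        "select name from sqlite_master where type='table'")]
    assert "samples" in tables and "leftsites" in tables
    conn.close()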
def insert_sample_group(db, label, is_control, group_id=None):
    if group_id is not None:
        x = db.execute("""select id from sample_group where id=?""", (group_id,)).fetchall()
        if x:  # fetchall() returns a (possibly empty) list, never None
            raise ValueError("Group %d already exists in database." % group_id)
    if group_id is None:
        db.execute("""insert into sample_group (label,is_control)
                      values (?,?)""", (label, is_control))
    else:
        db.execute("""insert into sample_group (id,label,is_control)
                      values (?,?,?)""", (group_id, label, is_control))
    (sample_group,) = db.execute("""select last_insert_rowid()""").fetchone()
    return sample_group
def insert_sample(db, filename, sample_group):
db.execute("""insert into samples (filename,sample_group)
values (?,?)""", (filename, sample_group))
(sample,) = db.execute("""select last_insert_rowid()""").fetchone()
return sample
def insert_or_check_transcripts(db, sample, transcripts):
if db.execute("""select count(id)>0
from transcripts""").fetchone()[0] == 1:
# Another call to load_sam has already loaded the transcripts
# for this analysis. Just check that the transcripts in this
# file match those already loaded.
for i,h in enumerate(transcripts):
q = db.execute("""select label,length from transcripts
where id=?""", (i,)).fetchone()
            if q is None:
                raise ValueError(("Failed checking transcripts against " + \
                                  "database: transcript in position " + \
                                  "%d with label %s does not exist in " + \
                                  "database.") % (i, h['SN']))
            else:
                (label, length) = q
                if label != h['SN'] or length != (h['LN'] - 38):
                    # Pair the database values with "Database had ..." and the
                    # file's values with "file had ..."; the stored length is
                    # LN-38, so add 38 back when reporting it.
                    raise ValueError(("Transcript at position %d does " + \
                                      "not match existing database. " + \
                                      "Database had label %s with " + \
                                      "length %d; file had label %s " + \
                                      "with length %d.") % (i, label, length + 38,
                                                            h['SN'], h['LN']))
else:
# The database has no transcripts. Insert them.
for i,h in enumerate(transcripts):
db.execute("""insert into transcripts(id,label,length)
values (?,?,?)""", (i,h['SN'],h['LN']-38))
for i,h in enumerate(transcripts):
for p in range(h['LN']-38+1): # Have to add 1 to get final leftsite
db.execute("""insert into leftsites(sample,transcript,position,n)
values (?,?,?,0)""", (sample,i,p))
def insert_reads_and_multiplicities(db, sample, samfile):
n_reads = 0
for readset in split_by_readname(samfile):
n_reads += 1
if len(readset) > 1:
targets = tuple([(r.rname,r.pos) for r in readset])
mid = (sample,targets).__hash__()
if db.execute("""select id from multiplicities where id=?""", (mid,)).fetchone() == None:
db.execute("""insert into multiplicities(id,sample,n)
values (?,?,1)""", (mid, sample))
for (t,p) in targets:
db.execute("""insert into multiplicity_entries
(transcript,position,multiplicity)
values (?,?,?)""", (t,p,mid))
else:
db.execute("""update multiplicities set n=n+1
where id=?""", (mid,))
for r in readset:
db.execute("""update leftsites set n=n+1 where
sample=? and transcript=? and position=?""",
(sample,r.rname,r.pos))
return n_reads
def load_sam(db, filename, sample_group):
s = pysam.Samfile(filename)
sample = insert_sample(db, filename, sample_group)
insert_or_check_transcripts(db, sample, s.header['SQ'])
n_reads = insert_reads_and_multiplicities(db, sample, s)
db.execute("""update samples set n_reads=? where id=?""",
(n_reads, sample))
db.commit()
s.close()
return sample
def split_by_readname(samfile):
"""Return an iterator over the reads in *samfile* grouped by read name.
The SAM file produced by bowtie is sorted by read name. Often we
want to work with all of the alignments of a particular read at
once. This function turns the flat list of reads into a list of
lists of reads, where each sublist has the same read name.
"""
    last_read = None
    accum = []
    for r in samfile:
        if r.qname != last_read:
            if accum:
                yield accum
            accum = [r]
            last_read = r.qname
        else:
            accum.append(r)
    if accum:  # guard against an empty SAM file
        yield accum
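# --- Illustrative sketch (added, not part of the original module) ---
# One way split_by_readname() might be used on its own; "alignments.sam" is a
# hypothetical read-name-sorted SAM file such as bowtie produces.
def _count_multimapped_reads(path="alignments.sam"):
    samfile = pysam.Samfile(path)
    try:
        return sum(1 for readset in split_by_readname(samfile) if len(readset) > 1)
    finally:
        samfile.close()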
|
madhadron/rnaseq
|
rnaseq/load.py
|
Python
|
gpl-3.0
| 7,679
|
[
"Bowtie",
"pysam"
] |
9a86b05f74f128555706b3d99ede8d20609cfa4648f71adb0061274eea3dfe7f
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
from glob import glob
from metacram import *
''' the actual pipeline '''
args = {}
out = args.get('out', 'out')
reads = glob('data/*')[0]
read_format = args.get('read_format', 'fastq')
# location of databases
db = {
'tc_seed': '~/cram/db/tc_seed.fasta',
'taxcollector': '~/cram/db/taxcollector.fa',
'subsystems2peg': '~/cram/db/subsystems2peg',
'subsystems2role': '~/cram/db/subsystems2role',
'seed_ss': '~/cram/db/seed_ss.txt',
}
# expand tilde to home directory
for k in db:
db[k] = os.path.expanduser(db[k])
# Creates a simple function to prepend the output directory
# to the directory/filename you specify
d = get_outdir(out)
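# Hedged illustration (added; not the metacram implementation): get_outdir(out)
# is assumed to behave roughly like this hypothetical helper, returning a
# function that joins relative paths onto the output directory.
def _example_outdir(prefix):
    return lambda relpath: os.path.join(prefix, relpath)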
# Define how to filter taxonomic matches
phylo = {
'phylum': { 'num': 2, 'sim': 0.80 },
'genus': { 'num': 8, 'sim': 0.95 },
}
ohai('running pipeline!')
## MAKE DIRECTORIES
[ run('mkdir -p %s' % i) for i in [
out,
d('orfs'),
d('anno'),
d('refs'),
d('tables'),
d('trimmed') ] ]
## TRIM READS
if not os.path.exists(d('trimmed/reads_trimmed.fasta')):
ohai('trimming sequences')
    sequences = (r for r in Dna(open(reads), type=read_format))  # honour the configurable read_format
trimmed = (Trim.trim(r) for r in sequences)
# filter by minimum length (no need for this w/ Velvet?)
trimmed = (i for i in trimmed if len(i) > 70)
with open(d('trimmed/reads_trimmed.fasta'), 'w') as handle:
for t in trimmed:
print >> handle, t.fasta
else:
ohai('trimming sequences [skipping]')
## ASSEMBLE WITH VELVET
# 3 sub assemblies:
kmers = {
31: d('contigs_31'),
51: d('contigs_51'),
71: d('contigs_71')
}
[ velvet(
reads = [
('fasta', 'short', d('trimmed/reads_trimmed.fasta')),
],
outdir = kmers[k],
k = k
) for k in kmers ]
# run final assembly
velvet(
reads = [('fasta', 'long', d('contigs_%s/contigs.fa' % k)) for k in kmers],
outdir = d('contigs_final'),
k = 51
)
## PREDICT OPEN READING FRAMES
prodigal(
input = d('contigs_final/contigs.fa'),
out = d('orfs/predicted_orfs') # prefix*
)
## IDENTIFY ORFS WITH BLASTP
# misbehaving ! just assume someone ran it already.
# formatdb(database = db['tc_seed'])
blastp(
    query = d("orfs/predicted_orfs.faa"),
    database = db['tc_seed'],
    out = d('anno/blast.txt'),  # original had a stray '%' format with an undefined `i`
    evalue = 0.00001,
    threads = 24,
)
## GET ORF COVERAGE using smalt (the steps below map reads with smalt, not CLC)
# index reference database
ohai('indexing orfs')
smalt_index(
reference = d('orfs/predicted_orfs.fna'),
name = d('orfs/predicted_orfs')
)
# reference assemble
ohai('smalt mapping reads to orfs')
smalt_map(
query = reads,
reference = d('orfs/predicted_orfs'),
out = d('refs/reads_vs_orfs.cigar'),
identity = 0.80
)
ohai('coverage table: reads vs orfs')
# make coverage table
smalt_coverage_table(
    assembly = d('refs/reads_vs_orfs.cigar'),
    blast = d('anno/blast.txt'),
    out = d('tables/orfs_coverage.txt'),
    seed = db['tc_seed']  # already tilde-expanded above; a literal '~/...' path would not be
)
# concatenate assembly coverage tables
ohai('concatenating assembly coverage tables')
|
audy/cram
|
bin/simple.py
|
Python
|
bsd-3-clause
| 3,135
|
[
"BLAST"
] |
b3b8c1e977c9a6fbc29977eeb1020ba0a258350fa2651e79a7f792f70824c3fb
|
"""
Downloads feeds, keys, packages and icons.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, sys
from zeroinstall import support
from zeroinstall.support import tasks, basedir, portable_rename
from zeroinstall.injector.namespaces import XMLNS_IFACE, config_site
from zeroinstall.injector import model
from zeroinstall.injector.model import Recipe, SafeException, escape, DistributionSource
from zeroinstall.injector.iface_cache import PendingFeed, ReplayAttack
from zeroinstall.injector.handler import NoTrustedKeys
from zeroinstall.injector import download
def _escape_slashes(path):
"""@type path: str
@rtype: str"""
return path.replace('/', '%23')
def _get_feed_dir(feed):
"""The algorithm from 0mirror.
@type feed: str
@rtype: str"""
if '#' in feed:
raise SafeException(_("Invalid URL '%s'") % feed)
scheme, rest = feed.split('://', 1)
assert '/' in rest, "Missing / in %s" % feed
domain, rest = rest.split('/', 1)
for x in [scheme, domain, rest]:
if not x or x.startswith('.'):
raise SafeException(_("Invalid URL '%s'") % feed)
return '/'.join(['feeds', scheme, domain, _escape_slashes(rest)])
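# Worked example (added for illustration, not in the original module):
#     _get_feed_dir("http://example.com/apps/foo.xml")
# returns "feeds/http/example.com/apps%23foo.xml" -- the scheme and domain
# become directories and any remaining slashes are escaped by
# _escape_slashes() above, matching the 0mirror layout.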
class KeyInfoFetcher(object):
"""Fetches information about a GPG key from a key-info server.
See L{Fetcher.fetch_key_info} for details.
@since: 0.42
Example:
>>> kf = KeyInfoFetcher(fetcher, 'https://server', fingerprint)
	>>> while True:
	...     print(kf.info)
	...     if kf.blocker is None: break
	...     print(kf.status)
	...     yield kf.blocker
"""
def __init__(self, fetcher, server, fingerprint):
"""@type fetcher: L{Fetcher}
@type server: str
@type fingerprint: str"""
self.fingerprint = fingerprint
self.info = []
self.blocker = None
if server is None: return
self.status = _('Fetching key information from %s...') % server
dl = fetcher.download_url(server + '/key/' + fingerprint)
from xml.dom import minidom
@tasks.async
def fetch_key_info():
tempfile = dl.tempfile
try:
yield dl.downloaded
self.blocker = None
tasks.check(dl.downloaded)
tempfile.seek(0)
doc = minidom.parse(tempfile)
if doc.documentElement.localName != 'key-lookup':
raise SafeException(_('Expected <key-lookup>, not <%s>') % doc.documentElement.localName)
self.info += doc.documentElement.childNodes
except Exception as ex:
doc = minidom.parseString('<item vote="bad"/>')
root = doc.documentElement
root.appendChild(doc.createTextNode(_('Error getting key information: %s') % ex))
self.info.append(root)
finally:
tempfile.close()
self.blocker = fetch_key_info()
class Fetcher(object):
"""Downloads and stores various things.
@ivar config: used to get handler, iface_cache and stores
@type config: L{config.Config}
@ivar key_info: caches information about GPG keys
@type key_info: {str: L{KeyInfoFetcher}}
"""
__slots__ = ['config', 'key_info', '_scheduler', 'external_store']
def __init__(self, config):
"""@type config: L{zeroinstall.injector.config.Config}"""
assert config.handler, "API change!"
self.config = config
self.key_info = {}
self._scheduler = None
self.external_store = os.environ.get('ZEROINSTALL_EXTERNAL_STORE')
@property
def handler(self):
return self.config.handler
@property
def scheduler(self):
if self._scheduler is None:
from . import scheduler
self._scheduler = scheduler.DownloadScheduler()
return self._scheduler
# (force is deprecated and ignored)
@tasks.async
def cook(self, required_digest, recipe, stores, force = False, impl_hint = None, dry_run = False, may_use_mirror = True):
"""Follow a Recipe.
@type required_digest: str
@type recipe: L{Recipe}
@type stores: L{zeroinstall.zerostore.Stores}
@type force: bool
@param impl_hint: the Implementation this is for (if any) as a hint for the GUI
@type dry_run: bool
@type may_use_mirror: bool
@see: L{download_impl} uses this method when appropriate"""
# Maybe we're taking this metaphor too far?
# Start a download for each ingredient
blockers = []
steps = []
try:
for stepdata in recipe.steps:
cls = StepRunner.class_for(stepdata)
step = cls(stepdata, impl_hint = impl_hint, may_use_mirror = may_use_mirror)
step.prepare(self, blockers)
steps.append(step)
while blockers:
yield blockers
tasks.check(blockers)
blockers = [b for b in blockers if not b.happened]
if self.external_store:
# Note: external_store will not yet work with non-<archive> steps.
streams = [step.stream for step in steps]
self._add_to_external_store(required_digest, recipe.steps, streams)
else:
# Create an empty directory for the new implementation
store = stores.stores[0]
tmpdir = store.get_tmp_dir_for(required_digest)
try:
# Unpack each of the downloaded archives into it in turn
for step in steps:
step.apply(tmpdir)
# Check that the result is correct and store it in the cache
store.check_manifest_and_rename(required_digest, tmpdir, dry_run=dry_run)
tmpdir = None
finally:
# If unpacking fails, remove the temporary directory
if tmpdir is not None:
support.ro_rmtree(tmpdir)
finally:
for step in steps:
try:
step.close()
except IOError as ex:
# Can get "close() called during
# concurrent operation on the same file
# object." if we're unlucky (Python problem).
logger.info("Failed to close: %s", ex)
def _get_mirror_url(self, feed_url, resource):
"""Return the URL of a mirror for this feed.
@type feed_url: str
@type resource: str
@rtype: str"""
if self.config.mirror is None:
return None
if feed_url.startswith('http://') or feed_url.startswith('https://'):
if support.urlparse(feed_url).hostname == 'localhost':
return None
return '%s/%s/%s' % (self.config.mirror, _get_feed_dir(feed_url), resource)
return None
def get_feed_mirror(self, url):
"""Return the URL of a mirror for this feed.
@type url: str
@rtype: str"""
return self._get_mirror_url(url, 'latest.xml')
def _get_archive_mirror(self, source):
"""@type source: L{model.DownloadSource}
@rtype: str"""
if self.config.mirror is None:
return None
if support.urlparse(source.url).hostname == 'localhost':
return None
if sys.version_info[0] > 2:
from urllib.parse import quote
else:
from urllib import quote
return '{mirror}/archive/{archive}'.format(
mirror = self.config.mirror,
archive = quote(source.url.replace('/', '#'), safe = ''))
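	# Worked example (added for illustration, using a hypothetical mirror): with
	# config.mirror set to "http://example.org/0mirror", an archive at
	# "http://example.com/releases/foo-1.0.tar.bz2" maps to roughly
	# "http://example.org/0mirror/archive/http%3A%23%23example.com%23releases%23foo-1.0.tar.bz2"
	# (slashes are first replaced by '#', then the whole URL is percent-encoded).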
def _get_impl_mirror(self, impl):
"""@type impl: L{zeroinstall.injector.model.ZeroInstallImplementation}
@rtype: str"""
return self._get_mirror_url(impl.feed.url, 'impl/' + _escape_slashes(impl.id))
@tasks.async
def get_packagekit_feed(self, feed_url):
"""Send a query to PackageKit (if available) for information about this package.
On success, the result is added to iface_cache.
@type feed_url: str"""
assert feed_url.startswith('distribution:'), feed_url
master_feed = self.config.iface_cache.get_feed(feed_url.split(':', 1)[1])
if master_feed:
fetch = self.config.iface_cache.distro.fetch_candidates(master_feed)
if fetch:
yield fetch
tasks.check(fetch)
# Force feed to be regenerated with the new information
self.config.iface_cache.get_feed(feed_url, force = True)
def download_and_import_feed(self, feed_url, iface_cache = None):
"""Download the feed, download any required keys, confirm trust if needed and import.
@param feed_url: the feed to be downloaded
@type feed_url: str
@param iface_cache: (deprecated)
@type iface_cache: L{zeroinstall.injector.iface_cache.IfaceCache} | None
@rtype: L{zeroinstall.support.tasks.Blocker}"""
from .download import DownloadAborted
assert iface_cache is None or iface_cache is self.config.iface_cache
if not self.config.handler.dry_run:
try:
self.config.iface_cache.mark_as_checking(feed_url)
except OSError as ex:
retval = tasks.Blocker("mark_as_checking")
retval.trigger(exception = (ex, None))
return retval
logger.debug(_("download_and_import_feed %(url)s"), {'url': feed_url})
assert not os.path.isabs(feed_url)
if feed_url.startswith('distribution:'):
return self.get_packagekit_feed(feed_url)
primary = self._download_and_import_feed(feed_url, use_mirror = False, timeout = 5)
@tasks.named_async("monitor feed downloads for " + feed_url)
def wait_for_downloads(primary):
# Download just the upstream feed, unless it takes too long...
timeout = primary.dl.timeout
yield primary, timeout
tasks.check(timeout)
try:
tasks.check(primary)
if primary.happened:
return # OK, primary succeeded!
# OK, maybe it's just being slow...
logger.info("Feed download from %s is taking a long time.", feed_url)
primary_ex = None
except NoTrustedKeys as ex:
raise # Don't bother trying the mirror if we have a trust problem
except ReplayAttack as ex:
raise # Don't bother trying the mirror if we have a replay attack
except DownloadAborted as ex:
raise # Don't bother trying the mirror if the user cancelled
except SafeException as ex:
# Primary failed
primary = None
primary_ex = ex
logger.warning(_("Feed download from %(url)s failed: %(exception)s"), {'url': feed_url, 'exception': ex})
# Start downloading from mirror...
mirror = self._download_and_import_feed(feed_url, use_mirror = True)
# Wait until both mirror and primary tasks are complete...
while True:
blockers = list(filter(None, [primary, mirror]))
if not blockers:
break
yield blockers
if primary:
try:
tasks.check(primary)
if primary.happened:
primary = None
# No point carrying on with the mirror once the primary has succeeded
if mirror:
logger.info(_("Primary feed download succeeded; aborting mirror download for %s") % feed_url)
mirror.dl.abort()
except SafeException as ex:
primary = None
primary_ex = ex
logger.info(_("Feed download from %(url)s failed; still trying mirror: %(exception)s"), {'url': feed_url, 'exception': ex})
if mirror:
try:
tasks.check(mirror)
if mirror.happened:
mirror = None
if primary_ex:
# We already warned; no need to raise an exception too,
# as the mirror download succeeded.
primary_ex = None
except ReplayAttack as ex:
logger.info(_("Version from mirror is older than cached version; ignoring it: %s"), ex)
mirror = None
primary_ex = None
except SafeException as ex:
logger.info(_("Mirror download failed: %s"), ex)
mirror = None
if primary_ex:
raise primary_ex
return wait_for_downloads(primary)
def _download_and_import_feed(self, feed_url, use_mirror, timeout = None):
"""Download and import a feed.
@type feed_url: str
@param use_mirror: False to use primary location; True to use mirror.
@type use_mirror: bool
@param timeout: create a blocker which triggers if a download hangs for this long
@type timeout: float | None
@rtype: L{zeroinstall.support.tasks.Blocker}"""
if use_mirror:
url = self.get_feed_mirror(feed_url)
if url is None: return None
logger.info(_("Trying mirror server for feed %s") % feed_url)
else:
url = feed_url
if self.config.handler.dry_run:
print(_("[dry-run] downloading feed {url}").format(url = url))
dl = self.download_url(url, hint = feed_url, timeout = timeout)
stream = dl.tempfile
@tasks.named_async("fetch_feed " + url)
def fetch_feed():
try:
yield dl.downloaded
tasks.check(dl.downloaded)
pending = PendingFeed(feed_url, stream)
if use_mirror:
# If we got the feed from a mirror, get the key from there too
key_mirror = self.config.mirror + '/keys/'
else:
key_mirror = None
keys_downloaded = tasks.Task(pending.download_keys(self, feed_hint = feed_url, key_mirror = key_mirror), _("download keys for %s") % feed_url)
yield keys_downloaded.finished
tasks.check(keys_downloaded.finished)
dry_run = self.handler.dry_run
if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml, dry_run = dry_run):
blocker = self.config.trust_mgr.confirm_keys(pending)
if blocker:
yield blocker
tasks.check(blocker)
if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml, dry_run = dry_run):
raise NoTrustedKeys(_("No signing keys trusted; not importing"))
finally:
stream.close()
task = fetch_feed()
task.dl = dl
return task
def fetch_key_info(self, fingerprint):
"""@type fingerprint: str
@rtype: L{KeyInfoFetcher}"""
try:
return self.key_info[fingerprint]
except KeyError:
if self.config.handler.dry_run:
print(_("[dry-run] asking {url} about key {key}").format(
url = self.config.key_info_server,
key = fingerprint))
self.key_info[fingerprint] = key_info = KeyInfoFetcher(self,
self.config.key_info_server, fingerprint)
return key_info
# (force is deprecated and ignored)
def download_impl(self, impl, retrieval_method, stores, force = False):
"""Download an implementation.
@param impl: the selected implementation
@type impl: L{model.ZeroInstallImplementation}
@param retrieval_method: a way of getting the implementation (e.g. an Archive or a Recipe)
@type retrieval_method: L{model.RetrievalMethod}
@param stores: where to store the downloaded implementation
@type stores: L{zerostore.Stores}
@type force: bool
@rtype: L{tasks.Blocker}"""
assert impl
assert retrieval_method
if isinstance(retrieval_method, DistributionSource):
return retrieval_method.install(self.handler)
from zeroinstall.zerostore import manifest, parse_algorithm_digest_pair
best = None
for digest in impl.digests:
alg_name, digest_value = parse_algorithm_digest_pair(digest)
alg = manifest.algorithms.get(alg_name, None)
if alg and (best is None or best.rating < alg.rating):
best = alg
required_digest = digest
if best is None:
if not impl.digests:
raise SafeException(_("No <manifest-digest> given for '%(implementation)s' version %(version)s") %
{'implementation': impl.feed.get_name(), 'version': impl.get_version()})
raise SafeException(_("Unknown digest algorithms '%(algorithms)s' for '%(implementation)s' version %(version)s") %
{'algorithms': impl.digests, 'implementation': impl.feed.get_name(), 'version': impl.get_version()})
@tasks.async
def download_impl(method):
original_exception = None
while True:
if not isinstance(method, Recipe):
# turn an individual method into a single-step Recipe
step = method
method = Recipe()
method.steps.append(step)
try:
blocker = self.cook(required_digest, method, stores,
impl_hint = impl,
dry_run = self.handler.dry_run,
may_use_mirror = original_exception is None)
yield blocker
tasks.check(blocker)
except download.DownloadError as ex:
if original_exception:
logger.info("Error from mirror: %s", ex)
raise original_exception
else:
original_exception = ex
mirror_url = self._get_impl_mirror(impl)
if mirror_url is not None:
logger.info("%s: trying implementation mirror at %s", ex, mirror_url)
method = model.DownloadSource(impl, mirror_url,
None, None, type = 'application/x-bzip-compressed-tar')
continue # Retry
raise
except SafeException as ex:
raise SafeException("Error fetching {url} {version}: {ex}".format(
url = impl.feed.url,
version = impl.get_version(),
ex = ex))
break
self.handler.impl_added_to_store(impl)
return download_impl(retrieval_method)
def _add_to_external_store(self, required_digest, steps, streams):
"""@type required_digest: str"""
from zeroinstall.zerostore.unpack import type_from_url
        # combine archive path, extract directory and MIME type arguments in an alternating fashion
        # (use lists rather than map() so len() and slice assignment work under Python 3)
        paths = [stream.name for stream in streams]
        extracts = [step.extract or "" for step in steps]
        types = [step.type or type_from_url(step.url) for step in steps]
        args = [None] * (len(paths) + len(extracts) + len(types))
        args[::3] = paths
        args[1::3] = extracts
        args[2::3] = types
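        # Worked example (added for illustration): with two steps the interleaving
        # above yields args == [path0, extract0, type0, path1, extract1, type1],
        # i.e. one (archive, extract-dir, MIME type) triple per recipe step.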
# close file handles to allow external processes access
for stream in streams:
stream.close()
# delegate extracting archives to external tool
import subprocess
subprocess.call([self.external_store, "add", required_digest] + args)
# delete temp files
for path in paths:
os.remove(path)
# (force is deprecated and ignored)
def download_archive(self, download_source, force = False, impl_hint = None, may_use_mirror = False):
"""Fetch an archive. You should normally call L{download_impl}
instead, since it handles other kinds of retrieval method too.
It is the caller's responsibility to ensure that the returned stream is closed.
@type download_source: L{model.DownloadSource}
@type force: bool
@type may_use_mirror: bool
@rtype: (L{Blocker}, file)"""
from zeroinstall.zerostore import unpack
mime_type = download_source.type
if not mime_type:
mime_type = unpack.type_from_url(download_source.url)
if not mime_type:
raise SafeException(_("No 'type' attribute on archive, and I can't guess from the name (%s)") % download_source.url)
if not self.external_store:
unpack.check_type_ok(mime_type)
if may_use_mirror:
mirror = self._get_archive_mirror(download_source)
else:
mirror = None
if self.config.handler.dry_run:
print(_("[dry-run] downloading archive {url}").format(url = download_source.url))
dl = self.download_url(download_source.url, hint = impl_hint, mirror_url = mirror)
if download_source.size is not None:
dl.expected_size = download_source.size + (download_source.start_offset or 0)
# (else don't know sizes for mirrored archives)
return (dl.downloaded, dl.tempfile)
def download_file(self, download_source, impl_hint=None):
"""Fetch a single file. You should normally call L{download_impl}
instead, since it handles other kinds of retrieval method too.
It is the caller's responsibility to ensure that the returned stream is closed.
@type download_source: L{zeroinstall.injector.model.FileSource}
@type impl_hint: L{zeroinstall.injector.model.ZeroInstallImplementation} | None
@rtype: tuple"""
if self.config.handler.dry_run:
print(_("[dry-run] downloading file {url}").format(url = download_source.url))
dl = self.download_url(download_source.url, hint = impl_hint)
dl.expected_size = download_source.size
return (dl.downloaded, dl.tempfile)
# (force is deprecated and ignored)
def download_icon(self, interface, force = False):
"""Download an icon for this interface and add it to the
icon cache. If the interface has no icon do nothing.
@type interface: L{zeroinstall.injector.model.Interface}
@type force: bool
@return: the task doing the import, or None
@rtype: L{tasks.Task}"""
logger.debug("download_icon %(interface)s", {'interface': interface})
modification_time = None
existing_icon = self.config.iface_cache.get_icon_path(interface)
if existing_icon:
file_mtime = os.stat(existing_icon).st_mtime
from email.utils import formatdate
modification_time = formatdate(timeval = file_mtime, localtime = False, usegmt = True)
feed = self.config.iface_cache.get_feed(interface.uri)
if feed is None:
return None
# Find a suitable icon to download
for icon in feed.get_metadata(XMLNS_IFACE, 'icon'):
type = icon.getAttribute('type')
if type != 'image/png':
logger.debug(_('Skipping non-PNG icon'))
continue
source = icon.getAttribute('href')
if source:
break
logger.warning(_('Missing "href" attribute on <icon> in %s'), interface)
else:
logger.info(_('No PNG icons found in %s'), interface)
return
dl = self.download_url(source, hint = interface, modification_time = modification_time)
@tasks.async
def download_and_add_icon():
stream = dl.tempfile
try:
yield dl.downloaded
tasks.check(dl.downloaded)
if dl.unmodified: return
stream.seek(0)
import shutil, tempfile
icons_cache = basedir.save_cache_path(config_site, 'interface_icons')
tmp_file = tempfile.NamedTemporaryFile(dir = icons_cache, delete = False)
shutil.copyfileobj(stream, tmp_file)
tmp_file.close()
icon_file = os.path.join(icons_cache, escape(interface.uri))
portable_rename(tmp_file.name, icon_file)
finally:
stream.close()
return download_and_add_icon()
def download_impls(self, implementations, stores):
"""Download the given implementations, choosing a suitable retrieval method for each.
If any of the retrieval methods are DistributionSources and
need confirmation, handler.confirm is called to check that the
installation should proceed.
@type implementations: [L{zeroinstall.injector.model.ZeroInstallImplementation}]
@type stores: L{zeroinstall.zerostore.Stores}
@rtype: L{zeroinstall.support.tasks.Blocker}"""
unsafe_impls = []
to_download = []
for impl in implementations:
logger.debug(_("start_downloading_impls: for %(feed)s get %(implementation)s"), {'feed': impl.feed, 'implementation': impl})
source = self.get_best_source(impl)
if not source:
raise SafeException(_("Implementation %(implementation_id)s of interface %(interface)s"
" cannot be downloaded (no download locations given in "
"interface!)") % {'implementation_id': impl.id, 'interface': impl.feed.get_name()})
to_download.append((impl, source))
if isinstance(source, DistributionSource) and source.needs_confirmation:
unsafe_impls.append(source.package_id)
@tasks.async
def download_impls():
if unsafe_impls:
confirm = self.handler.confirm_install(_('The following components need to be installed using native packages. '
'These come from your distribution, and should therefore be trustworthy, but they also '
'run with extra privileges. In particular, installing them may run extra services on your '
'computer or affect other users. You may be asked to enter a password to confirm. The '
'packages are:\n\n') + ('\n'.join('- ' + x for x in unsafe_impls)))
yield confirm
tasks.check(confirm)
blockers = []
for impl, source in to_download:
blockers.append(self.download_impl(impl, source, stores))
# Record the first error, log the rest
error = []
def dl_error(ex, tb = None):
if error:
self.handler.report_error(ex)
else:
error.append((ex, tb))
while blockers:
yield blockers
tasks.check(blockers, dl_error)
blockers = [b for b in blockers if not b.happened]
if error:
from zeroinstall import support
support.raise_with_traceback(*error[0])
if not to_download:
return None
return download_impls()
def get_best_source(self, impl):
"""Return the best download source for this implementation.
@type impl: L{zeroinstall.injector.model.ZeroInstallImplementation}
@rtype: L{model.RetrievalMethod}"""
if impl.download_sources:
return impl.download_sources[0]
return None
def download_url(self, url, hint = None, modification_time = None, expected_size = None, mirror_url = None, timeout = None):
"""The most low-level method here; just download a raw URL.
It is the caller's responsibility to ensure that dl.stream is closed.
@param url: the location to download from
@type url: str
@param hint: user-defined data to store on the Download (e.g. used by the GUI)
@param modification_time: don't download unless newer than this
@param mirror_url: an alternative URL to try if this one fails
@type mirror_url: str
@param timeout: create a blocker which triggers if a download hangs for this long
@type timeout: float | None
@rtype: L{download.Download}
@since: 1.5"""
if not (url.startswith('http:') or url.startswith('https:') or url.startswith('ftp:')):
raise SafeException(_("Unknown scheme in download URL '%s'") % url)
dl = download.Download(url, hint = hint, modification_time = modification_time, expected_size = expected_size, auto_delete = not self.external_store)
dl.mirror = mirror_url
self.handler.monitor_download(dl)
if timeout is not None:
dl.timeout = tasks.Blocker('Download timeout')
dl.downloaded = self.scheduler.download(dl, timeout = timeout)
return dl
class StepRunner(object):
"""The base class of all step runners.
@since: 1.10"""
def __init__(self, stepdata, impl_hint, may_use_mirror = True):
"""@type stepdata: L{zeroinstall.injector.model.RetrievalMethod}
@type may_use_mirror: bool"""
self.stepdata = stepdata
self.impl_hint = impl_hint
self.may_use_mirror = may_use_mirror
def prepare(self, fetcher, blockers):
"""@type fetcher: L{Fetcher}
@type blockers: [L{zeroinstall.support.tasks.Blocker}]"""
pass
@classmethod
def class_for(cls, model):
"""@type model: L{zeroinstall.injector.model.RetrievalMethod}"""
for subcls in cls.__subclasses__():
if subcls.model_type == type(model):
return subcls
raise Exception(_("Unknown download type for '%s'") % model)
def close(self):
"""Release any resources (called on success or failure)."""
pass
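# Illustrative note (not part of the original module): class_for() dispatches on the type of
# each retrieval step, so a model.DownloadSource resolves to DownloadStepRunner below and a
# model.RenameStep to RenameStepRunner.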
class RenameStepRunner(StepRunner):
"""A step runner for the <rename> step.
@since: 1.10"""
model_type = model.RenameStep
def apply(self, basedir):
"""@type basedir: str"""
source = native_path_within_base(basedir, self.stepdata.source)
dest = native_path_within_base(basedir, self.stepdata.dest)
_ensure_dir_exists(os.path.dirname(dest))
os.rename(source, dest)
class RemoveStepRunner(StepRunner):
"""A step runner for the <remove> step."""
model_type = model.RemoveStep
def apply(self, basedir):
"""@type basedir: str"""
path = native_path_within_base(basedir, self.stepdata.path)
support.ro_rmtree(path)
class DownloadStepRunner(StepRunner):
"""A step runner for the <archive> step.
@since: 1.10"""
model_type = model.DownloadSource
def prepare(self, fetcher, blockers):
"""@type fetcher: L{Fetcher}
@type blockers: [L{zeroinstall.support.tasks.Blocker}]"""
self.blocker, self.stream = fetcher.download_archive(self.stepdata, impl_hint = self.impl_hint, may_use_mirror = self.may_use_mirror)
assert self.stream
blockers.append(self.blocker)
def apply(self, basedir):
"""@type basedir: str"""
from zeroinstall.zerostore import unpack
assert self.blocker.happened
if self.stepdata.dest is not None:
basedir = native_path_within_base(basedir, self.stepdata.dest)
_ensure_dir_exists(basedir)
unpack.unpack_archive_over(self.stepdata.url, self.stream, basedir,
extract = self.stepdata.extract,
type=self.stepdata.type,
start_offset = self.stepdata.start_offset or 0)
def close(self):
self.stream.close()
class FileStepRunner(StepRunner):
"""A step runner for the <file> step."""
model_type = model.FileSource
def prepare(self, fetcher, blockers):
"""@type fetcher: L{Fetcher}
@type blockers: [L{zeroinstall.support.tasks.Blocker}]"""
self.blocker, self.stream = fetcher.download_file(self.stepdata,
impl_hint = self.impl_hint)
assert self.stream
blockers.append(self.blocker)
def apply(self, basedir):
"""@type basedir: str"""
import shutil
assert self.blocker.happened
dest = native_path_within_base(basedir, self.stepdata.dest)
_ensure_dir_exists(os.path.dirname(dest))
with open(dest, 'wb') as output:
shutil.copyfileobj(self.stream, output)
os.utime(dest, (0, 0))
def close(self):
self.stream.close()
def native_path_within_base(base, crossplatform_path):
"""Takes a cross-platform relative path (i.e using forward slashes, even on windows)
and returns the absolute, platform-native version of the path.
If the path does not resolve to a location within `base`, a SafeError is raised.
@type base: str
@type crossplatform_path: str
@rtype: str
@since: 1.10"""
assert os.path.isabs(base)
if crossplatform_path.startswith("/"):
raise SafeException("path %r is not within the base directory" % (crossplatform_path,))
native_path = os.path.join(*crossplatform_path.split("/"))
fullpath = os.path.realpath(os.path.join(base, native_path))
base = os.path.realpath(base)
if not fullpath.startswith(base + os.path.sep):
raise SafeException("path %r is not within the base directory" % (crossplatform_path,))
return fullpath
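# Illustrative usage sketch (hypothetical paths, not part of the original module):
#   native_path_within_base("/tmp/base", "sub/file.txt")  ->  "/tmp/base/sub/file.txt"
#   native_path_within_base("/tmp/base", "../escape")     raises SafeException
#   native_path_within_base("/tmp/base", "/absolute")     raises SafeException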
def _ensure_dir_exists(dest):
"""@type dest: str"""
if not os.path.isdir(dest):
os.makedirs(dest)
|
slovenwd/0install
|
zeroinstall/injector/fetch.py
|
Python
|
lgpl-2.1
| 28,659
|
[
"VisIt"
] |
7ad4e15c116b677d0c8ed1e8cd0b175ae460fda480b249a8cdb56727e826a3f5
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'LoFi',
'author': 'Desmond Morris',
'author_email': 'hi@desmondmorris.com',
'version': '0.0.1',
'install_requires': ['Flask', 'Flask-MongoEngine', 'nose'],
'packages': ['lofi', 'flask_cors'],
'scripts': [],
'name': 'LoFi'
}
setup(**config)
|
DoSomething/lofi-schools-flask
|
setup.py
|
Python
|
mit
| 395
|
[
"Desmond"
] |
2cfb06944abcaffaae78d6d63f00cf85e8f0d5f89f856a302163cef33f7914f5
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import product
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
from espressomd import constraints
class FieldTest(ut.TestCase):
"""Tests for external fields that are not space-dependent.
"""
system = espressomd.System(box_l=[10, 10, 10], time_step=0.01)
system.cell_system.skin = 0.
def potential(self, x):
x0 = 5.0 * np.ones_like(x)
return 0.1 * np.sum(np.power((x - x0), 2))
def force(self, x):
x0 = 5.0 * np.ones_like(x)
return -0.2 * (x - x0)
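# Note (not part of the original test): force() is the negative gradient of potential(),
# i.e. -d/dx [0.1 * (x - x0)**2] = -0.2 * (x - x0); the interpolated-field tests below rely on this.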
def tearDown(self):
self.system.constraints.clear()
self.system.part.clear()
def test_gravity(self):
g_const = np.array([1, 2, 3])
gravity = constraints.Gravity(g=g_const)
np.testing.assert_almost_equal(g_const, np.copy(gravity.g))
self.system.constraints.add(gravity)
if espressomd.has_features("MASS"):
p = self.system.part.add(pos=[0, 0, 0], mass=3.1)
else:
p = self.system.part.add(pos=[0, 0, 0])
self.system.integrator.run(0)
np.testing.assert_almost_equal(g_const, np.copy(p.f) / p.mass)
self.assertAlmostEqual(self.system.analysis.energy()['total'], 0.)
@utx.skipIfMissingFeatures("ELECTROSTATICS")
def test_linear_electric_potential(self):
E = np.array([1., 2., 3.])
phi0 = 4.
electric_field = constraints.LinearElectricPotential(E=E, phi0=phi0)
np.testing.assert_almost_equal(E, electric_field.E)
self.assertEqual(phi0, electric_field.phi0)
self.system.constraints.add(electric_field)
p = self.system.part.add(pos=[0.5, 0.5, 0.5])
p.q = -3.1
self.system.integrator.run(0)
np.testing.assert_almost_equal(p.q * E, np.copy(p.f))
self.assertAlmostEqual(self.system.analysis.energy()['total'],
p.q * (- np.dot(E, p.pos) + phi0))
self.assertAlmostEqual(self.system.analysis.energy()['total'],
self.system.analysis.energy()['external_fields'])
@utx.skipIfMissingFeatures("ELECTROSTATICS")
def test_electric_plane_wave(self):
E0 = np.array([1., -2., 3.])
k = np.array([-.1, .2, 0.3])
omega = 5.
phi = 1.4
electric_wave = constraints.ElectricPlaneWave(
E0=E0, k=k, omega=omega, phi=phi)
np.testing.assert_almost_equal(E0, electric_wave.E0)
np.testing.assert_almost_equal(k, electric_wave.k)
np.testing.assert_almost_equal(omega, electric_wave.omega)
np.testing.assert_almost_equal(phi, electric_wave.phi)
self.system.constraints.add(electric_wave)
p = self.system.part.add(pos=[0.4, 0.1, 0.11], q=-14.)
self.system.time = 1042.
self.system.integrator.run(0)
np.testing.assert_almost_equal(
np.copy(p.f), p.q * E0 * np.sin(np.dot(k, p.pos_folded)
- omega * self.system.time + phi))
self.system.integrator.run(10)
np.testing.assert_almost_equal(
np.copy(p.f), p.q * E0 * np.sin(np.dot(k, p.pos_folded)
- omega * self.system.time + phi))
def test_homogeneous_flow_field(self):
u = np.array([1., 2., 3.])
gamma = 2.3
flow_field = constraints.HomogeneousFlowField(u=u, gamma=gamma)
np.testing.assert_almost_equal(u, np.copy(flow_field.u))
self.system.constraints.add(flow_field)
p = self.system.part.add(pos=[0.5, 0.5, 0.5], v=[3., 4., 5.])
self.system.integrator.run(0)
np.testing.assert_almost_equal(gamma * (u - p.v), np.copy(p.f))
self.assertAlmostEqual(self.system.analysis.energy()['total'],
self.system.analysis.energy()['kinetic'])
def test_potential_field(self):
h = np.array([.2, .2, .2])
box = np.array([10., 10., 10.])
scaling = 2.6
field_data = constraints.PotentialField.field_from_fn(
box, h, self.potential)
F = constraints.PotentialField(field=field_data, grid_spacing=h,
default_scale=scaling)
p = self.system.part.add(pos=[0, 0, 0])
self.system.constraints.add(F)
for i in product(*map(range, 3 * [10])):
x = (h * i)
f_val = F.call_method("_eval_field", x=x)
np.testing.assert_allclose(f_val, self.potential(x), rtol=1e-3)
p.pos = x
self.system.integrator.run(0)
self.assertAlmostEqual(self.system.analysis.energy()['total'],
scaling * f_val, places=5)
np.testing.assert_allclose(
np.copy(p.f), scaling * self.force(x), rtol=1e-5)
@utx.skipIfMissingFeatures("ELECTROSTATICS")
def test_electric_potential_field(self):
h = np.array([.2, .2, .2])
box = np.array([10., 10., 10.])
field_data = constraints.ElectricPotential.field_from_fn(
box, h, self.potential)
F = constraints.ElectricPotential(field=field_data, grid_spacing=h)
p = self.system.part.add(pos=[0, 0, 0])
p.q = -3.1
self.system.constraints.add(F)
for i in product(*map(range, 3 * [10])):
x = (h * i)
f_val = F.call_method("_eval_field", x=x)
np.testing.assert_allclose(f_val, self.potential(x), rtol=1e-3)
p.pos = x
self.system.integrator.run(0)
self.assertAlmostEqual(self.system.analysis.energy()['total'],
p.q * f_val, places=5)
np.testing.assert_allclose(
np.copy(p.f), p.q * self.force(x), rtol=1e-5)
def test_force_field(self):
h = np.array([.8, .8, .8])
box = np.array([10., 10., 10.])
scaling = 2.6
field_data = constraints.ForceField.field_from_fn(box, h, self.force)
F = constraints.ForceField(field=field_data, grid_spacing=h,
default_scale=scaling)
p = self.system.part.add(pos=[0, 0, 0])
self.system.constraints.add(F)
for i in product(*map(range, 3 * [10])):
x = (h * i)
f_val = np.array(F.call_method("_eval_field", x=x))
np.testing.assert_allclose(f_val, self.force(x))
p.pos = x
self.system.integrator.run(0)
np.testing.assert_allclose(scaling * f_val, np.copy(p.f))
def test_flow_field(self):
h = np.array([.8, .8, .8])
box = np.array([10., 10., 10.])
gamma = 2.6
field_data = constraints.FlowField.field_from_fn(box, h, self.force)
F = constraints.FlowField(
field=field_data,
grid_spacing=h,
gamma=gamma)
p = self.system.part.add(pos=[0, 0, 0], v=[1, 2, 3])
self.system.constraints.add(F)
for i in product(*map(range, 3 * [10])):
x = (h * i)
f_val = np.array(F.call_method("_eval_field", x=x))
np.testing.assert_allclose(f_val, self.force(x))
p.pos = x
self.system.integrator.run(0)
np.testing.assert_allclose(
-gamma * (p.v - f_val), np.copy(p.f), atol=1e-12)
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/field_test.py
|
Python
|
gpl-3.0
| 8,115
|
[
"ESPResSo"
] |
dccb858a0dd5c83556c5053953b76e999a2669588ad28722dee9ef7c9bfb6ea6
|
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django import forms
from horizon import forms
from horizon import tables
from models import CrystalProject
from openstack_dashboard import api
from crystal_dashboard.api import projects as crystal_api
class EnableProject(tables.BatchAction):
"""
Enable a Project
"""
name = "enable_project"
success_url = "horizon:crystal:projects:index"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Enable Crystal",
u"Enable Crystal Projects",
count,
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Enabled Crystal",
u"Enabled Crystal Projects",
count,
)
def allowed(self, request, project):
return (project is None) or not project.crystal_enabled
def action(self, request, project_id):
crystal_api.enable_crystal(request, project_id)
class DisableProject(tables.DeleteAction):
name = "disable_project"
success_url = "horizon:crystal:projects:index"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Disable Crystal",
u"Disable Crystal Projects",
count,
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Disabled Crystal",
u"Disabled Crystal Projects",
count,
)
def allowed(self, request, project):
return (project is None) or project.crystal_enabled
def action(self, request, project_id):
crystal_api.disable_crystal(request, project_id)
class TenantFilterAction(tables.FilterAction):
if api.keystone.VERSIONS.active < 3:
filter_type = "query"
else:
filter_type = "server"
filter_choices = (('name', _("Project Name ="), True),
('id', _("Project ID ="), True),
('enabled', _("Enabled ="), True, _('e.g. Yes/No')),
('sds', _("Crystal Enabled ="), True, _('e.g. Yes/No')))
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, project_id):
project_info = api.keystone.tenant_get(request, project_id, admin=True)
response = crystal_api.is_crystal_project(request, project_id)
if response.status_code == 200:
crystal_enabled = True
else:
crystal_enabled = False
project = CrystalProject(project_info.id, project_info.name,
project_info.description, project_info.domain_id,
project_info.enabled, crystal_enabled)
return project
class TenantsTable(tables.DataTable):
name = tables.WrappingColumn('name', verbose_name=_('Name'),
form_field=forms.CharField(max_length=64))
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'),
form_field=forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
required=False))
id = tables.Column('id', verbose_name=_('Project ID'))
if api.keystone.VERSIONS.active >= 3:
domain_name = tables.Column(
'domain_name', verbose_name=_('Domain Name'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True,
filters=(filters.yesno, filters.capfirst),
form_field=forms.BooleanField(
label=_('Enabled'),
required=False))
crystal_enabled = tables.Column('crystal_enabled',
verbose_name=_('Crystal Enabled'), status=True,
filters=(filters.yesno, filters.capfirst),
form_field=forms.BooleanField(
label=_('Crystal Enabled'),
required=False)
)
class Meta(object):
name = "tenants"
verbose_name = _("Projects")
status_columns = ['crystal_enabled', ]
row_actions = (EnableProject, DisableProject)
table_actions = (TenantFilterAction,)
pagination_param = "tenant_marker"
row_class = UpdateRow
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/projects/projects/tables.py
|
Python
|
gpl-3.0
| 4,629
|
[
"CRYSTAL"
] |
2213f588367b56cd9553a8adaef338553f0ee7c70a69f6ee5d417d2692310a48
|
#Joe Young
#Sourced from - https://github.com/mrGorman/Python/blob/master/Searching/search_algorithms.py
def linear_search(key, array, index):
""" Function to return index of key in array using
a recursive linear search
"""
if index == len(array):#Checks if the whole list has been checked. If no value found, -1 is returned
return -1
elif array[index] != key: # If the key does not equal the current index
return linear_search(key, array, index + 1) #Recursive - Returns the same call with an increased index
else:
return index #Returns the position when the value is found
test_array = ["Aardvark", "Beaver", "Cat", "Dog", "Elephant", "Frog", "Giraffe", "Hyena",
"Iguana", "Jaguar", "Koala", "Lion", "Monkey", "Nyala", "Ostrich", "Parrot",
"Quail", "Rhino", "Snake", "Tiger", "Upupa", "Viper", "Worm", "Xenon", "Zebra"]
test_cases = ["Buffalo", "Snake", "Panther", "Cat", "Dog", "Kudu", "Wolf", "Jaguar"]
for case in test_cases:
position = linear_search(case, test_array, 0) + 1
if position > 0:
print(case, "found at position", position)
else:
print(case, "not found")
|
joeyoung658/A-Level_2016-18
|
Challenges/Sorting Algorithms/Linear.py
|
Python
|
gpl-3.0
| 1,187
|
[
"Jaguar"
] |
900478b8b46deb15ecbcb582cfd3c11bafb59626458a10e3f73765311c2da67e
|
"""Helpful utilities for building analysis pipelines.
"""
import gzip
import os
import tempfile
import time
import shutil
import contextlib
import itertools
import functools
import random
import ConfigParser
import collections
import fnmatch
import subprocess
import sys
import toolz as tz
import yaml
try:
from concurrent import futures
except ImportError:
try:
import futures
except ImportError:
futures = None
@contextlib.contextmanager
def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown()
def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def transform_to(ext):
"""
Decorator to create an output filename from an input filename with
the specified extension. Changes the extension; in_file is transformed
to a new type.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@transform(".bam")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.bam")
@transform(".bam")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = replace_suffix(os.path.basename(in_path), ext)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def filter_to(word):
"""
Decorator to create an output filename from an input filename by
adding a word onto the stem. in_file is filtered by the function
and the results are written to out_file. You would want to use
this over transform_to if you don't know the extension of the file
going in. This also memoizes the output file.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@filter_to(".foo")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam")
@filter_to(".foo")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.foo.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = append_stem(os.path.basename(in_path), word)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def memoize_outfile(ext=None, stem=None):
"""
Memoization decorator.
See docstring for transform_to and filter_to for details.
"""
if ext:
return transform_to(ext)
if stem:
return filter_to(stem)
def unpack_worlds(items):
"""Handle all the ways we can pass multiple samples for back-compatibility.
"""
# Unpack nested lists of samples grouped together (old IPython style)
if isinstance(items[0], (list, tuple)) and len(items[0]) == 1:
out = []
for d in items:
assert len(d) == 1 and isinstance(d[0], dict), len(d)
out.append(d[0])
# Unpack a single argument with multiple samples (CWL style)
elif isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(items[0], (list, tuple)):
out = items[0]
else:
out = items
return out
def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname
@contextlib.contextmanager
def chdir(new_dir):
"""Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
safe_makedir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
@contextlib.contextmanager
def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname)
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
os.path.getmtime(fname) >= os.path.getmtime(cmp_fname))
except OSError:
return False
def create_dirs(config, names=None):
if names is None:
names = config["dir"].keys()
for dname in names:
d = config["dir"][dname]
safe_makedir(d)
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
with open(fname, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason)
def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = ConfigParser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config
def add_full_path(dirname, basedir=None):
if basedir is None:
basedir = os.getcwd()
if not dirname.startswith("/"):
dirname = os.path.join(basedir, dirname)
return dirname
def splitext_plus(f):
"""Split on file extensions, allowing for zipped extensions.
"""
base, ext = os.path.splitext(f)
if ext in [".gz", ".bz2", ".zip"]:
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
return base, ext
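# Illustrative example (hypothetical filenames, not part of the original module):
#   splitext_plus("sample.fastq.gz")  ->  ("sample", ".fastq.gz")
#   splitext_plus("sample.bam")       ->  ("sample", ".bam")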
def remove_safe(f):
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except OSError:
pass
def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin
if file_exists(target):
return target
shutil.move(origin, target)
return target
def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname]
def copy_plus(orig, new):
"""Copy a file, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext)
def symlink_plus(orig, new):
"""Create relative symlinks and handle associated biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
with chdir(os.path.dirname(new)):
remove_safe(new + ext)
# Work around symlink issues on some filesystems. Randomly
# fail to symlink.
try:
os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext))
except OSError:
if not os.path.exists(new + ext) or not os.path.lexists(new + ext):
remove_safe(new + ext)
shutil.copyfile(orig + ext, new + ext)
orig_noext = splitext_plus(orig)[0]
new_noext = splitext_plus(new)[0]
for sub_ext in [".bai"]:
if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext):
with chdir(os.path.dirname(new_noext)):
os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
def open_gzipsafe(f):
return gzip.open(f) if f.endswith(".gz") else open(f)
def append_stem(to_transform, word):
"""
renames a filename or list of filenames with 'word' appended to the stem
of each one:
example: append_stem("/path/to/test.sam", "_filtered") ->
"/path/to/test_filtered.sam"
"""
if is_sequence(to_transform):
return [append_stem(f, word) for f in to_transform]
elif is_string(to_transform):
(base, ext) = splitext_plus(to_transform)
return "".join([base, word, ext])
else:
raise ValueError("append_stem takes a single filename as a string or "
"a list of filenames to transform.")
def replace_suffix(to_transform, suffix):
"""
replaces the suffix on a filename or list of filenames
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
"""
if is_sequence(to_transform):
transformed = []
for f in to_transform:
(base, _) = os.path.splitext(f)
transformed.append(base + suffix)
return transformed
elif is_string(to_transform):
(base, _) = os.path.splitext(to_transform)
return base + suffix
else:
raise ValueError("replace_suffix takes a single filename as a string or "
"a list of filenames to transform.")
# ## Functional programming
def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk
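# Illustrative example (not part of the original module):
#   list(partition_all(3, range(7)))  ->  [[0, 1, 2], [3, 4, 5], [6]]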
def robust_partition_all(n, iterable):
"""
replaces partition_all with a more robust version.
Workaround for a segfault in pybedtools when using a BedTool as an iterator:
https://github.com/daler/pybedtools/issues/88 for the discussion
"""
it = iter(iterable)
while True:
x = []
for _ in range(n):
try:
x.append(it.next())
except StopIteration:
yield x
# Omitting this StopIteration results in a segfault!
raise StopIteration
yield x
def partition(pred, iterable, tolist=False):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
ifalse = itertools.ifilterfalse(pred, t1)
itrue = itertools.ifilter(pred, t2)
if tolist:
return list(ifalse), list(itrue)
else:
return ifalse, itrue
# ## Dealing with configuration files
def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.iteritems():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.iteritems():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
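# Illustrative example (not part of the original module): mutating the copy does not affect
# the original for dicts of simple value types:
#   a = {"x": {"y": [1, 2]}}; b = deepish_copy(a); b["x"]["y"].append(3)
#   a["x"]["y"]  ->  [1, 2]   (the inner list was copied, not shared)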
def get_in(d, t, default=None):
"""
look up if you can get a tuple of values from a nested dictionary,
each item in the tuple a deeper layer
example: get_in({1: {2: 3}}, (1, 2)) -> 3
example: get_in({1: {2: 3}}, (2, 3)) -> None (the default)
"""
return tz.get_in(t, d, default)
def flatten(l):
"""
flatten an irregular list of lists
example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6]
lifted from: http://stackoverflow.com/questions/2158395/
"""
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el,
basestring):
for sub in flatten(el):
yield sub
else:
yield el
def is_sequence(arg):
"""
check if 'arg' is a sequence
example: is_sequence([]) -> True
example: is_sequence("lol") -> False
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
def is_pair(arg):
"""
check if 'arg' is a two-item sequence
"""
return is_sequence(arg) and len(arg) == 2
def is_string(arg):
return isinstance(arg, basestring)
def locate(pattern, root=os.curdir):
'''Locate all files matching supplied filename pattern in and below
supplied root directory.'''
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
def itersubclasses(cls, _seen=None):
"""
snagged from: http://code.activestate.com/recipes/576949/
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def replace_directory(out_files, dest_dir):
"""
change the output directory to dest_dir
can take a string (single file) or a list of files
"""
if is_sequence(out_files):
filenames = map(os.path.basename, out_files)
return [os.path.join(dest_dir, x) for x in filenames]
elif is_string(out_files):
return os.path.join(dest_dir, os.path.basename(out_files))
else:
raise ValueError("in_files must either be a sequence of filenames "
"or a string")
def which(program):
""" returns the path to an executable or None if it can't be found"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept
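# Illustrative example (hypothetical file, not part of the original module): keep 2 lines from
# a stream with uniform probability, without holding the whole stream in memory:
#   reservoir_sample(open("reads.txt"), 2, item_parser=str.strip)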
def compose(f, g):
return lambda x: f(g(x))
def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d
def Rscript_cmd():
"""Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version.
"""
rscript = which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "Rscript"))
if rscript:
return rscript
else:
return which("Rscript")
def R_sitelib():
"""Retrieve the R site-library installed with the bcbio installer.
"""
from bcbio import install
return os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
"lib", "R", "site-library")
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError, e:
return None
for line in output.split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None
def perl_cmd():
"""Retrieve path to locally installed conda Perl or first in PATH.
"""
perl = which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "perl"))
if perl:
return perl
else:
return which("perl")
def get_perl_exports(tooldir=None):
"""Environmental exports to use conda install perl and site library.
"""
from bcbio import install
if tooldir is None:
tooldir = install.get_defaults().get("tooldir", "/usr/local")
perllib = "%s/lib/perl5" % tooldir
perl_path = os.path.dirname(perl_cmd())
return "export PATH=%s:$PATH && export PERL5LIB=%s:$PERL5LIB" % (perl_path, perllib)
def is_gzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".gz", "gzip"]
def is_bzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".bz2", "bzip2"]
def open_possible_gzip(fname, flag="r"):
if is_gzipped(fname):
if "b" not in flag:
flag += "b"
return gzip.open(fname, flag)
else:
return open(fname, flag)
def filter_missing(xs):
"""
remove items from a list if they evaluate to False
"""
return filter(lambda x: x, xs)
def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df
def max_command_length():
"""
get the maximum length of the command line, in bytes, defaulting
to a conservative number if not set
http://www.in-ulm.de/~mascheck/various/argmax/
"""
DEFAULT_MAX_LENGTH = 150000 # lowest seen so far is 200k
try:
arg_max = os.sysconf('SC_ARG_MAX')
env_lines = len(os.environ) * 4
env_chars = sum([len(x) + len(y) for x, y in os.environ.iteritems()])
arg_length = arg_max - env_chars - env_lines - 2048
except ValueError:
arg_length = DEFAULT_MAX_LENGTH
return arg_length if arg_length > 0 else DEFAULT_MAX_LENGTH
|
gifford-lab/bcbio-nextgen
|
bcbio/utils.py
|
Python
|
mit
| 21,875
|
[
"Galaxy"
] |
663d51ee751f679403352be2c7a4228ad7456e687a06b1b19a0a30e419703c2c
|
#!/usr/bin/env python
from brian import *
duration = 0.1*second
N_sims = 1
lif_eq = ['dV/dt = (V_rest-V)/tau_mem : volt']
V_rest = 0*mV
V_reset = 0*mV
V_th = 15*mV
t_refr = 2*ms
tau_mem = 10*msecond
N_in = 100
f_in = 20*Hz
DV_s = 1*mV
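# Sanity check (not part of the original script): the mean input drive is
# N_in*f_in*DV_s*tau_mem = 100 * 20 Hz * 1 mV * 10 ms = 20 mV, above V_th = 15 mV,
# so the neuron is expected to fire; this matches the "Input rate voltage" printed below.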
inp = PoissonGroup(N_in,f_in)
nrns = NeuronGroup(N_sims,lif_eq,threshold=V_th,reset=V_reset,\
refractory=t_refr)
con = Connection(inp,nrns,'V')
con[:,0] = DV_s
nrns.rest()
mem = StateMonitor(nrns, 'V', record=True)
st = SpikeMonitor(nrns)
inp_mon = SpikeMonitor(inp)
run(duration)
mem.insert_spikes(st, 17*mV)
print "Neuron(s) fired at: ", st.spiketimes
print "Input rate voltage:\t\t", N_in*f_in*DV_s*tau_mem
print "Mean membrane potential:\t", mean(mem[0])*volt
subplot(2,1,1)
raster_plot(inp_mon)
title('Input spikes')
subplot(2,1,2)
plot(mem.times/ms, mem[0]/mV, mem.times/ms, ones(len(mem.times))*V_th/mV)
title('Membrane')
xlabel("Time (ms)")
ylabel("Membrane potential (mV)")
show()
|
achilleas-k/brian-scripts
|
lif.py
|
Python
|
apache-2.0
| 943
|
[
"Brian",
"NEURON"
] |
eddae70b90e70111b32431df324e7308ca9b2623e59e05196bc7b1e1c7195c33
|
#Author : Lewis Mervin lhm30@cam.ac.uk
#Supervisor : Dr. A. Bender
#All rights reserved 2014
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 08/04/14) and ChEMBL18
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import os
import sys
import operator
import numpy as np
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
#import user query
def importQuery(name):
query = open(name).read().splitlines()
matrix = []
for q in query:
matrix.append(calcFingerprints(q))
matrix = np.array(matrix, dtype=np.uint8)
return matrix
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
binary = fp.ToBitString()
return list(binary)
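# Illustrative example (hypothetical SMILES, not part of the original script):
#   calcFingerprints("CCO")  ->  a list of 2048 '0'/'1' characters (the ECFP4 bit string for ethanol)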
#get names of uniprots
def getUpName():
global u_name
t_file = open('classes_in_model.txt').read().splitlines()
t_file.pop(0)
for t in t_file:
t = t.split('\t')
u_name[t[1]] = t[0]
return
#import thresholds as specified by user
def importThresholds():
global thresholds
global metric
if metric == 'p':
m = 1
if metric == 'f':
m = 2
if metric == 'r':
m = 3
if metric == 'a':
m = 4
if metric == '0.5':
m = 5
t_file = open('thresholds.txt').read().splitlines()
for t in t_file:
t = t.split('\t')
thresholds[t[0]] = float(t[m])
return
def predict(input, name):
results = dict()
count=0
#for each model
for filename in glob.glob('models/*.pkl'):
hits = 0
count +=1
#unpickle model
with open(filename, 'rb') as fid:
bnb = cPickle.load(fid)
probs = bnb.predict_proba(input)
for prob in probs:
#if the probability of activity is above threshold then active
if prob[1] >= thresholds[filename[7:-4]]:
hits+=1
results[filename[7:-4]] = (float(hits)/float(len(input)))*100
#update precent finished
percent = (float(count)/float(t_count))*100
sys.stdout.write(' Performing Classification on '+name+' Molecules: %3d%%\r' % percent)
sys.stdout.flush()
print
return results
def calculateEnrichment(positives,background):
out = dict()
for uniprot, hits in positives.items():
if hits == 0:
out[uniprot] = 999.0
continue
try:
out[uniprot] = background[uniprot]/hits
except ZeroDivisionError:
out[uniprot] = 999.0
return out
#main
introMessage()
file_name = sys.argv[1]
file_name2 = sys.argv[2]
metric = sys.argv[3]
print ' Using Class Specific Cut-off Thresholds of : ' + metric
t_count = len(glob.glob('models/*.pkl'))
print ' Total Number of Classes : ' + str(t_count)
outf = open(file_name + '_vs_' + file_name2 + '_out_results_enriched.txt','w')
thresholds = dict()
importThresholds()
u_name = dict()
getUpName()
querymatrix = importQuery(file_name)
querymatrix2 = importQuery(file_name2)
print ' Total Number of Library Molecules : ' + str(len(querymatrix))
print ' Total Number of Background Molecules : ' + str(len(querymatrix2))
positives = predict(querymatrix, file_name)
background = predict(querymatrix2, file_name2)
enrichedTargets = calculateEnrichment(positives,background)
#write to file
outf.write('Uniprot\tName\tHits\tBG_Hits\tOdds_Ratio\n')
for uniprot, rate in sorted(enrichedTargets.items(), key=operator.itemgetter(1)):
if positives[uniprot] == 0: continue
outf.write(uniprot + '\t' + u_name[uniprot] + '\t' + str(round(positives[uniprot],2)) + '\t' + str(round(background[uniprot],2)) + '\t' + str(rate) + '\n')
outf.close()
|
lhm30/PIDGIN
|
predict_enriched_two_libraries.py
|
Python
|
mit
| 4,150
|
[
"RDKit"
] |
b1e83473004bf9de74fef3991d0383d49d5e5f36a6a49a2bc9a8c2f5fbaafb2e
|
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
PASS_VARS = {
'check_mode': 'check_mode',
'debug': '_debug',
'diff': '_diff',
'module_name': '_name',
'no_log': 'no_log',
'selinux_special_fs': '_selinux_special_fs',
'shell_executable': '_shell',
'socket': '_socket_path',
'syslog_facility': '_syslog_facility',
'tempdir': 'tempdir',
'verbosity': '_verbosity',
'version': 'ansible_version',
}
PASS_BOOLS = ('no_log', 'debug', 'diff')
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, frozenset, KeysView)
except ImportError:
SEQUENCETYPE = (Sequence, frozenset)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
else:
sj_version = json.__version__.split('.')
if sj_version < ['1', '6']:
# Version 1.5 released 2007-01-18 does not have the encoding parameter which we need
print('\n{"msg": "Error: Ansible requires the stdlib json or simplejson >= 1.6. Neither was found!", "failed": true}')
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
src=dict(),
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
follow=dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content=dict(no_log=True),
backup=dict(),
force=dict(),
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
attributes=dict(aliases=['attr']),
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
Used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
__subclasses__ returns only direct subclasses; this one goes down the whole class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
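# Illustrative example (hypothetical classes, not part of the original module):
#   class A(object): pass
#   class B(A): pass
#   class C(B): pass
#   get_all_subclasses(A)  ->  [B, C]   (C is found even though it is not a direct subclass of A)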
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to text (unicode) str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
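# Containers are not recursed into directly; each (old, new) container pair
# is queued and processed iteratively below, which avoids hitting Python's
# recursion limit on deeply nested structures.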
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
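# Scan the string right to left: each '@' may terminate a user:pass@host
# pattern, and everything between the preceding '://' (or the string start)
# and that '@' is treated as candidate credentials.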
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
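# A separator was found: the text from the '@' onward is kept as-is, the
# password between the ':' and the '@' is replaced with asterisks, and the
# scheme/user prefix up to and including the ':' is kept.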
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
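# Walk the size ranges from largest to smallest and stop at the first
# limit the value reaches (or at the explicitly requested unit).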
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (float(size) / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
ex:
human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
''' No unit given, returning raw number '''
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most of the time we really want to know "Can the current user execute this
file?" This function does not tell us that; it only reports whether an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the module's parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
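# Translate between single-character file attribute flags (as used by
# chattr/lsattr) and their long names via the FILE_ATTRIBUTES mapping.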
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
def _json_encode_fallback(obj):
if isinstance(obj, Set):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Cannot json serialize %s" % to_native(obj))
def jsonify(data, **kwargs):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
# Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
except UnicodeDecodeError:
continue
raise UnicodeError('Invalid unicode encoding encountered')
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self.aliases = {}
self._legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
self._options_context = list()
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
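# Map each supported argument 'type' name to its conversion/validation
# method; note that 'json' is handled as an alias for 'jsonarg'.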
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
# This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on an
NFS or other 'special' filesystem mount point, otherwise returns (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
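# Compare the path's mount point against each /proc/mounts entry and
# treat filesystems listed in _selinux_special_fs as 'special'.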
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
# prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
# The user classes the mode applies to form the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two list of equal length, one contains the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
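# os.umask() sets a new mask and returns the previous one, so set it to 0
# to read the current value and then immediately restore it.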
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
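# Map each symbolic permission character to mode bits for this user class;
# 'u', 'g' and 'o' as permission characters copy that class's existing bits,
# shifted into the target class's position.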
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
elif k.startswith('_ansible_'):
# handle setting internal properties from internal ansible vars
key = k.replace('_ansible_', '')
if key in PASS_BOOLS:
setattr(self, PASS_VARS[key], self.boolean(v))
else:
setattr(self, PASS_VARS[key], v)
# clean up internal params:
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
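# If any parameter of the group is supplied, all of them must be:
# a mix of present and absent parameters in the group is an error.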
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ", ".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
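# Each entry is (key, value, requirements) or, with a fourth element,
# (key, value, requirements, is_one_of).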
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
# If is_one_of is True, at least one requirement should be
# present, else all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
term = 'any'
else:
term = 'all'
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
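# Parse key=value pairs separated by commas or spaces with a small state
# machine that honours quoting and backslash escapes, so values may
# contain spaces or commas.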
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
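# The remaining fallback entries supply arguments for the strategy:
# a dict is used as keyword arguments, anything else as the positional
# argument list.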
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tempdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# add traceback if debug or high verbosity and it is missing
# Note: badly named as 'exception'; it has really always been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ', '.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
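    # Illustrative sketch (not part of the original module): a typical change-detection
    # pattern using the checksum helpers above. The path and helper are made up;
    # sha256() returns None when the file does not exist, so callers usually guard on that.
    #
    #     before = module.sha256('/etc/motd')
    #     write_new_content('/etc/motd')   # hypothetical helper
    #     changed = before != module.sha256('/etc/motd')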
def backup_local(self, fn):
        '''make a date-marked backup of the specified file; return the backup destination path, or an empty string if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
        '''Atomically move src to dest, copying attributes from dest.
        os.rename is used where possible because it is an atomic operation; the rest of the
        function works around limitations and corner cases and preserves the selinux context
        if possible.'''
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
                # only try workarounds for errno 18 (cross device), 1 (operation not permitted),
                # 13 (permission denied), 16 (device busy) and 26 (text file busy), the last of which
                # happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
b_dest_dir = os.path.dirname(b_dest)
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
native_dest_dir = b_dest_dir
native_suffix = os.path.basename(b_dest)
native_prefix = b('.ansible_tmp')
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
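    # Illustrative sketch (not part of the original module): the usual pattern for
    # using atomic_move from a module -- write to a temporary file first, then swap
    # it into place so readers never observe a half-written destination. Names are
    # assumptions for the example only.
    #
    #     fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(dest))
    #     with os.fdopen(fd, 'wb') as f:
    #         f.write(new_content)
    #     module.atomic_move(tmp_path, dest, unsafe_writes=module.params.get('unsafe_writes', False))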
def _unsafe_writes(self, src, dest):
        # Sadly there are some situations where we cannot ensure atomicity; only if
        # the user insists and we get the appropriate error do we update the file unsafely.
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
            finally:  # ensure files are closed in a 2.4-compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
        :arg args: the command to run
            * If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False, it will be split into a list and run with shell=False.
            * If args is a string and use_unsafe_shell=True, it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand shellisms
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
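    # Illustrative sketch (not part of the original module): invoking run_command from
    # a module. The binary and arguments are made up for the example; check_rc=False is
    # the default, so the non-zero return code is handled explicitly here.
    #
    #     git = module.get_bin_path('git', required=True)
    #     rc, out, err = module.run_command([git, 'rev-parse', 'HEAD'], cwd='/srv/repo')
    #     if rc != 0:
    #         module.fail_json(msg='git rev-parse failed', rc=rc, stdout=out, stderr=err)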
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
schlueter/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 114,268
|
[
"VisIt"
] |
02cc434a72206930df49e5c79056fb58d37bef9a8b3f0aa561cf40fa3876b65a
|
from __future__ import absolute_import, division, print_function
import numbers
import os
import re
import subprocess
import sys
import decimal
import warnings
from functools import partial
from operator import attrgetter
from itertools import chain
from collections import Iterator
from datetime import datetime, date, timedelta
from distutils.spawn import find_executable
import numpy as np
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from sqlalchemy.dialects import mssql, postgresql
from multipledispatch import MDNotImplementedError
import datashape
from datashape.dispatch import dispatch
from datashape.predicates import isdimension, isrecord, isscalar, isdatelike
from datashape import (
DataShape, Record, Option, var, dshape, Map, discover,
datetime_, date_, float64, int64, int_, string, bytes_, float32,
)
from toolz import (partition_all, keyfilter, valfilter, identity, concat,
curry, merge, memoize)
from toolz.curried import pluck, map
from ..compatibility import unicode, StringIO
from ..directory import Directory
from ..utils import (
keywords,
ignoring,
iter_except,
filter_kwargs,
literal_compile,
)
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
base = int, float, datetime, date, bool, str, decimal.Decimal, timedelta
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {
'int64': sa.BigInteger,
'int32': sa.Integer,
'int': sa.Integer,
'int16': sa.SmallInteger,
'float32': sa.REAL,
'float64': sa.FLOAT,
'float': sa.FLOAT,
'real': sa.FLOAT,
'string': sa.Text,
'date': sa.Date,
'time': sa.Time,
'datetime': sa.DateTime,
'bool': sa.Boolean,
"timedelta[unit='D']": sa.Interval(second_precision=0, day_precision=9),
"timedelta[unit='h']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='m']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='s']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='ms']": sa.Interval(second_precision=3, day_precision=0),
"timedelta[unit='us']": sa.Interval(second_precision=6, day_precision=0),
"timedelta[unit='ns']": sa.Interval(second_precision=9, day_precision=0),
# ??: sa.types.LargeBinary,
}
revtypes = dict(map(reversed, types.items()))
# Subclass mssql.TIMESTAMP for use when differentiating between
# mssql.TIMESTAMP and sa.TIMESTAMP.
# At the time of this writing, (mssql.TIMESTAMP == sa.TIMESTAMP) is True,
# which causes a collision when defining the revtypes mappings.
#
# See:
# https://bitbucket.org/zzzeek/sqlalchemy/issues/4092/type-problem-with-mssqltimestamp
class MSSQLTimestamp(mssql.TIMESTAMP):
pass
# Assign the custom subclass as the type to use instead of `mssql.TIMESTAMP`.
mssql.base.ischema_names['TIMESTAMP'] = MSSQLTimestamp
revtypes.update({
sa.DATETIME: datetime_,
sa.TIMESTAMP: datetime_,
sa.FLOAT: float64,
sa.DATE: date_,
sa.BIGINT: int64,
sa.INTEGER: int_,
sa.types.NullType: string,
sa.REAL: float32,
sa.Float: float64,
mssql.BIT: datashape.bool_,
mssql.DATETIMEOFFSET: string,
mssql.MONEY: float64,
mssql.SMALLMONEY: float32,
mssql.UNIQUEIDENTIFIER: string,
    # The SQL Server TIMESTAMP value doesn't correspond to the ISO standard;
    # it is instead just a binary(8) value with no relation to dates or times.
MSSQLTimestamp: bytes_,
})
# Types for which a precision can be specified.
# These are checked before checking membership in revtypes, because:
# 1) An instance of a precision type does not equal another instance with
#    the same precision.
#    (DOUBLE_PRECISION(precision=53) != DOUBLE_PRECISION(precision=53))
# 2) Precision types can be an instance of a type in revtypes.
#    isinstance(sa.Float(precision=53), sa.Float)
precision_types = {
sa.Float,
postgresql.base.DOUBLE_PRECISION
}
def precision_to_dtype(precision):
"""
Maps a float or double precision attribute to the desired dtype.
The mappings are as follows:
[1, 24] -> float32
[25, 53] -> float64
Values outside of those ranges raise a ``ValueError``.
    Parameters
    ----------
precision : int
A double or float precision. e.g. the value returned by
`postgresql.base.DOUBLE_PRECISION(precision=53).precision`
Returns
-------
dtype : datashape.dtype (float32|float64)
The dtype to use for columns of the specified precision.
"""
if isinstance(precision, numbers.Integral):
if 1 <= precision <= 24:
return float32
elif 25 <= precision <= 53:
return float64
raise ValueError("{} is not a supported precision".format(precision))
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), revtypes)
units_of_power = {
0: 's',
3: 'ms',
6: 'us',
9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def getbind(t, bind):
if bind is None:
return t.bind
if isinstance(bind, sa.engine.interfaces.Connectable):
return bind
return create_engine(bind)
def batch(sel, chunksize=10000, bind=None):
"""Execute `sel`, streaming row at a time and fetching from the database in
batches of size `chunksize`.
Parameters
----------
sel : sa.sql.Selectable
Selectable to execute
chunksize : int, optional, default 10000
        Number of rows to fetch from the database per batch
"""
def rowiterator(sel, chunksize=chunksize):
with getbind(sel, bind).connect() as conn:
result = conn.execute(sel)
for rows in iter_except(curry(result.fetchmany, size=chunksize),
sa.exc.ResourceClosedError):
if rows:
yield rows
else:
return
columns = [col.name for col in sel.columns]
iterator = rowiterator(sel)
return columns, concat(iterator)
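# Illustrative sketch (not part of the original source): consuming batch() lazily.
# The URI and table name are assumptions for the example; `rows` is a lazy iterator
# that fetches `chunksize` rows from the database at a time.
#
#     t = resource('sqlite:///data.db::events')
#     columns, rows = batch(sa.select([t]), chunksize=5000)
#     for row in rows:
#         handle(row)   # hypothetical consumer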
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
return discover(sa.Interval(day_precision=0, second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
return discover(t.adapt(sa.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if isinstance(typ, sa.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
            raise ValueError('Cannot infer INTERVAL type with parameters '
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if type(typ) in precision_types and typ.precision is not None:
return precision_to_dtype(typ.precision)
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return revtypes[type(typ)]
if isinstance(typ, sa.Numeric):
return datashape.Decimal(precision=typ.precision, scale=typ.scale)
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, 'U8')
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.ForeignKey, sa.sql.FromClause)
def discover_foreign_key_relationship(fk, parent, parent_measure=None):
if fk.column.table is not parent:
parent_measure = discover(fk.column.table).measure
return {fk.parent.name: Map(discover(fk.parent.type), parent_measure)}
@discover.register(sa.sql.elements.ColumnClause)
def discover_sqlalchemy_column(c):
meta = Option if getattr(c, 'nullable', True) else identity
return Record([(c.name, meta(discover(c.type)))])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
ordering = {str(c): i for i, c in enumerate(c for c in t.columns.keys())}
record = list(_process_columns(t.columns))
fkeys = [discover(fkey, t, parent_measure=Record(record))
for fkey in t.foreign_keys]
for name, column in merge(*fkeys).items():
index = ordering[name]
_, key_type = record[index]
# If the foreign-key is nullable the column (map) key
# should be an Option type
if isinstance(key_type, Option):
column.key = Option(column.key)
record[index] = (name, column)
return var * Record(record)
def _process_columns(columns):
"""Process the dshapes of the columns of a table.
Parameters
----------
columns : iterable[column]
The columns to process.
Yields
------
record_entry : tuple[str, dshape]
A record entry containing the name and type of each column.
"""
for col in columns:
(name, dtype), = discover(col).fields
yield str(name), dtype
@memoize
def metadata_of_engine(engine, schema=None):
return sa.MetaData(engine, schema=schema)
def create_engine(uri, connect_args=None, **kwargs):
"""Creates a cached sqlalchemy engine.
    This differs from ``sa.create_engine``'s API by only accepting
    ``uri`` positionally.
    If the ``uri`` is an in-memory sqlite database then this will not memoize
    the engine.
"""
return (
_create_engine_hashable_args
if uri == 'sqlite:///:memory:' else
_memoized_create_engine_hashable_args
)(uri, connect_args=frozenset((connect_args or {}).items()), **kwargs)
def _create_engine_hashable_args(uri, connect_args=None, **kwargs):
"""Unpacks non-hashable args for ``sa.create_engine`` and puts that back
into whatever structure is expected.
"""
return sa.create_engine(
uri,
connect_args=dict(connect_args or {}),
**kwargs
)
_memoized_create_engine_hashable_args = memoize(_create_engine_hashable_args)
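# Illustrative sketch (not part of the original source): repeated calls with the same
# URI and connect_args reuse the memoized engine, while in-memory sqlite engines are
# deliberately not cached.
#
#     e1 = create_engine('postgresql://localhost/db', connect_args={'sslmode': 'require'})
#     e2 = create_engine('postgresql://localhost/db', connect_args={'sslmode': 'require'})
#     assert e1 is e2
#     assert create_engine('sqlite:///:memory:') is not create_engine('sqlite:///:memory:')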
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = sa.MetaData(engine)
if tablename not in metadata.tables:
try:
metadata.reflect(engine,
views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect(engine)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
return discover(metadata_of_engine(engine))
@dispatch(sa.MetaData)
def discover(metadata):
try:
metadata.reflect(views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect()
pairs = []
for table in sorted(metadata.tables.values(), key=attrgetter('name')):
name = table.name
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
warnings.warn(
"Can not discover type of table {name}.\n"
"SQLAlchemy provided this error message:\n\t{msg}"
"\nSkipping.".format(
name=name,
msg=e.message,
),
stacklevel=3,
)
except NotImplementedError as e:
warnings.warn(
"Odo does not understand a SQLAlchemy type.\n"
"Odo provided the following error:\n\t{msg}"
"\nSkipping.".format(msg="\n\t".join(e.args)),
stacklevel=3,
)
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def validate_foreign_keys(ds, foreign_keys):
# passed foreign_keys and column in dshape, but not a ForeignKey type
for field in foreign_keys:
if field not in ds.measure.names:
raise TypeError('Requested foreign key field %r is not a field in '
'datashape %s' % (field, ds))
for field, typ in ds.measure.fields:
if field in foreign_keys and not isinstance(getattr(typ, 'ty', typ),
Map):
raise TypeError('Foreign key %s passed in but not a Map '
'datashape, got %s' % (field, typ))
if isinstance(typ, Map) and field not in foreign_keys:
raise TypeError('Map type %s found on column %s, but %r '
"wasn't found in %s" %
(typ, field, field, foreign_keys))
def dshape_to_table(name, ds, metadata=None, foreign_keys=None,
primary_key=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
if not isrecord(ds.measure):
raise TypeError('dshape measure must be a record type e.g., '
'"{a: int64, b: int64}". Input measure is %r' %
ds.measure)
if metadata is None:
metadata = sa.MetaData()
if foreign_keys is None:
foreign_keys = {}
validate_foreign_keys(ds, foreign_keys)
cols = dshape_to_alchemy(ds, primary_key=primary_key or frozenset())
cols.extend(sa.ForeignKeyConstraint([column_name], [referent])
for column_name, referent in foreign_keys.items())
t = sa.Table(name, metadata, *cols, schema=metadata.schema)
return attach_schema(t, t.schema)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, foreign_keys=None,
primary_key=None, **kwargs):
assert isrecord(ds), 'datashape must be Record type, got %s' % ds
metadata = metadata_of_engine(engine, schema=schema)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return engine
def dshape_to_alchemy(dshape, primary_key=frozenset()):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Map):
return dshape_to_alchemy(dshape.key.measure, primary_key=primary_key)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty, primary_key=primary_key)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(getattr(typ, 'ty', typ),
primary_key=primary_key),
primary_key=name in primary_key,
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1], primary_key=primary_key)
else:
return dshape_to_alchemy(dshape[0], primary_key=primary_key)
if isinstance(dshape, datashape.String):
fixlen = dshape[0].fixlen
if fixlen is None:
return sa.TEXT
string_types = dict(U=sa.Unicode, A=sa.String)
assert dshape.encoding is not None
return string_types[dshape.encoding[0]](length=fixlen)
if isinstance(dshape, datashape.DateTime):
return sa.DATETIME(timezone=dshape.tz is not None)
if isinstance(dshape, datashape.Decimal):
return sa.NUMERIC(dshape.precision, dshape.scale)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, bind=None, **kwargs):
_, rows = batch(sa.select([t]), bind=bind)
return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, bind=None, **kwargs):
func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
_, rows = batch(sel, bind=bind)
return func(rows)
@convert.register(base, sa.sql.Select, cost=200.0)
def select_to_base(sel, dshape=None, bind=None, **kwargs):
with getbind(sel, bind).connect() as conn:
return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, bind=None, **kwargs):
assert not isinstance(t, type)
bind = getbind(t, bind)
if not t.exists(bind=bind):
t.create(bind=bind)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return t
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
dshape = dshape and datashape.dshape(dshape)
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if set(names) != set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
with bind.begin():
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
bind.execute(t.insert(), chunk)
return t
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
s = sa.select([o])
return append(t, s, **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, bind=None, **kwargs):
t_bind = getbind(t, bind)
o_bind = getbind(o, bind)
if t_bind != o_bind:
return append(
t,
convert(Iterator, o, bind=bind, **kwargs),
bind=bind,
**kwargs
)
bind = t_bind
assert bind.has_table(t.name, t.schema), \
'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
bind.execute(query)
return t
def should_create_schema(ddl, target, bind, tables=None, state=None,
checkfirst=None, **kwargs):
return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
if schema is not None:
ddl = CreateSchema(schema, quote=True)
event.listen(
obj,
'before_create',
ddl.execute_if(
callable_=should_create_schema,
dialect='postgresql'
)
)
return obj
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
engine = create_engine(
uri,
# roundtrip through a frozenset of tuples so we can cache the dict
connect_args=kwargs.pop('connect_args', {}),
**filter_kwargs(sa.create_engine, kwargs)
)
ds = kwargs.pop('dshape', None)
schema = kwargs.pop('schema', None)
foreign_keys = kwargs.pop('foreign_keys', None)
primary_key = kwargs.pop('primary_key', None)
# we were also given a table name
if args and isinstance(args[0], (str, unicode)):
table_name, args = args[0], args[1:]
metadata = metadata_of_engine(engine, schema=schema)
with ignoring(sa.exc.NoSuchTableError):
return attach_schema(
sa.Table(
table_name,
metadata,
autoload_with=engine,
),
schema,
)
if ds:
t = dshape_to_table(table_name, ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
# We were not given a table name
if ds:
create_from_datashape(engine, ds, schema=schema,
foreign_keys=foreign_keys)
return engine
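# Illustrative sketch (not part of the original source): the two ways resource_sql is
# typically reached through odo's resource(). URIs, table name and dshape are made up.
#
#     engine = resource('postgresql://localhost/db')           # whole database -> Engine
#     accounts = resource(
#         'postgresql://localhost/db::accounts',               # single table, created from
#         dshape='var * {name: string, balance: float64}',     # the dshape if it is missing
#     )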
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
pattern = 'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] += '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 += '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table, bind=None):
bind = getbind(table, bind)
table.drop(bind=bind, checkfirst=True)
if table.exists(bind=bind):
raise ValueError('table %r dropped but still exists' % table.name)
metadata_of_engine(bind, schema=table.schema).remove(table)
@convert.register(sa.sql.Select, sa.Table, cost=0)
def table_to_select(t, **kwargs):
return t.select()
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=300.0)
def select_or_selectable_to_frame(el, bind=None, dshape=None, **kwargs):
bind = getbind(el, bind)
if bind.dialect.name == 'postgresql':
buf = StringIO()
append(CSV(None, buffer=buf), el, bind=bind, **kwargs)
buf.seek(0)
datetime_fields = []
other_dtypes = {}
optional_string_fields = []
try:
fields = dshape.measure.fields
except AttributeError:
fields = [(0, dshape.measure)]
for n, (field, dtype) in enumerate(fields):
if isdatelike(dtype):
datetime_fields.append(field)
elif isinstance(dtype, Option):
ty = dtype.ty
if ty in datashape.integral:
other_dtypes[field] = 'float64'
else:
other_dtypes[field] = ty.to_numpy_dtype()
if ty == string:
# work with integer column indices for the
# optional_string columns because we don't always
# know the column name and then the lookup will fail
# in the loop below.
optional_string_fields.append(n)
else:
other_dtypes[field] = dtype.to_numpy_dtype()
df = pd.read_csv(
buf,
parse_dates=datetime_fields,
dtype=other_dtypes,
skip_blank_lines=False,
escapechar=kwargs.get('escapechar', '\\'),
)
# read_csv really wants missing values to be NaN, but for
# string (object) columns, we want None to be missing
columns = df.columns
for field_ix in optional_string_fields:
# use ``df.loc[bool, df.columns[field_ix]]`` because you cannot do
# boolean slicing with ``df.iloc``.
field = columns[field_ix]
df.loc[df[field].isnull(), field] = None
return df
columns, rows = batch(el, bind=bind)
dtypes = {}
try:
fields = dshape.measure.fields
except AttributeError:
fields = [(columns[0], dshape.measure)]
for field, dtype in fields:
if isinstance(dtype, Option):
ty = dtype.ty
try:
dtypes[field] = ty.to_numpy_dtype()
except TypeError:
dtypes[field] = np.dtype(object)
else:
if np.issubdtype(dtypes[field], np.integer):
# cast nullable ints to float64 so NaN can be used for nulls
dtypes[field] = np.float64
else:
try:
dtypes[field] = dtype.to_numpy_dtype()
except TypeError:
dtypes[field] = np.dtype(object)
return pd.DataFrame(np.array(list(map(tuple, rows)),
dtype=[(str(c), dtypes[c]) for c in columns]))
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
def __init__(
self,
element,
path,
delimiter=',',
quotechar='"',
lineterminator='\n',
escapechar='\\',
header=True,
na_value='',
encoding=None,
bind=None,
):
self.element = element
self.path = path
self.delimiter = delimiter
self.quotechar = quotechar
self.lineterminator = lineterminator
self._bind = bind = getbind(element, bind)
# mysql cannot write headers
self.header = header and bind.dialect.name != 'mysql'
self.escapechar = escapechar
self.na_value = na_value
self.encoding = encoding
@property
def bind(self):
return self._bind
try:
from sqlalchemy.dialects.postgresql.psycopg2 import PGCompiler_psycopg2
except ImportError:
pass
else:
@partial(setattr, PGCompiler_psycopg2, 'visit_mod_binary')
def _postgres_visit_mod_binary(self, binary, operator, **kw):
"""Patched visit mod binary to work with literal_binds.
When https://github.com/zzzeek/sqlalchemy/pull/366 is merged we can
remove this patch.
"""
literal_binds = kw.get('literal_binds', False)
if (getattr(self.preparer, '_double_percents', True) and
not literal_binds):
return '{} %% {}'.format(
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
return '{} % {}'.format(
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
selectable = element.element
if isinstance(selectable, sa.Table):
selectable_part = compiler.preparer.format_table(selectable)
else:
selectable_part = '(%s)' % compiler.process(element.element, **kwargs)
return 'COPY %s TO STDOUT WITH (%s)' % (
selectable_part,
compiler.process(
sa.text(
"""
FORMAT CSV,
HEADER :header,
DELIMITER :delimiter,
QUOTE :quotechar,
NULL :na_value,
ESCAPE :escapechar,
ENCODING :encoding
""",
).bindparams(
header=element.header,
delimiter=element.delimiter,
quotechar=element.quotechar,
na_value=element.na_value,
escapechar=element.escapechar,
encoding=element.encoding or element.bind.execute(
'show client_encoding',
).scalar(),
),
**kwargs
),
)
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
selectable = element.element
return compiler.process(
sa.text(
"""{0} INTO OUTFILE :path
CHARACTER SET :encoding
FIELDS TERMINATED BY :delimiter
OPTIONALLY ENCLOSED BY :quotechar
ESCAPED BY :escapechar
LINES TERMINATED BY :lineterminator
""".format(
compiler.process(
selectable.select()
if isinstance(selectable, sa.Table) else selectable,
**kwargs
)
)
).bindparams(
path=element.path,
encoding=element.encoding or element.bind.execute(
'select @@character_set_client'
).scalar(),
delimiter=element.delimiter,
quotechar=element.quotechar,
escapechar=element.escapechar,
lineterminator=element.lineterminator
)
)
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
if element.encoding is not None:
raise ValueError(
"'encoding' keyword argument not supported for "
"SQLite to CSV conversion"
)
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
    # we are sending a SQL string directly to the SQLite process so we always
# need to bind everything before sending it
kwargs['literal_binds'] = True
selectable = element.element
sql = compiler.process(
selectable.select() if isinstance(selectable, sa.Table) else selectable,
**kwargs
) + ';'
sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode(
sys.getfilesystemencoding() # we send bytes to the process
)
cmd = ['sqlite3', '-csv',
'-%sheader' % ('no' if not element.header else ''),
'-separator', element.delimiter,
selectable.bind.url.database]
with open(element.path, mode='at') as f:
subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
# This will be a no-op since we're doing the write during the compile
return ''
try:
from sqlalchemy_redshift.dialect import UnloadFromSelect
from odo.backends.aws import S3, get_s3_connection
except ImportError:
pass
else:
@resource.register('s3://.*/$')
def resource_s3_prefix(uri, **kwargs):
return Directory(S3)(uri, **kwargs)
@append.register(Directory(S3), sa.Table)
    def redshift_to_s3_bucket(bucket, selectable, dshape=None, bind=None,
**kwargs):
s3_conn_kwargs = filter_kwargs(get_s3_connection, kwargs)
s3 = get_s3_connection(**s3_conn_kwargs)
unload_kwargs = filter_kwargs(UnloadFromSelect, kwargs)
unload_kwargs['unload_location'] = bucket.path
unload_kwargs['access_key_id'] = s3.access_key
unload_kwargs['secret_access_key'] = s3.secret_key
unload = UnloadFromSelect(selectable.select(), **unload_kwargs)
with getbind(selectable, bind).begin() as conn:
conn.execute(unload)
return bucket.path
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, bind=None, **kwargs):
kwargs = keyfilter(keywords(CopyToCSV).__contains__,
merge(csv.dialect, kwargs))
stmt = CopyToCSV(
selectable,
os.path.abspath(csv.path) if csv.path is not None else None,
bind=bind,
**kwargs
)
bind = getbind(selectable, bind)
if bind.dialect.name == 'postgresql':
with csv.open('ab+') as f:
with bind.begin() as conn:
conn.connection.cursor().copy_expert(literal_compile(stmt), f)
else:
with bind.begin() as conn:
conn.execute(stmt)
return csv
try:
from .hdfs import HDFS
except ImportError:
pass
else:
@append.register(HDFS(CSV), sa.sql.Selectable)
def append_selectable_to_hdfs_csv(*args, **kwargs):
raise MDNotImplementedError()
|
ContinuumIO/odo
|
odo/backends/sql.py
|
Python
|
bsd-3-clause
| 34,291
|
[
"VisIt"
] |
548881348d75fef2aad9514b979cd0457e08ee83785b3affecdfbf978fb9259a
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
from tensorforce import TensorforceError
from tensorforce.agents import TensorforceAgent
class AdvantageActorCritic(TensorforceAgent):
"""
[Advantage Actor-Critic](https://arxiv.org/abs/1602.01783) agent
(specification key: `a2c`).
Args:
states (specification): States specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create(...)`), arbitrarily nested dictionary of state
descriptions (usually taken from `Environment.states()`) with the following attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – state data type
(<span style="color:#00C000"><b>default</b></span>: "float").</li>
<li><b>shape</b> (<i>int | iter[int]</i>) – state shape
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete state values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum state value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
actions (specification): Actions specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create(...)`), arbitrarily nested dictionary of
action descriptions (usually taken from `Environment.actions()`) with the following
attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – action data type
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>shape</b> (<i>int > 0 | iter[int > 0]</i>) – action shape
(<span style="color:#00C000"><b>default</b></span>: scalar).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete action values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum action value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
        max_episode_timesteps (int > 0): Upper bound for number of timesteps per episode
(<span style="color:#00C000"><b>default</b></span>: not given, better implicitly
specified via `environment` argument for `Agent.create(...)`).
batch_size (<a href="../modules/parameters.html">parameter</a>, int > 0): Number of
timesteps per update batch
(<span style="color:#C00000"><b>required</b></span>).
network ("auto" | specification): Policy network configuration, see the
[networks documentation](../modules/networks.html)
(<span style="color:#00C000"><b>default</b></span>: "auto", automatically configured
network).
use_beta_distribution (bool): Whether to use the Beta distribution for bounded continuous
actions by default.
(<span style="color:#00C000"><b>default</b></span>: false).
memory (int > 0): Batch memory capacity, has to fit at least maximum batch_size + maximum
network/estimator horizon + 1 timesteps
(<span style="color:#00C000"><b>default</b></span>: minimum capacity, usually does not
need to be changed).
update_frequency ("never" | <a href="../modules/parameters.html">parameter</a>, int > 0 | 0.0 < float <= 1.0):
Frequency of updates, relative to batch_size if float
(<span style="color:#00C000"><b>default</b></span>: batch_size).
learning_rate (<a href="../modules/parameters.html">parameter</a>, float > 0.0): Optimizer
learning rate
(<span style="color:#00C000"><b>default</b></span>: 1e-3).
horizon ("episode" | <a href="../modules/parameters.html">parameter</a>, int >= 0): Horizon
of discounted-sum reward estimation before critic estimate
(<span style="color:#00C000"><b>default</b></span>: 1).
discount (<a href="../modules/parameters.html">parameter</a>, 0.0 <= float <= 1.0): Discount
factor for future rewards of discounted-sum reward estimation
(<span style="color:#00C000"><b>default</b></span>: 0.99).
return_processing (specification): Return processing as layer or list of layers, see the
[preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no return processing).
advantage_processing (specification): Advantage processing as layer or list of layers, see
the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no advantage processing).
predict_terminal_values (bool): Whether to predict the value of terminal states, usually
not required since max_episode_timesteps terminals are handled separately
(<span style="color:#00C000"><b>default</b></span>: false).
critic (specification): Critic network configuration, see the
[networks documentation](../modules/networks.html)
(<span style="color:#00C000"><b>default</b></span>: "auto").
critic_optimizer (float > 0.0 | specification): Critic optimizer configuration, see the
[optimizers documentation](../modules/optimizers.html), a float instead specifies a
custom weight for the critic loss
(<span style="color:#00C000"><b>default</b></span>: 1.0).
l2_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
L2 regularization loss weight
(<span style="color:#00C000"><b>default</b></span>: no L2 regularization).
entropy_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Entropy regularization loss weight, to discourage the policy distribution from being
"too certain"
(<span style="color:#00C000"><b>default</b></span>: no entropy regularization).
state_preprocessing (dict[specification]): State preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html),
specified per state-type or -name
(<span style="color:#00C000"><b>default</b></span>: linear normalization of bounded
float states to [-2.0, 2.0]).
reward_preprocessing (specification): Reward preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no reward preprocessing).
exploration (<a href="../modules/parameters.html">parameter</a> | dict[<a href="../modules/parameters.html">parameter</a>], float >= 0.0):
Exploration, defined as the probability for uniformly random output in case of `bool`
and `int` actions, and the standard deviation of Gaussian noise added to every output in
case of `float` actions, specified globally or per action-type or -name
(<span style="color:#00C000"><b>default</b></span>: no exploration).
variable_noise (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Add Gaussian noise with given standard deviation to all trainable variables, as
alternative exploration mechanism
(<span style="color:#00C000"><b>default</b></span>: no variable noise).<br/><br/>
>>>: For arguments below, see the [Tensorforce agent documentation](tensorforce.html).
parallel_interactions (int > 0)
config (specification)
saver (path | specification)
summarizer (path | specification)
tracking ("all" | iter[string])
recorder (path | specification)
"""
def __init__(
# Required
self, states, actions, batch_size,
# Environment
max_episode_timesteps=None,
# Network
network='auto', use_beta_distribution=False,
# Memory
memory='minimum',
# Optimization
update_frequency=1.0, learning_rate=1e-3,
# Reward estimation
horizon=1, discount=0.99, return_processing=None, advantage_processing=None,
predict_terminal_values=False,
# Critic
critic='auto', critic_optimizer=1.0,
# Preprocessing
state_preprocessing='linear_normalization', reward_preprocessing=None,
# Exploration
exploration=0.0, variable_noise=0.0,
# Regularization
l2_regularization=0.0, entropy_regularization=0.0,
# Parallel interactions
parallel_interactions=1,
# Config, saver, summarizer, tracking, recorder
config=None, saver=None, summarizer=None, tracking=None, recorder=None,
# Deprecated
**kwargs
):
if 'estimate_terminal' in kwargs:
raise TensorforceError.deprecated(
name='A2C', argument='estimate_terminal', replacement='predict_terminal_values'
)
if 'critic_network' in kwargs:
raise TensorforceError.deprecated(
name='A2C', argument='critic_network', replacement='critic'
)
self.spec = OrderedDict(
agent='a2c',
states=states, actions=actions, batch_size=batch_size,
max_episode_timesteps=max_episode_timesteps,
network=network, use_beta_distribution=use_beta_distribution,
memory=memory,
update_frequency=update_frequency, learning_rate=learning_rate,
horizon=horizon, discount=discount, return_processing=return_processing,
advantage_processing=advantage_processing,
predict_terminal_values=predict_terminal_values,
critic=critic, critic_optimizer=critic_optimizer,
state_preprocessing=state_preprocessing, reward_preprocessing=reward_preprocessing,
exploration=exploration, variable_noise=variable_noise,
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
parallel_interactions=parallel_interactions,
config=config, saver=saver, summarizer=summarizer, tracking=tracking, recorder=recorder
)
policy = dict(
type='parametrized_distributions', network=network, temperature=1.0,
use_beta_distribution=use_beta_distribution
)
if memory == 'minimum':
memory = dict(type='recent')
else:
memory = dict(type='recent', capacity=memory)
update = dict(unit='timesteps', batch_size=batch_size, frequency=update_frequency)
optimizer = dict(type='adam', learning_rate=learning_rate)
objective = 'policy_gradient'
reward_estimation = dict(
horizon=horizon, discount=discount, predict_horizon_values='early',
estimate_advantage=True, predict_action_values=False,
return_processing=return_processing, predict_terminal_values=predict_terminal_values
)
baseline = dict(type='parametrized_state_value', network=critic)
baseline_objective = dict(type='state_value')
super().__init__(
# Agent
states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
parallel_interactions=parallel_interactions, config=config, recorder=recorder,
# TensorforceModel
policy=policy, memory=memory, update=update, optimizer=optimizer, objective=objective,
reward_estimation=reward_estimation,
baseline=baseline, baseline_optimizer=critic_optimizer,
baseline_objective=baseline_objective,
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
state_preprocessing=state_preprocessing, reward_preprocessing=reward_preprocessing,
exploration=exploration, variable_noise=variable_noise,
saver=saver, summarizer=summarizer, tracking=tracking, **kwargs
)
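# --- Editorial usage sketch (not part of the original file) ---
# A minimal, hedged example of constructing this agent; the state/action specs and
# hyperparameter values below are illustrative assumptions only.
#
#     from tensorforce.agents import A2C
#
#     agent = A2C(
#         states=dict(type='float', shape=(8,)),    # assumed observation spec
#         actions=dict(type='int', num_values=4),   # assumed action spec
#         max_episode_timesteps=500,
#         batch_size=10,
#         learning_rate=1e-3,
#         critic='auto', critic_optimizer=1.0,
#     )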
|
reinforceio/tensorforce
|
tensorforce/agents/a2c.py
|
Python
|
apache-2.0
| 13,157
|
[
"Gaussian"
] |
8a2a0088ee1098bcadeaba57ed2a8ec102733431427fb30a411eebc35b0a31e9
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_HTTPError,
compat_kwargs,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
bool_or_none,
clean_html,
error_to_compat_str,
extract_attributes,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
_YOUTUBE_CLIENT_HEADERS = {
'x-youtube-client-name': '1',
'x-youtube-client-version': '1.20200609.04.02',
}
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
# TODO: reverse actual botguard identifier generation algo
'bgRequest': '["identifier",""]',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _download_webpage_handle(self, *args, **kwargs):
query = kwargs.get('query', {}).copy()
query['disable_polymer'] = 'true'
kwargs['query'] = query
return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
count = 0
retries = 3
while count <= retries:
try:
# Downloading page may result in intermittent 5xx HTTP error
# that is usually worked around with a retry
more = self._download_json(
'https://www.youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s%s'
% (page_num, ' (retry #%d)' % count if count else ''),
transform_source=uppercase_escape,
headers=self._YOUTUBE_CLIENT_HEADERS)
break
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
count += 1
if count <= retries:
continue
raise
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page):
for mobj in re.finditer(video_re, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(
mobj.group('title')) if 'title' in mobj.groupdict() else None
if video_title:
video_title = video_title.strip()
if video_title == '► Play all':
video_title = None
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
self.extract_videos_from_page_impl(
self._VIDEO_RE, page, ids_in_page, titles_in_page)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in orderedSet(re.findall(
r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
(?:www\.)?invidious\.nixnet\.xyz/|
(?:www\.)?invidious\.drycat\.fr/|
(?:www\.)?tube\.poal\.co/|
(?:www\.)?vid\.wxzm\.sx/|
(?:www\.)?yewtu\.be/|
(?:www\.)?yt\.elukerio\.org/|
(?:www\.)?yt\.lelux\.fi/|
(?:www\.)?invidious\.ggc-project\.de/|
(?:www\.)?yt\.maisputain\.ovh/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.toot\.koeln/|
(?:www\.)?invidious\.fdn\.fr/|
(?:www\.)?watch\.nettohikari\.com/|
(?:www\.)?kgg2m7yk5aybusll\.onion/|
(?:www\.)?qklhadlycap4cnod\.onion/|
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_PLAYER_INFO_RE = (
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:19a2f98d9032b9311e686ed039564f63',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'duration': 180,
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'creator': 'Icona Pop',
'track': 'I Love It (feat. Charli XCX)',
'artist': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
'duration': 419,
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'creator': 'Justin Timberlake',
'track': 'Tunnel Vision',
'artist': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:307195cd21ff7fa352270fe884570ef0',
'duration': 242,
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'duration': 246,
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'Dada Life, deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'This Machine Kills Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# Retrieve 'artist' field from 'Artist:' in video description
# when it is present on youtube music video
'url': 'https://www.youtube.com/watch?v=k0jLE7tTwjY',
'info_dict': {
'id': 'k0jLE7tTwjY',
'ext': 'mp4',
'title': 'Latch Feat. Sam Smith',
'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335',
'upload_date': '20150110',
'uploader': 'Various Artists - Topic',
'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w',
'artist': 'Disclosure',
'track': 'Latch Feat. Sam Smith',
'album': 'Latch Featuring Sam Smith',
'release_date': '20121008',
'release_year': 2012,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# handle multiple artists on youtube music video
'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA',
'info_dict': {
'id': '74qn0eJSjpA',
'ext': 'mp4',
'title': 'Eastside',
'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2',
'upload_date': '20180710',
'uploader': 'Benny Blanco - Topic',
'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A',
'artist': 'benny blanco, Halsey, Khalid',
'track': 'Eastside',
'album': 'Eastside',
'release_date': '20180713',
'release_year': 2018,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# handle youtube music video with release_year and no release_date
'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M',
'info_dict': {
'id': '-hcAI0g-f5M',
'ext': 'mp4',
'title': 'Put It On Me',
'description': 'md5:f6422397c07c4c907c6638e1fee380a5',
'upload_date': '20180426',
'uploader': 'Matt Maeson - Topic',
'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ',
'artist': 'Matt Maeson',
'track': 'Put It On Me',
'album': 'The Hearse',
'release_date': None,
'release_year': 2018,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('ext'), id_m.group('id')
def _extract_signature_function(self, video_id, player_url, example_sig):
player_type, player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
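            # e.g. _genslice(2, 5, 1) -> 's[2:6]', _genslice(0, 4, 2) -> 's[:6:2]' (illustrative values)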
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
            # User data may contain arbitrary character sequences that may affect
            # JSON extraction with regex, e.g. when '};' is contained the second
            # regex won't capture the whole JSON. For now we work around this by
            # trying the more specific regex first; proper quoted-string handling,
            # to be implemented in the future, will replace this workaround (see
            # https://github.com/ytdl-org/youtube-dl/issues/7468,
            # https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
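        # 16 characters drawn from the 64-character alphabet above; '& 63' keeps each index in range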
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
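        # e.g. extract_id('https://www.youtube.com/watch?v=BaW_jenozKc') returns 'BaW_jenozKc'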
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_chapters_from_json(self, webpage, video_id, duration):
if not webpage:
return
player = self._parse_json(
self._search_regex(
r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage,
'player args', default='{}'),
video_id, fatal=False)
if not player or not isinstance(player, dict):
return
watch_next_response = player.get('watch_next_response')
if not isinstance(watch_next_response, compat_str):
return
response = self._parse_json(watch_next_response, video_id, fatal=False)
if not response or not isinstance(response, dict):
return
chapters_list = try_get(
response,
lambda x: x['playerOverlays']
['playerOverlayRenderer']
['decoratedPlayerBarRenderer']
['decoratedPlayerBarRenderer']
['playerBar']
['chapteredPlayerBarRenderer']
['chapters'],
list)
if not chapters_list:
return
def chapter_time(chapter):
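# timeRangeStartMillis is expressed in milliseconds; scale=1000 converts it to seconds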
return float_or_none(
try_get(
chapter,
lambda x: x['chapterRenderer']['timeRangeStartMillis'],
int),
scale=1000)
chapters = []
for next_num, chapter in enumerate(chapters_list, start=1):
start_time = chapter_time(chapter)
if start_time is None:
continue
end_time = (chapter_time(chapters_list[next_num])
if next_num < len(chapters_list) else duration)
if end_time is None:
continue
title = try_get(
chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
compat_str)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': title,
})
return chapters
@staticmethod
def _extract_chapters_from_description(description, duration):
if not description:
return None
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _extract_chapters(self, webpage, description, video_id, duration):
return (self._extract_chapters_from_json(webpage, video_id, duration)
or self._extract_chapters_from_description(description, duration))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage, urlh = self._download_webpage_handle(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
pl_response = str_or_none(player_response)
if not pl_response:
return
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
add_dash_mpd_pr(pl_response)
return pl_response
player_response = {}
# Get video info
video_info = {}
embed_webpage = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without logging in to YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
try:
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
except ExtractorError:
video_info_webpage = None
if video_info_webpage:
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
add_dash_mpd(video_info)
view_count = extract_view_count(video_info)
else:
age_gate = False
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but a preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not player_response:
player_response = extract_player_response(args.get('player_response'), video_id)
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
def extract_unavailable_message():
messages = []
for tag, kind in (('h1', 'message'), ('div', 'submessage')):
msg = self._html_search_regex(
r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
video_webpage, 'unavailable %s' % kind, default=None)
if msg:
messages.append(msg)
if messages:
return '\n'.join(messages)
if not video_info and not player_response:
unavailable_message = extract_unavailable_message()
if not unavailable_message:
unavailable_message = 'Unable to extract video data'
raise ExtractorError(
'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
if not isinstance(video_info, dict):
video_info = {}
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
microformat = try_get(
player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
video_title = video_info.get('title', [None])[0] or video_details.get('title')
if not video_title:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
video_description = video_details.get('shortDescription') or self._html_search_meta('description', video_webpage)
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
if view_count is None and microformat:
view_count = int_or_none(microformat.get('viewCount'))
if is_live is None:
is_live = bool_or_none(video_details.get('isLive'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats = []
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
for fmt in streaming_formats:
if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
if not url:
continue
else:
cipher = None
url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = fmt.get('itag') or url_data['itag'][0]
if not format_id:
continue
format_id = compat_str(format_id)
if cipher:
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
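# the 's' parameter carries an encrypted signature that has to be decrypted with transformations extracted from the player JS before it can be appended to the URL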
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_desc = 'unknown'
else:
player_type, player_version = self._extract_player_info(player_url)
player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
url += '&%s=%s' % (sp, signature)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest, thus corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
if width is None:
width = int_or_none(fmt.get('width'))
if height is None:
height = int_or_none(fmt.get('height'))
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0] or fmt.get('quality')
quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
more_fields = {
'filesize': filesize,
'tbr': tbr,
'width': width,
'height': height,
'fps': fps,
'format_note': quality_label or quality,
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str))
or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = extract_unavailable_message()
if not error_message:
error_message = clean_html(try_get(
player_response, lambda x: x['playabilityStatus']['reason'],
compat_str))
if not error_message:
error_message = clean_html(
try_get(video_info, lambda x: x['reason'][0], compat_str))
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
if owner_profile_url:
video_uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
default=None)
video_uploader_url = owner_profile_url
channel_id = (
str_or_none(video_details.get('channelId'))
or self._html_search_meta(
'channelId', video_webpage, 'channel id', default=None)
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
thumbnails = []
thumbnails_list = try_get(
video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
for t in thumbnails_list:
if not isinstance(t, dict):
continue
thumbnail_url = url_or_none(t.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
if not thumbnails:
video_thumbnail = None
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
if thumbnail_url:
video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
if video_thumbnail:
thumbnails.append({'url': video_thumbnail})
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
album = extract_meta('Album')
# Youtube Music Auto-generated description
release_date = release_year = None
if video_description:
mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
if mobj:
if not track:
track = mobj.group('track').strip()
if not artist:
artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
if not album:
album = mobj.group('album').strip()
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = int(release_date[:4])
if release_year:
release_year = int(release_year)
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
category = None
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
if not category:
category = try_get(
microformat, lambda x: x['category'], compat_str)
video_categories = None if category is None else [category]
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
if not video_tags:
video_tags = try_get(video_details, lambda x: x['keywords'], list)
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
average_rating = (
float_or_none(video_details.get('averageRating'))
or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
xsrf_token = self._search_regex(
r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
invideo_url = try_get(
player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
if xsrf_token and invideo_url:
xsrf_field_name = self._search_regex(
r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
video_webpage, 'xsrf field name',
group='xsrf_field_name', default='session_token')
video_annotations = self._download_webpage(
self._proto_relative_url(invideo_url),
video_id, note='Downloading annotations',
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
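# DASH manifest URLs may embed an encrypted signature in an /s/<sig> path segment; it is replaced with the decrypted /signature/<sig> form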
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403, therefore
# allow them to fail without a bug report message if at least one
# DASH manifest has already succeeded. This is a temporary workaround to reduce
# the burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH; they
# contain less info and can be wrong because we use
# fixed values (for example the resolution). See
# https://github.com/ytdl-org/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
if not formats:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnails': thumbnails,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': average_rating,
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
'album': album,
'release_date': release_date,
'release_year': release_year,
}
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?'
_VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'ChRiStIaAn008',
},
'playlist_count': 96,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickydoo',
'uploader_id': 'Wickydoo',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'Cauchemar89',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 485,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'sdragonfang',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
},
'skip': 'This playlist does not exist',
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'InterstellarMovie1',
},
'playlist_mincount': 21,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'Computerphile',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for item in re.findall(
r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page):
attrs = extract_attributes(item)
video_id = attrs['data-video-id']
video_title = unescapeHTML(attrs.get('data-title'))
if video_title:
video_title = video_title.strip()
ids_in_page.append(video_id)
titles_in_page.append(video_title)
# Fallback with old _VIDEO_RE
self.extract_videos_from_page_impl(
self._VIDEO_RE, page, ids_in_page, titles_in_page)
# Relaxed fallbacks
self.extract_videos_from_page_impl(
r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page,
ids_in_page, titles_in_page)
self.extract_videos_from_page_impl(
r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page,
ids_in_page, titles_in_page)
return zip(ids_in_page, titles_in_page)
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
ids = []
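# video ids are always 11 characters long, so the seed video id is the last 11 characters of the mix playlist id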
last_id = playlist_id[-11:]
for n in itertools.count(1):
url = 'https://www.youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
new_ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
# Fetch new pages until all the videos are repeated; it seems that
# there are always 51 unique videos.
new_ids = [_id for _id in new_ids if _id not in ids]
if not new_ids:
break
ids.extend(new_ids)
last_id = ids[-1]
url_results = self._ids_to_results(ids)
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title')
or search_title('title long-title')
or search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604)
for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
if mobj:
reason = mobj.group('reason')
message = 'This playlist %s' % reason
if 'private' in reason:
message += ', use --username or --netrc to access it'
message += '.'
raise ExtractorError(message, expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title', default=None)
_UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
uploader = self._html_search_regex(
r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
page, 'uploader', default=None)
mobj = re.search(
r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
page)
if mobj:
uploader_id = mobj.group('uploader_id')
uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
else:
uploader_id = uploader_url = None
has_videos = True
if not playlist_title:
try:
# Some playlist URLs don't actually serve a playlist (e.g.
# https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
next(self._entries(page, playlist_id))
except StopIteration:
has_videos = False
playlist = self.playlist_result(
self._entries(page, playlist_id), playlist_id, playlist_title)
playlist.update({
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
})
return has_videos, playlist
def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = query_dict.get('v', [None])[0] or self._search_regex(
r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
'video id', default=None)
if video_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
return video_id, None
return None, None
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
video_id, video = self._check_download_just_video(url, playlist_id)
if video:
return video
if playlist_id.startswith(('RD', 'UL', 'PU')):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
has_videos, playlist = self._extract_playlist(playlist_id)
if has_videos or not video_id:
return playlist
# Some playlist URLs don't actually serve a playlist (see
# https://github.com/ytdl-org/youtube-dl/issues/10537).
# Fallback to plain video extraction if there is a video id
# along with playlist id.
return self.url_result(video_id, 'Youtube', video_id=video_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
'uploader': 'Deus Ex',
'uploader_id': 'DeusExOfficial',
},
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
else super(YoutubeChannelIE, cls).suitable(url))
def _build_template_url(self, url, channel_id):
return self._TEMPLATE_URL % channel_id
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._build_template_url(url, channel_id)
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Work around this by extracting as a playlist if we managed to obtain the channel playlist URL,
# otherwise fall back on channel-by-page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
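# a channel id of the form UCxxxx has a corresponding auto-generated 'uploads' playlist with id UUxxxx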
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page;
# the ajax pages can't be used as they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
'title': 'Uploads from The Linux Foundation',
'uploader': 'The Linux Foundation',
'uploader_id': 'TheLinuxFoundation',
}
}, {
# Only available via https://www.youtube.com/c/12minuteathlete/videos
# but not https://www.youtube.com/user/12minuteathlete/videos
'url': 'https://www.youtube.com/c/12minuteathlete/videos',
'playlist_mincount': 249,
'info_dict': {
'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
'title': 'Uploads from 12 Minute Athlete',
'uploader': '12 Minute Athlete',
'uploader_id': 'the12minuteathlete',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/gametrailers',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/gametrailers',
'only_matching': True,
}, {
# This channel is not available, geo restricted to JP
'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
# Don't return True if the url can be extracted with another youtube
# extractor; the regex is too permissive and it would match.
other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_yt_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _build_template_url(self, url, channel_id):
mobj = re.match(self._VALID_URL, url)
return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user/channel playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel|c)/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'ThirstForScience',
},
}, {
# with "Load more" button
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}, {
'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
'playlist_mincount': 17,
'info_dict': {
'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
'title': 'Chem Player',
},
'skip': 'Blocked',
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}]
class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
url_query = {
'search_query': query.encode('utf-8'),
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
for pagenum in itertools.count(1):
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page',
query={'spf': 'navigate'})
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = list(self._process_page(html_content))
videos += new_videos
if not new_videos or len(videos) > limit:
break
next_link = self._html_search_regex(
r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
html_content, 'next link', default=None)
if next_link is None:
break
result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion serves
# the same videos in (sometimes) slightly different order, so we'll check
# for uniqueness and break when a portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape,
headers=self._YOUTUBE_CLIENT_HEADERS)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}]
def _real_extract(self, url):
_, video = self._check_download_just_video(url, 'WL')
if video:
return video
_, playlist = self._extract_playlist('WL')
return playlist
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
Orochimarufan/youtube-dl
|
youtube_dl/extractor/youtube.py
|
Python
|
unlicense
| 155,314
|
[
"ADF"
] |
66cfa919d8c3ce0cf1dea174d0dbe296bf03f6e620715a1d45ab478ab994e7dc
|
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Test the different representations of Genes.
This exercises the Motif, Schema and Signature methods of representing
genes, as well as generic Pattern methods.
"""
# standard library
from __future__ import print_function
import os
import unittest
# Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
# stuff we are testing
from Bio.NeuralNetwork.Gene import Schema
from Bio.NeuralNetwork.Gene import Motif
from Bio.NeuralNetwork.Gene import Signature
from Bio.NeuralNetwork.Gene import Pattern
VERBOSE = 0
# --- Tests for Pattern
class PatternIOTest(unittest.TestCase):
"""Tests for reading and writing patterns to a file.
"""
def setUp(self):
self.alphabet = IUPAC.ambiguous_dna
self.test_file = os.path.join("NeuralNetwork", "patternio.txt")
# Remove any existing copy of the output file,
if os.path.isfile(self.test_file):
os.remove(self.test_file)
self.pattern_io = Pattern.PatternIO(self.alphabet)
def tearDown(self):
# Clean up by removing our output file,
if os.path.isfile(self.test_file):
os.remove(self.test_file)
def test_motif(self):
"""Reading and writing motifs to a file
"""
# write to a file
motifs = ["GAC", "AAA", "TTT", "GGG"]
output_handle = open(self.test_file, "w")
self.pattern_io.write(motifs, output_handle)
output_handle.close()
# read 'em back
input_handle = open(self.test_file, "r")
read_motifs = self.pattern_io.read(input_handle)
input_handle.close()
self.assertEqual(read_motifs, motifs,
"Failed to get back expected motifs %s, got %s"
% (motifs, read_motifs))
# write seqs
seq_motifs = []
for motif in motifs:
seq_motifs.append(Seq(motif, self.alphabet))
output_handle = open(self.test_file, "w")
self.pattern_io.write_seq(seq_motifs, output_handle)
output_handle.close()
# read the seqs back
input_handle = open(self.test_file, "r")
read_motifs = self.pattern_io.read(input_handle)
input_handle.close()
self.assertEqual(read_motifs, motifs,
"Failed to get back expected motifs %s from seqs, got %s"
% (motifs, read_motifs))
def test_schema(self):
"""Reading and writing schemas to a file.
"""
schemas = ["GTR", "GAC"]
# write out the schemas
output_handle = open(self.test_file, "w")
self.pattern_io.write(schemas, output_handle)
output_handle.close()
# read back the schemas
input_handle = open(self.test_file, "r")
read_schemas = self.pattern_io.read(input_handle)
input_handle.close()
self.assertEqual(schemas, read_schemas,
"Read incorrect schemas %s, expected %s."
% (read_schemas, schemas))
# --- make sure inappropriate alphabets are reported
schemas = ["GTR", "G*C"] # '*' not in the unambigous alphabet
output_handle = open(self.test_file, "w")
self.pattern_io.write(schemas, output_handle)
output_handle.close()
input_handle = open(self.test_file, "r")
        try:
            read_schemas = self.pattern_io.read(input_handle)
        except ValueError:
            pass # expected behavior: the invalid character is reported
        else:
            raise AssertionError("Did not report error on bad alphabet.")
input_handle.close()
def test_signature(self):
"""Reading and writing signatures to a file.
"""
signatures = [("GAC", "GAC"), ("AAA", "TTT")]
output_handle = open(self.test_file, "w")
self.pattern_io.write(signatures, output_handle)
output_handle.close()
input_handle = open(self.test_file, "r")
read_sigs = self.pattern_io.read(input_handle)
input_handle.close()
self.assertEqual(read_sigs, signatures,
"Got back unexpected signatures %s, wanted %s"
% (read_sigs, signatures))
class PatternRepositoryTest(unittest.TestCase):
"""Tests for retrieving info from a repository of patterns.
"""
def setUp(self):
self.motifs = {"GATC": 30,
"GGGG": 10,
"GTAG": 0,
"AAAA": -10,
"ATAT": -20}
self.repository = Pattern.PatternRepository(self.motifs)
def test_get_all(self):
"""Retrieve all patterns from a repository.
"""
all_motifs = self.repository.get_all()
self.assertEqual(all_motifs,
["GATC", "GGGG", "GTAG", "AAAA", "ATAT"],
"Unexpected motifs returned %s" % all_motifs)
def test_get_random(self):
"""Retrieve random patterns from the repository.
"""
for num_patterns in range(5):
patterns = self.repository.get_random(num_patterns)
self.assertEqual(len(patterns), num_patterns,
"Got unexpected number of patterns %s, expected %s"
% (len(patterns), num_patterns))
for pattern in patterns:
self.assertTrue(pattern in list(self.motifs.keys()),
"Got unexpected pattern %s" % pattern)
def test_get_top_percentage(self):
"""Retrieve the top percentge of patterns from the repository.
"""
for num_patterns, percentage in ((1, 0.2), (2, .4), (5, 1.0)):
patterns = self.repository.get_top_percentage(percentage)
self.assertEqual(len(patterns), num_patterns,
"Got unexpected number of patterns %s, expected %s"
% (len(patterns), num_patterns))
for pattern in patterns:
self.assertTrue(pattern in list(self.motifs.keys()),
"Got unexpected pattern %s" % pattern)
def test_get_top(self):
"""Retrieve a certain number of the top patterns.
"""
for num_patterns in range(5):
patterns = self.repository.get_top(num_patterns)
self.assertEqual(len(patterns), num_patterns,
"Got unexpected number of patterns %s, expected %s"
% (len(patterns), num_patterns))
for pattern in patterns:
self.assertTrue(pattern in list(self.motifs.keys()),
"Got unexpected pattern %s" % pattern)
def test_get_differing(self):
"""Retrieve patterns from both sides of the list (top and bottom).
"""
patterns = self.repository.get_differing(2, 2)
self.assertEqual(patterns,
["GATC", "GGGG", "AAAA", "ATAT"],
"Got unexpected patterns %s" % patterns)
def test_remove_polyA(self):
"""Test the ability to remove A rich patterns from the repository.
"""
patterns = self.repository.get_all()
self.assertEqual(len(patterns), 5,
"Unexpected starting: %s" % patterns)
self.repository.remove_polyA()
patterns = self.repository.get_all()
self.assertEqual(len(patterns), 3,
"Unexpected ending: %s" % patterns)
self.assertEqual(patterns,
["GATC", "GGGG", "GTAG"],
"Unexpected patterns: %s" % patterns)
def test_count(self):
"""Retrieve counts for particular patterns in the repository.
"""
num_times = self.repository.count("GGGG")
self.assertEqual(num_times, 10,
"Did not count item in the respository: %s" % num_times)
num_times = self.repository.count("NOT_IN_THERE")
self.assertEqual(num_times, 0,
"Counted items not in repository: %s" % num_times)
# --- Tests for motifs
class MotifFinderTest(unittest.TestCase):
"""Tests for finding motifs from sequences.
"""
def setUp(self):
test_file = os.path.join('NeuralNetwork', 'enolase.fasta')
diff_file = os.path.join('NeuralNetwork', 'repeat.fasta')
self.test_records = []
self.diff_records = []
# load the records
for file, records in ((test_file, self.test_records),
(diff_file, self.diff_records)):
handle = open(file, 'r')
iterator = SeqIO.parse(handle, "fasta",
alphabet=IUPAC.unambiguous_dna)
while True:
try:
seq_record = next(iterator)
except StopIteration:
break
if seq_record is None:
break
records.append(seq_record)
handle.close()
self.motif_finder = Motif.MotifFinder()
def test_find(self):
"""Find all motifs in a set of sequences.
"""
motif_repository = self.motif_finder.find(self.test_records, 8)
top_motif = motif_repository.get_top(1)
self.assertEqual(top_motif[0], 'TTGGAAAG',
"Got unexpected motif %s" % top_motif[0])
def test_find_differences(self):
"""Find the difference in motif counts between two sets of sequences.
"""
motif_repository = \
self.motif_finder.find_differences(self.test_records,
self.diff_records, 8)
top, bottom = motif_repository.get_differing(1, 1)
self.assertEqual(top, "TTGGAAAG",
"Got unexpected top motif %s" % top)
self.assertEqual(bottom, "AATGGCAT",
"Got unexpected bottom motif %s" % bottom)
class MotifCoderTest(unittest.TestCase):
"""Test the ability to encode sequences as a set of motifs.
"""
def setUp(self):
motifs = ["GAG", "GAT", "GCC", "ATA"]
self.match_strings = (("GATCGCC", [0.0, 1.0, 1.0, 0.0]),
("GATGATCGAGCC", [.5, 1.0, .5, 0.0]))
self.coder = Motif.MotifCoder(motifs)
def test_representation(self):
"""Convert a sequence into its motif representation.
"""
for match_string, expected in self.match_strings:
seq_to_code = Seq(match_string, IUPAC.unambiguous_dna)
matches = self.coder.representation(seq_to_code)
self.assertEqual(matches, expected,
"Did not match representation, expected %s, got %s"
% (expected, matches))
# --- Tests for schemas
class SchemaTest(unittest.TestCase):
"""Matching ambiguous motifs with multiple ambiguity characters.
"""
def setUp(self):
ambiguity_chars = {"G": "G",
"A": "A",
"T": "T",
"C": "C",
"R": "AG",
"*": "AGTC"}
self.motif_coder = Schema.Schema(ambiguity_chars)
self.match_string = "GATAG"
self.match_info = [("GA", ["GA"]),
("GATAG", ["GATAG"]),
("GA*AG", ["GATAG"]),
("GATRG", ["GATAG"]),
("*A", ["GA", "TA"])]
def test_find_matches(self):
"""Find all matches in a sequence.
"""
for motif, expected in self.match_info:
found_matches = self.motif_coder.find_matches(motif,
self.match_string)
self.assertEqual(found_matches, expected,
"Expected %s, got %s"
% (expected, found_matches))
def test_num_matches(self):
"""Find how many matches are present in a sequence.
"""
for motif, expected in self.match_info:
num_matches = self.motif_coder.num_matches(motif,
self.match_string)
self.assertEqual(num_matches, len(expected),
"Expected %s, got %s"
                             % (len(expected), num_matches))
def test_find_ambiguous(self):
"""Find the positions of ambiguous items in a sequence.
"""
ambig_info = (("GATC", []),
("G***", [1, 2, 3]),
("GART", [2]),
("*R*R", [0, 1, 2, 3]))
for motif, expected in ambig_info:
found_positions = self.motif_coder.find_ambiguous(motif)
self.assertEqual(found_positions, expected,
"Expected %s, got %s for %s"
% (expected, found_positions, motif))
def test_num_ambiguous(self):
"""Find the number of ambiguous items in a sequence.
"""
ambig_info = (("GATC", 0),
("G***", 3),
("GART", 1),
("*R*R", 4))
for motif, expected in ambig_info:
found_num = self.motif_coder.num_ambiguous(motif)
self.assertEqual(found_num, expected,
"Expected %s, got %s for %s"
% (expected, found_num, motif))
def test_motif_cache(self):
"""Make sure motif compiled regular expressions are cached properly.
"""
test_motif = "GATC"
self.motif_coder.find_matches(test_motif, "GATCGATC")
self.assertTrue(test_motif in self.motif_coder._motif_cache,
"Did not find motif cached properly.")
# make sure we don't bomb out if we use the same motif twice
self.motif_coder.find_matches(test_motif, "GATCGATC")
def test_all_unambiguous(self):
"""Return all unambiguous characters that can be in a motif.
"""
found_unambig = self.motif_coder.all_unambiguous()
expected = ["A", "C", "G", "T"]
self.assertEqual(found_unambig, expected,
"Got %s, expected %s" % (found_unambig, expected))
class SchemaFinderTest(unittest.TestCase):
"""Test finding schemas from a set of sequences.
"""
def setUp(self):
test_file = os.path.join('NeuralNetwork', 'enolase.fasta')
diff_file = os.path.join('NeuralNetwork', 'repeat.fasta')
self.test_records = []
self.diff_records = []
# load the records
for file, records in ((test_file, self.test_records),
(diff_file, self.diff_records)):
handle = open(file, 'r')
records.extend(SeqIO.parse(handle, "fasta",
alphabet=IUPAC.unambiguous_dna))
handle.close()
self.num_schemas = 2
schema_ga = Schema.GeneticAlgorithmFinder()
schema_ga.min_generations = 1
self.finder = Schema.SchemaFinder(num_schemas=self.num_schemas,
schema_finder=schema_ga)
def test_find(self):
"""Find schemas from sequence inputs.
"""
# this test takes too long
if VERBOSE:
repository = self.finder.find(self.test_records + self.diff_records)
schemas = repository.get_all()
self.assertTrue(len(schemas) >= self.num_schemas,
"Got too few schemas.")
def test_find_differences(self):
"""Find schemas that differentiate between two sets of sequences.
"""
# this test takes too long
if VERBOSE:
repository = self.finder.find_differences(self.test_records,
self.diff_records)
schemas = repository.get_all()
self.assertTrue(len(schemas) >= self.num_schemas,
"Got too few schemas.")
class SchemaCoderTest(unittest.TestCase):
"""Test encoding sequences as a grouping of motifs.
"""
def setUp(self):
ambiguity_chars = {"G": "G",
"A": "A",
"T": "T",
"C": "C",
"R": "AG",
"*": "AGTC"}
motif_representation = Schema.Schema(ambiguity_chars)
motifs = ("GA", "GATAG", "GA*AG", "GATRG", "*A")
self.motif_coder = Schema.SchemaCoder(motifs,
motif_representation)
self.match_strings = [("GATAG", [.5, .5, .5, .5, 1.0]),
("GAGAGATA", [float(3) / float(4), 0,
float(1) / float(4), 0,
1])]
def test_representation(self):
"""Convert a string into a representation of motifs.
"""
for match_string, expected in self.match_strings:
match_seq = Seq(match_string, IUPAC.unambiguous_dna)
found_rep = self.motif_coder.representation(match_seq)
self.assertEqual(found_rep, expected,
"Got %s, expected %s" % (found_rep, expected))
class SchemaMatchingTest(unittest.TestCase):
"""Matching schema to strings works correctly.
"""
def shortDescription(self):
return "%s:%s" % (self.__class__.__name__, self.__doc__)
def runTest(self):
match = Schema.matches_schema("GATC", "AAAAA")
self.assertEqual(match, 0,
"Expected no match because of length differences")
match = Schema.matches_schema("GATC", "GAT*")
self.assertEqual(match, 1,
"Expected match")
match = Schema.matches_schema("GATC", "GATC")
self.assertEqual(match, 1,
"Expected match")
match = Schema.matches_schema("GATC", "C*TC")
self.assertEqual(match, 0,
"Expected no match because of char mismatch.")
match = Schema.matches_schema("G*TC", "*TTC")
self.assertEqual(match, 1,
"Expected match because of ambiguity.")
class SchemaFactoryTest(unittest.TestCase):
"""Test the SchemaFactory for generating Schemas.
"""
def __init__(self, method):
unittest.TestCase.__init__(self, method)
# a cached schema bank, so we don't have to load it multiple times
self.schema_bank = None
def setUp(self):
self.factory = Schema.SchemaFactory()
self.test_file = os.path.join(os.getcwd(), "NeuralNetwork", "enolase.fasta")
ambiguity_chars = {"G": "G",
"A": "A",
"T": "T",
"C": "C",
"R": "AG",
"*": "AGTC"}
self.schema = Schema.Schema(ambiguity_chars)
def test_easy_from_motifs(self):
"""Generating schema from a simple list of motifs.
"""
motifs = {"GATCGAA": 20,
"GATCGAT": 15,
"GATTGAC": 25,
"TTTTTTT": 10}
motif_bank = Pattern.PatternRepository(motifs)
schema_bank = self.factory.from_motifs(motif_bank, .5, 2)
if VERBOSE:
print("\nSchemas:")
for schema in schema_bank.get_all():
print("%s: %s" % (schema, schema_bank.count(schema)))
def test_hard_from_motifs(self):
"""Generating schema from a real life set of motifs.
"""
schema_bank = self._load_schema_repository()
if VERBOSE:
print("\nSchemas:")
for schema in schema_bank.get_top(5):
print("%s: %s" % (schema, schema_bank.count(schema)))
def _load_schema_repository(self):
"""Helper function to load a schema repository from a file.
This also caches a schema bank, to prevent having to do this
time consuming operation multiple times.
"""
# if we already have a cached repository, return it
if self.schema_bank is not None:
return self.schema_bank
# otherwise, we'll read in a new schema bank
        # read in all of the motif records
motif_handle = open(self.test_file, 'r')
seq_records = list(SeqIO.parse(motif_handle, "fasta",
alphabet=IUPAC.unambiguous_dna))
motif_handle.close()
# find motifs from the file
motif_finder = Motif.MotifFinder()
motif_size = 9
motif_bank = motif_finder.find(seq_records, motif_size)
schema_bank = self.factory.from_motifs(motif_bank, .1, 2)
# cache the repository
self.schema_bank = schema_bank
return schema_bank
def test_schema_representation(self):
"""Convert sequences into schema representations.
"""
# get a set of schemas we want to code the sequence in
schema_bank = self._load_schema_repository()
top_schemas = schema_bank.get_top(25)
schema_coder = Schema.SchemaCoder(top_schemas, self.schema)
# get the sequences one at a time, and encode them
fasta_handle = open(self.test_file, 'r')
for seq_record in SeqIO.parse(fasta_handle, "fasta",
alphabet=IUPAC.unambiguous_dna):
schema_values = schema_coder.representation(seq_record.seq)
if VERBOSE:
print("Schema values: %s" % schema_values)
fasta_handle.close()
# --- Tests for Signatures
class SignatureFinderTest(unittest.TestCase):
"""Test the ability to find signatures in a set of sequences.
"""
def setUp(self):
test_file = os.path.join('NeuralNetwork', 'enolase.fasta')
self.test_records = []
# load the records
handle = open(test_file, 'r')
self.test_records = list(SeqIO.parse(handle, "fasta",
alphabet=IUPAC.unambiguous_dna))
handle.close()
self.sig_finder = Signature.SignatureFinder()
def test_find(self):
"""Find signatures from sequence inputs.
"""
repository = self.sig_finder.find(self.test_records, 6, 9)
top_sig = repository.get_top(1)
self.assertEqual(top_sig[0], ('TTGGAA', 'TGGAAA'))
class SignatureCoderTest(unittest.TestCase):
"""Test the ability to encode sequences as a set of signatures.
"""
def setUp(self):
signatures = [("GAC", "GAC"), ("AAA", "TTT"), ("CAA", "TTG")]
self.coder = Signature.SignatureCoder(signatures, 9)
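        # Judging by the cases below, the second argument is the largest allowed
        # separation between the two halves of a signature: "GAC" followed by
        # nine bases and another "GAC" still scores, ten intervening bases do not.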
self.test_seqs = [("GACAAAGACTTT", [1.0, 1.0, 0.0]),
("CAAAGACGACTTTAAATTT", [0.5, 1.0, 0.0]),
("AAATTTAAAGACTTTGAC", [1.0 / 3.0, 1.0, 0.0]),
("GACGAC", [1.0, 0.0, 0.0]),
("GACAAAAAAAAAGAC", [1.0, 0.0, 0.0]),
("GACAAAAAAAAAAGAC", [0.0, 0.0, 0.0])]
def test_representation(self):
"""Convert a sequence into its signature representation.
"""
for seq_string, expected in self.test_seqs:
test_seq = Seq(seq_string, IUPAC.unambiguous_dna)
predicted = self.coder.representation(test_seq)
self.assertEqual(predicted, expected,
"Non-expected representation %s for %s, wanted %s"
% (predicted, seq_string, expected))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_NNGene.py
|
Python
|
gpl-2.0
| 24,146
|
[
"Biopython"
] |
474114214bfe5feee44a7ee545a3401c9230e143f06db0f056dd38447220235a
|
# Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Utilities to work with realms_pb2 messages."""
from .proto import realms_pb2
# Currently acceptable version of Realms API. See api_version in realms.proto.
API_VERSION = 1
def merge(permissions, realms, out=None):
"""Merges multiple realms_pb2.Realms into one, fills in `api_version`.
The given list of permissions will become authoritative: if some realm uses
a permission not in the list, it will be silently dropped from the bindings.
This can potentially happen due to the asynchronous nature of realms config
updates (e.g. a role change that deletes some permissions can be committed
into the AuthDB before realms_pb2.Realms are reevaluated). Eventually the
state should converge to be 100% consistent.
Args:
permissions: a sorted list of realms_pb2.Permission with all permissions.
realms: a dict {project ID -> realms_pb2.Realms with its realms} to merge.
out: a realms_pb2.Realms to write the result into (will not be cleared!).
Returns:
`out` or a new realms_pb2.Realms if out was None.
"""
out = out or realms_pb2.Realms()
out.api_version = API_VERSION
out.permissions.extend(permissions)
# Sorted list of pairs (project_id, realms_pb2.Realms), we'll visit it twice.
sorted_realms = sorted(realms.items())
# Merge the set of all conditions across all projects.
conds = ConditionsSet()
for _, proj_realms in sorted_realms:
for cond in proj_realms.conditions:
conds.add(cond)
out.conditions.extend(conds.flat)
# Permission name => its index in the merged realms_pb2.Realms.
perm_index = {p.name: idx for idx, p in enumerate(permissions)}
# Visit in order of project IDs.
for proj_id, proj_realms in sorted_realms:
# Calculate a mapping from the permission index in `proj_realms` to
# the index in the final merged proto (or None if undefined).
old_to_new = [perm_index.get(p.name) for p in proj_realms.permissions]
# Visit all bindings in all realms.
for old_realm in proj_realms.realms:
# Relabel permission and condition indexes, drop empty bindings that may
# appear due to unknown permissions.
bindings = []
for b in old_realm.bindings:
perms = sorted(
old_to_new[idx]
for idx in b.permissions
if old_to_new[idx] is not None
)
if perms:
bindings.append((
perms,
conds.relabel(proj_realms.conditions, b.conditions),
b.principals,
))
# Add the relabeled realm to the output.
assert old_realm.name.startswith(proj_id+':'), old_realm.name
new_realm = out.realms.add()
new_realm.name = old_realm.name
new_realm.bindings.extend(
realms_pb2.Binding(
permissions=perms,
principals=principals,
conditions=conds,
)
for perms, conds, principals in sorted(bindings)
)
if old_realm.HasField('data'):
new_realm.data.CopyFrom(old_realm.data)
return out
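# --- Illustrative usage sketch (not part of the original module) ---
# The project id, permission and principal names below are made up purely for
# illustration; the field access mirrors what merge() itself does above.
#
#   perm = realms_pb2.Permission(name='luci.example.permission')
#   proj = realms_pb2.Realms()
#   proj.permissions.add(name='luci.example.permission')
#   realm = proj.realms.add(name='proj:@root')
#   realm.bindings.add(permissions=[0], principals=['group:example-group'])
#
#   merged = merge([perm], {'proj': proj})
#   # merged.api_version == API_VERSION, merged.realms[0].name == 'proj:@root';
#   # bindings that referenced permissions absent from [perm] would be dropped.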
class ConditionsSet(object):
"""Dedups identical conditions, maps them to integer indexes.
Most often identical conditions appear from implicit root bindings that are
similar across all projects.
Assumes all incoming realms_pb2.Condition are immutable and already
normalized. Retains the order in which they were added.
"""
def __init__(self):
self.flat = [] # the final list of dedupped realms_pb2.Condition
self._mapping = {} # serialized realms_pb2.Condition => its index
self._id_mapping = {} # id(realms_pb2.Condition) => its index
self._retain = [] # list of all conditions ever passed to `add`
def add(self, cond):
idx = self._mapping.setdefault(cond.SerializeToString(), len(self.flat))
if idx == len(self.flat):
self.flat.append(cond)
self._id_mapping[id(cond)] = idx
self._retain.append(cond) # keep the pointer alive to pin id(cond)
def relabel(self, conds, indexes):
return sorted(self._id_mapping[id(conds[idx])] for idx in indexes)
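# --- Illustrative sketch of the deduplication (not part of the original module) ---
# Assuming c1 and c2 are realms_pb2.Condition messages that serialize to the
# same bytes, and c3 is a different one:
#
#   cs = ConditionsSet()
#   for cond in (c1, c2, c3):
#       cs.add(cond)
#   # len(cs.flat) == 2: c1/c2 collapse to index 0, c3 gets index 1.
#   # cs.relabel([c1, c3], [1, 0]) == [0, 1]: per-project indexes are mapped
#   # into the merged list and returned sorted.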
|
luci/luci-py
|
appengine/components/components/auth/realms.py
|
Python
|
apache-2.0
| 4,222
|
[
"VisIt"
] |
f93289812b91ba06268c2a2e952d5d8eb4c4a4b083125525ad661f3ddca391b9
|
""" :mod: RequestTaskTests
=======================
.. module: RequestTaskTests
:synopsis: test cases for RequestTask class
test cases for RequestTask class
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id $"
# #
# @file RequestTaskTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/27 15:59:40
# @brief Definition of RequestTaskTests class.
# # imports
import unittest
import importlib
from mock import Mock, MagicMock
# # SUT
from DIRAC.RequestManagementSystem.private.RequestTask import RequestTask
# # request client
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
ReqClient = Mock( spec = ReqClient )
# # from DIRAC
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
########################################################################
class RequestTaskTests( unittest.TestCase ):
"""
.. class:: RequestTaskTests
"""
def setUp( self ):
""" test case set up """
self.handlersDict = { "ForwardDISET" : "DIRAC/RequestManagementSystem/private/ForwardDISET" }
self.req = Request()
self.req.RequestName = "foobarbaz"
self.req.OwnerGroup = "lhcb_user"
self.req.OwnerDN = "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=chaen/CN=705305/CN=Christophe Haen"
self.op = Operation( { "Type": "ForwardDISET", "Arguments" : "tts10:helloWorldee" } )
self.req.addOperation( self.op )
self.task = None
self.mockRC = MagicMock()
self.mockObjectOps = MagicMock()
self.mockObjectOps.getSections.return_value = {'OK': True,
'Value': ['DataProcessing',
'DataManager']}
self.mockObjectOps.getOptionsDict.return_value = {'OK': True,
'Value': {'Group': 'lhcb_user', 'User': 'fstagni'}}
self.mockOps = MagicMock()
self.mockOps.return_value = self.mockObjectOps
def tearDown( self ):
""" test case tear down """
del self.req
del self.op
del self.task
def testAPI( self ):
""" test API
"""
rt = importlib.import_module( 'DIRAC.RequestManagementSystem.private.RequestTask' )
rt.gMonitor = MagicMock()
rt.Operations = self.mockOps
rt.CS = MagicMock()
self.task = RequestTask( self.req.toJSON()["Value"], self.handlersDict, 'csPath', 'RequestManagement/RequestExecutingAgent',
requestClient = self.mockRC )
self.task.requestClient = Mock( return_value = Mock( spec = ReqClient ) )
self.task.requestClient().updateRequest = Mock()
self.task.requestClient().updateRequest.return_value = { "OK" : True, "Value" : None }
ret = self.task()
self.assertEqual( ret["OK"], True , "call failed" )
ret = self.task.setupProxy()
print(ret)
# # tests execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
requestTaskTests = testLoader.loadTestsFromTestCase( RequestTaskTests )
suite = unittest.TestSuite( [ requestTaskTests ] )
unittest.TextTestRunner( verbosity = 3 ).run( suite )
|
yujikato/DIRAC
|
src/DIRAC/RequestManagementSystem/private/test/RequestTaskTests.py
|
Python
|
gpl-3.0
| 3,237
|
[
"DIRAC"
] |
141eb7a66f38efe2c14199b58ebafc8261f4b9bb2fe2fec603d6b27e5edba9d4
|
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import subprocess
import re
import logging
logging.basicConfig(filename='/tmp/rts2saf_log/unittest.log', level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
from rts2_environment import RTS2Environment
# sequence matters
def suite_no_connection():
suite = unittest.TestSuite()
suite.addTest(TestAnalysis('test_rts2saf_analyze'))
suite.addTest(TestAnalysis('test_rts2saf_analyze_assoc'))
return suite
def suite_with_connection():
suite = unittest.TestSuite()
suite.addTest(TestRTS2Environment('test_rts2saf_fwhm'))
suite.addTest(TestRTS2Environment('test_rts2saf_imgp'))
suite.addTest(TestRTS2Environment('test_rts2saf_focus'))
return suite
#@unittest.skip('class not yet implemented')
class TestAnalysis(unittest.TestCase):
def tearDown(self):
pass
def setUp(self):
pass
#@unittest.skip('feature not yet implemented')
def test_rts2saf_analyze(self):
logger.info('== {} =='.format(self._testMethodName))
# ...
# analyze: storing plot file: ../samples/UNK-2013-11-23T09:24:58.png
# analyze: FWHM FOC_DEF: 5437 : fitted minimum position, 2.2px FWHM, NoTemp ambient temperature
# ResultMeans: FOC_DEF: 5363 : weighted mean derived from sextracted objects
# ResultMeans: FOC_DEF: 5377 : weighted mean derived from FWHM
# ResultMeans: FOC_DEF: 5367 : weighted mean derived from std(FWHM)
# ResultMeans: FOC_DEF: 5382 : weighted mean derived from CombinedFWHM
# analyzeRuns: ('NODATE', 'NOFTW') :: NOFT 14
# rts2saf_analyze: no ambient temperature available in FITS files, no model fitted
m = re.compile('.*?(FOC_DEF:) ([0-9]{4,4}).+?([0-9\.]{3,3})px')
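        # For instance, on the sample output line quoted above,
        # "analyze: FWHM FOC_DEF: 5437 : fitted minimum position, 2.2px FWHM, ...",
        # the regex groups are ('FOC_DEF:', '5437', '2.2'), i.e. pos=5437, val=2.2.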
cmd=[ '../rts2saf_analyze.py', '--basepath', '../samples', '--conf', './rts2saf-bootes-2-autonomous.cfg', '--toconsole', '--logfile', 'unittest.log', '--topath', '/tmp/rts2saf_log' ]
proc = subprocess.Popen( cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdo, stde = proc.communicate()
lines = stdo.split('\n')
pos=0
val=0.
for ln in lines:
#print 'ln: ', ln
v = m.match(ln)
if v:
pos = int(v.group(2))
val = float(v.group(3))
break
frac= abs((float(pos) - 5437.)/5437.)
        self.assertAlmostEqual(frac, 0.01, places=1, msg='return value: {}, instead of {}'.format(pos, 5437))
self.assertAlmostEqual(val, 2.2, places=1, msg='return value: {}'.format(val))
self.assertEqual(stde, '', 'return value: {}'.format(repr(stde)))
#@unittest.skip('feature not yet implemented')
def test_rts2saf_analyze_assoc(self):
logger.info('== {} =='.format(self._testMethodName))
m = re.compile('.*?(FOC_DEF:) ([0-9]{4,4}).+?([0-9\.]{3,3})px')
cmd=[ '../rts2saf_analyze.py', '--associate', '--basepath', '../samples', '--conf', './rts2saf-bootes-2-autonomous.cfg', '--toconsole', '--logfile', 'unittest.log', '--topath', '/tmp/rts2saf_log' ]
proc = subprocess.Popen( cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdo, stde = proc.communicate()
lines = stdo.split('\n')
pos=0
val=0.
for ln in lines:
v = m.match(ln)
if v:
pos = int(v.group(2))
val = float(v.group(3))
break
frac= abs((float(pos) - 5435.)/5435.)
self.assertAlmostEqual(frac, 0.01, places=1, msg='return value: {}, instead of {}'.format(pos, 5435))
self.assertAlmostEqual(val, 2.1, places=1, msg='return value: {}'.format(val))
self.assertEqual(stde, '', 'return value: {}'.format(repr(stde)))
#@unittest.skip('class not yet implemented')
class TestRTS2Environment(RTS2Environment):
#@unittest.skip('feature not yet implemented')
def test_rts2saf_fwhm(self):
logger.info('== {} =='.format(self._testMethodName))
# ../rts2saf_fwhm.py --fitsFn ../samples/20071205025911-725-RA.fits --toc
# sextract: no FILTA name information found, ../samples/20071205025911-725-RA.fits
# sextract: no FILTB name information found, ../samples/20071205025911-725-RA.fits
# sextract: no FILTC name information found, ../samples/20071205025911-725-RA.fits
# rts2af_fwhm: no focus run queued, fwhm: 2.77 < 35.00 (thershold)
# rts2af_fwhm: DONE
m = re.compile('.*?(no focus run queued, fwhm:) (2.7)')
cmd=[ '../rts2saf_fwhm.py', '--fitsFn', '../samples/20071205025911-725-RA.fits', '--conf', './rts2saf-bootes-2-autonomous.cfg', '--toconsole', '--logfile', 'unittest.log', '--topath', '/tmp/rts2saf_log' ]
proc = subprocess.Popen( cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdo, stde = proc.communicate()
lines = stdo.split('\n')
val=0.
for ln in lines:
v = m.match(ln)
if v:
val = float(v.group(2))
break
self.assertAlmostEqual(val, 2.7, places=1, msg='return value: {}'.format(val))
self.assertEqual(stde, '', 'return value: {}'.format(repr(stde)))
#@unittest.skip('feature not yet implemented')
def test_rts2saf_imgp(self):
logger.info('== {} =='.format(self._testMethodName))
# rts2saf_imgp.py: starting
# rts2saf_imgp.py, rts2-astrometry.net: corrwerr 1 0.3624045465 39.3839441225 -0.0149071686 -0.0009854536 0.0115640672
# corrwerr 1 0.3624045465 39.3839441225 -0.0149071686 -0.0009854536 0.0115640672
# ...
m = re.compile('.*?(corrwerr).+? ([0-9.]+)')
cmd=[ '../rts2saf_imgp.py', '../imgp/20131011054939-621-RA.fits', '--conf', './rts2saf-bootes-2-autonomous.cfg', '--toconsole', '--logfile', 'unittest.log', '--topath', '/tmp/rts2saf_log' ]
proc = subprocess.Popen( cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdo, stde = proc.communicate()
lines = stdo.split('\n')
val=0.
for ln in lines:
v = m.match(ln)
if v:
val = float(v.group(2))
break
# if this test fails due to no match, it is likely that astrometry could not solve field.
self.assertAlmostEqual(val, 0.3624045465, places=1, msg='return value: {}'.format(val))
self.assertEqual(stde, '', 'return value: {}'.format(repr(stde)))
#@unittest.skip('feature not yet implemented')
def test_rts2saf_focus(self):
logger.info('== {} =='.format(self._testMethodName))
# analyze: FWHM FOC_DEF: 5426 : fitted minimum position, 2.2px FWHM, NoTemp ambient temperature
m = re.compile('.*?(FOC_DEF:) ([0-9]{4,4}).+? ([0-9\.]{3,3})px')
cmd=[ '../rts2saf_focus.py', '--dryfitsfiles', '../samples', '--conf', './rts2saf-bootes-2-autonomous.cfg', '--toconsole', '--logfile', 'unittest.log', '--topath', '/tmp/rts2saf_log' ]
proc = subprocess.Popen( cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdo, stde = proc.communicate()
lines = stdo.split('\n')
pos=0
val=float('nan')
for ln in lines:
#print 'ln: ', ln
v = m.match(ln)
if v:
pos = int(v.group(2))
val = float(v.group(3))
break
frac= abs((float(pos) - 5436.)/5436.)
        self.assertLessEqual(frac, 0.01, msg='return value: {}, instead of {}, fraction: {}'.format(pos, 5436, frac))
frac= abs((float(val) - 2.3)/2.3)
self.assertLessEqual(frac, 0.1, msg='return value: {}, instead of {}, fraction: {}'.format(val, 2.3, frac))
self.assertEqual(stde, '', 'return value: {}'.format(repr(stde)))
if __name__ == '__main__':
suiteNoConnection = suite_no_connection()
suiteWithConnection = suite_with_connection()
alltests = unittest.TestSuite([suiteNoConnection, suiteWithConnection])
unittest.TextTestRunner(verbosity=0).run(alltests)
|
RTS2/rts2
|
scripts/rts2saf/unittest/test_executables.py
|
Python
|
lgpl-3.0
| 9,130
|
[
"VisIt"
] |
c2139346573b6fa9985fec58d57165bc913f31f8ddb14c57ec14f80ae53eb3d5
|